//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file implements extra semantic analysis beyond what is enforced
//  by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
                                            Sema::FormatArgumentPassingKind B) {
  return (A << 8) | B;
}

/// Checks that a call expression's argument count is at least the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
                                 unsigned MinArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount >= MinArgCount)
    return false;

  return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
         << 0 /*function call*/ << MinArgCount << ArgCount
         << Call->getSourceRange();
}

/// Checks that a call expression's argument count is at most the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount <= MaxArgCount)
    return false;

  return S.Diag(Call->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
         << 0 /*function call*/ << MaxArgCount << ArgCount
         << Call->getSourceRange();
}

/// Checks that a call expression's argument count is in the desired range. This
/// is useful when doing custom type-checking on a variadic function. Returns
/// true on error.
static bool checkArgCountRange(Sema &S, CallExpr *Call, unsigned MinArgCount,
                               unsigned MaxArgCount) {
  return checkArgCountAtLeast(S, Call, MinArgCount) ||
         checkArgCountAtMost(S, Call, MaxArgCount);
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount == DesiredArgCount)
    return false;

  if (checkArgCountAtLeast(S, Call, DesiredArgCount))
    return true;
  assert(ArgCount > DesiredArgCount && "should have diagnosed this");

  // Highlight all the excess arguments.
  SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(),
                    Call->getArg(ArgCount - 1)->getEndLoc());

  return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << DesiredArgCount << ArgCount
         << Call->getArg(1)->getSourceRange();
}

static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) {
  if (Value->isTypeDependent())
    return false;

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, Ty, false);
  ExprResult Result =
      S.PerformCopyInitialization(Entity, SourceLocation(), Value);
  if (Result.isInvalid())
    return true;
  Value = Result.get();
  return false;
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isOrdinary()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

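// Illustrative example of what the checks above accept and reject (hypothetical
// user-level code, not part of this file):
//   int I = 42;
//   int J = __builtin_annotation(I, "my-note");   // OK: integer + ordinary literal
//   __builtin_annotation(1.0, "bad");             // rejected: first arg not integer
//   __builtin_annotation(I, L"wide");             // rejected: wide string literal
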
static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

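// Illustrative example (hypothetical user code): MSVC's __annotation intrinsic
// takes one or more wide string literals, e.g.
//   __annotation(L"category", L"text");   // accepted
//   __annotation("narrow");               // rejected by the loop above
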
/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

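// Illustrative example (hypothetical user code): __builtin_addressof behaves
// like std::addressof and requires a glvalue operand, e.g.
//   int X;
//   int *P = __builtin_addressof(X);       // OK, result type int*
//   int *Q = __builtin_addressof(X + 1);   // rejected: operand is a prvalue
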
/// Check that the argument to __builtin_function_start is a function.
static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
  if (Arg.isInvalid())
    return true;

  TheCall->setArg(0, Arg.get());
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
      Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));

  if (!FD) {
    S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
        << TheCall->getSourceRange();
    return true;
  }

  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
                                              TheCall->getBeginLoc());
}

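// Illustrative example (hypothetical user code): the argument must name a
// function whose address can be taken, e.g.
//   void Fn();
//   void *Entry = __builtin_function_start(Fn);   // accepted
//   void *Bad = __builtin_function_start(42);     // rejected: not a function
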
/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

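// Illustrative example (hypothetical user code) of the constraints enforced
// above: the alignment must be a power of two that is representable in the
// source type, e.g.
//   char *P = Get();
//   bool  A = __builtin_is_aligned(P, 16);   // OK, result type bool
//   char *U = __builtin_align_up(P, 16);     // OK, result type char*
//   char *B = __builtin_align_up(P, 3);      // rejected: not a power of two
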
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed bit-precise integer args larger than 128 bits to mul
  // function until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_bit_int_max_size)
               << 128;
    }
  }

  return false;
}

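// Illustrative example (hypothetical user code): the first two operands must
// be integers and the third a pointer to a non-const integer, e.g.
//   int A, B, Res;
//   if (__builtin_add_overflow(A, B, &Res)) { /* overflow happened */ }
//   const int C = 0;
//   __builtin_add_overflow(A, B, &C);   // rejected: pointee is const-qualified
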
struct BuiltinDumpStructGenerator {
  Sema &S;
  CallExpr *TheCall;
  SourceLocation Loc = TheCall->getBeginLoc();
  SmallVector<Expr *, 32> Actions;
  DiagnosticErrorTrap ErrorTracker;
  PrintingPolicy Policy;

  BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
      : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
        Policy(S.Context.getPrintingPolicy()) {
    Policy.AnonymousTagLocations = false;
  }

  Expr *makeOpaqueValueExpr(Expr *Inner) {
    auto *OVE = new (S.Context)
        OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
                        Inner->getObjectKind(), Inner);
    Actions.push_back(OVE);
    return OVE;
  }

  Expr *getStringLiteral(llvm::StringRef Str) {
    Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Str);
    // Wrap the literal in parentheses to attach a source location.
    return new (S.Context) ParenExpr(Loc, Loc, Lit);
  }

  bool callPrintFunction(llvm::StringRef Format,
                         llvm::ArrayRef<Expr *> Exprs = {}) {
    SmallVector<Expr *, 8> Args;
    assert(TheCall->getNumArgs() >= 2);
    Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
    Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
    Args.push_back(getStringLiteral(Format));
    Args.insert(Args.end(), Exprs.begin(), Exprs.end());

    // Register a note to explain why we're performing the call.
    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall;
    Ctx.PointOfInstantiation = Loc;
    Ctx.CallArgs = Args.data();
    Ctx.NumCallArgs = Args.size();
    S.pushCodeSynthesisContext(Ctx);

    ExprResult RealCall =
        S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1),
                        TheCall->getBeginLoc(), Args, TheCall->getRParenLoc());

    S.popCodeSynthesisContext();
    if (!RealCall.isInvalid())
      Actions.push_back(RealCall.get());
    // Bail out if we've hit any errors, even if we managed to build the
    // call. We don't want to produce more than one error.
    return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
  }

  Expr *getIndentString(unsigned Depth) {
    if (!Depth)
      return nullptr;

    llvm::SmallString<32> Indent;
    Indent.resize(Depth * Policy.Indentation, ' ');
    return getStringLiteral(Indent);
  }

  Expr *getTypeString(QualType T) {
    return getStringLiteral(T.getAsString(Policy));
  }

  bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
    llvm::raw_svector_ostream OS(Str);

    // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
    // than trying to print a single character.
    if (auto *BT = T->getAs<BuiltinType>()) {
      switch (BT->getKind()) {
      case BuiltinType::Bool:
        OS << "%d";
        return true;
      case BuiltinType::Char_U:
      case BuiltinType::UChar:
        OS << "%hhu";
        return true;
      case BuiltinType::Char_S:
      case BuiltinType::SChar:
        OS << "%hhd";
        return true;
      default:
        break;
      }
    }

    analyze_printf::PrintfSpecifier Specifier;
    if (Specifier.fixType(T, S.getLangOpts(), S.Context, /*IsObjCLiteral=*/false)) {
      // We were able to guess how to format this.
      if (Specifier.getConversionSpecifier().getKind() ==
          analyze_printf::PrintfConversionSpecifier::sArg) {
        // Wrap double-quotes around a '%s' specifier and limit its maximum
        // length. Ideally we'd also somehow escape special characters in the
        // contents but printf doesn't support that.
        // FIXME: '%s' formatting is not safe in general.
        OS << '"';
        Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
        Specifier.toString(OS);
        OS << '"';
        // FIXME: It would be nice to include a '...' if the string doesn't fit
        // in the length limit.
      } else {
        Specifier.toString(OS);
      }
      return true;
    }

    if (T->isPointerType()) {
      // Format all pointers with '%p'.
      OS << "%p";
      return true;
    }

    return false;
  }

  bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
    Expr *IndentLit = getIndentString(Depth);
    Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
    if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
                  : callPrintFunction("%s", {TypeLit}))
      return true;

    return dumpRecordValue(RD, E, IndentLit, Depth);
  }

  // Dump a record value. E should be a pointer or lvalue referring to an RD.
  bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
                       unsigned Depth) {
    // FIXME: Decide what to do if RD is a union. At least we should probably
    // turn off printing `const char*` members with `%s`, because that is very
    // likely to crash if that's not the active member. Whatever we decide, we
    // should document it.

    // Build an OpaqueValueExpr so we can refer to E more than once without
    // triggering re-evaluation.
    Expr *RecordArg = makeOpaqueValueExpr(E);
    bool RecordArgIsPtr = RecordArg->getType()->isPointerType();

    if (callPrintFunction(" {\n"))
      return true;

    // Dump each base class, regardless of whether they're aggregates.
    if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &Base : CXXRD->bases()) {
        QualType BaseType =
            RecordArgIsPtr ? S.Context.getPointerType(Base.getType())
                           : S.Context.getLValueReferenceType(Base.getType());
        ExprResult BasePtr = S.BuildCStyleCastExpr(
            Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc,
            RecordArg);
        if (BasePtr.isInvalid() ||
            dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(),
                              Depth + 1))
          return true;
      }
    }

    Expr *FieldIndentArg = getIndentString(Depth + 1);

    for (auto *D : RD->decls()) {
      auto *IFD = dyn_cast<IndirectFieldDecl>(D);
      auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
      if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion())
        continue;

      llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
      llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
                                           getTypeString(FD->getType()),
                                           getStringLiteral(FD->getName())};

      if (FD->isBitField()) {
        Format += ": %zu ";
        QualType SizeT = S.Context.getSizeType();
        llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
                             FD->getBitWidthValue(S.Context));
        Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
      }

      Format += "=";

      ExprResult Field =
          IFD ? S.BuildAnonymousStructUnionMemberReference(
                    CXXScopeSpec(), Loc, IFD,
                    DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
              : S.BuildFieldReferenceExpr(
                    RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
                    DeclAccessPair::make(FD, AS_public),
                    DeclarationNameInfo(FD->getDeclName(), Loc));
      if (Field.isInvalid())
        return true;

      auto *InnerRD = FD->getType()->getAsRecordDecl();
      auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
      if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
        // Recursively print the values of members of aggregate record type.
        if (callPrintFunction(Format, Args) ||
            dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
          return true;
      } else {
        Format += " ";
        if (appendFormatSpecifier(FD->getType(), Format)) {
          // We know how to print this field.
          Args.push_back(Field.get());
        } else {
          // We don't know how to print this field. Print out its address
          // with a format specifier that a smart tool will be able to
          // recognize and treat specially.
          Format += "*%p";
          ExprResult FieldAddr =
              S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
          if (FieldAddr.isInvalid())
            return true;
          Args.push_back(FieldAddr.get());
        }
        Format += "\n";
        if (callPrintFunction(Format, Args))
          return true;
      }
    }

    return RecordIndent ? callPrintFunction("%s}\n", RecordIndent)
                        : callPrintFunction("}\n");
  }

  Expr *buildWrapper() {
    auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
                                             PseudoObjectExpr::NoResult);
    TheCall->setType(Wrapper->getType());
    TheCall->setValueKind(Wrapper->getValueKind());
    return Wrapper;
  }
};

static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
  if (checkArgCountAtLeast(S, TheCall, 2))
    return ExprError();

  ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
  if (PtrArgResult.isInvalid())
    return ExprError();
  TheCall->setArg(0, PtrArgResult.get());

  // First argument should be a pointer to a struct.
  QualType PtrArgType = PtrArgResult.get()->getType();
  if (!PtrArgType->isPointerType() ||
      !PtrArgType->getPointeeType()->isRecordType()) {
    S.Diag(PtrArgResult.get()->getBeginLoc(),
           diag::err_expected_struct_pointer_argument)
        << 1 << TheCall->getDirectCallee() << PtrArgType;
    return ExprError();
  }
  const RecordDecl *RD = PtrArgType->getPointeeType()->getAsRecordDecl();

  // Second argument is a callable, but we can't fully validate it until we try
  // calling it.
  QualType FnArgType = TheCall->getArg(1)->getType();
  if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
      !FnArgType->isBlockPointerType() &&
      !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
    auto *BT = FnArgType->getAs<BuiltinType>();
    switch (BT ? BT->getKind() : BuiltinType::Void) {
    case BuiltinType::Dependent:
    case BuiltinType::Overload:
    case BuiltinType::BoundMember:
    case BuiltinType::PseudoObject:
    case BuiltinType::UnknownAny:
    case BuiltinType::BuiltinFn:
      // This might be a callable.
      break;

    default:
      S.Diag(TheCall->getArg(1)->getBeginLoc(),
             diag::err_expected_callable_argument)
          << 2 << TheCall->getDirectCallee() << FnArgType;
      return ExprError();
    }
  }

  BuiltinDumpStructGenerator Generator(S, TheCall);

  // Wrap parentheses around the given pointer. This is not necessary for
  // correct code generation, but it means that when we pretty-print the call
  // arguments in our diagnostics we will produce '(&s)->n' instead of the
  // incorrect '&s->n'.
  Expr *PtrArg = PtrArgResult.get();
  PtrArg = new (S.Context)
      ParenExpr(PtrArg->getBeginLoc(),
                S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
  if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
    return ExprError();

  return Generator.buildWrapper();
}

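// Illustrative example (hypothetical user code) of a call the function above
// accepts: the first argument is a pointer to a record and the second is a
// printf-style callable, e.g.
//   struct S { int X; float Y; } Val;
//   __builtin_dump_struct(&Val, printf);   // expands to a series of printf calls
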
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

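// Illustrative example (hypothetical user code): the first argument must be a
// plain (non-block, non-builtin) call and the second a pointer, e.g.
//   int Callee(int);
//   void *Chain = GetChain();
//   int R = __builtin_call_with_static_chain(Callee(1), Chain);   // accepted
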
class ScanfDiagnosticFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Accepts the argument index (relative to the first destination index) of the
  // argument whose size we want.
  using ComputeSizeFunction =
      llvm::function_ref<std::optional<llvm::APSInt>(unsigned)>;

  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size).
  using DiagnoseFunction =
      llvm::function_ref<void(unsigned, unsigned, unsigned)>;

  ComputeSizeFunction ComputeSizeArgument;
  DiagnoseFunction Diagnose;

public:
  ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
                               DiagnoseFunction Diagnose)
      : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *StartSpecifier,
                            unsigned specifierLen) override {
    if (!FS.consumesDataArgument())
      return true;

    unsigned NulByte = 0;
    switch ((FS.getConversionSpecifier().getKind())) {
    default:
      return true;
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::ScanListArg:
      NulByte = 1;
      break;
    case analyze_format_string::ConversionSpecifier::cArg:
      break;
    }

    analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
    if (FW.getHowSpecified() !=
        analyze_format_string::OptionalAmount::HowSpecified::Constant)
      return true;

    unsigned SourceSize = FW.getConstantAmount() + NulByte;

    std::optional<llvm::APSInt> DestSizeAPS =
        ComputeSizeArgument(FS.getArgIndex());
    if (!DestSizeAPS)
      return true;

    unsigned DestSize = DestSizeAPS->getZExtValue();

    if (DestSize < SourceSize)
      Diagnose(FS.getArgIndex(), DestSize, SourceSize);

    return true;
  }
};

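// Illustrative example of the overflow this handler flags (hypothetical user
// code): a constant field width plus the terminating nul exceeding the
// destination size, e.g.
//   char Buf[4];
//   sscanf(In, "%5s", Buf);   // needs 5 + 1 bytes, destination holds only 4
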
class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen,
                             const TargetInfo &) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // Just a '%'.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

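// Illustrative example of the lower-bound computation above (hypothetical user
// code): for the format "%d cars" the handler counts at least one digit, the
// five literal characters, and the terminating nul, so
//   char Buf[3];
//   sprintf(Buf, "%d cars", 9);   // lower bound of 7 bytes > 3, diagnosed later
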
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  bool UseDABAttr = false;
  const FunctionDecl *UseDecl = FD;

  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
  if (DABAttr) {
    UseDecl = DABAttr->getFunction();
    assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
    UseDABAttr = true;
  }

  unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);

  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  auto TranslateIndex = [&](unsigned Index) -> std::optional<unsigned> {
    // If we refer to a diagnose_as_builtin attribute, we need to change the
    // argument index to refer to the arguments of the called function. Unless
    // the index is out of bounds, which presumably means it's a variadic
    // function.
    if (!UseDABAttr)
      return Index;
    unsigned DABIndices = DABAttr->argIndices_size();
    unsigned NewIndex = Index < DABIndices
                            ? DABAttr->argIndices_begin()[Index]
                            : Index - DABIndices + FD->getNumParams();
    if (NewIndex >= TheCall->getNumArgs())
      return std::nullopt;
    return NewIndex;
  };

  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(NewIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return std::nullopt;
    llvm::APSInt Integer = Result.Val.getInt();
    Integer.setIsUnsigned(true);
    return Integer;
  };

  auto ComputeSizeArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    // This check can fail for variadic functions.
    if (Index < FD->getNumParams()) {
      if (const auto *POS =
              FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
        BOSType = POS->getType();
    }

    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;

    if (NewIndex >= TheCall->getNumArgs())
      return std::nullopt;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return std::nullopt;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  auto ComputeStrLenArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return std::nullopt;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  std::optional<llvm::APSInt> SourceSize;
  std::optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  auto GetFunctionName = [&]() {
    StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
    // Skim off the details of whichever builtin was called to produce a better
    // diagnostic, as it's unlikely that the user wrote the __builtin
    // explicitly.
    if (IsChkVariant) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
      FunctionName = FunctionName.drop_back(std::strlen("_chk"));
    } else if (FunctionName.startswith("__builtin_")) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
    }
    return FunctionName;
  };

  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIscanf:
  case Builtin::BIfscanf:
  case Builtin::BIsscanf: {
    unsigned FormatIndex = 1;
    unsigned DataIndex = 2;
    if (BuiltinID == Builtin::BIscanf) {
      FormatIndex = 0;
      DataIndex = 1;
    }

    const auto *FormatExpr =
        TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
    if (!Format)
      return;

    if (!Format->isOrdinary() && !Format->isUTF8())
      return;

    auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
                        unsigned SourceSize) {
      DiagID = diag::warn_fortify_scanf_overflow;
      unsigned Index = ArgIndex + DataIndex;
      StringRef FunctionName = GetFunctionName();
      DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
                          PDiag(DiagID) << FunctionName << (Index + 1)
                                        << DestSize << SourceSize);
    };

    StringRef FormatStrRef = Format->getString();
    auto ShiftedComputeSizeArgument = [&](unsigned Index) {
      return ComputeSizeArgument(Index + DataIndex);
    };
    ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
    const char *FormatBytes = FormatStrRef.data();
    const ConstantArrayType *T =
        Context.getAsConstantArrayType(Format->getType());
    assert(T && "String literal not of constant array type!");
    size_t TypeSize = T->getSize().getZExtValue();

    // In case there's a null byte somewhere.
    size_t StrLen =
        std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));

    analyze_format_string::ParseScanfString(H, FormatBytes,
                                            FormatBytes + StrLen, getLangOpts(),
                                            Context.getTargetInfo());

    // Unlike the other cases, in this one we have already issued the diagnostic
    // here, so no need to continue (because unlike the other cases, here the
    // diagnostic refers to the argument number).
    return;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isOrdinary() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                         .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          DestinationSize = ComputeExplicitObjectSizeArgument(2);
          IsChkVariant = true;
        } else {
          DestinationSize = ComputeSizeArgument(0);
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
    DestinationSize =
        ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    IsChkVariant = true;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(3);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  }

  if (!SourceSize || !DestinationSize ||
      llvm::APSInt::compareValues(*SourceSize, *DestinationSize) <= 0)
    return;

  StringRef FunctionName = GetFunctionName();

  SmallString<16> DestinationStr;
  SmallString<16> SourceStr;
  DestinationSize->toString(DestinationStr, /*Radix=*/10);
  SourceSize->toString(SourceStr, /*Radix=*/10);
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << DestinationStr << SourceStr);
}

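// Illustrative examples of calls the function above diagnoses (hypothetical
// user code, reported under -Wfortify-source):
//   char Buf[10];
//   memcpy(Buf, Src, 20);        // destination size 10 < source size 20
//   strcpy(Buf, "0123456789");   // strlen + nul = 11 > destination size 10
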
static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  // OpenCL device can support extension but not the feature as extension
  // requires subgroup independent forward progress, but subgroup independent
  // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature.
  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) &&
      !S.getOpenCLOptions().isSupported("__opencl_c_subgroups",
                                        S.getLangOpts())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee()
        << "cl_khr_subgroups or __opencl_c_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameter of passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Forth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific case has been detected, give generic error
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

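// Illustrative example (hypothetical OpenCL C user code) of the simplest
// accepted overload: four arguments and a block taking no parameters, e.g.
//   enqueue_kernel(Queue, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, NDRange,
//                  ^{ /* kernel body */ });
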
/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if the first argument is not a pipe, or if its access
/// qualifier does not match the pipe builtin being called.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if the pipe packet type is different from the pointee type of
/// the pointer argument at index \p Idx.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer, and its pointee type should be the
  // same as the pipe element type.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}
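
// Illustrative example (hypothetical kernel code, not from this file) of the
// packet-type mismatch diagnosed above:
//
//   pipe int p;
//   float f;
//   read_pipe(p, &f);   // diagnosed: packet type is 'int', argument is 'float *'
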
// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}
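
// For illustration (hypothetical kernel code, not from this file), the two
// accepted forms checked above are:
//
//   pipe int p; int v; reserve_id_t rid; uint idx;
//   read_pipe(p, &v);            // 2-argument form
//   read_pipe(p, rid, idx, &v);  // 4-argument form
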
// Performs a semantic analysis on the {work_group_/sub_group_
//        /_}reserve_{read/write}_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, we used int as
  // the return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}
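
// Illustrative effect of the return-type override above (hypothetical kernel
// code, not from this file):
//
//   pipe int p;
//   reserve_id_t rid = reserve_write_pipe(p, 4);  // call retyped to reserve_id_t
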
// Performs a semantic analysis on the {work_group_/sub_group_
//        /_}commit_{read/write}_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to the built-in pipe packet query
// functions (get_pipe_num_packets / get_pipe_max_packets).
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() ||
      RT->getPointeeType().getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}
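
// Illustrative effect of the result-type rewriting above (hypothetical kernel
// code, not from this file):
//
//   generic int *gp = ...;
//   global int *q = to_global(gp);  // call expression retyped to '__global int *'
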
static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> std::optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return std::optional<unsigned>{};
  }();
  if (DiagSelect) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << *DiagSelect << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}
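
// Illustrative arguments rejected by the DiagSelect checks above (hypothetical
// code, not from this file):
//
//   int i = 0;
//   __builtin_launder(i);    // not a pointer                   -> selector 0
//   __builtin_launder(&f);   // 'f' a function: function pointer -> selector 1
//   void *vp = &i;
//   __builtin_launder(vp);   // 'void *'                         -> selector 2
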
// Emit an error and return true if the current object format type is in the
// list of unsupported object format types.
static bool CheckBuiltinTargetNotInUnsupported(
    Sema &S, unsigned BuiltinID, CallExpr *TheCall,
    ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) {
  llvm::Triple::ObjectFormatType CurObjFormat =
      S.getASTContext().getTargetInfo().getTriple().getObjectFormat();
  if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
        << TheCall->getSourceRange();
    return true;
  }
  return false;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                              ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}
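
// Typical use from CheckBuiltinFunctionCall later in this file (illustrative):
//
//   if (CheckBuiltinTargetInSupported(
//           *this, BuiltinID, TheCall,
//           {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
//     return ExprError();
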
static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc);

bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
    return false;
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::hexagon:
    return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::systemz:
    return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::amdgcn:
    return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::loongarch32:
  case llvm::Triple::loongarch64:
    return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall);
  }
}

// Check if \p Ty is a valid type for the elementwise math builtins. If it is
// not a valid type, emit an error message and return true. Otherwise return
// false.
static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
                                        QualType Ty) {
  if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) {
    return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
           << 1 << /* vector, integer or float ty*/ 0 << Ty;
  }

  return false;
}

static bool checkFPMathBuiltinElementType(Sema &S, SourceLocation Loc,
                                          QualType ArgTy, int ArgIndex) {
  QualType EltTy = ArgTy;
  if (auto *VecTy = EltTy->getAs<VectorType>())
    EltTy = VecTy->getElementType();

  if (!EltTy->isRealFloatingType()) {
    return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
           << ArgIndex << /* vector or float ty*/ 5 << ArgTy;
  }

  return false;
}
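
// Illustrative inputs for checkFPMathBuiltinElementType above (hypothetical
// code, not from this file):
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 v;
//   __builtin_elementwise_sin(v);  // OK: element type 'float' is real floating
//   __builtin_elementwise_sin(1);  // rejected: 'int' is not a floating type
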
ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICEs, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICEs.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    // If we don't have enough arguments, continue so we can issue a better
    // diagnostic in checkArgCount(...).
    if (ArgNo < TheCall->getNumArgs() &&
        SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
2092 case Builtin::BI__builtin___CFStringMakeConstantString
:
2093 // CFStringMakeConstantString is currently not implemented for GOFF (i.e.,
2094 // on z/OS) and for XCOFF (i.e., on AIX). Emit unsupported
2095 if (CheckBuiltinTargetNotInUnsupported(
2096 *this, BuiltinID
, TheCall
,
2097 {llvm::Triple::GOFF
, llvm::Triple::XCOFF
}))
2099 assert(TheCall
->getNumArgs() == 1 &&
2100 "Wrong # arguments to builtin CFStringMakeConstantString");
2101 if (CheckObjCString(TheCall
->getArg(0)))
2104 case Builtin::BI__builtin_ms_va_start
:
2105 case Builtin::BI__builtin_stdarg_start
:
2106 case Builtin::BI__builtin_va_start
:
2107 if (SemaBuiltinVAStart(BuiltinID
, TheCall
))
2110 case Builtin::BI__va_start
: {
2111 switch (Context
.getTargetInfo().getTriple().getArch()) {
2112 case llvm::Triple::aarch64
:
2113 case llvm::Triple::arm
:
2114 case llvm::Triple::thumb
:
2115 if (SemaBuiltinVAStartARMMicrosoft(TheCall
))
2119 if (SemaBuiltinVAStart(BuiltinID
, TheCall
))
2126 // The acquire, release, and no fence variants are ARM and AArch64 only.
2127 case Builtin::BI_interlockedbittestandset_acq
:
2128 case Builtin::BI_interlockedbittestandset_rel
:
2129 case Builtin::BI_interlockedbittestandset_nf
:
2130 case Builtin::BI_interlockedbittestandreset_acq
:
2131 case Builtin::BI_interlockedbittestandreset_rel
:
2132 case Builtin::BI_interlockedbittestandreset_nf
:
2133 if (CheckBuiltinTargetInSupported(
2134 *this, BuiltinID
, TheCall
,
2135 {llvm::Triple::arm
, llvm::Triple::thumb
, llvm::Triple::aarch64
}))
2139 // The 64-bit bittest variants are x64, ARM, and AArch64 only.
2140 case Builtin::BI_bittest64
:
2141 case Builtin::BI_bittestandcomplement64
:
2142 case Builtin::BI_bittestandreset64
:
2143 case Builtin::BI_bittestandset64
:
2144 case Builtin::BI_interlockedbittestandreset64
:
2145 case Builtin::BI_interlockedbittestandset64
:
2146 if (CheckBuiltinTargetInSupported(*this, BuiltinID
, TheCall
,
2147 {llvm::Triple::x86_64
, llvm::Triple::arm
,
2148 llvm::Triple::thumb
,
2149 llvm::Triple::aarch64
}))
2153 case Builtin::BI__builtin_set_flt_rounds
:
2154 if (CheckBuiltinTargetInSupported(*this, BuiltinID
, TheCall
,
2155 {llvm::Triple::x86
, llvm::Triple::x86_64
,
2156 llvm::Triple::arm
, llvm::Triple::thumb
,
2157 llvm::Triple::aarch64
}))
2161 case Builtin::BI__builtin_isgreater
:
2162 case Builtin::BI__builtin_isgreaterequal
:
2163 case Builtin::BI__builtin_isless
:
2164 case Builtin::BI__builtin_islessequal
:
2165 case Builtin::BI__builtin_islessgreater
:
2166 case Builtin::BI__builtin_isunordered
:
2167 if (SemaBuiltinUnorderedCompare(TheCall
))
2170 case Builtin::BI__builtin_fpclassify
:
2171 if (SemaBuiltinFPClassification(TheCall
, 6))
2174 case Builtin::BI__builtin_isfpclass
:
2175 if (SemaBuiltinFPClassification(TheCall
, 2))
2178 case Builtin::BI__builtin_isfinite
:
2179 case Builtin::BI__builtin_isinf
:
2180 case Builtin::BI__builtin_isinf_sign
:
2181 case Builtin::BI__builtin_isnan
:
2182 case Builtin::BI__builtin_isnormal
:
2183 case Builtin::BI__builtin_signbit
:
2184 case Builtin::BI__builtin_signbitf
:
2185 case Builtin::BI__builtin_signbitl
:
2186 if (SemaBuiltinFPClassification(TheCall
, 1))
2189 case Builtin::BI__builtin_shufflevector
:
2190 return SemaBuiltinShuffleVector(TheCall
);
2191 // TheCall will be freed by the smart pointer here, but that's fine, since
2192 // SemaBuiltinShuffleVector guts it, but then doesn't release it.
2193 case Builtin::BI__builtin_prefetch
:
2194 if (SemaBuiltinPrefetch(TheCall
))
2197 case Builtin::BI__builtin_alloca_with_align
:
2198 case Builtin::BI__builtin_alloca_with_align_uninitialized
:
2199 if (SemaBuiltinAllocaWithAlign(TheCall
))
2202 case Builtin::BI__builtin_alloca
:
2203 case Builtin::BI__builtin_alloca_uninitialized
:
2204 Diag(TheCall
->getBeginLoc(), diag::warn_alloca
)
2205 << TheCall
->getDirectCallee();
2207 case Builtin::BI__arithmetic_fence
:
2208 if (SemaBuiltinArithmeticFence(TheCall
))
2211 case Builtin::BI__assume
:
2212 case Builtin::BI__builtin_assume
:
2213 if (SemaBuiltinAssume(TheCall
))
2216 case Builtin::BI__builtin_assume_aligned
:
2217 if (SemaBuiltinAssumeAligned(TheCall
))
2220 case Builtin::BI__builtin_dynamic_object_size
:
2221 case Builtin::BI__builtin_object_size
:
2222 if (SemaBuiltinConstantArgRange(TheCall
, 1, 0, 3))
2225 case Builtin::BI__builtin_longjmp
:
2226 if (SemaBuiltinLongjmp(TheCall
))
2229 case Builtin::BI__builtin_setjmp
:
2230 if (SemaBuiltinSetjmp(TheCall
))
2233 case Builtin::BI__builtin_classify_type
:
2234 if (checkArgCount(*this, TheCall
, 1)) return true;
2235 TheCall
->setType(Context
.IntTy
);
2237 case Builtin::BI__builtin_complex
:
2238 if (SemaBuiltinComplex(TheCall
))
2241 case Builtin::BI__builtin_constant_p
: {
2242 if (checkArgCount(*this, TheCall
, 1)) return true;
2243 ExprResult Arg
= DefaultFunctionArrayLvalueConversion(TheCall
->getArg(0));
2244 if (Arg
.isInvalid()) return true;
2245 TheCall
->setArg(0, Arg
.get());
2246 TheCall
->setType(Context
.IntTy
);
2249 case Builtin::BI__builtin_launder
:
2250 return SemaBuiltinLaunder(*this, TheCall
);
2251 case Builtin::BI__sync_fetch_and_add
:
2252 case Builtin::BI__sync_fetch_and_add_1
:
2253 case Builtin::BI__sync_fetch_and_add_2
:
2254 case Builtin::BI__sync_fetch_and_add_4
:
2255 case Builtin::BI__sync_fetch_and_add_8
:
2256 case Builtin::BI__sync_fetch_and_add_16
:
2257 case Builtin::BI__sync_fetch_and_sub
:
2258 case Builtin::BI__sync_fetch_and_sub_1
:
2259 case Builtin::BI__sync_fetch_and_sub_2
:
2260 case Builtin::BI__sync_fetch_and_sub_4
:
2261 case Builtin::BI__sync_fetch_and_sub_8
:
2262 case Builtin::BI__sync_fetch_and_sub_16
:
2263 case Builtin::BI__sync_fetch_and_or
:
2264 case Builtin::BI__sync_fetch_and_or_1
:
2265 case Builtin::BI__sync_fetch_and_or_2
:
2266 case Builtin::BI__sync_fetch_and_or_4
:
2267 case Builtin::BI__sync_fetch_and_or_8
:
2268 case Builtin::BI__sync_fetch_and_or_16
:
2269 case Builtin::BI__sync_fetch_and_and
:
2270 case Builtin::BI__sync_fetch_and_and_1
:
2271 case Builtin::BI__sync_fetch_and_and_2
:
2272 case Builtin::BI__sync_fetch_and_and_4
:
2273 case Builtin::BI__sync_fetch_and_and_8
:
2274 case Builtin::BI__sync_fetch_and_and_16
:
2275 case Builtin::BI__sync_fetch_and_xor
:
2276 case Builtin::BI__sync_fetch_and_xor_1
:
2277 case Builtin::BI__sync_fetch_and_xor_2
:
2278 case Builtin::BI__sync_fetch_and_xor_4
:
2279 case Builtin::BI__sync_fetch_and_xor_8
:
2280 case Builtin::BI__sync_fetch_and_xor_16
:
2281 case Builtin::BI__sync_fetch_and_nand
:
2282 case Builtin::BI__sync_fetch_and_nand_1
:
2283 case Builtin::BI__sync_fetch_and_nand_2
:
2284 case Builtin::BI__sync_fetch_and_nand_4
:
2285 case Builtin::BI__sync_fetch_and_nand_8
:
2286 case Builtin::BI__sync_fetch_and_nand_16
:
2287 case Builtin::BI__sync_add_and_fetch
:
2288 case Builtin::BI__sync_add_and_fetch_1
:
2289 case Builtin::BI__sync_add_and_fetch_2
:
2290 case Builtin::BI__sync_add_and_fetch_4
:
2291 case Builtin::BI__sync_add_and_fetch_8
:
2292 case Builtin::BI__sync_add_and_fetch_16
:
2293 case Builtin::BI__sync_sub_and_fetch
:
2294 case Builtin::BI__sync_sub_and_fetch_1
:
2295 case Builtin::BI__sync_sub_and_fetch_2
:
2296 case Builtin::BI__sync_sub_and_fetch_4
:
2297 case Builtin::BI__sync_sub_and_fetch_8
:
2298 case Builtin::BI__sync_sub_and_fetch_16
:
2299 case Builtin::BI__sync_and_and_fetch
:
2300 case Builtin::BI__sync_and_and_fetch_1
:
2301 case Builtin::BI__sync_and_and_fetch_2
:
2302 case Builtin::BI__sync_and_and_fetch_4
:
2303 case Builtin::BI__sync_and_and_fetch_8
:
2304 case Builtin::BI__sync_and_and_fetch_16
:
2305 case Builtin::BI__sync_or_and_fetch
:
2306 case Builtin::BI__sync_or_and_fetch_1
:
2307 case Builtin::BI__sync_or_and_fetch_2
:
2308 case Builtin::BI__sync_or_and_fetch_4
:
2309 case Builtin::BI__sync_or_and_fetch_8
:
2310 case Builtin::BI__sync_or_and_fetch_16
:
2311 case Builtin::BI__sync_xor_and_fetch
:
2312 case Builtin::BI__sync_xor_and_fetch_1
:
2313 case Builtin::BI__sync_xor_and_fetch_2
:
2314 case Builtin::BI__sync_xor_and_fetch_4
:
2315 case Builtin::BI__sync_xor_and_fetch_8
:
2316 case Builtin::BI__sync_xor_and_fetch_16
:
2317 case Builtin::BI__sync_nand_and_fetch
:
2318 case Builtin::BI__sync_nand_and_fetch_1
:
2319 case Builtin::BI__sync_nand_and_fetch_2
:
2320 case Builtin::BI__sync_nand_and_fetch_4
:
2321 case Builtin::BI__sync_nand_and_fetch_8
:
2322 case Builtin::BI__sync_nand_and_fetch_16
:
2323 case Builtin::BI__sync_val_compare_and_swap
:
2324 case Builtin::BI__sync_val_compare_and_swap_1
:
2325 case Builtin::BI__sync_val_compare_and_swap_2
:
2326 case Builtin::BI__sync_val_compare_and_swap_4
:
2327 case Builtin::BI__sync_val_compare_and_swap_8
:
2328 case Builtin::BI__sync_val_compare_and_swap_16
:
2329 case Builtin::BI__sync_bool_compare_and_swap
:
2330 case Builtin::BI__sync_bool_compare_and_swap_1
:
2331 case Builtin::BI__sync_bool_compare_and_swap_2
:
2332 case Builtin::BI__sync_bool_compare_and_swap_4
:
2333 case Builtin::BI__sync_bool_compare_and_swap_8
:
2334 case Builtin::BI__sync_bool_compare_and_swap_16
:
2335 case Builtin::BI__sync_lock_test_and_set
:
2336 case Builtin::BI__sync_lock_test_and_set_1
:
2337 case Builtin::BI__sync_lock_test_and_set_2
:
2338 case Builtin::BI__sync_lock_test_and_set_4
:
2339 case Builtin::BI__sync_lock_test_and_set_8
:
2340 case Builtin::BI__sync_lock_test_and_set_16
:
2341 case Builtin::BI__sync_lock_release
:
2342 case Builtin::BI__sync_lock_release_1
:
2343 case Builtin::BI__sync_lock_release_2
:
2344 case Builtin::BI__sync_lock_release_4
:
2345 case Builtin::BI__sync_lock_release_8
:
2346 case Builtin::BI__sync_lock_release_16
:
2347 case Builtin::BI__sync_swap
:
2348 case Builtin::BI__sync_swap_1
:
2349 case Builtin::BI__sync_swap_2
:
2350 case Builtin::BI__sync_swap_4
:
2351 case Builtin::BI__sync_swap_8
:
2352 case Builtin::BI__sync_swap_16
:
2353 return SemaBuiltinAtomicOverloaded(TheCallResult
);
2354 case Builtin::BI__sync_synchronize
:
2355 Diag(TheCall
->getBeginLoc(), diag::warn_atomic_implicit_seq_cst
)
2356 << TheCall
->getCallee()->getSourceRange();
2358 case Builtin::BI__builtin_nontemporal_load
:
2359 case Builtin::BI__builtin_nontemporal_store
:
2360 return SemaBuiltinNontemporalOverloaded(TheCallResult
);
2361 case Builtin::BI__builtin_memcpy_inline
: {
2362 clang::Expr
*SizeOp
= TheCall
->getArg(2);
2363 // We warn about copying to or from `nullptr` pointers when `size` is
2364 // greater than 0. When `size` is value dependent we cannot evaluate its
2365 // value so we bail out.
2366 if (SizeOp
->isValueDependent())
2368 if (!SizeOp
->EvaluateKnownConstInt(Context
).isZero()) {
2369 CheckNonNullArgument(*this, TheCall
->getArg(0), TheCall
->getExprLoc());
2370 CheckNonNullArgument(*this, TheCall
->getArg(1), TheCall
->getExprLoc());
2374 case Builtin::BI__builtin_memset_inline
: {
2375 clang::Expr
*SizeOp
= TheCall
->getArg(2);
2376 // We warn about filling to `nullptr` pointers when `size` is greater than
2377 // 0. When `size` is value dependent we cannot evaluate its value so we bail
2379 if (SizeOp
->isValueDependent())
2381 if (!SizeOp
->EvaluateKnownConstInt(Context
).isZero())
2382 CheckNonNullArgument(*this, TheCall
->getArg(0), TheCall
->getExprLoc());
2385 #define BUILTIN(ID, TYPE, ATTRS)
2386 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
2387 case Builtin::BI##ID: \
2388 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
2389 #include "clang/Basic/Builtins.def"
2390 case Builtin::BI__annotation
:
2391 if (SemaBuiltinMSVCAnnotation(*this, TheCall
))
2394 case Builtin::BI__builtin_annotation
:
2395 if (SemaBuiltinAnnotation(*this, TheCall
))
2398 case Builtin::BI__builtin_addressof
:
2399 if (SemaBuiltinAddressof(*this, TheCall
))
2402 case Builtin::BI__builtin_function_start
:
2403 if (SemaBuiltinFunctionStart(*this, TheCall
))
2406 case Builtin::BI__builtin_is_aligned
:
2407 case Builtin::BI__builtin_align_up
:
2408 case Builtin::BI__builtin_align_down
:
2409 if (SemaBuiltinAlignment(*this, TheCall
, BuiltinID
))
2412 case Builtin::BI__builtin_add_overflow
:
2413 case Builtin::BI__builtin_sub_overflow
:
2414 case Builtin::BI__builtin_mul_overflow
:
2415 if (SemaBuiltinOverflow(*this, TheCall
, BuiltinID
))
2418 case Builtin::BI__builtin_operator_new
:
2419 case Builtin::BI__builtin_operator_delete
: {
2420 bool IsDelete
= BuiltinID
== Builtin::BI__builtin_operator_delete
;
2422 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult
, IsDelete
);
2423 if (Res
.isInvalid())
2424 CorrectDelayedTyposInExpr(TheCallResult
.get());
2427 case Builtin::BI__builtin_dump_struct
:
2428 return SemaBuiltinDumpStruct(*this, TheCall
);
2429 case Builtin::BI__builtin_expect_with_probability
: {
2430 // We first want to ensure we are called with 3 arguments
2431 if (checkArgCount(*this, TheCall
, 3))
2433 // then check probability is constant float in range [0.0, 1.0]
2434 const Expr
*ProbArg
= TheCall
->getArg(2);
2435 SmallVector
<PartialDiagnosticAt
, 8> Notes
;
2436 Expr::EvalResult Eval
;
2438 if ((!ProbArg
->EvaluateAsConstantExpr(Eval
, Context
)) ||
2439 !Eval
.Val
.isFloat()) {
2440 Diag(ProbArg
->getBeginLoc(), diag::err_probability_not_constant_float
)
2441 << ProbArg
->getSourceRange();
2442 for (const PartialDiagnosticAt
&PDiag
: Notes
)
2443 Diag(PDiag
.first
, PDiag
.second
);
2446 llvm::APFloat Probability
= Eval
.Val
.getFloat();
2447 bool LoseInfo
= false;
2448 Probability
.convert(llvm::APFloat::IEEEdouble(),
2449 llvm::RoundingMode::Dynamic
, &LoseInfo
);
2450 if (!(Probability
>= llvm::APFloat(0.0) &&
2451 Probability
<= llvm::APFloat(1.0))) {
2452 Diag(ProbArg
->getBeginLoc(), diag::err_probability_out_of_range
)
2453 << ProbArg
->getSourceRange();
2458 case Builtin::BI__builtin_preserve_access_index
:
2459 if (SemaBuiltinPreserveAI(*this, TheCall
))
2462 case Builtin::BI__builtin_call_with_static_chain
:
2463 if (SemaBuiltinCallWithStaticChain(*this, TheCall
))
2466 case Builtin::BI__exception_code
:
2467 case Builtin::BI_exception_code
:
2468 if (SemaBuiltinSEHScopeCheck(*this, TheCall
, Scope::SEHExceptScope
,
2469 diag::err_seh___except_block
))
2472 case Builtin::BI__exception_info
:
2473 case Builtin::BI_exception_info
:
2474 if (SemaBuiltinSEHScopeCheck(*this, TheCall
, Scope::SEHFilterScope
,
2475 diag::err_seh___except_filter
))
2478 case Builtin::BI__GetExceptionInfo
:
2479 if (checkArgCount(*this, TheCall
, 1))
2482 if (CheckCXXThrowOperand(
2483 TheCall
->getBeginLoc(),
2484 Context
.getExceptionObjectType(FDecl
->getParamDecl(0)->getType()),
2488 TheCall
->setType(Context
.VoidPtrTy
);
2490 case Builtin::BIaddressof
:
2491 case Builtin::BI__addressof
:
2492 case Builtin::BIforward
:
2493 case Builtin::BIforward_like
:
2494 case Builtin::BImove
:
2495 case Builtin::BImove_if_noexcept
:
2496 case Builtin::BIas_const
: {
2497 // These are all expected to be of the form
2498 // T &/&&/* f(U &/&&)
2499 // where T and U only differ in qualification.
2500 if (checkArgCount(*this, TheCall
, 1))
2502 QualType Param
= FDecl
->getParamDecl(0)->getType();
2503 QualType Result
= FDecl
->getReturnType();
2504 bool ReturnsPointer
= BuiltinID
== Builtin::BIaddressof
||
2505 BuiltinID
== Builtin::BI__addressof
;
2506 if (!(Param
->isReferenceType() &&
2507 (ReturnsPointer
? Result
->isAnyPointerType()
2508 : Result
->isReferenceType()) &&
2509 Context
.hasSameUnqualifiedType(Param
->getPointeeType(),
2510 Result
->getPointeeType()))) {
2511 Diag(TheCall
->getBeginLoc(), diag::err_builtin_move_forward_unsupported
)
2517 // OpenCL v2.0, s6.13.16 - Pipe functions
2518 case Builtin::BIread_pipe
:
2519 case Builtin::BIwrite_pipe
:
2520 // Since those two functions are declared with var args, we need a semantic
2521 // check for the argument.
2522 if (SemaBuiltinRWPipe(*this, TheCall
))
2525 case Builtin::BIreserve_read_pipe
:
2526 case Builtin::BIreserve_write_pipe
:
2527 case Builtin::BIwork_group_reserve_read_pipe
:
2528 case Builtin::BIwork_group_reserve_write_pipe
:
2529 if (SemaBuiltinReserveRWPipe(*this, TheCall
))
2532 case Builtin::BIsub_group_reserve_read_pipe
:
2533 case Builtin::BIsub_group_reserve_write_pipe
:
2534 if (checkOpenCLSubgroupExt(*this, TheCall
) ||
2535 SemaBuiltinReserveRWPipe(*this, TheCall
))
2538 case Builtin::BIcommit_read_pipe
:
2539 case Builtin::BIcommit_write_pipe
:
2540 case Builtin::BIwork_group_commit_read_pipe
:
2541 case Builtin::BIwork_group_commit_write_pipe
:
2542 if (SemaBuiltinCommitRWPipe(*this, TheCall
))
2545 case Builtin::BIsub_group_commit_read_pipe
:
2546 case Builtin::BIsub_group_commit_write_pipe
:
2547 if (checkOpenCLSubgroupExt(*this, TheCall
) ||
2548 SemaBuiltinCommitRWPipe(*this, TheCall
))
2551 case Builtin::BIget_pipe_num_packets
:
2552 case Builtin::BIget_pipe_max_packets
:
2553 if (SemaBuiltinPipePackets(*this, TheCall
))
2556 case Builtin::BIto_global
:
2557 case Builtin::BIto_local
:
2558 case Builtin::BIto_private
:
2559 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID
, TheCall
))
2562 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
2563 case Builtin::BIenqueue_kernel
:
2564 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall
))
2567 case Builtin::BIget_kernel_work_group_size
:
2568 case Builtin::BIget_kernel_preferred_work_group_size_multiple
:
2569 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall
))
2572 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange
:
2573 case Builtin::BIget_kernel_sub_group_count_for_ndrange
:
2574 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall
))
2577 case Builtin::BI__builtin_os_log_format
:
2578 Cleanup
.setExprNeedsCleanups(true);
2580 case Builtin::BI__builtin_os_log_format_buffer_size
:
2581 if (SemaBuiltinOSLogFormat(TheCall
))
2584 case Builtin::BI__builtin_frame_address
:
2585 case Builtin::BI__builtin_return_address
: {
2586 if (SemaBuiltinConstantArgRange(TheCall
, 0, 0, 0xFFFF))
2589 // -Wframe-address warning if non-zero passed to builtin
2590 // return/frame address.
2591 Expr::EvalResult Result
;
2592 if (!TheCall
->getArg(0)->isValueDependent() &&
2593 TheCall
->getArg(0)->EvaluateAsInt(Result
, getASTContext()) &&
2594 Result
.Val
.getInt() != 0)
2595 Diag(TheCall
->getBeginLoc(), diag::warn_frame_address
)
2596 << ((BuiltinID
== Builtin::BI__builtin_return_address
)
2597 ? "__builtin_return_address"
2598 : "__builtin_frame_address")
2599 << TheCall
->getSourceRange();
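    // For example (illustrative, not from this file):
    //   __builtin_return_address(0);  // no warning
    //   __builtin_return_address(1);  // -Wframe-address fires here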
2603 case Builtin::BI__builtin_nondeterministic_value
: {
2604 if (SemaBuiltinNonDeterministicValue(TheCall
))
2609 // __builtin_elementwise_abs restricts the element type to signed integers or
2610 // floating point types only.
2611 case Builtin::BI__builtin_elementwise_abs
: {
2612 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall
))
2615 QualType ArgTy
= TheCall
->getArg(0)->getType();
2616 QualType EltTy
= ArgTy
;
2618 if (auto *VecTy
= EltTy
->getAs
<VectorType
>())
2619 EltTy
= VecTy
->getElementType();
2620 if (EltTy
->isUnsignedIntegerType()) {
2621 Diag(TheCall
->getArg(0)->getBeginLoc(),
2622 diag::err_builtin_invalid_arg_type
)
2623 << 1 << /* signed integer or float ty*/ 3 << ArgTy
;
2629 // These builtins restrict the element type to floating point
2631 case Builtin::BI__builtin_elementwise_ceil
:
2632 case Builtin::BI__builtin_elementwise_cos
:
2633 case Builtin::BI__builtin_elementwise_exp
:
2634 case Builtin::BI__builtin_elementwise_exp2
:
2635 case Builtin::BI__builtin_elementwise_floor
:
2636 case Builtin::BI__builtin_elementwise_log
:
2637 case Builtin::BI__builtin_elementwise_log2
:
2638 case Builtin::BI__builtin_elementwise_log10
:
2639 case Builtin::BI__builtin_elementwise_roundeven
:
2640 case Builtin::BI__builtin_elementwise_round
:
2641 case Builtin::BI__builtin_elementwise_rint
:
2642 case Builtin::BI__builtin_elementwise_nearbyint
:
2643 case Builtin::BI__builtin_elementwise_sin
:
2644 case Builtin::BI__builtin_elementwise_trunc
:
2645 case Builtin::BI__builtin_elementwise_canonicalize
: {
2646 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall
))
2649 QualType ArgTy
= TheCall
->getArg(0)->getType();
2650 if (checkFPMathBuiltinElementType(*this, TheCall
->getArg(0)->getBeginLoc(),
2655 case Builtin::BI__builtin_elementwise_fma
: {
2656 if (SemaBuiltinElementwiseTernaryMath(TheCall
))
2661 // These builtins restrict the element type to floating point
2662 // types only, and take in two arguments.
2663 case Builtin::BI__builtin_elementwise_pow
: {
2664 if (SemaBuiltinElementwiseMath(TheCall
))
2667 QualType ArgTy
= TheCall
->getArg(0)->getType();
2668 if (checkFPMathBuiltinElementType(*this, TheCall
->getArg(0)->getBeginLoc(),
2670 checkFPMathBuiltinElementType(*this, TheCall
->getArg(1)->getBeginLoc(),
2676 // These builtins restrict the element type to integer
2678 case Builtin::BI__builtin_elementwise_add_sat
:
2679 case Builtin::BI__builtin_elementwise_sub_sat
: {
2680 if (SemaBuiltinElementwiseMath(TheCall
))
2683 const Expr
*Arg
= TheCall
->getArg(0);
2684 QualType ArgTy
= Arg
->getType();
2685 QualType EltTy
= ArgTy
;
2687 if (auto *VecTy
= EltTy
->getAs
<VectorType
>())
2688 EltTy
= VecTy
->getElementType();
2690 if (!EltTy
->isIntegerType()) {
2691 Diag(Arg
->getBeginLoc(), diag::err_builtin_invalid_arg_type
)
2692 << 1 << /* integer ty */ 6 << ArgTy
;
2698 case Builtin::BI__builtin_elementwise_min
:
2699 case Builtin::BI__builtin_elementwise_max
:
2700 if (SemaBuiltinElementwiseMath(TheCall
))
2704 case Builtin::BI__builtin_elementwise_bitreverse
: {
2705 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall
))
2708 const Expr
*Arg
= TheCall
->getArg(0);
2709 QualType ArgTy
= Arg
->getType();
2710 QualType EltTy
= ArgTy
;
2712 if (auto *VecTy
= EltTy
->getAs
<VectorType
>())
2713 EltTy
= VecTy
->getElementType();
2715 if (!EltTy
->isIntegerType()) {
2716 Diag(Arg
->getBeginLoc(), diag::err_builtin_invalid_arg_type
)
2717 << 1 << /* integer ty */ 6 << ArgTy
;
2723 case Builtin::BI__builtin_elementwise_copysign
: {
2724 if (checkArgCount(*this, TheCall
, 2))
2727 ExprResult Magnitude
= UsualUnaryConversions(TheCall
->getArg(0));
2728 ExprResult Sign
= UsualUnaryConversions(TheCall
->getArg(1));
2729 if (Magnitude
.isInvalid() || Sign
.isInvalid())
2732 QualType MagnitudeTy
= Magnitude
.get()->getType();
2733 QualType SignTy
= Sign
.get()->getType();
2734 if (checkFPMathBuiltinElementType(*this, TheCall
->getArg(0)->getBeginLoc(),
2736 checkFPMathBuiltinElementType(*this, TheCall
->getArg(1)->getBeginLoc(),
2741 if (MagnitudeTy
.getCanonicalType() != SignTy
.getCanonicalType()) {
2742 return Diag(Sign
.get()->getBeginLoc(),
2743 diag::err_typecheck_call_different_arg_types
)
2744 << MagnitudeTy
<< SignTy
;
2747 TheCall
->setArg(0, Magnitude
.get());
2748 TheCall
->setArg(1, Sign
.get());
2749 TheCall
->setType(Magnitude
.get()->getType());
2752 case Builtin::BI__builtin_reduce_max
:
2753 case Builtin::BI__builtin_reduce_min
: {
2754 if (PrepareBuiltinReduceMathOneArgCall(TheCall
))
2757 const Expr
*Arg
= TheCall
->getArg(0);
2758 const auto *TyA
= Arg
->getType()->getAs
<VectorType
>();
2760 Diag(Arg
->getBeginLoc(), diag::err_builtin_invalid_arg_type
)
2761 << 1 << /* vector ty*/ 4 << Arg
->getType();
2765 TheCall
->setType(TyA
->getElementType());
2769 // These builtins support vectors of integers only.
2770 // TODO: ADD/MUL should support floating-point types.
2771 case Builtin::BI__builtin_reduce_add
:
2772 case Builtin::BI__builtin_reduce_mul
:
2773 case Builtin::BI__builtin_reduce_xor
:
2774 case Builtin::BI__builtin_reduce_or
:
2775 case Builtin::BI__builtin_reduce_and
: {
2776 if (PrepareBuiltinReduceMathOneArgCall(TheCall
))
2779 const Expr
*Arg
= TheCall
->getArg(0);
2780 const auto *TyA
= Arg
->getType()->getAs
<VectorType
>();
2781 if (!TyA
|| !TyA
->getElementType()->isIntegerType()) {
2782 Diag(Arg
->getBeginLoc(), diag::err_builtin_invalid_arg_type
)
2783 << 1 << /* vector of integers */ 6 << Arg
->getType();
2786 TheCall
->setType(TyA
->getElementType());
2790 case Builtin::BI__builtin_matrix_transpose
:
2791 return SemaBuiltinMatrixTranspose(TheCall
, TheCallResult
);
2793 case Builtin::BI__builtin_matrix_column_major_load
:
2794 return SemaBuiltinMatrixColumnMajorLoad(TheCall
, TheCallResult
);
2796 case Builtin::BI__builtin_matrix_column_major_store
:
2797 return SemaBuiltinMatrixColumnMajorStore(TheCall
, TheCallResult
);
2799 case Builtin::BI__builtin_get_device_side_mangled_name
: {
2800 auto Check
= [](CallExpr
*TheCall
) {
2801 if (TheCall
->getNumArgs() != 1)
2803 auto *DRE
= dyn_cast
<DeclRefExpr
>(TheCall
->getArg(0)->IgnoreImpCasts());
2806 auto *D
= DRE
->getDecl();
2807 if (!isa
<FunctionDecl
>(D
) && !isa
<VarDecl
>(D
))
2809 return D
->hasAttr
<CUDAGlobalAttr
>() || D
->hasAttr
<CUDADeviceAttr
>() ||
2810 D
->hasAttr
<CUDAConstantAttr
>() || D
->hasAttr
<HIPManagedAttr
>();
2812 if (!Check(TheCall
)) {
2813 Diag(TheCall
->getBeginLoc(),
2814 diag::err_hip_invalid_args_builtin_mangled_name
);
2820 // Since the target specific builtins for each arch overlap, only check those
2821 // of the arch we are compiling for.
2822 if (Context
.BuiltinInfo
.isTSBuiltin(BuiltinID
)) {
2823 if (Context
.BuiltinInfo
.isAuxBuiltinID(BuiltinID
)) {
2824 assert(Context
.getAuxTargetInfo() &&
2825 "Aux Target Builtin, but not an aux target?");
2827 if (CheckTSBuiltinFunctionCall(
2828 *Context
.getAuxTargetInfo(),
2829 Context
.BuiltinInfo
.getAuxBuiltinID(BuiltinID
), TheCall
))
2832 if (CheckTSBuiltinFunctionCall(Context
.getTargetInfo(), BuiltinID
,
2838 return TheCallResult
;
2841 // Get the valid immediate range for the specified NEON type code.
2842 static unsigned RFT(unsigned t
, bool shift
= false, bool ForceQuad
= false) {
2843 NeonTypeFlags
Type(t
);
2844 int IsQuad
= ForceQuad
? true : Type
.isQuad();
2845 switch (Type
.getEltType()) {
2846 case NeonTypeFlags::Int8
:
2847 case NeonTypeFlags::Poly8
:
2848 return shift
? 7 : (8 << IsQuad
) - 1;
2849 case NeonTypeFlags::Int16
:
2850 case NeonTypeFlags::Poly16
:
2851 return shift
? 15 : (4 << IsQuad
) - 1;
2852 case NeonTypeFlags::Int32
:
2853 return shift
? 31 : (2 << IsQuad
) - 1;
2854 case NeonTypeFlags::Int64
:
2855 case NeonTypeFlags::Poly64
:
2856 return shift
? 63 : (1 << IsQuad
) - 1;
2857 case NeonTypeFlags::Poly128
:
2858 return shift
? 127 : (1 << IsQuad
) - 1;
2859 case NeonTypeFlags::Float16
:
2860 assert(!shift
&& "cannot shift float types!");
2861 return (4 << IsQuad
) - 1;
2862 case NeonTypeFlags::Float32
:
2863 assert(!shift
&& "cannot shift float types!");
2864 return (2 << IsQuad
) - 1;
2865 case NeonTypeFlags::Float64
:
2866 assert(!shift
&& "cannot shift float types!");
2867 return (1 << IsQuad
) - 1;
2868 case NeonTypeFlags::BFloat16
:
2869 assert(!shift
&& "cannot shift float types!");
2870 return (4 << IsQuad
) - 1;
2872 llvm_unreachable("Invalid NeonTypeFlag!");
2875 /// getNeonEltType - Return the QualType corresponding to the elements of
2876 /// the vector type specified by the NeonTypeFlags. This is used to check
2877 /// the pointer arguments for Neon load/store intrinsics.
2878 static QualType
getNeonEltType(NeonTypeFlags Flags
, ASTContext
&Context
,
2879 bool IsPolyUnsigned
, bool IsInt64Long
) {
2880 switch (Flags
.getEltType()) {
2881 case NeonTypeFlags::Int8
:
2882 return Flags
.isUnsigned() ? Context
.UnsignedCharTy
: Context
.SignedCharTy
;
2883 case NeonTypeFlags::Int16
:
2884 return Flags
.isUnsigned() ? Context
.UnsignedShortTy
: Context
.ShortTy
;
2885 case NeonTypeFlags::Int32
:
2886 return Flags
.isUnsigned() ? Context
.UnsignedIntTy
: Context
.IntTy
;
2887 case NeonTypeFlags::Int64
:
2889 return Flags
.isUnsigned() ? Context
.UnsignedLongTy
: Context
.LongTy
;
2891 return Flags
.isUnsigned() ? Context
.UnsignedLongLongTy
2892 : Context
.LongLongTy
;
2893 case NeonTypeFlags::Poly8
:
2894 return IsPolyUnsigned
? Context
.UnsignedCharTy
: Context
.SignedCharTy
;
2895 case NeonTypeFlags::Poly16
:
2896 return IsPolyUnsigned
? Context
.UnsignedShortTy
: Context
.ShortTy
;
2897 case NeonTypeFlags::Poly64
:
2899 return Context
.UnsignedLongTy
;
2901 return Context
.UnsignedLongLongTy
;
2902 case NeonTypeFlags::Poly128
:
2904 case NeonTypeFlags::Float16
:
2905 return Context
.HalfTy
;
2906 case NeonTypeFlags::Float32
:
2907 return Context
.FloatTy
;
2908 case NeonTypeFlags::Float64
:
2909 return Context
.DoubleTy
;
2910 case NeonTypeFlags::BFloat16
:
2911 return Context
.BFloat16Ty
;
2913 llvm_unreachable("Invalid NeonTypeFlag!");
2916 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID
, CallExpr
*TheCall
) {
2917 // Range check SVE intrinsics that take immediate values.
2918 SmallVector
<std::tuple
<int,int,int>, 3> ImmChecks
;
2920 switch (BuiltinID
) {
2923 #define GET_SVE_IMMEDIATE_CHECK
2924 #include "clang/Basic/arm_sve_sema_rangechecks.inc"
2925 #undef GET_SVE_IMMEDIATE_CHECK
2926 #define GET_SME_IMMEDIATE_CHECK
2927 #include "clang/Basic/arm_sme_sema_rangechecks.inc"
2928 #undef GET_SME_IMMEDIATE_CHECK
2931 // Perform all the immediate checks for this builtin call.
2932 bool HasError
= false;
2933 for (auto &I
: ImmChecks
) {
2934 int ArgNum
, CheckTy
, ElementSizeInBits
;
2935 std::tie(ArgNum
, CheckTy
, ElementSizeInBits
) = I
;
2937 typedef bool(*OptionSetCheckFnTy
)(int64_t Value
);
2939 // Function that checks whether the operand (ArgNum) is an immediate
2940 // that is one of the predefined values.
2941 auto CheckImmediateInSet
= [&](OptionSetCheckFnTy CheckImm
,
2942 int ErrDiag
) -> bool {
2943 // We can't check the value of a dependent argument.
2944 Expr
*Arg
= TheCall
->getArg(ArgNum
);
2945 if (Arg
->isTypeDependent() || Arg
->isValueDependent())
2948 // Check constant-ness first.
2950 if (SemaBuiltinConstantArg(TheCall
, ArgNum
, Imm
))
2953 if (!CheckImm(Imm
.getSExtValue()))
2954 return Diag(TheCall
->getBeginLoc(), ErrDiag
) << Arg
->getSourceRange();
2958 switch ((SVETypeFlags::ImmCheckType
)CheckTy
) {
2959 case SVETypeFlags::ImmCheck0_31
:
2960 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 0, 31))
2963 case SVETypeFlags::ImmCheck0_13
:
2964 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 0, 13))
2967 case SVETypeFlags::ImmCheck1_16
:
2968 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 1, 16))
2971 case SVETypeFlags::ImmCheck0_7
:
2972 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 0, 7))
2975 case SVETypeFlags::ImmCheckExtract
:
2976 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 0,
2977 (2048 / ElementSizeInBits
) - 1))
2980 case SVETypeFlags::ImmCheckShiftRight
:
2981 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 1, ElementSizeInBits
))
2984 case SVETypeFlags::ImmCheckShiftRightNarrow
:
2985 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 1,
2986 ElementSizeInBits
/ 2))
2989 case SVETypeFlags::ImmCheckShiftLeft
:
2990 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 0,
2991 ElementSizeInBits
- 1))
2994 case SVETypeFlags::ImmCheckLaneIndex
:
2995 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 0,
2996 (128 / (1 * ElementSizeInBits
)) - 1))
2999 case SVETypeFlags::ImmCheckLaneIndexCompRotate
:
3000 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 0,
3001 (128 / (2 * ElementSizeInBits
)) - 1))
3004 case SVETypeFlags::ImmCheckLaneIndexDot
:
3005 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 0,
3006 (128 / (4 * ElementSizeInBits
)) - 1))
3009 case SVETypeFlags::ImmCheckComplexRot90_270
:
3010 if (CheckImmediateInSet([](int64_t V
) { return V
== 90 || V
== 270; },
3011 diag::err_rotation_argument_to_cadd
))
3014 case SVETypeFlags::ImmCheckComplexRotAll90
:
3015 if (CheckImmediateInSet(
3017 return V
== 0 || V
== 90 || V
== 180 || V
== 270;
3019 diag::err_rotation_argument_to_cmla
))
3022 case SVETypeFlags::ImmCheck0_1
:
3023 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 0, 1))
3026 case SVETypeFlags::ImmCheck0_2
:
3027 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 0, 2))
3030 case SVETypeFlags::ImmCheck0_3
:
3031 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 0, 3))
3034 case SVETypeFlags::ImmCheck0_0
:
3035 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 0, 0))
3038 case SVETypeFlags::ImmCheck0_15
:
3039 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 0, 15))
3042 case SVETypeFlags::ImmCheck0_255
:
3043 if (SemaBuiltinConstantArgRange(TheCall
, ArgNum
, 0, 255))
3052 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo
&TI
,
3053 unsigned BuiltinID
, CallExpr
*TheCall
) {
3054 llvm::APSInt Result
;
3058 bool HasConstPtr
= false;
3059 switch (BuiltinID
) {
3060 #define GET_NEON_OVERLOAD_CHECK
3061 #include "clang/Basic/arm_neon.inc"
3062 #include "clang/Basic/arm_fp16.inc"
3063 #undef GET_NEON_OVERLOAD_CHECK
3066 // For NEON intrinsics which are overloaded on vector element type, validate
3067 // the immediate which specifies which variant to emit.
3068 unsigned ImmArg
= TheCall
->getNumArgs()-1;
3070 if (SemaBuiltinConstantArg(TheCall
, ImmArg
, Result
))
3073 TV
= Result
.getLimitedValue(64);
3074 if ((TV
> 63) || (mask
& (1ULL << TV
)) == 0)
3075 return Diag(TheCall
->getBeginLoc(), diag::err_invalid_neon_type_code
)
3076 << TheCall
->getArg(ImmArg
)->getSourceRange();
3079 if (PtrArgNum
>= 0) {
3080 // Check that pointer arguments have the specified type.
3081 Expr
*Arg
= TheCall
->getArg(PtrArgNum
);
3082 if (ImplicitCastExpr
*ICE
= dyn_cast
<ImplicitCastExpr
>(Arg
))
3083 Arg
= ICE
->getSubExpr();
3084 ExprResult RHS
= DefaultFunctionArrayLvalueConversion(Arg
);
3085 QualType RHSTy
= RHS
.get()->getType();
3087 llvm::Triple::ArchType Arch
= TI
.getTriple().getArch();
3088 bool IsPolyUnsigned
= Arch
== llvm::Triple::aarch64
||
3089 Arch
== llvm::Triple::aarch64_32
||
3090 Arch
== llvm::Triple::aarch64_be
;
3091 bool IsInt64Long
= TI
.getInt64Type() == TargetInfo::SignedLong
;
3093 getNeonEltType(NeonTypeFlags(TV
), Context
, IsPolyUnsigned
, IsInt64Long
);
3095 EltTy
= EltTy
.withConst();
3096 QualType LHSTy
= Context
.getPointerType(EltTy
);
3097 AssignConvertType ConvTy
;
3098 ConvTy
= CheckSingleAssignmentConstraints(LHSTy
, RHS
);
3099 if (RHS
.isInvalid())
3101 if (DiagnoseAssignmentResult(ConvTy
, Arg
->getBeginLoc(), LHSTy
, RHSTy
,
3102 RHS
.get(), AA_Assigning
))
3106 // For NEON intrinsics which take an immediate value as part of the
3107 // instruction, range check them here.
3108 unsigned i
= 0, l
= 0, u
= 0;
3109 switch (BuiltinID
) {
3112 #define GET_NEON_IMMEDIATE_CHECK
3113 #include "clang/Basic/arm_neon.inc"
3114 #include "clang/Basic/arm_fp16.inc"
3115 #undef GET_NEON_IMMEDIATE_CHECK
3118 return SemaBuiltinConstantArgRange(TheCall
, i
, l
, u
+ l
);
3121 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID
, CallExpr
*TheCall
) {
3122 switch (BuiltinID
) {
3125 #include "clang/Basic/arm_mve_builtin_sema.inc"
3129 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo
&TI
, unsigned BuiltinID
,
3130 CallExpr
*TheCall
) {
3132 switch (BuiltinID
) {
3135 #include "clang/Basic/arm_cde_builtin_sema.inc"
3141 return CheckARMCoprocessorImmediate(TI
, TheCall
->getArg(0), /*WantCDE*/ true);
3144 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo
&TI
,
3145 const Expr
*CoprocArg
, bool WantCDE
) {
3146 if (isConstantEvaluated())
3149 // We can't check the value of a dependent argument.
3150 if (CoprocArg
->isTypeDependent() || CoprocArg
->isValueDependent())
3153 llvm::APSInt CoprocNoAP
= *CoprocArg
->getIntegerConstantExpr(Context
);
3154 int64_t CoprocNo
= CoprocNoAP
.getExtValue();
3155 assert(CoprocNo
>= 0 && "Coprocessor immediate must be non-negative");
3157 uint32_t CDECoprocMask
= TI
.getARMCDECoprocMask();
3158 bool IsCDECoproc
= CoprocNo
<= 7 && (CDECoprocMask
& (1 << CoprocNo
));
3160 if (IsCDECoproc
!= WantCDE
)
3161 return Diag(CoprocArg
->getBeginLoc(), diag::err_arm_invalid_coproc
)
3162 << (int)CoprocNo
<< (int)WantCDE
<< CoprocArg
->getSourceRange();
3167 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID
, CallExpr
*TheCall
,
3168 unsigned MaxWidth
) {
3169 assert((BuiltinID
== ARM::BI__builtin_arm_ldrex
||
3170 BuiltinID
== ARM::BI__builtin_arm_ldaex
||
3171 BuiltinID
== ARM::BI__builtin_arm_strex
||
3172 BuiltinID
== ARM::BI__builtin_arm_stlex
||
3173 BuiltinID
== AArch64::BI__builtin_arm_ldrex
||
3174 BuiltinID
== AArch64::BI__builtin_arm_ldaex
||
3175 BuiltinID
== AArch64::BI__builtin_arm_strex
||
3176 BuiltinID
== AArch64::BI__builtin_arm_stlex
) &&
3177 "unexpected ARM builtin");
3178 bool IsLdrex
= BuiltinID
== ARM::BI__builtin_arm_ldrex
||
3179 BuiltinID
== ARM::BI__builtin_arm_ldaex
||
3180 BuiltinID
== AArch64::BI__builtin_arm_ldrex
||
3181 BuiltinID
== AArch64::BI__builtin_arm_ldaex
;
3183 DeclRefExpr
*DRE
=cast
<DeclRefExpr
>(TheCall
->getCallee()->IgnoreParenCasts());
3185 // Ensure that we have the proper number of arguments.
3186 if (checkArgCount(*this, TheCall
, IsLdrex
? 1 : 2))
3189 // Inspect the pointer argument of the atomic builtin. This should always be
3190 // a pointer type, whose element is an integral scalar or pointer type.
3191 // Because it is a pointer type, we don't have to worry about any implicit
3193 Expr
*PointerArg
= TheCall
->getArg(IsLdrex
? 0 : 1);
3194 ExprResult PointerArgRes
= DefaultFunctionArrayLvalueConversion(PointerArg
);
3195 if (PointerArgRes
.isInvalid())
3197 PointerArg
= PointerArgRes
.get();
3199 const PointerType
*pointerType
= PointerArg
->getType()->getAs
<PointerType
>();
3201 Diag(DRE
->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer
)
3202 << PointerArg
->getType() << PointerArg
->getSourceRange();
3206 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
3207 // task is to insert the appropriate casts into the AST. First work out just
3208 // what the appropriate type is.
3209 QualType ValType
= pointerType
->getPointeeType();
3210 QualType AddrType
= ValType
.getUnqualifiedType().withVolatile();
3212 AddrType
.addConst();
3214 // Issue a warning if the cast is dodgy.
3215 CastKind CastNeeded
= CK_NoOp
;
3216 if (!AddrType
.isAtLeastAsQualifiedAs(ValType
)) {
3217 CastNeeded
= CK_BitCast
;
3218 Diag(DRE
->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers
)
3219 << PointerArg
->getType() << Context
.getPointerType(AddrType
)
3220 << AA_Passing
<< PointerArg
->getSourceRange();
3223 // Finally, do the cast and replace the argument with the corrected version.
3224 AddrType
= Context
.getPointerType(AddrType
);
3225 PointerArgRes
= ImpCastExprToType(PointerArg
, AddrType
, CastNeeded
);
3226 if (PointerArgRes
.isInvalid())
3228 PointerArg
= PointerArgRes
.get();
3230 TheCall
->setArg(IsLdrex
? 0 : 1, PointerArg
);
3232 // In general, we allow ints, floats and pointers to be loaded and stored.
3233 if (!ValType
->isIntegerType() && !ValType
->isAnyPointerType() &&
3234 !ValType
->isBlockPointerType() && !ValType
->isFloatingType()) {
3235 Diag(DRE
->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr
)
3236 << PointerArg
->getType() << PointerArg
->getSourceRange();
3240 // But ARM doesn't have instructions to deal with 128-bit versions.
3241 if (Context
.getTypeSize(ValType
) > MaxWidth
) {
3242 assert(MaxWidth
== 64 && "Diagnostic unexpectedly inaccurate");
3243 Diag(DRE
->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size
)
3244 << PointerArg
->getType() << PointerArg
->getSourceRange();
3248 switch (ValType
.getObjCLifetime()) {
3249 case Qualifiers::OCL_None
:
3250 case Qualifiers::OCL_ExplicitNone
:
3254 case Qualifiers::OCL_Weak
:
3255 case Qualifiers::OCL_Strong
:
3256 case Qualifiers::OCL_Autoreleasing
:
3257 Diag(DRE
->getBeginLoc(), diag::err_arc_atomic_ownership
)
3258 << ValType
<< PointerArg
->getSourceRange();
3263 TheCall
->setType(ValType
);
3267 // Initialize the argument to be stored.
3268 ExprResult ValArg
= TheCall
->getArg(0);
3269 InitializedEntity Entity
= InitializedEntity::InitializeParameter(
3270 Context
, ValType
, /*consume*/ false);
3271 ValArg
= PerformCopyInitialization(Entity
, SourceLocation(), ValArg
);
3272 if (ValArg
.isInvalid())
3274 TheCall
->setArg(0, ValArg
.get());
3276 // __builtin_arm_strex always returns an int. It's marked as such in the .def,
3277 // but the custom checker bypasses all default analysis.
3278 TheCall
->setType(Context
.IntTy
);
3282 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo
&TI
, unsigned BuiltinID
,
3283 CallExpr
*TheCall
) {
3284 if (BuiltinID
== ARM::BI__builtin_arm_ldrex
||
3285 BuiltinID
== ARM::BI__builtin_arm_ldaex
||
3286 BuiltinID
== ARM::BI__builtin_arm_strex
||
3287 BuiltinID
== ARM::BI__builtin_arm_stlex
) {
3288 return CheckARMBuiltinExclusiveCall(BuiltinID
, TheCall
, 64);
3291 if (BuiltinID
== ARM::BI__builtin_arm_prefetch
) {
3292 return SemaBuiltinConstantArgRange(TheCall
, 1, 0, 1) ||
3293 SemaBuiltinConstantArgRange(TheCall
, 2, 0, 1);
3296 if (BuiltinID
== ARM::BI__builtin_arm_rsr64
||
3297 BuiltinID
== ARM::BI__builtin_arm_wsr64
)
3298 return SemaBuiltinARMSpecialReg(BuiltinID
, TheCall
, 0, 3, false);
3300 if (BuiltinID
== ARM::BI__builtin_arm_rsr
||
3301 BuiltinID
== ARM::BI__builtin_arm_rsrp
||
3302 BuiltinID
== ARM::BI__builtin_arm_wsr
||
3303 BuiltinID
== ARM::BI__builtin_arm_wsrp
)
3304 return SemaBuiltinARMSpecialReg(BuiltinID
, TheCall
, 0, 5, true);
3306 if (CheckNeonBuiltinFunctionCall(TI
, BuiltinID
, TheCall
))
3308 if (CheckMVEBuiltinFunctionCall(BuiltinID
, TheCall
))
3310 if (CheckCDEBuiltinFunctionCall(TI
, BuiltinID
, TheCall
))
3313 // For intrinsics which take an immediate value as part of the instruction,
3314 // range check them here.
3315 // FIXME: VFP Intrinsics should error if VFP not present.
3316 switch (BuiltinID
) {
3317 default: return false;
3318 case ARM::BI__builtin_arm_ssat
:
3319 return SemaBuiltinConstantArgRange(TheCall
, 1, 1, 32);
3320 case ARM::BI__builtin_arm_usat
:
3321 return SemaBuiltinConstantArgRange(TheCall
, 1, 0, 31);
3322 case ARM::BI__builtin_arm_ssat16
:
3323 return SemaBuiltinConstantArgRange(TheCall
, 1, 1, 16);
3324 case ARM::BI__builtin_arm_usat16
:
3325 return SemaBuiltinConstantArgRange(TheCall
, 1, 0, 15);
3326 case ARM::BI__builtin_arm_vcvtr_f
:
3327 case ARM::BI__builtin_arm_vcvtr_d
:
3328 return SemaBuiltinConstantArgRange(TheCall
, 1, 0, 1);
3329 case ARM::BI__builtin_arm_dmb
:
3330 case ARM::BI__builtin_arm_dsb
:
3331 case ARM::BI__builtin_arm_isb
:
3332 case ARM::BI__builtin_arm_dbg
:
3333 return SemaBuiltinConstantArgRange(TheCall
, 0, 0, 15);
3334 case ARM::BI__builtin_arm_cdp
:
3335 case ARM::BI__builtin_arm_cdp2
:
3336 case ARM::BI__builtin_arm_mcr
:
3337 case ARM::BI__builtin_arm_mcr2
:
3338 case ARM::BI__builtin_arm_mrc
:
3339 case ARM::BI__builtin_arm_mrc2
:
3340 case ARM::BI__builtin_arm_mcrr
:
3341 case ARM::BI__builtin_arm_mcrr2
:
3342 case ARM::BI__builtin_arm_mrrc
:
3343 case ARM::BI__builtin_arm_mrrc2
:
3344 case ARM::BI__builtin_arm_ldc
:
3345 case ARM::BI__builtin_arm_ldcl
:
3346 case ARM::BI__builtin_arm_ldc2
:
3347 case ARM::BI__builtin_arm_ldc2l
:
3348 case ARM::BI__builtin_arm_stc
:
3349 case ARM::BI__builtin_arm_stcl
:
3350 case ARM::BI__builtin_arm_stc2
:
3351 case ARM::BI__builtin_arm_stc2l
:
3352 return SemaBuiltinConstantArgRange(TheCall
, 0, 0, 15) ||
3353 CheckARMCoprocessorImmediate(TI
, TheCall
->getArg(0),
bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr128)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);

  if (BuiltinID == AArch64::BI__getReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);

  if (BuiltinID == AArch64::BI__break)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
  case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}

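// Illustrative sketch (hypothetical user code): the AArch64 barrier builtins
// take a 4-bit immediate, so __builtin_arm_dmb(16) is rejected by the
// [0, 15] range check above, while __builtin_arm_dmb(15) is accepted.
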
static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
  if (Arg->getType()->getAsPlaceholderType())
    return false;

  // The first argument needs to be a record field access.
  // If it is an array element access, we delay decision
  // to BPF backend to check whether the access is a
  // field access or not.
  return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
          isa<MemberExpr>(Arg->IgnoreParens()) ||
          isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
}

static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
  QualType ArgType = Arg->getType();
  if (ArgType->getAsPlaceholderType())
    return false;

  // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type
  // format:
  //   1. __builtin_preserve_type_info(*(<type> *)0, flag);
  //   2. <type> var;
  //      __builtin_preserve_type_info(var, flag);
  if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
      !isa<UnaryOperator>(Arg->IgnoreParens()))
    return false;

  // Typedef type.
  if (ArgType->getAs<TypedefType>())
    return true;

  // Record type or Enum type.
  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
  if (const auto *RT = Ty->getAs<RecordType>()) {
    if (!RT->getDecl()->getDeclName().isEmpty())
      return true;
  } else if (const auto *ET = Ty->getAs<EnumType>()) {
    if (!ET->getDecl()->getDeclName().isEmpty())
      return true;
  }

  return false;
}

static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
  QualType ArgType = Arg->getType();
  if (ArgType->getAsPlaceholderType())
    return false;

  // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
  // format:
  //   __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
  //                                 flag);
  const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
  if (!UO)
    return false;

  const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
  if (!CE)
    return false;
  if (CE->getCastKind() != CK_IntegralToPointer &&
      CE->getCastKind() != CK_NullToPointer)
    return false;

  // The integer must be from an EnumConstantDecl.
  const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
  if (!DR)
    return false;

  const EnumConstantDecl *Enumerator =
      dyn_cast<EnumConstantDecl>(DR->getDecl());
  if (!Enumerator)
    return false;

  // The type must be EnumType.
  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
  const auto *ET = Ty->getAs<EnumType>();
  if (!ET)
    return false;

  // The enum value must be supported.
  return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
}

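// Usage sketch (hypothetical user code) matching the pattern accepted above:
//   enum E { VAL = 1 };
//   __builtin_preserve_enum_value(*(enum E *)VAL, 0);
// i.e. a dereference of a C-style cast whose operand is an enumerator.
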
bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
                                       CallExpr *TheCall) {
  assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
          BuiltinID == BPF::BI__builtin_btf_type_id ||
          BuiltinID == BPF::BI__builtin_preserve_type_info ||
          BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
         "unexpected BPF builtin");

  if (checkArgCount(*this, TheCall, 2))
    return true;

  // The second argument needs to be a constant int
  Expr *Arg = TheCall->getArg(1);
  std::optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
  diag::kind kind = 0;
  if (!Value) {
    if (BuiltinID == BPF::BI__builtin_preserve_field_info)
      kind = diag::err_preserve_field_info_not_const;
    else if (BuiltinID == BPF::BI__builtin_btf_type_id)
      kind = diag::err_btf_type_id_not_const;
    else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
      kind = diag::err_preserve_type_info_not_const;
    else
      kind = diag::err_preserve_enum_value_not_const;
    Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
    return true;
  }

  // The first argument
  Arg = TheCall->getArg(0);
  bool InvalidArg = false;
  bool ReturnUnsignedInt = true;
  if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
    if (!isValidBPFPreserveFieldInfoArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_field_info_not_field;
    }
  } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
    if (!isValidBPFPreserveTypeInfoArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_type_info_invalid;
    }
  } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
    if (!isValidBPFPreserveEnumValueArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_enum_value_invalid;
    }
    ReturnUnsignedInt = false;
  } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
    ReturnUnsignedInt = false;
  }

  if (InvalidArg) {
    Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
    return true;
  }

  if (ReturnUnsignedInt)
    TheCall->setType(Context.UnsignedIntTy);
  else
    TheCall->setType(Context.UnsignedLongTy);

  return false;
}

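// Flow sketch for the checks above (hypothetical user code): with
//   struct S { int a : 4; } s;
// a call __builtin_preserve_field_info(s.a, 1) passes both checks (bit-field
// access with a constant flag), while a non-constant second argument is
// diagnosed with the *_not_const diagnostic selected above.
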
bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  struct ArgInfo {
    uint8_t OpNum;
    bool IsSigned;
    uint8_t BitWidth;
    uint8_t Align;
  };
  struct BuiltinInfo {
    unsigned BuiltinID;
    ArgInfo Infos[2];
  };

  static BuiltinInfo Infos[] = {
    { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
    { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
    { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
    { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
    { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
    { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} },
    { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} },

    { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} },

    { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractu,
      {{ 1, false, 5, 0 }, { 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractup,
      {{ 1, false, 6, 0 }, { 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insert,
      {{ 2, false, 5, 0 }, { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insertp,
      {{ 2, false, 6, 0 }, { 3, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
      {{ 2, false, 4, 0 }, { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
      {{ 2, false, 4, 0 }, { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
      {{ 2, false, 4, 0 }, { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
      {{ 2, false, 4, 0 }, { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extract,
      {{ 1, false, 5, 0 }, { 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extractp,
      {{ 1, false, 6, 0 }, { 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
      {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
      {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
      {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
      {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
      {{ 3, false, 1, 0 }} },

    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10, {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_128B,
      {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx,
      {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B,
      {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10, {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_128B,
      {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx,
      {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B,
      {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {{ 3, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B,
      {{ 3, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {{ 3, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B,
      {{ 3, false, 3, 0 }} },
  };

  // Use a dynamically initialized static to sort the table exactly once on
  // first use.
  static const bool SortOnce =
      (llvm::sort(Infos,
                  [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
                    return LHS.BuiltinID < RHS.BuiltinID;
                  }),
       true);
  (void)SortOnce;

  const BuiltinInfo *F = llvm::partition_point(
      Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
  if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
    return false;

  bool Error = false;

  for (const ArgInfo &A : F->Infos) {
    // Ignore empty ArgInfo elements.
    if (A.BitWidth == 0)
      continue;

    int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
    int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
    if (!A.Align) {
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
    } else {
      unsigned M = 1 << A.Align;
      Min *= M;
      Max *= M;
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
      Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
    }
  }
  return Error;
}

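// Worked example of the range computation above, using the table entry for
// Hexagon::BI__builtin_circ_ldw ({ 3, true, 4, 2 }): Min = -(1 << 3) = -8 and
// Max = (1 << 3) - 1 = 7 before alignment scaling; with M = 1 << 2 = 4 the
// accepted immediates for operand 3 are multiples of 4 in [-32, 28].
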
bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}

bool Sema::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
                                             unsigned BuiltinID,
                                             CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    break;
  case LoongArch::BI__builtin_loongarch_cacop_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    [[fallthrough]];
  case LoongArch::BI__builtin_loongarch_cacop_w: {
    if (BuiltinID == LoongArch::BI__builtin_loongarch_cacop_w &&
        !TI.hasFeature("32bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la32)
             << TheCall->getSourceRange();
    SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(5));
    SemaBuiltinConstantArgRange(TheCall, 2, llvm::minIntN(12),
                                llvm::maxIntN(12));
    break;
  }
  case LoongArch::BI__builtin_loongarch_crc_w_b_w:
  case LoongArch::BI__builtin_loongarch_crc_w_h_w:
  case LoongArch::BI__builtin_loongarch_crc_w_w_w:
  case LoongArch::BI__builtin_loongarch_crc_w_d_w:
  case LoongArch::BI__builtin_loongarch_crcc_w_b_w:
  case LoongArch::BI__builtin_loongarch_crcc_w_h_w:
  case LoongArch::BI__builtin_loongarch_crcc_w_w_w:
  case LoongArch::BI__builtin_loongarch_crcc_w_d_w:
  case LoongArch::BI__builtin_loongarch_iocsrrd_d:
  case LoongArch::BI__builtin_loongarch_iocsrwr_d:
  case LoongArch::BI__builtin_loongarch_asrtle_d:
  case LoongArch::BI__builtin_loongarch_asrtgt_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    break;
  case LoongArch::BI__builtin_loongarch_break:
  case LoongArch::BI__builtin_loongarch_dbar:
  case LoongArch::BI__builtin_loongarch_ibar:
  case LoongArch::BI__builtin_loongarch_syscall:
    // Check if immediate is in [0, 32767].
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 32767);
  case LoongArch::BI__builtin_loongarch_csrrd_w:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrwr_w:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrxchg_w:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrrd_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrwr_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrxchg_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 16383);
  case LoongArch::BI__builtin_loongarch_lddir_d:
  case LoongArch::BI__builtin_loongarch_ldpte_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case LoongArch::BI__builtin_loongarch_movfcsr2gr:
  case LoongArch::BI__builtin_loongarch_movgr2fcsr:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(2));
  }

  return false;
}

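// Illustrative sketch (hypothetical user code): __builtin_loongarch_break(32768)
// is rejected by the [0, 32767] check above, and the csrrd/csrwr/csrxchg
// builtins additionally require their CSR index immediate to lie in [0, 16383].
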
bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
         CheckMipsBuiltinArgument(BuiltinID, TheCall);
}

bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
                               CallExpr *TheCall) {

  if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_lwx) {
    if (!TI.hasFeature("dsp"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
  }

  if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
    if (!TI.hasFeature("dspr2"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_mips_builtin_requires_dspr2);
  }

  if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_msa_xori_b) {
    if (!TI.hasFeature("msa"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
  }

  return false;
}

// CheckMipsBuiltinArgument - Checks the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
// by the underlying instruction i.e., df/m, df/n and then by size.
//
// FIXME: The size tests here should instead be tablegen'd along with the
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics, we should
//        be too.
bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics map to) which use the
  // df/n format are handled as a special case.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually has an unsigned 5 bit field,
  // not a df/n field.
  case Mips::BI__builtin_msa_cfcmsa:
  case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_clei_u_b:
  case Mips::BI__builtin_msa_clei_u_h:
  case Mips::BI__builtin_msa_clei_u_w:
  case Mips::BI__builtin_msa_clei_u_d:
  case Mips::BI__builtin_msa_clti_u_b:
  case Mips::BI__builtin_msa_clti_u_h:
  case Mips::BI__builtin_msa_clti_u_w:
  case Mips::BI__builtin_msa_clti_u_d:
  case Mips::BI__builtin_msa_maxi_u_b:
  case Mips::BI__builtin_msa_maxi_u_h:
  case Mips::BI__builtin_msa_maxi_u_w:
  case Mips::BI__builtin_msa_maxi_u_d:
  case Mips::BI__builtin_msa_mini_u_b:
  case Mips::BI__builtin_msa_mini_u_h:
  case Mips::BI__builtin_msa_mini_u_w:
  case Mips::BI__builtin_msa_mini_u_d:
  case Mips::BI__builtin_msa_addvi_b:
  case Mips::BI__builtin_msa_addvi_h:
  case Mips::BI__builtin_msa_addvi_w:
  case Mips::BI__builtin_msa_addvi_d:
  case Mips::BI__builtin_msa_bclri_w:
  case Mips::BI__builtin_msa_bnegi_w:
  case Mips::BI__builtin_msa_bseti_w:
  case Mips::BI__builtin_msa_sat_s_w:
  case Mips::BI__builtin_msa_sat_u_w:
  case Mips::BI__builtin_msa_slli_w:
  case Mips::BI__builtin_msa_srai_w:
  case Mips::BI__builtin_msa_srari_w:
  case Mips::BI__builtin_msa_srli_w:
  case Mips::BI__builtin_msa_srlri_w:
  case Mips::BI__builtin_msa_subvi_b:
  case Mips::BI__builtin_msa_subvi_h:
  case Mips::BI__builtin_msa_subvi_w:
  case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_binsli_w:
  case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
  // These intrinsics take an unsigned 6 bit immediate.
  case Mips::BI__builtin_msa_bclri_d:
  case Mips::BI__builtin_msa_bnegi_d:
  case Mips::BI__builtin_msa_bseti_d:
  case Mips::BI__builtin_msa_sat_s_d:
  case Mips::BI__builtin_msa_sat_u_d:
  case Mips::BI__builtin_msa_slli_d:
  case Mips::BI__builtin_msa_srai_d:
  case Mips::BI__builtin_msa_srari_d:
  case Mips::BI__builtin_msa_srli_d:
  case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_msa_binsli_d:
  case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
  // These intrinsics take a signed 5 bit immediate.
  case Mips::BI__builtin_msa_ceqi_b:
  case Mips::BI__builtin_msa_ceqi_h:
  case Mips::BI__builtin_msa_ceqi_w:
  case Mips::BI__builtin_msa_ceqi_d:
  case Mips::BI__builtin_msa_clti_s_b:
  case Mips::BI__builtin_msa_clti_s_h:
  case Mips::BI__builtin_msa_clti_s_w:
  case Mips::BI__builtin_msa_clti_s_d:
  case Mips::BI__builtin_msa_clei_s_b:
  case Mips::BI__builtin_msa_clei_s_h:
  case Mips::BI__builtin_msa_clei_s_w:
  case Mips::BI__builtin_msa_clei_s_d:
  case Mips::BI__builtin_msa_maxi_s_b:
  case Mips::BI__builtin_msa_maxi_s_h:
  case Mips::BI__builtin_msa_maxi_s_w:
  case Mips::BI__builtin_msa_maxi_s_d:
  case Mips::BI__builtin_msa_mini_s_b:
  case Mips::BI__builtin_msa_mini_s_h:
  case Mips::BI__builtin_msa_mini_s_w:
  case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
  // These intrinsics take an unsigned 8 bit immediate.
  case Mips::BI__builtin_msa_andi_b:
  case Mips::BI__builtin_msa_nori_b:
  case Mips::BI__builtin_msa_ori_b:
  case Mips::BI__builtin_msa_shf_b:
  case Mips::BI__builtin_msa_shf_h:
  case Mips::BI__builtin_msa_shf_w:
  case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
  case Mips::BI__builtin_msa_bseli_b:
  case Mips::BI__builtin_msa_bmnzi_b:
  case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
  // df/n format
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_copy_s_b:
  case Mips::BI__builtin_msa_copy_u_b:
  case Mips::BI__builtin_msa_insve_b:
  case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_copy_s_h:
  case Mips::BI__builtin_msa_copy_u_h:
  case Mips::BI__builtin_msa_insve_h:
  case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 2 bit immediate.
  case Mips::BI__builtin_msa_copy_s_w:
  case Mips::BI__builtin_msa_copy_u_w:
  case Mips::BI__builtin_msa_insve_w:
  case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
  case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
  // These intrinsics take an unsigned 1 bit immediate.
  case Mips::BI__builtin_msa_copy_s_d:
  case Mips::BI__builtin_msa_copy_u_d:
  case Mips::BI__builtin_msa_insve_d:
  case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
  case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
  // Memory offsets and immediate loads.
  // These intrinsics take a signed 10 bit immediate.
  case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
  case Mips::BI__builtin_msa_ldi_h:
  case Mips::BI__builtin_msa_ldi_w:
  case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
  case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
  }

  if (!m)
    return SemaBuiltinConstantArgRange(TheCall, i, l, u);

  return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
         SemaBuiltinConstantArgMultiple(TheCall, i, m);
}

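// Worked example for the memory-offset cases above: for __builtin_msa_ld_w the
// offset operand (index 1) must be a multiple of 4 in [-2048, 2044], so an
// offset of 2045 fails the range check and 2042 fails the multiple-of-4 check.
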
/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
/// advancing the pointer over the consumed characters. The decoded type is
/// returned. If the decoded type represents a constant integer with a
/// constraint on its value then Mask is set to that value. The type descriptors
/// used in Str are specific to PPC MMA builtins and are documented in the file
/// defining the PPC builtins.
static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
                                        unsigned &Mask) {
  bool RequireICE = false;
  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  switch (*Str++) {
  case 'V':
    return Context.getVectorType(Context.UnsignedCharTy, 16,
                                 VectorType::VectorKind::AltiVecVector);
  case 'i': {
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing constant parameter constraint");
    Str = End;
    Mask = size;
    return Context.IntTy;
  }
  case 'W': {
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing PowerPC MMA type size");
    Str = End;
    QualType Type;
    switch (size) {
#define PPC_VECTOR_TYPE(typeName, Id, size) \
  case size: Type = Context.Id##Ty; break;
#include "clang/Basic/PPCTypes.def"
    default: llvm_unreachable("Invalid PowerPC MMA vector type");
    }
    bool CheckVectorArgs = false;
    while (!CheckVectorArgs) {
      switch (*Str++) {
      case '*':
        Type = Context.getPointerType(Type);
        break;
      case 'C':
        Type = Type.withConst();
        break;
      default:
        CheckVectorArgs = true;
        --Str;
        break;
      }
    }
    return Type;
  }
  default:
    return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
  }
}

static bool isPPC_64Builtin(unsigned BuiltinID) {
  // These builtins only work on PPC 64bit targets.
  switch (BuiltinID) {
  case PPC::BI__builtin_divde:
  case PPC::BI__builtin_divdeu:
  case PPC::BI__builtin_bpermd:
  case PPC::BI__builtin_pdepd:
  case PPC::BI__builtin_pextd:
  case PPC::BI__builtin_ppc_ldarx:
  case PPC::BI__builtin_ppc_stdcx:
  case PPC::BI__builtin_ppc_tdw:
  case PPC::BI__builtin_ppc_trapd:
  case PPC::BI__builtin_ppc_cmpeqb:
  case PPC::BI__builtin_ppc_setb:
  case PPC::BI__builtin_ppc_mulhd:
  case PPC::BI__builtin_ppc_mulhdu:
  case PPC::BI__builtin_ppc_maddhd:
  case PPC::BI__builtin_ppc_maddhdu:
  case PPC::BI__builtin_ppc_maddld:
  case PPC::BI__builtin_ppc_load8r:
  case PPC::BI__builtin_ppc_store8r:
  case PPC::BI__builtin_ppc_insert_exp:
  case PPC::BI__builtin_ppc_extract_sig:
  case PPC::BI__builtin_ppc_addex:
  case PPC::BI__builtin_darn:
  case PPC::BI__builtin_darn_raw:
  case PPC::BI__builtin_ppc_compare_and_swaplp:
  case PPC::BI__builtin_ppc_fetch_and_addlp:
  case PPC::BI__builtin_ppc_fetch_and_andlp:
  case PPC::BI__builtin_ppc_fetch_and_orlp:
  case PPC::BI__builtin_ppc_fetch_and_swaplp:
    return true;
  }
  return false;
}

/// Returns true if the argument consists of one contiguous run of 1s with any
/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
/// since all 1s are not contiguous.
bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
  llvm::APSInt Result;
  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
  if (Result.isShiftedMask() || (~Result).isShiftedMask())
    return false;

  return Diag(TheCall->getBeginLoc(),
              diag::err_argument_not_contiguous_bit_field)
         << ArgNum << Arg->getSourceRange();
}

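// A small sketch of the test above: 0x0000FFF0 is itself a shifted mask, and a
// wrapped run such as 0xFF0000FF is accepted because its complement 0x00FFFF00
// is a shifted mask; 0x0F0F0000 is rejected since neither it nor its complement
// is a single contiguous run of 1s.
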
bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  unsigned i = 0, l = 0, u = 0;
  bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
  llvm::APSInt Result;

  if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
    return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
           << TheCall->getSourceRange();

  switch (BuiltinID) {
  default: return false;
  case PPC::BI__builtin_altivec_crypto_vshasigmaw:
  case PPC::BI__builtin_altivec_crypto_vshasigmad:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case PPC::BI__builtin_altivec_dss:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
  case PPC::BI__builtin_tbegin:
  case PPC::BI__builtin_tend:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
  case PPC::BI__builtin_tsr:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7);
  case PPC::BI__builtin_tabortwc:
  case PPC::BI__builtin_tabortdc:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
  case PPC::BI__builtin_tabortwci:
  case PPC::BI__builtin_tabortdci:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
  // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
  // __builtin_(un)pack_longdouble are available only if long double uses IBM
  // extended double representation.
  case PPC::BI__builtin_unpack_longdouble:
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1))
      return true;
    [[fallthrough]];
  case PPC::BI__builtin_pack_longdouble:
    if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble())
      return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi)
             << "ibmlongdouble";
    return false;
  case PPC::BI__builtin_altivec_dst:
  case PPC::BI__builtin_altivec_dstt:
  case PPC::BI__builtin_altivec_dstst:
  case PPC::BI__builtin_altivec_dststt:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
  case PPC::BI__builtin_vsx_xxpermdi:
  case PPC::BI__builtin_vsx_xxsldwi:
    return SemaBuiltinVSX(TheCall);
  case PPC::BI__builtin_unpack_vector_int128:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case PPC::BI__builtin_altivec_vgnb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
  case PPC::BI__builtin_vsx_xxeval:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
  case PPC::BI__builtin_altivec_vsldbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_altivec_vsrdbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_vsx_xxpermx:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
  case PPC::BI__builtin_ppc_tw:
  case PPC::BI__builtin_ppc_tdw:
    return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
  case PPC::BI__builtin_ppc_cmprb:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
  // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
  // be a constant that represents a contiguous bit field.
  case PPC::BI__builtin_ppc_rlwnm:
    return SemaValueIsRunOfOnes(TheCall, 2);
  case PPC::BI__builtin_ppc_rlwimi:
  case PPC::BI__builtin_ppc_rldimi:
    return SemaBuiltinConstantArg(TheCall, 2, Result) ||
           SemaValueIsRunOfOnes(TheCall, 3);
  case PPC::BI__builtin_ppc_addex: {
    if (SemaBuiltinConstantArgRange(TheCall, 2, 0, 3))
      return true;
    // Output warning for reserved values 1 to 3.
    int ArgValue =
        TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue();
    if (ArgValue != 0)
      Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
          << ArgValue;
    return false;
  }
  case PPC::BI__builtin_ppc_mtfsb0:
  case PPC::BI__builtin_ppc_mtfsb1:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
  case PPC::BI__builtin_ppc_mtfsf:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255);
  case PPC::BI__builtin_ppc_mtfsfi:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case PPC::BI__builtin_ppc_alignx:
    return SemaBuiltinConstantArgPower2(TheCall, 0);
  case PPC::BI__builtin_ppc_rdlam:
    return SemaValueIsRunOfOnes(TheCall, 2);
  case PPC::BI__builtin_vsx_ldrmb:
  case PPC::BI__builtin_vsx_strmb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case PPC::BI__builtin_altivec_vcntmbb:
  case PPC::BI__builtin_altivec_vcntmbh:
  case PPC::BI__builtin_altivec_vcntmbw:
  case PPC::BI__builtin_altivec_vcntmbd:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case PPC::BI__builtin_vsx_xxgenpcvbm:
  case PPC::BI__builtin_vsx_xxgenpcvhm:
  case PPC::BI__builtin_vsx_xxgenpcvwm:
  case PPC::BI__builtin_vsx_xxgenpcvdm:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
  case PPC::BI__builtin_ppc_test_data_class: {
    // Check if the first argument of the __builtin_ppc_test_data_class call is
    // valid. The argument must be 'float', 'double' or '__float128'.
    QualType ArgType = TheCall->getArg(0)->getType();
    if (ArgType != QualType(Context.FloatTy) &&
        ArgType != QualType(Context.DoubleTy) &&
        ArgType != QualType(Context.Float128Ty))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_ppc_invalid_test_data_class_type);
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 127);
  }
  case PPC::BI__builtin_ppc_maxfe:
  case PPC::BI__builtin_ppc_minfe:
  case PPC::BI__builtin_ppc_maxfl:
  case PPC::BI__builtin_ppc_minfl:
  case PPC::BI__builtin_ppc_maxfs:
  case PPC::BI__builtin_ppc_minfs: {
    if (Context.getTargetInfo().getTriple().isOSAIX() &&
        (BuiltinID == PPC::BI__builtin_ppc_maxfe ||
         BuiltinID == PPC::BI__builtin_ppc_minfe))
      return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type)
             << "builtin" << true << 128 << QualType(Context.LongDoubleTy)
             << false << Context.getTargetInfo().getTriple().str();
    // Argument type should be exact.
    QualType ArgType = QualType(Context.LongDoubleTy);
    if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
        BuiltinID == PPC::BI__builtin_ppc_minfl)
      ArgType = QualType(Context.DoubleTy);
    else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
             BuiltinID == PPC::BI__builtin_ppc_minfs)
      ArgType = QualType(Context.FloatTy);
    for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
      if (TheCall->getArg(I)->getType() != ArgType)
        return Diag(TheCall->getBeginLoc(),
                    diag::err_typecheck_convert_incompatible)
               << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
    return false;
  }
#define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
  case PPC::BI__builtin_##Name: \
    return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types);
#include "clang/Basic/BuiltinsPPC.def"
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}

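// Illustrative sketch (hypothetical user code): on a 32-bit PowerPC target any
// builtin listed by isPPC_64Builtin(), e.g. __builtin_divde, is rejected up
// front with err_64_bit_builtin_32_bit_tgt before the per-builtin checks run.
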
// Check if the given type is a non-pointer PPC MMA type. This function is used
// in Sema to prevent invalid uses of restricted PPC MMA types.
bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
  if (Type->isPointerType() || Type->isArrayType())
    return false;

  QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
  if (false
#include "clang/Basic/PPCTypes.def"
     ) {
    Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
    return true;
  }
  return false;
}

bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // position of memory order and scope arguments in the builtin
  unsigned OrderIndex, ScopeIndex;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  default:
    return false;
  }

  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check validity of memory ordering as per C11 / C++11's memory model.
  // Only fence needs check. Atomic dec/inc allow all memory orders.
  if (!llvm::isValidAtomicOrderingCABI(Ord))
    return Diag(ArgExpr->getBeginLoc(),
                diag::warn_atomic_op_has_invalid_memory_order)
           << ArgExpr->getSourceRange();
  switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
  case llvm::AtomicOrderingCABI::relaxed:
  case llvm::AtomicOrderingCABI::consume:
    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
      return Diag(ArgExpr->getBeginLoc(),
                  diag::warn_atomic_op_has_invalid_memory_order)
             << ArgExpr->getSourceRange();
    break;
  case llvm::AtomicOrderingCABI::acquire:
  case llvm::AtomicOrderingCABI::release:
  case llvm::AtomicOrderingCABI::acq_rel:
  case llvm::AtomicOrderingCABI::seq_cst:
    break;
  }

  Arg = TheCall->getArg(ScopeIndex);
  ArgExpr = Arg.get();
  Expr::EvalResult ArgResult1;
  // Check that sync scope is a constant literal
  if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
           << ArgExpr->getType();

  return false;
}

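// Usage sketch (hypothetical user code): __builtin_amdgcn_fence requires an
// ordering stronger than relaxed/consume, so
//   __builtin_amdgcn_fence(__ATOMIC_RELAXED, "workgroup");
// is diagnosed by the ordering switch above, while __ATOMIC_SEQ_CST is accepted.
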
bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  int64_t Val = Result.getSExtValue();
  if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
         << Arg->getSourceRange();
}

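// Note (based on the RISC-V vector spec, not this file): encodings 0-3 select
// the integer LMULs and 5-7 the fractional LMULs, so 4 is the one reserved
// value in [0, 7] that the check above rejects.
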
4516 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo
&TI
,
4518 CallExpr
*TheCall
) {
4519 // CodeGenFunction can also detect this, but this gives a better error
4521 bool FeatureMissing
= false;
4522 SmallVector
<StringRef
> ReqFeatures
;
4523 StringRef Features
= Context
.BuiltinInfo
.getRequiredFeatures(BuiltinID
);
4524 Features
.split(ReqFeatures
, ',', -1, false);
4526 // Check if each required feature is included
4527 for (StringRef F
: ReqFeatures
) {
4528 SmallVector
<StringRef
> ReqOpFeatures
;
4529 F
.split(ReqOpFeatures
, '|');
4531 if (llvm::none_of(ReqOpFeatures
,
4532 [&TI
](StringRef OF
) { return TI
.hasFeature(OF
); })) {
4533 std::string FeatureStrs
;
4534 bool IsExtension
= true;
4535 for (StringRef OF
: ReqOpFeatures
) {
4536 // If the feature is 64bit, alter the string so it will print better in
4538 if (OF
== "64bit") {
4539 assert(ReqOpFeatures
.size() == 1 && "Expected '64bit' to be alone");
4541 IsExtension
= false;
4543 if (OF
== "32bit") {
4544 assert(ReqOpFeatures
.size() == 1 && "Expected '32bit' to be alone");
4546 IsExtension
= false;
4549 // Convert features like "zbr" and "experimental-zbr" to "Zbr".
4550 OF
.consume_front("experimental-");
4551 std::string FeatureStr
= OF
.str();
4552 FeatureStr
[0] = std::toupper(FeatureStr
[0]);
4554 FeatureStrs
+= FeatureStrs
.empty() ? "" : ", ";
4556 FeatureStrs
+= FeatureStr
;
4560 FeatureMissing
= true;
4561 Diag(TheCall
->getBeginLoc(), diag::err_riscv_builtin_requires_extension
)
4563 << TheCall
->getSourceRange() << StringRef(FeatureStrs
);
4570 // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
4571 // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
4572 switch (BuiltinID
) {
4575 case RISCVVector::BI__builtin_rvv_vmulhsu_vv
:
4576 case RISCVVector::BI__builtin_rvv_vmulhsu_vx
:
4577 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu
:
4578 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu
:
4579 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m
:
4580 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m
:
4581 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu
:
4582 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu
:
4583 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum
:
4584 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum
:
4585 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu
:
4586 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu
:
4587 case RISCVVector::BI__builtin_rvv_vmulhu_vv
:
4588 case RISCVVector::BI__builtin_rvv_vmulhu_vx
:
4589 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu
:
4590 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu
:
4591 case RISCVVector::BI__builtin_rvv_vmulhu_vv_m
:
4592 case RISCVVector::BI__builtin_rvv_vmulhu_vx_m
:
4593 case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu
:
4594 case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu
:
4595 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum
:
4596 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum
:
4597 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu
:
4598 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu
:
4599 case RISCVVector::BI__builtin_rvv_vmulh_vv
:
4600 case RISCVVector::BI__builtin_rvv_vmulh_vx
:
4601 case RISCVVector::BI__builtin_rvv_vmulh_vv_tu
:
4602 case RISCVVector::BI__builtin_rvv_vmulh_vx_tu
:
4603 case RISCVVector::BI__builtin_rvv_vmulh_vv_m
:
4604 case RISCVVector::BI__builtin_rvv_vmulh_vx_m
:
4605 case RISCVVector::BI__builtin_rvv_vmulh_vv_mu
:
4606 case RISCVVector::BI__builtin_rvv_vmulh_vx_mu
:
4607 case RISCVVector::BI__builtin_rvv_vmulh_vv_tum
:
4608 case RISCVVector::BI__builtin_rvv_vmulh_vx_tum
:
4609 case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu
:
4610 case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu
:
4611 case RISCVVector::BI__builtin_rvv_vsmul_vv
:
4612 case RISCVVector::BI__builtin_rvv_vsmul_vx
:
4613 case RISCVVector::BI__builtin_rvv_vsmul_vv_tu
:
4614 case RISCVVector::BI__builtin_rvv_vsmul_vx_tu
:
4615 case RISCVVector::BI__builtin_rvv_vsmul_vv_m
:
4616 case RISCVVector::BI__builtin_rvv_vsmul_vx_m
:
4617 case RISCVVector::BI__builtin_rvv_vsmul_vv_mu
:
4618 case RISCVVector::BI__builtin_rvv_vsmul_vx_mu
:
4619 case RISCVVector::BI__builtin_rvv_vsmul_vv_tum
:
4620 case RISCVVector::BI__builtin_rvv_vsmul_vx_tum
:
4621 case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu
:
4622 case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu
: {
4623 bool RequireV
= false;
4624 for (unsigned ArgNum
= 0; ArgNum
< TheCall
->getNumArgs(); ++ArgNum
)
4625 RequireV
|= TheCall
->getArg(ArgNum
)->getType()->isRVVType(
4626 /* Bitwidth */ 64, /* IsFloat */ false);
4628 if (RequireV
&& !TI
.hasFeature("v"))
4629 return Diag(TheCall
->getBeginLoc(),
4630 diag::err_riscv_builtin_requires_extension
)
4631 << /* IsExtension */ false << TheCall
->getSourceRange() << "v";
4637 switch (BuiltinID
) {
4638 case RISCVVector::BI__builtin_rvv_vsetvli
:
4639 return SemaBuiltinConstantArgRange(TheCall
, 1, 0, 3) ||
4640 CheckRISCVLMUL(TheCall
, 2);
4641 case RISCVVector::BI__builtin_rvv_vsetvlimax
:
4642 return SemaBuiltinConstantArgRange(TheCall
, 0, 0, 3) ||
4643 CheckRISCVLMUL(TheCall
, 1);
4644 case RISCVVector::BI__builtin_rvv_vget_v
: {
4645 ASTContext::BuiltinVectorTypeInfo ResVecInfo
=
4646 Context
.getBuiltinVectorTypeInfo(cast
<BuiltinType
>(
4647 TheCall
->getType().getCanonicalType().getTypePtr()));
4648 ASTContext::BuiltinVectorTypeInfo VecInfo
=
4649 Context
.getBuiltinVectorTypeInfo(cast
<BuiltinType
>(
4650 TheCall
->getArg(0)->getType().getCanonicalType().getTypePtr()));
4652 if (VecInfo
.NumVectors
!= 1) // vget for tuple type
4653 MaxIndex
= VecInfo
.NumVectors
;
4654 else // vget for non-tuple type
4655 MaxIndex
= (VecInfo
.EC
.getKnownMinValue() * VecInfo
.NumVectors
) /
4656 (ResVecInfo
.EC
.getKnownMinValue() * ResVecInfo
.NumVectors
);
4657 return SemaBuiltinConstantArgRange(TheCall
, 1, 0, MaxIndex
- 1);
4659 case RISCVVector::BI__builtin_rvv_vset_v
: {
4660 ASTContext::BuiltinVectorTypeInfo ResVecInfo
=
4661 Context
.getBuiltinVectorTypeInfo(cast
<BuiltinType
>(
4662 TheCall
->getType().getCanonicalType().getTypePtr()));
4663 ASTContext::BuiltinVectorTypeInfo VecInfo
=
4664 Context
.getBuiltinVectorTypeInfo(cast
<BuiltinType
>(
4665 TheCall
->getArg(2)->getType().getCanonicalType().getTypePtr()));
4667 if (ResVecInfo
.NumVectors
!= 1) // vset for tuple type
4668 MaxIndex
= ResVecInfo
.NumVectors
;
4669 else // vset fo non-tuple type
4670 MaxIndex
= (ResVecInfo
.EC
.getKnownMinValue() * ResVecInfo
.NumVectors
) /
4671 (VecInfo
.EC
.getKnownMinValue() * VecInfo
.NumVectors
);
4672 return SemaBuiltinConstantArgRange(TheCall
, 1, 0, MaxIndex
- 1);
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf8:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf4:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf2:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m1:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m2:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m4:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m8:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf4:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf2:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m1:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m2:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m4:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m8:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32mf2:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m1:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m2:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m4:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m8:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m1:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m2:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m4:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m8:
    // bit_27_26, bit_24_20, bit_11_7, simm5
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
  case RISCVVector::BI__builtin_rvv_sf_vc_iv_se:
    // bit_27_26, bit_11_7, vs2, simm5
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
  case RISCVVector::BI__builtin_rvv_sf_vc_v_i:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se:
    // bit_27_26, bit_24_20, simm5
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 2, -16, 15);
  case RISCVVector::BI__builtin_rvv_sf_vc_v_iv:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se:
    // bit_27_26, vs2, simm5
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 2, -16, 15);
  case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se:
  case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se:
    // bit_27_26, vd, vs2, simm5
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf8:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf4:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf2:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m1:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m2:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m4:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m8:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf4:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf2:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m1:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m2:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m4:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m8:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32mf2:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m1:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m2:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m4:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m8:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m1:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m2:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m4:
  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m8:
    // bit_27_26, bit_24_20, bit_11_7, xs1
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
  case RISCVVector::BI__builtin_rvv_sf_vc_xv_se:
  case RISCVVector::BI__builtin_rvv_sf_vc_vv_se:
    // bit_27_26, bit_11_7, vs2, xs1/vs1
  case RISCVVector::BI__builtin_rvv_sf_vc_v_x:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se:
    // bit_27_26, bit_24_20, xs1
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se:
  case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se:
  case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se:
  case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se:
    // bit_27_26, vd, vs2, xs1
  case RISCVVector::BI__builtin_rvv_sf_vc_v_xv:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_vv:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se:
    // bit_27_26, vs2, xs1/vs1
  case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se:
    // bit_27_26, vd, vs2, xs1/vs1
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
  case RISCVVector::BI__builtin_rvv_sf_vc_fv_se:
    // bit_26, bit_11_7, vs2, fs1
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se:
  case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se:
    // bit_26, vd, vs2, fs1
  case RISCVVector::BI__builtin_rvv_sf_vc_v_fv:
  case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se:
    // bit_26, vs2, fs1
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
  // Check if byteselect is in [0, 3]
  case RISCV::BI__builtin_riscv_aes32dsi:
  case RISCV::BI__builtin_riscv_aes32dsmi:
  case RISCV::BI__builtin_riscv_aes32esi:
  case RISCV::BI__builtin_riscv_aes32esmi:
  case RISCV::BI__builtin_riscv_sm4ks:
  case RISCV::BI__builtin_riscv_sm4ed:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
  // Check if rnum is in [0, 10]
  case RISCV::BI__builtin_riscv_aes64ks1i:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10);
  // Check if the value of vxrm is in [0, 3]
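  // (vxrm encodings: 0 = rnu, 1 = rne, 2 = rdn, 3 = rod.)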
  case RISCVVector::BI__builtin_rvv_vaaddu_vv:
  case RISCVVector::BI__builtin_rvv_vaaddu_vx:
  case RISCVVector::BI__builtin_rvv_vaadd_vv:
  case RISCVVector::BI__builtin_rvv_vaadd_vx:
  case RISCVVector::BI__builtin_rvv_vasubu_vv:
  case RISCVVector::BI__builtin_rvv_vasubu_vx:
  case RISCVVector::BI__builtin_rvv_vasub_vv:
  case RISCVVector::BI__builtin_rvv_vasub_vx:
  case RISCVVector::BI__builtin_rvv_vsmul_vv:
  case RISCVVector::BI__builtin_rvv_vsmul_vx:
  case RISCVVector::BI__builtin_rvv_vssra_vv:
  case RISCVVector::BI__builtin_rvv_vssra_vx:
  case RISCVVector::BI__builtin_rvv_vssrl_vv:
  case RISCVVector::BI__builtin_rvv_vssrl_vx:
  case RISCVVector::BI__builtin_rvv_vnclip_wv:
  case RISCVVector::BI__builtin_rvv_vnclip_wx:
  case RISCVVector::BI__builtin_rvv_vnclipu_wv:
  case RISCVVector::BI__builtin_rvv_vnclipu_wx:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
  case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu:
  case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu:
  case RISCVVector::BI__builtin_rvv_vaadd_vv_tu:
  case RISCVVector::BI__builtin_rvv_vaadd_vx_tu:
  case RISCVVector::BI__builtin_rvv_vasubu_vv_tu:
  case RISCVVector::BI__builtin_rvv_vasubu_vx_tu:
  case RISCVVector::BI__builtin_rvv_vasub_vv_tu:
  case RISCVVector::BI__builtin_rvv_vasub_vx_tu:
  case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
  case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
  case RISCVVector::BI__builtin_rvv_vssra_vv_tu:
  case RISCVVector::BI__builtin_rvv_vssra_vx_tu:
  case RISCVVector::BI__builtin_rvv_vssrl_vv_tu:
  case RISCVVector::BI__builtin_rvv_vssrl_vx_tu:
  case RISCVVector::BI__builtin_rvv_vnclip_wv_tu:
  case RISCVVector::BI__builtin_rvv_vnclip_wx_tu:
  case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu:
  case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu:
  case RISCVVector::BI__builtin_rvv_vaaddu_vv_m:
  case RISCVVector::BI__builtin_rvv_vaaddu_vx_m:
  case RISCVVector::BI__builtin_rvv_vaadd_vv_m:
  case RISCVVector::BI__builtin_rvv_vaadd_vx_m:
  case RISCVVector::BI__builtin_rvv_vasubu_vv_m:
  case RISCVVector::BI__builtin_rvv_vasubu_vx_m:
  case RISCVVector::BI__builtin_rvv_vasub_vv_m:
  case RISCVVector::BI__builtin_rvv_vasub_vx_m:
  case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
  case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
  case RISCVVector::BI__builtin_rvv_vssra_vv_m:
  case RISCVVector::BI__builtin_rvv_vssra_vx_m:
  case RISCVVector::BI__builtin_rvv_vssrl_vv_m:
  case RISCVVector::BI__builtin_rvv_vssrl_vx_m:
  case RISCVVector::BI__builtin_rvv_vnclip_wv_m:
  case RISCVVector::BI__builtin_rvv_vnclip_wx_m:
  case RISCVVector::BI__builtin_rvv_vnclipu_wv_m:
  case RISCVVector::BI__builtin_rvv_vnclipu_wx_m:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
  case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum:
  case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu:
  case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu:
  case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum:
  case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu:
  case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu:
  case RISCVVector::BI__builtin_rvv_vaadd_vv_tum:
  case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu:
  case RISCVVector::BI__builtin_rvv_vaadd_vv_mu:
  case RISCVVector::BI__builtin_rvv_vaadd_vx_tum:
  case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu:
  case RISCVVector::BI__builtin_rvv_vaadd_vx_mu:
  case RISCVVector::BI__builtin_rvv_vasubu_vv_tum:
  case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu:
  case RISCVVector::BI__builtin_rvv_vasubu_vv_mu:
  case RISCVVector::BI__builtin_rvv_vasubu_vx_tum:
  case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu:
  case RISCVVector::BI__builtin_rvv_vasubu_vx_mu:
  case RISCVVector::BI__builtin_rvv_vasub_vv_tum:
  case RISCVVector::BI__builtin_rvv_vasub_vv_tumu:
  case RISCVVector::BI__builtin_rvv_vasub_vv_mu:
  case RISCVVector::BI__builtin_rvv_vasub_vx_tum:
  case RISCVVector::BI__builtin_rvv_vasub_vx_tumu:
  case RISCVVector::BI__builtin_rvv_vasub_vx_mu:
  case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
  case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
  case RISCVVector::BI__builtin_rvv_vssra_vv_mu:
  case RISCVVector::BI__builtin_rvv_vssra_vx_mu:
  case RISCVVector::BI__builtin_rvv_vssrl_vv_mu:
  case RISCVVector::BI__builtin_rvv_vssrl_vx_mu:
  case RISCVVector::BI__builtin_rvv_vnclip_wv_mu:
  case RISCVVector::BI__builtin_rvv_vnclip_wx_mu:
  case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu:
  case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu:
  case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
  case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
  case RISCVVector::BI__builtin_rvv_vssra_vv_tum:
  case RISCVVector::BI__builtin_rvv_vssra_vx_tum:
  case RISCVVector::BI__builtin_rvv_vssrl_vv_tum:
  case RISCVVector::BI__builtin_rvv_vssrl_vx_tum:
  case RISCVVector::BI__builtin_rvv_vnclip_wv_tum:
  case RISCVVector::BI__builtin_rvv_vnclip_wx_tum:
  case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum:
  case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum:
  case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
  case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu:
  case RISCVVector::BI__builtin_rvv_vssra_vv_tumu:
  case RISCVVector::BI__builtin_rvv_vssra_vx_tumu:
  case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu:
  case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu:
  case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu:
  case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu:
  case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
  case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
    return SemaBuiltinConstantArgRange(TheCall, 4, 0, 3);
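  // The _rm builtins below take an explicit frm (floating-point rounding mode)
  // immediate, which must be in [0, 4]: 0 = rne, 1 = rtz, 2 = rdn, 3 = rup,
  // 4 = rmm.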
  case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm:
  case RISCVVector::BI__builtin_rvv_vfrec7_v_rm:
  case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm:
  case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm:
  case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm:
  case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm:
  case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm:
  case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm:
  case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm:
  case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 4);
  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfsub_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfsub_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm:
  case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm:
  case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm:
  case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm:
  case RISCVVector::BI__builtin_rvv_vfmul_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfmul_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm:
  case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm:
  case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm:
  case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm:
  case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m:
  case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m:
  case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m:
  case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m:
  case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m:
  case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m:
  case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m:
  case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 4);
  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm:
  case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm:
  case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu:
  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m:
  case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m:
  case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 4);
  case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m:
  case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m:
  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum:
  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu:
  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu:
  case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu:
    return SemaBuiltinConstantArgRange(TheCall, 4, 0, 4);
  case RISCV::BI__builtin_riscv_ntl_load:
  case RISCV::BI__builtin_riscv_ntl_store:
    DeclRefExpr *DRE =
        cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store ||
            BuiltinID == RISCV::BI__builtin_riscv_ntl_load) &&
           "Unexpected RISC-V nontemporal load/store builtin!");
    bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store;
    unsigned NumArgs = IsStore ? 3 : 2;

    if (checkArgCountAtLeast(*this, TheCall, NumArgs - 1))
      return true;

    if (checkArgCountAtMost(*this, TheCall, NumArgs))
      return true;

    // Domain value should be compile-time constant.
    // 2 <= domain <= 5
    if (TheCall->getNumArgs() == NumArgs &&
        SemaBuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5))
      return true;
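    // For illustration: __builtin_riscv_ntl_load(p) (domain omitted) and
    // __builtin_riscv_ntl_load(p, 2) are accepted, while a domain value of 7
    // is rejected by the range check above.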

    Expr *PointerArg = TheCall->getArg(0);
    ExprResult PointerArgResult =
        DefaultFunctionArrayLvalueConversion(PointerArg);

    if (PointerArgResult.isInvalid())
      return true;
    PointerArg = PointerArgResult.get();

    const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>();
    if (!PtrType) {
      Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
          << PointerArg->getType() << PointerArg->getSourceRange();
      return true;
    }

    QualType ValType = PtrType->getPointeeType();
    ValType = ValType.getUnqualifiedType();
    if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
        !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
        !ValType->isVectorType() && !ValType->isRVVType()) {
      Diag(DRE->getBeginLoc(),
           diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
          << PointerArg->getType() << PointerArg->getSourceRange();
      return true;
    }

    if (!IsStore) {
      TheCall->setType(ValType);
      return false;
    }

    ExprResult ValArg = TheCall->getArg(1);
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        Context, ValType, /*consume*/ false);
    ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
    if (ValArg.isInvalid())
      return true;

    TheCall->setArg(1, ValArg.get());
    TheCall->setType(Context.VoidTy);
    return false;
  }

  return false;
}

bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (BuiltinID == SystemZ::BI__builtin_tabort) {
    Expr *Arg = TheCall->getArg(0);
    if (std::optional<llvm::APSInt> AbortCode =
            Arg->getIntegerConstantExpr(Context))
      if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
        return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
               << Arg->getSourceRange();
  }

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_verimb:
  case SystemZ::BI__builtin_s390_verimh:
  case SystemZ::BI__builtin_s390_verimf:
  case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
  case SystemZ::BI__builtin_s390_vfaeb:
  case SystemZ::BI__builtin_s390_vfaeh:
  case SystemZ::BI__builtin_s390_vfaef:
  case SystemZ::BI__builtin_s390_vfaebs:
  case SystemZ::BI__builtin_s390_vfaehs:
  case SystemZ::BI__builtin_s390_vfaefs:
  case SystemZ::BI__builtin_s390_vfaezb:
  case SystemZ::BI__builtin_s390_vfaezh:
  case SystemZ::BI__builtin_s390_vfaezf:
  case SystemZ::BI__builtin_s390_vfaezbs:
  case SystemZ::BI__builtin_s390_vfaezhs:
  case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfisb:
  case SystemZ::BI__builtin_s390_vfidb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case SystemZ::BI__builtin_s390_vftcisb:
  case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
  case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vstrcb:
  case SystemZ::BI__builtin_s390_vstrch:
  case SystemZ::BI__builtin_s390_vstrcf:
  case SystemZ::BI__builtin_s390_vstrczb:
  case SystemZ::BI__builtin_s390_vstrczh:
  case SystemZ::BI__builtin_s390_vstrczf:
  case SystemZ::BI__builtin_s390_vstrcbs:
  case SystemZ::BI__builtin_s390_vstrchs:
  case SystemZ::BI__builtin_s390_vstrcfs:
  case SystemZ::BI__builtin_s390_vstrczbs:
  case SystemZ::BI__builtin_s390_vstrczhs:
  case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfminsb:
  case SystemZ::BI__builtin_s390_vfmaxsb:
  case SystemZ::BI__builtin_s390_vfmindb:
  case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vclfnhs:
  case SystemZ::BI__builtin_s390_vclfnls:
  case SystemZ::BI__builtin_s390_vcfn:
  case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}

bool Sema::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
                                               unsigned BuiltinID,
                                               CallExpr *TheCall) {
  switch (BuiltinID) {
  case WebAssembly::BI__builtin_wasm_ref_null_extern:
    return BuiltinWasmRefNullExtern(TheCall);
  case WebAssembly::BI__builtin_wasm_ref_null_func:
    return BuiltinWasmRefNullFunc(TheCall);
  case WebAssembly::BI__builtin_wasm_table_get:
    return BuiltinWasmTableGet(TheCall);
  case WebAssembly::BI__builtin_wasm_table_set:
    return BuiltinWasmTableSet(TheCall);
  case WebAssembly::BI__builtin_wasm_table_size:
    return BuiltinWasmTableSize(TheCall);
  case WebAssembly::BI__builtin_wasm_table_grow:
    return BuiltinWasmTableGrow(TheCall);
  case WebAssembly::BI__builtin_wasm_table_fill:
    return BuiltinWasmTableFill(TheCall);
  case WebAssembly::BI__builtin_wasm_table_copy:
    return BuiltinWasmTableCopy(TheCall);
  }

  return false;
}

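// For example, a declaration of type vfloat16m1_t is diagnosed below with
// err_riscv_type_requires_extension unless the "zvfh" feature is enabled, and
// any RVV type requires at least "zve32x".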
void Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
  const TargetInfo &TI = Context.getTargetInfo();
  // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) require at
  // least zve64x.
  if ((Ty->isRVVType(/* Bitwidth */ 64, /* IsFloat */ false) ||
       Ty->isRVVType(/* ElementCount */ 1)) &&
      !TI.hasFeature("zve64x"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
  if (Ty->isRVVType(/* Bitwidth */ 16, /* IsFloat */ true) &&
      !TI.hasFeature("zvfh"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfh";
  if (Ty->isRVVType(/* Bitwidth */ 32, /* IsFloat */ true) &&
      !TI.hasFeature("zve32f"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
  if (Ty->isRVVType(/* Bitwidth */ 64, /* IsFloat */ true) &&
      !TI.hasFeature("zve64d"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
  // The caller already checked isRVVType() before calling this function, so
  // if we do not have at least zve32x supported, we need to emit an error.
  if (!TI.hasFeature("zve32x"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
}

bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
                                         unsigned BuiltinID,
                                         CallExpr *TheCall) {
  switch (BuiltinID) {
  case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
  case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
  case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
  case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
    return checkArgCountAtMost(*this, TheCall, 3);
  }

  return false;
}

/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
/// This checks that the target supports __builtin_cpu_supports and
/// that the string argument is constant and valid.
static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI,
                                   CallExpr *TheCall) {
  Expr *Arg = TheCall->getArg(0);

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the contents of the string.
  StringRef Feature =
      cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  if (!TI.validateCpuSupports(Feature))
    return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
           << Arg->getSourceRange();
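  // For illustration: on an X86 target, __builtin_cpu_supports("avx2") reaches
  // this point successfully, while an unknown string such as
  // __builtin_cpu_supports("not-a-feature") is rejected by the check above.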
  return false;
}

/// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
/// This checks that the target supports __builtin_cpu_is and
/// that the string argument is constant and valid.
static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) {
  Expr *Arg = TheCall->getArg(0);

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the contents of the string.
  StringRef Feature =
      cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  if (!TI.validateCpuIs(Feature))
    return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
           << Arg->getSourceRange();
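  // Similarly, __builtin_cpu_is("corei7") names a CPU known to
  // validateCpuIs(), whereas a misspelled CPU name is rejected above.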
  return false;
}

// Check if the rounding mode is legal.
bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
  // Indicates if this instruction has rounding control or just SAE.
  bool HasRC = false;

  unsigned ArgNum = 0;
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_vcvttsd2si32:
  case X86::BI__builtin_ia32_vcvttsd2si64:
  case X86::BI__builtin_ia32_vcvttsd2usi32:
  case X86::BI__builtin_ia32_vcvttsd2usi64:
  case X86::BI__builtin_ia32_vcvttss2si32:
  case X86::BI__builtin_ia32_vcvttss2si64:
  case X86::BI__builtin_ia32_vcvttss2usi32:
  case X86::BI__builtin_ia32_vcvttss2usi64:
  case X86::BI__builtin_ia32_vcvttsh2si32:
  case X86::BI__builtin_ia32_vcvttsh2si64:
  case X86::BI__builtin_ia32_vcvttsh2usi32:
  case X86::BI__builtin_ia32_vcvttsh2usi64:
    ArgNum = 1;
    break;
  case X86::BI__builtin_ia32_maxpd512:
  case X86::BI__builtin_ia32_maxps512:
  case X86::BI__builtin_ia32_minpd512:
  case X86::BI__builtin_ia32_minps512:
  case X86::BI__builtin_ia32_maxph512:
  case X86::BI__builtin_ia32_minph512:
    ArgNum = 2;
    break;
  case X86::BI__builtin_ia32_vcvtph2pd512_mask:
  case X86::BI__builtin_ia32_vcvtph2psx512_mask:
  case X86::BI__builtin_ia32_cvtps2pd512_mask:
  case X86::BI__builtin_ia32_cvttpd2dq512_mask:
  case X86::BI__builtin_ia32_cvttpd2qq512_mask:
  case X86::BI__builtin_ia32_cvttpd2udq512_mask:
  case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
  case X86::BI__builtin_ia32_cvttps2dq512_mask:
  case X86::BI__builtin_ia32_cvttps2qq512_mask:
  case X86::BI__builtin_ia32_cvttps2udq512_mask:
  case X86::BI__builtin_ia32_cvttps2uqq512_mask:
  case X86::BI__builtin_ia32_vcvttph2w512_mask:
  case X86::BI__builtin_ia32_vcvttph2uw512_mask:
  case X86::BI__builtin_ia32_vcvttph2dq512_mask:
  case X86::BI__builtin_ia32_vcvttph2udq512_mask:
  case X86::BI__builtin_ia32_vcvttph2qq512_mask:
  case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
  case X86::BI__builtin_ia32_exp2pd_mask:
  case X86::BI__builtin_ia32_exp2ps_mask:
  case X86::BI__builtin_ia32_getexppd512_mask:
  case X86::BI__builtin_ia32_getexpps512_mask:
  case X86::BI__builtin_ia32_getexpph512_mask:
  case X86::BI__builtin_ia32_rcp28pd_mask:
  case X86::BI__builtin_ia32_rcp28ps_mask:
  case X86::BI__builtin_ia32_rsqrt28pd_mask:
  case X86::BI__builtin_ia32_rsqrt28ps_mask:
  case X86::BI__builtin_ia32_vcomisd:
  case X86::BI__builtin_ia32_vcomiss:
  case X86::BI__builtin_ia32_vcomish:
  case X86::BI__builtin_ia32_vcvtph2ps512_mask:
    ArgNum = 3;
    break;
  case X86::BI__builtin_ia32_cmppd512_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmpsd_mask:
  case X86::BI__builtin_ia32_cmpss_mask:
  case X86::BI__builtin_ia32_cmpsh_mask:
  case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
  case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
  case X86::BI__builtin_ia32_cvtss2sd_round_mask:
  case X86::BI__builtin_ia32_getexpsd128_round_mask:
  case X86::BI__builtin_ia32_getexpss128_round_mask:
  case X86::BI__builtin_ia32_getexpsh128_round_mask:
  case X86::BI__builtin_ia32_getmantpd512_mask:
  case X86::BI__builtin_ia32_getmantps512_mask:
  case X86::BI__builtin_ia32_getmantph512_mask:
  case X86::BI__builtin_ia32_maxsd_round_mask:
  case X86::BI__builtin_ia32_maxss_round_mask:
  case X86::BI__builtin_ia32_maxsh_round_mask:
  case X86::BI__builtin_ia32_minsd_round_mask:
  case X86::BI__builtin_ia32_minss_round_mask:
  case X86::BI__builtin_ia32_minsh_round_mask:
  case X86::BI__builtin_ia32_rcp28sd_round_mask:
  case X86::BI__builtin_ia32_rcp28ss_round_mask:
  case X86::BI__builtin_ia32_reducepd512_mask:
  case X86::BI__builtin_ia32_reduceps512_mask:
  case X86::BI__builtin_ia32_reduceph512_mask:
  case X86::BI__builtin_ia32_rndscalepd_mask:
  case X86::BI__builtin_ia32_rndscaleps_mask:
  case X86::BI__builtin_ia32_rndscaleph_mask:
  case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
  case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
    ArgNum = 4;
    break;
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_getmantsd_round_mask:
  case X86::BI__builtin_ia32_getmantss_round_mask:
  case X86::BI__builtin_ia32_getmantsh_round_mask:
  case X86::BI__builtin_ia32_rangepd512_mask:
  case X86::BI__builtin_ia32_rangeps512_mask:
  case X86::BI__builtin_ia32_rangesd128_round_mask:
  case X86::BI__builtin_ia32_rangess128_round_mask:
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_reducesh_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
  case X86::BI__builtin_ia32_rndscalesh_round_mask:
    ArgNum = 5;
    break;
  case X86::BI__builtin_ia32_vcvtsd2si64:
  case X86::BI__builtin_ia32_vcvtsd2si32:
  case X86::BI__builtin_ia32_vcvtsd2usi32:
  case X86::BI__builtin_ia32_vcvtsd2usi64:
  case X86::BI__builtin_ia32_vcvtss2si32:
  case X86::BI__builtin_ia32_vcvtss2si64:
  case X86::BI__builtin_ia32_vcvtss2usi32:
  case X86::BI__builtin_ia32_vcvtss2usi64:
  case X86::BI__builtin_ia32_vcvtsh2si32:
  case X86::BI__builtin_ia32_vcvtsh2si64:
  case X86::BI__builtin_ia32_vcvtsh2usi32:
  case X86::BI__builtin_ia32_vcvtsh2usi64:
  case X86::BI__builtin_ia32_sqrtpd512:
  case X86::BI__builtin_ia32_sqrtps512:
  case X86::BI__builtin_ia32_sqrtph512:
    ArgNum = 1;
    HasRC = true;
    break;
  case X86::BI__builtin_ia32_addph512:
  case X86::BI__builtin_ia32_divph512:
  case X86::BI__builtin_ia32_mulph512:
  case X86::BI__builtin_ia32_subph512:
  case X86::BI__builtin_ia32_addpd512:
  case X86::BI__builtin_ia32_addps512:
  case X86::BI__builtin_ia32_divpd512:
  case X86::BI__builtin_ia32_divps512:
  case X86::BI__builtin_ia32_mulpd512:
  case X86::BI__builtin_ia32_mulps512:
  case X86::BI__builtin_ia32_subpd512:
  case X86::BI__builtin_ia32_subps512:
  case X86::BI__builtin_ia32_cvtsi2sd64:
  case X86::BI__builtin_ia32_cvtsi2ss32:
  case X86::BI__builtin_ia32_cvtsi2ss64:
  case X86::BI__builtin_ia32_cvtusi2sd64:
  case X86::BI__builtin_ia32_cvtusi2ss32:
  case X86::BI__builtin_ia32_cvtusi2ss64:
  case X86::BI__builtin_ia32_vcvtusi2sh:
  case X86::BI__builtin_ia32_vcvtusi642sh:
  case X86::BI__builtin_ia32_vcvtsi2sh:
  case X86::BI__builtin_ia32_vcvtsi642sh:
    ArgNum = 2;
    HasRC = true;
    break;
  case X86::BI__builtin_ia32_cvtdq2ps512_mask:
  case X86::BI__builtin_ia32_cvtudq2ps512_mask:
  case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
  case X86::BI__builtin_ia32_vcvtps2phx512_mask:
  case X86::BI__builtin_ia32_cvtpd2ps512_mask:
  case X86::BI__builtin_ia32_cvtpd2dq512_mask:
  case X86::BI__builtin_ia32_cvtpd2qq512_mask:
  case X86::BI__builtin_ia32_cvtpd2udq512_mask:
  case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
  case X86::BI__builtin_ia32_cvtps2dq512_mask:
  case X86::BI__builtin_ia32_cvtps2qq512_mask:
  case X86::BI__builtin_ia32_cvtps2udq512_mask:
  case X86::BI__builtin_ia32_cvtps2uqq512_mask:
  case X86::BI__builtin_ia32_cvtqq2pd512_mask:
  case X86::BI__builtin_ia32_cvtqq2ps512_mask:
  case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
  case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
  case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
  case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
  case X86::BI__builtin_ia32_vcvtw2ph512_mask:
  case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
  case X86::BI__builtin_ia32_vcvtph2w512_mask:
  case X86::BI__builtin_ia32_vcvtph2uw512_mask:
  case X86::BI__builtin_ia32_vcvtph2dq512_mask:
  case X86::BI__builtin_ia32_vcvtph2udq512_mask:
  case X86::BI__builtin_ia32_vcvtph2qq512_mask:
  case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
  case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
  case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
    ArgNum = 3;
    HasRC = true;
    break;
  case X86::BI__builtin_ia32_addsh_round_mask:
  case X86::BI__builtin_ia32_addss_round_mask:
  case X86::BI__builtin_ia32_addsd_round_mask:
  case X86::BI__builtin_ia32_divsh_round_mask:
  case X86::BI__builtin_ia32_divss_round_mask:
  case X86::BI__builtin_ia32_divsd_round_mask:
  case X86::BI__builtin_ia32_mulsh_round_mask:
  case X86::BI__builtin_ia32_mulss_round_mask:
  case X86::BI__builtin_ia32_mulsd_round_mask:
  case X86::BI__builtin_ia32_subsh_round_mask:
  case X86::BI__builtin_ia32_subss_round_mask:
  case X86::BI__builtin_ia32_subsd_round_mask:
  case X86::BI__builtin_ia32_scalefph512_mask:
  case X86::BI__builtin_ia32_scalefpd512_mask:
  case X86::BI__builtin_ia32_scalefps512_mask:
  case X86::BI__builtin_ia32_scalefsd_round_mask:
  case X86::BI__builtin_ia32_scalefss_round_mask:
  case X86::BI__builtin_ia32_scalefsh_round_mask:
  case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
  case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
  case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
  case X86::BI__builtin_ia32_sqrtsd_round_mask:
  case X86::BI__builtin_ia32_sqrtss_round_mask:
  case X86::BI__builtin_ia32_sqrtsh_round_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_maskz:
  case X86::BI__builtin_ia32_vfmaddsd3_mask3:
  case X86::BI__builtin_ia32_vfmaddss3_mask:
  case X86::BI__builtin_ia32_vfmaddss3_maskz:
  case X86::BI__builtin_ia32_vfmaddss3_mask3:
  case X86::BI__builtin_ia32_vfmaddsh3_mask:
  case X86::BI__builtin_ia32_vfmaddsh3_maskz:
  case X86::BI__builtin_ia32_vfmaddsh3_mask3:
  case X86::BI__builtin_ia32_vfmaddpd512_mask:
  case X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddps512_mask:
  case X86::BI__builtin_ia32_vfmaddps512_maskz:
  case X86::BI__builtin_ia32_vfmaddps512_mask3:
  case X86::BI__builtin_ia32_vfmsubps512_mask3:
  case X86::BI__builtin_ia32_vfmaddph512_mask:
  case X86::BI__builtin_ia32_vfmaddph512_maskz:
  case X86::BI__builtin_ia32_vfmaddph512_mask3:
  case X86::BI__builtin_ia32_vfmsubph512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask:
  case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
  case X86::BI__builtin_ia32_vfmaddcsh_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfmaddcph512_mask:
  case X86::BI__builtin_ia32_vfmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfcmaddcsh_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask:
  case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfmulcsh_mask:
  case X86::BI__builtin_ia32_vfmulcph512_mask:
  case X86::BI__builtin_ia32_vfcmulcsh_mask:
  case X86::BI__builtin_ia32_vfcmulcph512_mask:
    ArgNum = 4;
    HasRC = true;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Make sure the rounding mode is either ROUND_CUR_DIRECTION or the
  // ROUND_NO_EXC bit is set. If the intrinsic has rounding control (bits 1:0),
  // make sure it's only combined with ROUND_NO_EXC. If the intrinsic does not
  // have rounding control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
  if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
      Result == 8/*ROUND_NO_EXC*/ ||
      (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
      (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
    return false;

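  // Anything else is rejected below; e.g. an immediate of 9 (ROUND_TO_NEG_INF
  // combined with ROUND_NO_EXC) is accepted when the instruction has rounding
  // control, while a bare 1 (ROUND_TO_NEG_INF alone) is not.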
  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
         << Arg->getSourceRange();
}

// Check if the gather/scatter scale is legal.
bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                             CallExpr *TheCall) {
  unsigned ArgNum = 0;
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    ArgNum = 3;
    break;
  case X86::BI__builtin_ia32_gatherd_pd:
  case X86::BI__builtin_ia32_gatherd_pd256:
  case X86::BI__builtin_ia32_gatherq_pd:
  case X86::BI__builtin_ia32_gatherq_pd256:
  case X86::BI__builtin_ia32_gatherd_ps:
  case X86::BI__builtin_ia32_gatherd_ps256:
  case X86::BI__builtin_ia32_gatherq_ps:
  case X86::BI__builtin_ia32_gatherq_ps256:
  case X86::BI__builtin_ia32_gatherd_q:
  case X86::BI__builtin_ia32_gatherd_q256:
  case X86::BI__builtin_ia32_gatherq_q:
  case X86::BI__builtin_ia32_gatherq_q256:
  case X86::BI__builtin_ia32_gatherd_d:
  case X86::BI__builtin_ia32_gatherd_d256:
  case X86::BI__builtin_ia32_gatherq_d:
  case X86::BI__builtin_ia32_gatherq_d256:
  case X86::BI__builtin_ia32_gather3div2df:
  case X86::BI__builtin_ia32_gather3div2di:
  case X86::BI__builtin_ia32_gather3div4df:
  case X86::BI__builtin_ia32_gather3div4di:
  case X86::BI__builtin_ia32_gather3div4sf:
  case X86::BI__builtin_ia32_gather3div4si:
  case X86::BI__builtin_ia32_gather3div8sf:
  case X86::BI__builtin_ia32_gather3div8si:
  case X86::BI__builtin_ia32_gather3siv2df:
  case X86::BI__builtin_ia32_gather3siv2di:
  case X86::BI__builtin_ia32_gather3siv4df:
  case X86::BI__builtin_ia32_gather3siv4di:
  case X86::BI__builtin_ia32_gather3siv4sf:
  case X86::BI__builtin_ia32_gather3siv4si:
  case X86::BI__builtin_ia32_gather3siv8sf:
  case X86::BI__builtin_ia32_gather3siv8si:
  case X86::BI__builtin_ia32_gathersiv8df:
  case X86::BI__builtin_ia32_gathersiv16sf:
  case X86::BI__builtin_ia32_gatherdiv8df:
  case X86::BI__builtin_ia32_gatherdiv16sf:
  case X86::BI__builtin_ia32_gathersiv8di:
  case X86::BI__builtin_ia32_gathersiv16si:
  case X86::BI__builtin_ia32_gatherdiv8di:
  case X86::BI__builtin_ia32_gatherdiv16si:
  case X86::BI__builtin_ia32_scatterdiv2df:
  case X86::BI__builtin_ia32_scatterdiv2di:
  case X86::BI__builtin_ia32_scatterdiv4df:
  case X86::BI__builtin_ia32_scatterdiv4di:
  case X86::BI__builtin_ia32_scatterdiv4sf:
  case X86::BI__builtin_ia32_scatterdiv4si:
  case X86::BI__builtin_ia32_scatterdiv8sf:
  case X86::BI__builtin_ia32_scatterdiv8si:
  case X86::BI__builtin_ia32_scattersiv2df:
  case X86::BI__builtin_ia32_scattersiv2di:
  case X86::BI__builtin_ia32_scattersiv4df:
  case X86::BI__builtin_ia32_scattersiv4di:
  case X86::BI__builtin_ia32_scattersiv4sf:
  case X86::BI__builtin_ia32_scattersiv4si:
  case X86::BI__builtin_ia32_scattersiv8sf:
  case X86::BI__builtin_ia32_scattersiv8si:
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

5916 return Diag(TheCall
->getBeginLoc(), diag::err_x86_builtin_invalid_scale
)
5917 << Arg
->getSourceRange();
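
// For illustration (editor's sketch, not from the upstream sources): the scale
// operand of the gather/scatter intrinsics is a trailing immediate, and only
// the values a SIB byte can encode are accepted. Something like
//   _mm512_i32gather_pd(index, base, 8);   // scale 8: accepted
//   _mm512_i32gather_pd(index, base, 3);   // scale 3: err_x86_builtin_invalid_scale
// (intrinsic spelling shown for illustration; the check itself only sees the
// underlying __builtin_ia32_* call).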

enum { TileRegLow = 0, TileRegHigh = 7 };

bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
                                             ArrayRef<int> ArgNums) {
  for (int ArgNum : ArgNums) {
    if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
      return true;
  }
  return false;
}

bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
                                        ArrayRef<int> ArgNums) {
  // The maximum number of tile registers is TileRegHigh + 1, so use one bit
  // per register in the bitset to track which registers have been used.
  std::bitset<TileRegHigh + 1> ArgValues;
  for (int ArgNum : ArgNums) {
    Expr *Arg = TheCall->getArg(ArgNum);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
      return true;
    int ArgExtValue = Result.getExtValue();
    assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
           "Incorrect tile register num.");
    if (ArgValues.test(ArgExtValue))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_x86_builtin_tile_arg_duplicate)
             << TheCall->getArg(ArgNum)->getSourceRange();
    ArgValues.set(ArgExtValue);
  }
  return false;
}

bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
                                                ArrayRef<int> ArgNums) {
  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
         CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
}

bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
  case X86::BI__builtin_ia32_tdpfp16ps:
  case X86::BI__builtin_ia32_tcmmimfp16ps:
  case X86::BI__builtin_ia32_tcmmrlfp16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}
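
// For illustration (editor's sketch, not from the upstream sources): AMX tile
// operands must be immediates in [0, 7] and, for the three-operand dot-product
// builtins, pairwise distinct. With the AMX intrinsic wrappers,
// _tile_dpbssd(0, 1, 2) is fine, while _tile_dpbssd(1, 1, 2) is rejected with
// err_x86_builtin_tile_arg_duplicate (the checks run on the underlying
// __builtin_ia32_tdpbssd call).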

static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
  const llvm::Triple &TT = TI.getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE make sure it's valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate make sure it's valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // If the intrinsic has tile arguments, make sure they are valid.
  if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  int i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_vec_ext_v2si:
  case X86::BI__builtin_ia32_vec_ext_v2di:
  case X86::BI__builtin_ia32_vextractf128_pd256:
  case X86::BI__builtin_ia32_vextractf128_ps256:
  case X86::BI__builtin_ia32_vextractf128_si256:
  case X86::BI__builtin_ia32_extract128i256:
  case X86::BI__builtin_ia32_extractf64x4_mask:
  case X86::BI__builtin_ia32_extracti64x4_mask:
  case X86::BI__builtin_ia32_extractf32x8_mask:
  case X86::BI__builtin_ia32_extracti32x8_mask:
  case X86::BI__builtin_ia32_extractf64x2_256_mask:
  case X86::BI__builtin_ia32_extracti64x2_256_mask:
  case X86::BI__builtin_ia32_extractf32x4_256_mask:
  case X86::BI__builtin_ia32_extracti32x4_256_mask:
    i = 1; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vec_set_v2di:
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
    i = 2; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vpermilpd:
  case X86::BI__builtin_ia32_vec_ext_v4hi:
  case X86::BI__builtin_ia32_vec_ext_v4si:
  case X86::BI__builtin_ia32_vec_ext_v4sf:
  case X86::BI__builtin_ia32_vec_ext_v4di:
  case X86::BI__builtin_ia32_extractf32x4_mask:
  case X86::BI__builtin_ia32_extracti32x4_mask:
  case X86::BI__builtin_ia32_extractf64x2_512_mask:
  case X86::BI__builtin_ia32_extracti64x2_512_mask:
    i = 1; l = 0; u = 3;
    break;
  case X86::BI_mm_prefetch:
  case X86::BI__builtin_ia32_vec_ext_v8hi:
  case X86::BI__builtin_ia32_vec_ext_v8si:
    i = 1; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_sha1rnds4:
  case X86::BI__builtin_ia32_blendpd:
  case X86::BI__builtin_ia32_shufpd:
  case X86::BI__builtin_ia32_vec_set_v4hi:
  case X86::BI__builtin_ia32_vec_set_v4si:
  case X86::BI__builtin_ia32_vec_set_v4di:
  case X86::BI__builtin_ia32_shuf_f32x4_256:
  case X86::BI__builtin_ia32_shuf_f64x2_256:
  case X86::BI__builtin_ia32_shuf_i32x4_256:
  case X86::BI__builtin_ia32_shuf_i64x2_256:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512:
  case X86::BI__builtin_ia32_insertf32x4:
  case X86::BI__builtin_ia32_inserti32x4:
    i = 2; l = 0; u = 3;
    break;
  case X86::BI__builtin_ia32_vpermil2pd:
  case X86::BI__builtin_ia32_vpermil2pd256:
  case X86::BI__builtin_ia32_vpermil2ps:
  case X86::BI__builtin_ia32_vpermil2ps256:
    i = 3; l = 0; u = 3;
    break;
  case X86::BI__builtin_ia32_cmpb128_mask:
  case X86::BI__builtin_ia32_cmpw128_mask:
  case X86::BI__builtin_ia32_cmpd128_mask:
  case X86::BI__builtin_ia32_cmpq128_mask:
  case X86::BI__builtin_ia32_cmpb256_mask:
  case X86::BI__builtin_ia32_cmpw256_mask:
  case X86::BI__builtin_ia32_cmpd256_mask:
  case X86::BI__builtin_ia32_cmpq256_mask:
  case X86::BI__builtin_ia32_cmpb512_mask:
  case X86::BI__builtin_ia32_cmpw512_mask:
  case X86::BI__builtin_ia32_cmpd512_mask:
  case X86::BI__builtin_ia32_cmpq512_mask:
  case X86::BI__builtin_ia32_ucmpb128_mask:
  case X86::BI__builtin_ia32_ucmpw128_mask:
  case X86::BI__builtin_ia32_ucmpd128_mask:
  case X86::BI__builtin_ia32_ucmpq128_mask:
  case X86::BI__builtin_ia32_ucmpb256_mask:
  case X86::BI__builtin_ia32_ucmpw256_mask:
  case X86::BI__builtin_ia32_ucmpd256_mask:
  case X86::BI__builtin_ia32_ucmpq256_mask:
  case X86::BI__builtin_ia32_ucmpb512_mask:
  case X86::BI__builtin_ia32_ucmpw512_mask:
  case X86::BI__builtin_ia32_ucmpd512_mask:
  case X86::BI__builtin_ia32_ucmpq512_mask:
  case X86::BI__builtin_ia32_vpcomub:
  case X86::BI__builtin_ia32_vpcomuw:
  case X86::BI__builtin_ia32_vpcomud:
  case X86::BI__builtin_ia32_vpcomuq:
  case X86::BI__builtin_ia32_vpcomb:
  case X86::BI__builtin_ia32_vpcomw:
  case X86::BI__builtin_ia32_vpcomd:
  case X86::BI__builtin_ia32_vpcomq:
  case X86::BI__builtin_ia32_vec_set_v8hi:
  case X86::BI__builtin_ia32_vec_set_v8si:
    i = 2; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_vpermilpd256:
  case X86::BI__builtin_ia32_roundps:
  case X86::BI__builtin_ia32_roundpd:
  case X86::BI__builtin_ia32_roundps256:
  case X86::BI__builtin_ia32_roundpd256:
  case X86::BI__builtin_ia32_getmantpd128_mask:
  case X86::BI__builtin_ia32_getmantpd256_mask:
  case X86::BI__builtin_ia32_getmantps128_mask:
  case X86::BI__builtin_ia32_getmantps256_mask:
  case X86::BI__builtin_ia32_getmantpd512_mask:
  case X86::BI__builtin_ia32_getmantps512_mask:
  case X86::BI__builtin_ia32_getmantph128_mask:
  case X86::BI__builtin_ia32_getmantph256_mask:
  case X86::BI__builtin_ia32_getmantph512_mask:
  case X86::BI__builtin_ia32_vec_ext_v16qi:
  case X86::BI__builtin_ia32_vec_ext_v16hi:
    i = 1; l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_pblendd128:
  case X86::BI__builtin_ia32_blendps:
  case X86::BI__builtin_ia32_blendpd256:
  case X86::BI__builtin_ia32_shufpd256:
  case X86::BI__builtin_ia32_roundss:
  case X86::BI__builtin_ia32_roundsd:
  case X86::BI__builtin_ia32_rangepd128_mask:
  case X86::BI__builtin_ia32_rangepd256_mask:
  case X86::BI__builtin_ia32_rangepd512_mask:
  case X86::BI__builtin_ia32_rangeps128_mask:
  case X86::BI__builtin_ia32_rangeps256_mask:
  case X86::BI__builtin_ia32_rangeps512_mask:
  case X86::BI__builtin_ia32_getmantsd_round_mask:
  case X86::BI__builtin_ia32_getmantss_round_mask:
  case X86::BI__builtin_ia32_getmantsh_round_mask:
  case X86::BI__builtin_ia32_vec_set_v16qi:
  case X86::BI__builtin_ia32_vec_set_v16hi:
    i = 2; l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_vec_ext_v32qi:
    i = 1; l = 0; u = 31;
    break;
  case X86::BI__builtin_ia32_cmpps:
  case X86::BI__builtin_ia32_cmpss:
  case X86::BI__builtin_ia32_cmppd:
  case X86::BI__builtin_ia32_cmpsd:
  case X86::BI__builtin_ia32_cmpps256:
  case X86::BI__builtin_ia32_cmppd256:
  case X86::BI__builtin_ia32_cmpps128_mask:
  case X86::BI__builtin_ia32_cmppd128_mask:
  case X86::BI__builtin_ia32_cmpps256_mask:
  case X86::BI__builtin_ia32_cmppd256_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmppd512_mask:
  case X86::BI__builtin_ia32_cmpsd_mask:
  case X86::BI__builtin_ia32_cmpss_mask:
  case X86::BI__builtin_ia32_vec_set_v32qi:
    i = 2; l = 0; u = 31;
    break;
  case X86::BI__builtin_ia32_permdf256:
  case X86::BI__builtin_ia32_permdi256:
  case X86::BI__builtin_ia32_permdf512:
  case X86::BI__builtin_ia32_permdi512:
  case X86::BI__builtin_ia32_vpermilps:
  case X86::BI__builtin_ia32_vpermilps256:
  case X86::BI__builtin_ia32_vpermilpd512:
  case X86::BI__builtin_ia32_vpermilps512:
  case X86::BI__builtin_ia32_pshufd:
  case X86::BI__builtin_ia32_pshufd256:
  case X86::BI__builtin_ia32_pshufd512:
  case X86::BI__builtin_ia32_pshufhw:
  case X86::BI__builtin_ia32_pshufhw256:
  case X86::BI__builtin_ia32_pshufhw512:
  case X86::BI__builtin_ia32_pshuflw:
  case X86::BI__builtin_ia32_pshuflw256:
  case X86::BI__builtin_ia32_pshuflw512:
  case X86::BI__builtin_ia32_vcvtps2ph:
  case X86::BI__builtin_ia32_vcvtps2ph_mask:
  case X86::BI__builtin_ia32_vcvtps2ph256:
  case X86::BI__builtin_ia32_vcvtps2ph256_mask:
  case X86::BI__builtin_ia32_vcvtps2ph512_mask:
  case X86::BI__builtin_ia32_rndscaleps_128_mask:
  case X86::BI__builtin_ia32_rndscalepd_128_mask:
  case X86::BI__builtin_ia32_rndscaleps_256_mask:
  case X86::BI__builtin_ia32_rndscalepd_256_mask:
  case X86::BI__builtin_ia32_rndscaleps_mask:
  case X86::BI__builtin_ia32_rndscalepd_mask:
  case X86::BI__builtin_ia32_rndscaleph_mask:
  case X86::BI__builtin_ia32_reducepd128_mask:
  case X86::BI__builtin_ia32_reducepd256_mask:
  case X86::BI__builtin_ia32_reducepd512_mask:
  case X86::BI__builtin_ia32_reduceps128_mask:
  case X86::BI__builtin_ia32_reduceps256_mask:
  case X86::BI__builtin_ia32_reduceps512_mask:
  case X86::BI__builtin_ia32_reduceph128_mask:
  case X86::BI__builtin_ia32_reduceph256_mask:
  case X86::BI__builtin_ia32_reduceph512_mask:
  case X86::BI__builtin_ia32_prold512:
  case X86::BI__builtin_ia32_prolq512:
  case X86::BI__builtin_ia32_prold128:
  case X86::BI__builtin_ia32_prold256:
  case X86::BI__builtin_ia32_prolq128:
  case X86::BI__builtin_ia32_prolq256:
  case X86::BI__builtin_ia32_prord512:
  case X86::BI__builtin_ia32_prorq512:
  case X86::BI__builtin_ia32_prord128:
  case X86::BI__builtin_ia32_prord256:
  case X86::BI__builtin_ia32_prorq128:
  case X86::BI__builtin_ia32_prorq256:
  case X86::BI__builtin_ia32_fpclasspd128_mask:
  case X86::BI__builtin_ia32_fpclasspd256_mask:
  case X86::BI__builtin_ia32_fpclassps128_mask:
  case X86::BI__builtin_ia32_fpclassps256_mask:
  case X86::BI__builtin_ia32_fpclassps512_mask:
  case X86::BI__builtin_ia32_fpclasspd512_mask:
  case X86::BI__builtin_ia32_fpclassph128_mask:
  case X86::BI__builtin_ia32_fpclassph256_mask:
  case X86::BI__builtin_ia32_fpclassph512_mask:
  case X86::BI__builtin_ia32_fpclasssd_mask:
  case X86::BI__builtin_ia32_fpclassss_mask:
  case X86::BI__builtin_ia32_fpclasssh_mask:
  case X86::BI__builtin_ia32_pslldqi128_byteshift:
  case X86::BI__builtin_ia32_pslldqi256_byteshift:
  case X86::BI__builtin_ia32_pslldqi512_byteshift:
  case X86::BI__builtin_ia32_psrldqi128_byteshift:
  case X86::BI__builtin_ia32_psrldqi256_byteshift:
  case X86::BI__builtin_ia32_psrldqi512_byteshift:
  case X86::BI__builtin_ia32_kshiftliqi:
  case X86::BI__builtin_ia32_kshiftlihi:
  case X86::BI__builtin_ia32_kshiftlisi:
  case X86::BI__builtin_ia32_kshiftlidi:
  case X86::BI__builtin_ia32_kshiftriqi:
  case X86::BI__builtin_ia32_kshiftrihi:
  case X86::BI__builtin_ia32_kshiftrisi:
  case X86::BI__builtin_ia32_kshiftridi:
    i = 1; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_vperm2f128_pd256:
  case X86::BI__builtin_ia32_vperm2f128_ps256:
  case X86::BI__builtin_ia32_vperm2f128_si256:
  case X86::BI__builtin_ia32_permti256:
  case X86::BI__builtin_ia32_pblendw128:
  case X86::BI__builtin_ia32_pblendw256:
  case X86::BI__builtin_ia32_blendps256:
  case X86::BI__builtin_ia32_pblendd256:
  case X86::BI__builtin_ia32_palignr128:
  case X86::BI__builtin_ia32_palignr256:
  case X86::BI__builtin_ia32_palignr512:
  case X86::BI__builtin_ia32_alignq512:
  case X86::BI__builtin_ia32_alignd512:
  case X86::BI__builtin_ia32_alignd128:
  case X86::BI__builtin_ia32_alignd256:
  case X86::BI__builtin_ia32_alignq128:
  case X86::BI__builtin_ia32_alignq256:
  case X86::BI__builtin_ia32_vcomisd:
  case X86::BI__builtin_ia32_vcomiss:
  case X86::BI__builtin_ia32_shuf_f32x4:
  case X86::BI__builtin_ia32_shuf_f64x2:
  case X86::BI__builtin_ia32_shuf_i32x4:
  case X86::BI__builtin_ia32_shuf_i64x2:
  case X86::BI__builtin_ia32_shufpd512:
  case X86::BI__builtin_ia32_shufps:
  case X86::BI__builtin_ia32_shufps256:
  case X86::BI__builtin_ia32_shufps512:
  case X86::BI__builtin_ia32_dbpsadbw128:
  case X86::BI__builtin_ia32_dbpsadbw256:
  case X86::BI__builtin_ia32_dbpsadbw512:
  case X86::BI__builtin_ia32_vpshldd128:
  case X86::BI__builtin_ia32_vpshldd256:
  case X86::BI__builtin_ia32_vpshldd512:
  case X86::BI__builtin_ia32_vpshldq128:
  case X86::BI__builtin_ia32_vpshldq256:
  case X86::BI__builtin_ia32_vpshldq512:
  case X86::BI__builtin_ia32_vpshldw128:
  case X86::BI__builtin_ia32_vpshldw256:
  case X86::BI__builtin_ia32_vpshldw512:
  case X86::BI__builtin_ia32_vpshrdd128:
  case X86::BI__builtin_ia32_vpshrdd256:
  case X86::BI__builtin_ia32_vpshrdd512:
  case X86::BI__builtin_ia32_vpshrdq128:
  case X86::BI__builtin_ia32_vpshrdq256:
  case X86::BI__builtin_ia32_vpshrdq512:
  case X86::BI__builtin_ia32_vpshrdw128:
  case X86::BI__builtin_ia32_vpshrdw256:
  case X86::BI__builtin_ia32_vpshrdw512:
    i = 2; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_fixupimmpd128_mask:
  case X86::BI__builtin_ia32_fixupimmpd128_maskz:
  case X86::BI__builtin_ia32_fixupimmpd256_mask:
  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
  case X86::BI__builtin_ia32_fixupimmps128_mask:
  case X86::BI__builtin_ia32_fixupimmps128_maskz:
  case X86::BI__builtin_ia32_fixupimmps256_mask:
  case X86::BI__builtin_ia32_fixupimmps256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq512_mask:
  case X86::BI__builtin_ia32_pternlogq512_maskz:
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
  case X86::BI__builtin_ia32_vsm3rnds2:
    i = 3; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    i = 4; l = 2; u = 3;
    break;
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
  case X86::BI__builtin_ia32_rndscalesh_round_mask:
  case X86::BI__builtin_ia32_reducesh_mask:
    i = 4; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_cmpccxadd32:
  case X86::BI__builtin_ia32_cmpccxadd64:
    i = 3; l = 0; u = 15;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have out-of-
  // range values. These need to code generate, but don't necessarily need to
  // make any sense. We use a warning that defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}
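
// For illustration (editor's sketch, not from the upstream sources): in the
// table above, i is the index of the immediate argument and [l, u] its allowed
// range. For example __builtin_ia32_blendpd (immediate at index 2, range
// [0, 3]) accepts 0..3; an out-of-range constant only produces the
// warning-defaults-to-error diagnostic mentioned above, so template- or
// macro-generated dead code can still be compiled if that warning is
// downgraded.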

/// Given a FunctionDecl's FormatAttr, attempts to populate the
/// FormatStringInfo parameter with the FormatAttr's correct format_idx and
/// firstDataArg. Returns true when the format fits the function and the
/// FormatStringInfo has been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               bool IsVariadic, FormatStringInfo *FSI) {
  if (Format->getFirstArg() == 0)
    FSI->ArgPassingKind = FAPK_VAList;
  else if (IsVariadic)
    FSI->ArgPassingKind = FAPK_Variadic;
  else
    FSI->ArgPassingKind = FAPK_Fixed;
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg =
      FSI->ArgPassingKind == FAPK_VAList ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    if (FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}
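
// For illustration (editor's note): given a declaration such as
//   void log(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
// the attribute's 1-based indices become FormatIdx == 0 and FirstDataArg == 1
// here; for a non-static member function the implicit 'this' shifts both down
// by one more, which is what the IsCXXMember adjustment above does.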

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability = Expr->IgnoreImplicit()->getType()->getNullability()) {
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
          dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
            dyn_cast<InitListExpr>(CLE->getInitializer()))
          Expr = ILE->getInit(0);
  }

  bool Result;
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
          !Result);
}

static void CheckNonNullArgument(Sema &S,
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  if (CheckNonNullExpr(S, ArgExpr))
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                              << ArgExpr->getSourceRange());
}

bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
  FormatStringInfo FSI;
  if ((GetFormatStringType(Format) == FST_NSString) &&
      getFormatStringInfo(Format, false, true, &FSI)) {
    Idx = FSI.FormatIdx;
    return true;
  }
  return false;
}

/// Diagnose use of %s directive in an NSString which is being passed
/// as formatting string to formatting method.
static void
DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
                                      const NamedDecl *FDecl,
                                      Expr **Args,
                                      unsigned NumArgs) {
  unsigned Idx = 0;
  bool Format = false;
  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
    Idx = 2;
    Format = true;
  } else {
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      if (S.GetFormatNSStringIdx(I, Idx)) {
        Format = true;
        break;
      }
    }
  }
  if (!Format || NumArgs <= Idx)
    return;
  const Expr *FormatExpr = Args[Idx];
  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
    FormatExpr = CSCE->getSubExpr();
  const StringLiteral *FormatString;
  if (const ObjCStringLiteral *OSL =
      dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
    FormatString = OSL->getString();
  else
    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
  if (!FormatString)
    return;
  if (S.FormatStringHasSArg(FormatString)) {
    S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
        << "%s" << 1 << 1;
    S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
        << FDecl->getDeclName();
  }
}
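
// For illustration (editor's note, assumptions flagged): with a declaration in
// the CFString formatting family, e.g.
//   CFStringRef CFStringCreateWithFormat(CFAllocatorRef, CFDictionaryRef,
//                                        CFStringRef format, ...);
// a call whose format literal contains "%s" is flagged here, because %s in a
// CFString/NSString format is interpreted in the system encoding rather than
// as UTF-8; %@ (or an explicit conversion) is usually what the caller wants.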

/// Determine whether the given type has a non-null nullability annotation.
static bool isNonNullType(QualType type) {
  if (auto nullability = type->getNullability())
    return *nullability == NullabilityKind::NonNull;

  return false;
}

static void CheckNonNullArguments(Sema &S,
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  ArrayRef<const Expr *> Args,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Already checked by constant evaluator.
  if (S.isConstantEvaluated())
    return;
  // Check the attributes attached to the method/function itself.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        return;
      }

      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        if (IdxAST >= Args.size())
          continue;
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
    ArrayRef<ParmVarDecl*> parms;
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
      parms = FD->parameters();
    else
      parms = cast<ObjCMethodDecl>(FDecl)->parameters();

    unsigned ParamIndex = 0;
    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
         I != E; ++I, ++ParamIndex) {
      const ParmVarDecl *PVD = *I;
      if (PVD->hasAttr<NonNullAttr>() || isNonNullType(PVD->getType())) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(ParamIndex);
      }
    }
  } else {
    // If we have a non-function, non-method declaration but no
    // function prototype, try to dig out the function prototype.
    if (!Proto) {
      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
        QualType type = VD->getType().getNonReferenceType();
        if (auto pointerType = type->getAs<PointerType>())
          type = pointerType->getPointeeType();
        else if (auto blockType = type->getAs<BlockPointerType>())
          type = blockType->getPointeeType();
        // FIXME: data member pointers?

        // Dig out the function prototype, if there is one.
        Proto = type->getAs<FunctionProtoType>();
      }
    }
  }

  // Fill in non-null argument information from the nullability
  // information on the parameter types (if we have them).
  if (Proto) {
    unsigned Index = 0;
    for (auto paramType : Proto->getParamTypes()) {
      if (isNonNullType(paramType)) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(Index);
      }

      ++Index;
    }
  }

  // Check for non-null arguments.
  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
       ArgIndex != ArgIndexEnd; ++ArgIndex) {
    if (NonNullArgs[ArgIndex])
      CheckNonNullArgument(S, Args[ArgIndex], Args[ArgIndex]->getExprLoc());
  }
}
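
// For illustration (editor's note): all three spellings feed the NonNullArgs
// bit vector above:
//   void f(int *p) __attribute__((nonnull(1)));   // attribute on the function
//   void g(int *p __attribute__((nonnull)));      // attribute on the parameter
//   void h(int *_Nonnull p);                      // nullability on the type
// and a call such as f(nullptr) is then diagnosed with warn_null_arg.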

// 16 byte ByVal alignment not due to a vector member is not honoured by XL
// on AIX. Emit a warning here that users are generating binary incompatible
// code to be safe.
// Here we try to get information about the alignment of the struct member
// from the struct passed to the caller function. We only warn when the struct
// is passed byval, hence the series of checks and early returns if we are not
// passing a struct byval.
void Sema::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) {
  const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens());
  if (!ICE)
    return;

  const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
  if (!DR)
    return;

  const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl());
  if (!PD || !PD->getType()->isRecordType())
    return;

  QualType ArgType = Arg->getType();
  for (const FieldDecl *FD :
       ArgType->castAs<RecordType>()->getDecl()->fields()) {
    if (const auto *AA = FD->getAttr<AlignedAttr>()) {
      CharUnits Alignment =
          Context.toCharUnitsFromBits(AA->getAlignment(Context));
      if (Alignment.getQuantity() == 16) {
        Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD;
        Diag(Loc, diag::note_misaligned_member_used_here) << PD;
      }
    }
  }
}
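
// For illustration (editor's sketch): a by-value argument of a type such as
//   struct S { double d __attribute__((aligned(16))); };
// triggers the warning above when passed to an external function on AIX,
// because a 16-byte member alignment that does not come from a vector member
// is not honoured by the XL ABI for byval aggregates.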

/// Warn if a pointer or reference argument passed to a function points to an
/// object that is less aligned than the parameter. This can happen when
/// creating a typedef with a lower alignment than the original type and then
/// calling functions defined in terms of the original type.
void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
                             StringRef ParamName, QualType ArgTy,
                             QualType ParamTy) {

  // If a function accepts a pointer or reference type
  if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
    return;

  // If the parameter is a pointer type, get the pointee type for the
  // argument too. If the parameter is a reference type, don't try to get
  // the pointee type for the argument.
  if (ParamTy->isPointerType())
    ArgTy = ArgTy->getPointeeType();

  // Remove reference or pointer
  ParamTy = ParamTy->getPointeeType();

  // Find expected alignment, and the actual alignment of the passed object.
  // getTypeAlignInChars requires complete types
  if (ArgTy.isNull() || ParamTy->isDependentType() ||
      ParamTy->isIncompleteType() || ArgTy->isIncompleteType() ||
      ParamTy->isUndeducedType() || ArgTy->isUndeducedType())
    return;

  CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
  CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);

  // If the argument is less aligned than the parameter, there is a
  // potential alignment issue.
  if (ArgAlign < ParamAlign)
    Diag(Loc, diag::warn_param_mismatched_alignment)
        << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
        << ParamName << (FDecl != nullptr) << FDecl;
}
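
// For illustration (editor's sketch): the mismatch typically comes from an
// under-aligned typedef, e.g.
//   typedef long long Unaligned __attribute__((aligned(1)));
//   void use(long long *);   // parameter's pointee expects 8-byte alignment
//   Unaligned u; use(&u);    // warn_param_mismatched_alignment (1 < 8)
// The sugared pointee type of the argument keeps the typedef's reduced
// alignment, which is what gets compared above.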

/// Handles the checks for format strings, non-POD arguments to vararg
/// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
/// attributes.
void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
                     const Expr *ThisArg, ArrayRef<const Expr *> Args,
                     bool IsMemberFunction, SourceLocation Loc,
                     SourceRange Range, VariadicCallType CallType) {
  // FIXME: We should check as much as we can in the template definition.
  if (CurContext->isDependentContext())
    return;

  // Printf and scanf checking.
  llvm::SmallBitVector CheckedVarArgs;
  if (FDecl) {
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      // Only create vector if there are format attributes.
      CheckedVarArgs.resize(Args.size());

      CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
                           CheckedVarArgs);
    }
  }

  // Refuse POD arguments that weren't caught by the format string
  // checks above.
  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
  if (CallType != VariadicDoesNotApply &&
      (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
    unsigned NumParams = Proto ? Proto->getNumParams()
                       : FDecl && isa<FunctionDecl>(FDecl)
                           ? cast<FunctionDecl>(FDecl)->getNumParams()
                       : FDecl && isa<ObjCMethodDecl>(FDecl)
                           ? cast<ObjCMethodDecl>(FDecl)->param_size()
                       : 0;

    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
          checkVariadicArgument(Arg, CallType);
      }
    }
  }

  if (FDecl || Proto) {
    CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);

    // Type safety checking.
    if (FDecl) {
      for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
        CheckArgumentWithTypeTag(I, Args, Loc);
    }
  }

  // Check that passed arguments match the alignment of original arguments.
  // Try to get the missing prototype from the declaration.
  if (!Proto && FDecl) {
    const auto *FT = FDecl->getFunctionType();
    if (isa_and_nonnull<FunctionProtoType>(FT))
      Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
  }
  if (Proto) {
    // For variadic functions, we may have more args than parameters.
    // For some K&R functions, we may have less args than parameters.
    const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
    for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        if (Arg->containsErrors())
          continue;

        if (Context.getTargetInfo().getTriple().isOSAIX() && FDecl && Arg &&
            FDecl->hasLinkage() &&
            FDecl->getFormalLinkage() != InternalLinkage &&
            CallType == VariadicDoesNotApply)
          checkAIXMemberAlignment((Arg->getExprLoc()), Arg);

        QualType ParamTy = Proto->getParamType(ArgIdx);
        QualType ArgTy = Arg->getType();
        CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
                          ArgTy, ParamTy);
      }
    }
  }

  if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
    auto *AA = FDecl->getAttr<AllocAlignAttr>();
    const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
    if (!Arg->isValueDependent()) {
      Expr::EvalResult Align;
      if (Arg->EvaluateAsInt(Align, Context)) {
        const llvm::APSInt &I = Align.Val.getInt();
        if (!I.isPowerOf2())
          Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
              << Arg->getSourceRange();

        if (I > Sema::MaximumAlignment)
          Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
              << Arg->getSourceRange() << Sema::MaximumAlignment;
      }
    }
  }

  if (FD)
    diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}
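
// For illustration (editor's sketch): the alloc_align block above validates
// calls such as
//   void *my_alloc(size_t n, size_t align) __attribute__((alloc_align(2)));
//   my_alloc(64, 3);          // warn_alignment_not_power_of_two
//   my_alloc(64, 1ull << 40); // warn_assume_aligned_too_great (> MaximumAlignment)
// while a non-constant alignment argument is simply left unchecked here.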

/// CheckConstructorCall - Check a constructor call for correctness and safety
/// properties not enforced by the C type system.
void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
                                ArrayRef<const Expr *> Args,
                                const FunctionProtoType *Proto,
                                SourceLocation Loc) {
  VariadicCallType CallType =
      Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;

  auto *Ctor = cast<CXXConstructorDecl>(FDecl);
  CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType),
                    Context.getPointerType(Ctor->getThisObjectType()));

  checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
            Loc, SourceRange(), CallType);
}

/// CheckFunctionCall - Check a direct function call for various correctness
/// and safety properties not strictly enforced by the C type system.
bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                             const FunctionProtoType *Proto) {
  bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
                              isa<CXXMethodDecl>(FDecl);
  bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
                          IsMemberOperatorCall;
  VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
                                                  TheCall->getCallee());
  Expr** Args = TheCall->getArgs();
  unsigned NumArgs = TheCall->getNumArgs();

  Expr *ImplicitThis = nullptr;
  if (IsMemberOperatorCall && !FDecl->isStatic()) {
    // If this is a call to a non-static member operator, hide the first
    // argument from checkCall.
    // FIXME: Our choice of AST representation here is less than ideal.
    ImplicitThis = Args[0];
    ++Args;
    --NumArgs;
  } else if (IsMemberFunction && !FDecl->isStatic())
    ImplicitThis =
        cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();

  if (ImplicitThis) {
    // ImplicitThis may or may not be a pointer, depending on whether . or ->
    // is used.
    QualType ThisType = ImplicitThis->getType();
    if (!ThisType->isPointerType()) {
      assert(!ThisType->isReferenceType());
      ThisType = Context.getPointerType(ThisType);
    }

    QualType ThisTypeFromDecl =
        Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType());

    CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
                      ThisTypeFromDecl);
  }

  checkCall(FDecl, Proto, ImplicitThis, llvm::ArrayRef(Args, NumArgs),
            IsMemberFunction, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  IdentifierInfo *FnInfo = FDecl->getIdentifier();
  // None of the checks below are needed for functions that don't have
  // simple names (e.g., C++ conversion functions).
  if (!FnInfo)
    return false;

  // Enforce TCB except for builtin calls, which are always allowed.
  if (FDecl->getBuiltinID() == 0)
    CheckTCBEnforcement(TheCall->getExprLoc(), FDecl);

  CheckAbsoluteValueFunction(TheCall, FDecl);
  CheckMaxUnsignedZero(TheCall, FDecl);

  if (getLangOpts().ObjC)
    DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);

  unsigned CMId = FDecl->getMemoryFunctionKind();

  // Handle memory setting and copying functions.
  switch (CMId) {
  case 0:
    return false;
  case Builtin::BIstrlcpy: // fallthrough
  case Builtin::BIstrlcat:
    CheckStrlcpycatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIstrncat:
    CheckStrncatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIfree:
    CheckFreeArguments(TheCall);
    break;
  default:
    CheckMemaccessArguments(TheCall, CMId, FnInfo);
  }

  return false;
}

bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
                               ArrayRef<const Expr *> Args) {
  VariadicCallType CallType =
      Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;

  checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
            /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
            CallType);

  CheckTCBEnforcement(lbrac, Method);

  return false;
}

bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
                            const FunctionProtoType *Proto) {
  QualType Ty;
  if (const auto *V = dyn_cast<VarDecl>(NDecl))
    Ty = V->getType().getNonReferenceType();
  else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
    Ty = F->getType().getNonReferenceType();
  else
    return false;

  if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
      !Ty->isFunctionProtoType())
    return false;

  VariadicCallType CallType;
  if (!Proto || !Proto->isVariadic()) {
    CallType = VariadicDoesNotApply;
  } else if (Ty->isBlockPointerType()) {
    CallType = VariadicBlock;
  } else { // Ty->isFunctionPointerType()
    CallType = VariadicFunction;
  }

  checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
            llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  return false;
}

/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
/// such as function pointers returned from functions.
bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
  VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
                                                  TheCall->getCallee());
  checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
            llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  return false;
}

static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
  if (!llvm::isValidAtomicOrderingCABI(Ordering))
    return false;

  auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
  switch (Op) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("There is no ordering argument for an init");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
    return OrderingCABI != llvm::AtomicOrderingCABI::release &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
           OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  default:
    return true;
  }
}
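
// For illustration (editor's note): the table above mirrors the C11 rules, e.g.
//   __c11_atomic_load(p, memory_order_release);     // release/acq_rel invalid
//   __c11_atomic_store(p, v, memory_order_acquire); // consume/acquire/acq_rel invalid
// are each flagged with warn_atomic_op_has_invalid_memory_order by the caller,
// while read-modify-write operations accept any valid ordering, which is why
// the default case returns true.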

ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                         AtomicExpr::AtomicOp Op) {
  CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
  return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()},
                         DRE->getSourceRange(), TheCall->getRParenLoc(), Args,
                         Op);
}
6979 ExprResult
Sema::BuildAtomicExpr(SourceRange CallRange
, SourceRange ExprRange
,
6980 SourceLocation RParenLoc
, MultiExprArg Args
,
6981 AtomicExpr::AtomicOp Op
,
6982 AtomicArgumentOrder ArgOrder
) {
6983 // All the non-OpenCL operations take one of the following forms.
6984 // The OpenCL operations take the __c11 forms with one extra argument for
6985 // synchronization scope.
6987 // C __c11_atomic_init(A *, C)
6990 // C __c11_atomic_load(A *, int)
6993 // void __atomic_load(A *, CP, int)
6996 // void __atomic_store(A *, CP, int)
6999 // C __c11_atomic_add(A *, M, int)
7002 // C __atomic_exchange_n(A *, CP, int)
7005 // void __atomic_exchange(A *, C *, CP, int)
7008 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
7011 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
7015 const unsigned NumForm
= GNUCmpXchg
+ 1;
7016 const unsigned NumArgs
[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
7017 const unsigned NumVals
[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
7019 // C is an appropriate type,
7020 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
7021 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
7022 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
7023 // the int parameters are for orderings.
7025 static_assert(sizeof(NumArgs
)/sizeof(NumArgs
[0]) == NumForm
7026 && sizeof(NumVals
)/sizeof(NumVals
[0]) == NumForm
,
7027 "need to update code for modified forms");
7028 static_assert(AtomicExpr::AO__c11_atomic_init
== 0 &&
7029 AtomicExpr::AO__c11_atomic_fetch_min
+ 1 ==
7030 AtomicExpr::AO__atomic_load
,
7031 "need to update code for modified C11 atomics");
7032 bool IsOpenCL
= Op
>= AtomicExpr::AO__opencl_atomic_init
&&
7033 Op
<= AtomicExpr::AO__opencl_atomic_fetch_max
;
7034 bool IsHIP
= Op
>= AtomicExpr::AO__hip_atomic_load
&&
7035 Op
<= AtomicExpr::AO__hip_atomic_fetch_max
;
7036 bool IsC11
= (Op
>= AtomicExpr::AO__c11_atomic_init
&&
7037 Op
<= AtomicExpr::AO__c11_atomic_fetch_min
) ||
7039 bool IsN
= Op
== AtomicExpr::AO__atomic_load_n
||
7040 Op
== AtomicExpr::AO__atomic_store_n
||
7041 Op
== AtomicExpr::AO__atomic_exchange_n
||
7042 Op
== AtomicExpr::AO__atomic_compare_exchange_n
;
7043 // Bit mask for extra allowed value types other than integers for atomic
7044 // arithmetic operations. Add/sub allow pointer and floating point. Min/max
7045 // allow floating point.
7046 enum ArithOpExtraValueType
{
7051 unsigned ArithAllows
= AOEVT_None
;
7054 case AtomicExpr::AO__c11_atomic_init
:
7055 case AtomicExpr::AO__opencl_atomic_init
:
7059 case AtomicExpr::AO__c11_atomic_load
:
7060 case AtomicExpr::AO__opencl_atomic_load
:
7061 case AtomicExpr::AO__hip_atomic_load
:
7062 case AtomicExpr::AO__atomic_load_n
:
7066 case AtomicExpr::AO__atomic_load
:
7070 case AtomicExpr::AO__c11_atomic_store
:
7071 case AtomicExpr::AO__opencl_atomic_store
:
7072 case AtomicExpr::AO__hip_atomic_store
:
7073 case AtomicExpr::AO__atomic_store
:
7074 case AtomicExpr::AO__atomic_store_n
:
7077 case AtomicExpr::AO__atomic_fetch_add
:
7078 case AtomicExpr::AO__atomic_fetch_sub
:
7079 case AtomicExpr::AO__atomic_add_fetch
:
7080 case AtomicExpr::AO__atomic_sub_fetch
:
7081 case AtomicExpr::AO__c11_atomic_fetch_add
:
7082 case AtomicExpr::AO__c11_atomic_fetch_sub
:
7083 case AtomicExpr::AO__opencl_atomic_fetch_add
:
7084 case AtomicExpr::AO__opencl_atomic_fetch_sub
:
7085 case AtomicExpr::AO__hip_atomic_fetch_add
:
7086 case AtomicExpr::AO__hip_atomic_fetch_sub
:
7087 ArithAllows
= AOEVT_Pointer
| AOEVT_FP
;
7090 case AtomicExpr::AO__atomic_fetch_max
:
7091 case AtomicExpr::AO__atomic_fetch_min
:
7092 case AtomicExpr::AO__atomic_max_fetch
:
7093 case AtomicExpr::AO__atomic_min_fetch
:
7094 case AtomicExpr::AO__c11_atomic_fetch_max
:
7095 case AtomicExpr::AO__c11_atomic_fetch_min
:
7096 case AtomicExpr::AO__opencl_atomic_fetch_max
:
7097 case AtomicExpr::AO__opencl_atomic_fetch_min
:
7098 case AtomicExpr::AO__hip_atomic_fetch_max
:
7099 case AtomicExpr::AO__hip_atomic_fetch_min
:
7100 ArithAllows
= AOEVT_FP
;
7103 case AtomicExpr::AO__c11_atomic_fetch_and
:
7104 case AtomicExpr::AO__c11_atomic_fetch_or
:
7105 case AtomicExpr::AO__c11_atomic_fetch_xor
:
7106 case AtomicExpr::AO__hip_atomic_fetch_and
:
7107 case AtomicExpr::AO__hip_atomic_fetch_or
:
7108 case AtomicExpr::AO__hip_atomic_fetch_xor
:
7109 case AtomicExpr::AO__c11_atomic_fetch_nand
:
7110 case AtomicExpr::AO__opencl_atomic_fetch_and
:
7111 case AtomicExpr::AO__opencl_atomic_fetch_or
:
7112 case AtomicExpr::AO__opencl_atomic_fetch_xor
:
7113 case AtomicExpr::AO__atomic_fetch_and
:
7114 case AtomicExpr::AO__atomic_fetch_or
:
7115 case AtomicExpr::AO__atomic_fetch_xor
:
7116 case AtomicExpr::AO__atomic_fetch_nand
:
7117 case AtomicExpr::AO__atomic_and_fetch
:
7118 case AtomicExpr::AO__atomic_or_fetch
:
7119 case AtomicExpr::AO__atomic_xor_fetch
:
7120 case AtomicExpr::AO__atomic_nand_fetch
:
7124 case AtomicExpr::AO__c11_atomic_exchange
:
7125 case AtomicExpr::AO__hip_atomic_exchange
:
7126 case AtomicExpr::AO__opencl_atomic_exchange
:
7127 case AtomicExpr::AO__atomic_exchange_n
:
7131 case AtomicExpr::AO__atomic_exchange
:
7135 case AtomicExpr::AO__c11_atomic_compare_exchange_strong
:
7136 case AtomicExpr::AO__c11_atomic_compare_exchange_weak
:
7137 case AtomicExpr::AO__hip_atomic_compare_exchange_strong
:
7138 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong
:
7139 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak
:
7140 case AtomicExpr::AO__hip_atomic_compare_exchange_weak
:
7144 case AtomicExpr::AO__atomic_compare_exchange
:
7145 case AtomicExpr::AO__atomic_compare_exchange_n
:
7150 unsigned AdjustedNumArgs
= NumArgs
[Form
];
7151 if ((IsOpenCL
|| IsHIP
) && Op
!= AtomicExpr::AO__opencl_atomic_init
)
7153 // Check we have the right number of arguments.
7154 if (Args
.size() < AdjustedNumArgs
) {
7155 Diag(CallRange
.getEnd(), diag::err_typecheck_call_too_few_args
)
7156 << 0 << AdjustedNumArgs
<< static_cast<unsigned>(Args
.size())
7159 } else if (Args
.size() > AdjustedNumArgs
) {
7160 Diag(Args
[AdjustedNumArgs
]->getBeginLoc(),
7161 diag::err_typecheck_call_too_many_args
)
7162 << 0 << AdjustedNumArgs
<< static_cast<unsigned>(Args
.size())
7167 // Inspect the first argument of the atomic operation.
7168 Expr
*Ptr
= Args
[0];
7169 ExprResult ConvertedPtr
= DefaultFunctionArrayLvalueConversion(Ptr
);
7170 if (ConvertedPtr
.isInvalid())
7173 Ptr
= ConvertedPtr
.get();
7174 const PointerType
*pointerType
= Ptr
->getType()->getAs
<PointerType
>();
7176 Diag(ExprRange
.getBegin(), diag::err_atomic_builtin_must_be_pointer
)
7177 << Ptr
->getType() << Ptr
->getSourceRange();
7181 // For a __c11 builtin, this should be a pointer to an _Atomic type.
7182 QualType AtomTy
= pointerType
->getPointeeType(); // 'A'
7183 QualType ValType
= AtomTy
; // 'C'
7185 if (!AtomTy
->isAtomicType()) {
7186 Diag(ExprRange
.getBegin(), diag::err_atomic_op_needs_atomic
)
7187 << Ptr
->getType() << Ptr
->getSourceRange();
7190 if ((Form
!= Load
&& Form
!= LoadCopy
&& AtomTy
.isConstQualified()) ||
7191 AtomTy
.getAddressSpace() == LangAS::opencl_constant
) {
7192 Diag(ExprRange
.getBegin(), diag::err_atomic_op_needs_non_const_atomic
)
7193 << (AtomTy
.isConstQualified() ? 0 : 1) << Ptr
->getType()
7194 << Ptr
->getSourceRange();
7197 ValType
= AtomTy
->castAs
<AtomicType
>()->getValueType();
7198 } else if (Form
!= Load
&& Form
!= LoadCopy
) {
7199 if (ValType
.isConstQualified()) {
7200 Diag(ExprRange
.getBegin(), diag::err_atomic_op_needs_non_const_pointer
)
7201 << Ptr
->getType() << Ptr
->getSourceRange();
7206 // For an arithmetic operation, the implied arithmetic must be well-formed.
7207 if (Form
== Arithmetic
) {
7208 // GCC does not enforce these rules for GNU atomics, but we do to help catch
7209 // trivial type errors.
7210 auto IsAllowedValueType
= [&](QualType ValType
,
7211 unsigned AllowedType
) -> bool {
7212 if (ValType
->isIntegerType())
7214 if (ValType
->isPointerType())
7215 return AllowedType
& AOEVT_Pointer
;
7216 if (!(ValType
->isFloatingType() && (AllowedType
& AOEVT_FP
)))
7218 // LLVM Parser does not allow atomicrmw with x86_fp80 type.
7219 if (ValType
->isSpecificBuiltinType(BuiltinType::LongDouble
) &&
7220 &Context
.getTargetInfo().getLongDoubleFormat() ==
7221 &llvm::APFloat::x87DoubleExtended())
7225 if (!IsAllowedValueType(ValType
, ArithAllows
)) {
7226 auto DID
= ArithAllows
& AOEVT_FP
7227 ? (ArithAllows
& AOEVT_Pointer
7228 ? diag::err_atomic_op_needs_atomic_int_ptr_or_fp
7229 : diag::err_atomic_op_needs_atomic_int_or_fp
)
7230 : diag::err_atomic_op_needs_atomic_int
;
7231 Diag(ExprRange
.getBegin(), DID
)
7232 << IsC11
<< Ptr
->getType() << Ptr
->getSourceRange();
7235 if (IsC11
&& ValType
->isPointerType() &&
7236 RequireCompleteType(Ptr
->getBeginLoc(), ValType
->getPointeeType(),
7237 diag::err_incomplete_type
)) {
7240 } else if (IsN
&& !ValType
->isIntegerType() && !ValType
->isPointerType()) {
7241 // For __atomic_*_n operations, the value type must be a scalar integral or
7242 // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
7243 Diag(ExprRange
.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr
)
7244 << IsC11
<< Ptr
->getType() << Ptr
->getSourceRange();
7248 if (!IsC11
&& !AtomTy
.isTriviallyCopyableType(Context
) &&
7249 !AtomTy
->isScalarType()) {
7250 // For GNU atomics, require a trivially-copyable type. This is not part of
7251 // the GNU atomics specification but we enforce it for consistency with
7252 // other atomics which generally all require a trivially-copyable type. This
7253 // is because atomics just copy bits.
7254 Diag(ExprRange
.getBegin(), diag::err_atomic_op_needs_trivial_copy
)
7255 << Ptr
->getType() << Ptr
->getSourceRange();
7259 switch (ValType
.getObjCLifetime()) {
7260 case Qualifiers::OCL_None
:
7261 case Qualifiers::OCL_ExplicitNone
:
7265 case Qualifiers::OCL_Weak
:
7266 case Qualifiers::OCL_Strong
:
7267 case Qualifiers::OCL_Autoreleasing
:
7268 // FIXME: Can this happen? By this point, ValType should be known
7269 // to be trivially copyable.
7270 Diag(ExprRange
.getBegin(), diag::err_arc_atomic_ownership
)
7271 << ValType
<< Ptr
->getSourceRange();
7275 // All atomic operations have an overload which takes a pointer to a volatile
7276 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
7277 // into the result or the other operands. Similarly atomic_load takes a
7278 // pointer to a const 'A'.
7279 ValType
.removeLocalVolatile();
7280 ValType
.removeLocalConst();
7281 QualType ResultType
= ValType
;
7282 if (Form
== Copy
|| Form
== LoadCopy
|| Form
== GNUXchg
||
7284 ResultType
= Context
.VoidTy
;
7285 else if (Form
== C11CmpXchg
|| Form
== GNUCmpXchg
)
7286 ResultType
= Context
.BoolTy
;
7288 // The type of a parameter passed 'by value'. In the GNU atomics, such
7289 // arguments are actually passed as pointers.
7290 QualType ByValType
= ValType
; // 'CP'
7291 bool IsPassedByAddress
= false;
7292 if (!IsC11
&& !IsHIP
&& !IsN
) {
7293 ByValType
= Ptr
->getType();
7294 IsPassedByAddress
= true;
7297 SmallVector
<Expr
*, 5> APIOrderedArgs
;
7298 if (ArgOrder
== Sema::AtomicArgumentOrder::AST
) {
7299 APIOrderedArgs
.push_back(Args
[0]);
7303 APIOrderedArgs
.push_back(Args
[1]); // Val1/Order
7309 APIOrderedArgs
.push_back(Args
[2]); // Val1
7310 APIOrderedArgs
.push_back(Args
[1]); // Order
7313 APIOrderedArgs
.push_back(Args
[2]); // Val1
7314 APIOrderedArgs
.push_back(Args
[3]); // Val2
7315 APIOrderedArgs
.push_back(Args
[1]); // Order
7318 APIOrderedArgs
.push_back(Args
[2]); // Val1
7319 APIOrderedArgs
.push_back(Args
[4]); // Val2
7320 APIOrderedArgs
.push_back(Args
[1]); // Order
7321 APIOrderedArgs
.push_back(Args
[3]); // OrderFail
7324 APIOrderedArgs
.push_back(Args
[2]); // Val1
7325 APIOrderedArgs
.push_back(Args
[4]); // Val2
7326 APIOrderedArgs
.push_back(Args
[5]); // Weak
7327 APIOrderedArgs
.push_back(Args
[1]); // Order
7328 APIOrderedArgs
.push_back(Args
[3]); // OrderFail
7332 APIOrderedArgs
.append(Args
.begin(), Args
.end());
7334 // The first argument's non-CV pointer type is used to deduce the type of
7335 // subsequent arguments, except for:
7336 // - weak flag (always converted to bool)
7337 // - memory order (always converted to int)
7338 // - scope (always converted to int)
  for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) {
    QualType Ty;
    if (i < NumVals[Form] + 1) {
      switch (i) {
      case 0:
        // The first argument is always a pointer. It has a fixed type.
        // It is always dereferenced, a nullptr is undefined.
        CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
        // Nothing else to do: we already know all we want about this pointer.
        continue;
      case 1:
        // The second argument is the non-atomic operand. For arithmetic, this
        // is always passed by value, and for a compare_exchange it is always
        // passed by address. For the rest, GNU uses by-address and C11 uses
        // by-value.
        assert(Form != Load);
        if (Form == Arithmetic && ValType->isPointerType())
          Ty = Context.getPointerDiffType();
        else if (Form == Init || Form == Arithmetic)
          Ty = ValType;
        else if (Form == Copy || Form == Xchg) {
          if (IsPassedByAddress) {
            // The value pointer is always dereferenced, a nullptr is undefined.
            CheckNonNullArgument(*this, APIOrderedArgs[i],
                                 ExprRange.getBegin());
          }
          Ty = ByValType;
        } else {
          Expr *ValArg = APIOrderedArgs[i];
          // The value pointer is always dereferenced, a nullptr is undefined.
          CheckNonNullArgument(*this, ValArg, ExprRange.getBegin());
          LangAS AS = LangAS::Default;
          // Keep address space of non-atomic pointer type.
          if (const PointerType *PtrTy =
                  ValArg->getType()->getAs<PointerType>()) {
            AS = PtrTy->getPointeeType().getAddressSpace();
          }
          Ty = Context.getPointerType(
              Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
        }
        break;
      case 2:
        // The third argument to compare_exchange / GNU exchange is the desired
        // value, either by-value (for the C11 and *_n variant) or as a pointer.
        if (IsPassedByAddress)
          CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
        Ty = ByValType;
        break;
      case 3:
        // The fourth argument to GNU compare_exchange is a 'weak' flag.
        Ty = Context.BoolTy;
        break;
      }
    } else {
      // The order(s) and scope are always converted to int.
      Ty = Context.IntTy;
    }

    InitializedEntity Entity =
        InitializedEntity::InitializeParameter(Context, Ty, false);
    ExprResult Arg = APIOrderedArgs[i];
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();
    APIOrderedArgs[i] = Arg.get();
  }
  // Permute the arguments into a 'consistent' order.
  SmallVector<Expr *, 5> SubExprs;
  SubExprs.push_back(Ptr);
  switch (Form) {
  case Init:
    // Note, AtomicExpr::getVal1() has a special case for this atomic.
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    break;
  case Load:
    SubExprs.push_back(APIOrderedArgs[1]); // Order
    break;
  case LoadCopy:
  case Copy:
  case Arithmetic:
  case Xchg:
    SubExprs.push_back(APIOrderedArgs[2]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    break;
  case GNUXchg:
    // Note, AtomicExpr::getVal2() has a special case for this atomic.
    SubExprs.push_back(APIOrderedArgs[3]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    break;
  case C11CmpXchg:
    SubExprs.push_back(APIOrderedArgs[3]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[4]); // OrderFail
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    break;
  case GNUCmpXchg:
    SubExprs.push_back(APIOrderedArgs[4]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[5]); // OrderFail
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    SubExprs.push_back(APIOrderedArgs[3]); // Weak
    break;
  }
  if (SubExprs.size() >= 2 && Form != Init) {
    if (std::optional<llvm::APSInt> Result =
            SubExprs[1]->getIntegerConstantExpr(Context))
      if (!isValidOrderingForOp(Result->getSExtValue(), Op))
        Diag(SubExprs[1]->getBeginLoc(),
             diag::warn_atomic_op_has_invalid_memory_order)
            << SubExprs[1]->getSourceRange();
  }

  if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
    auto *Scope = Args[Args.size() - 1];
    if (std::optional<llvm::APSInt> Result =
            Scope->getIntegerConstantExpr(Context)) {
      if (!ScopeModel->isValid(Result->getZExtValue()))
        Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
            << Scope->getSourceRange();
    }
    SubExprs.push_back(Scope);
  }
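  // The ordering and scope checks above only fire on constant arguments; for
  // example, __c11_atomic_load(&a, memory_order_release) is diagnosed because
  // release is not a valid ordering for a load.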
  AtomicExpr *AE = new (Context)
      AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc);

  if ((Op == AtomicExpr::AO__c11_atomic_load ||
       Op == AtomicExpr::AO__c11_atomic_store ||
       Op == AtomicExpr::AO__opencl_atomic_load ||
       Op == AtomicExpr::AO__hip_atomic_load ||
       Op == AtomicExpr::AO__opencl_atomic_store ||
       Op == AtomicExpr::AO__hip_atomic_store) &&
      Context.AtomicUsesUnsupportedLibcall(AE))
    Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
        << ((Op == AtomicExpr::AO__c11_atomic_load ||
             Op == AtomicExpr::AO__opencl_atomic_load ||
             Op == AtomicExpr::AO__hip_atomic_load)
                ? 0
                : 1);

  if (ValType->isBitIntType()) {
    Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit);
    return ExprError();
  }

  return AE;
}
/// checkBuiltinArgument - Given a call to a builtin function, perform
/// normal type-checking on the given argument, updating the call in
/// place. This is useful when a builtin function requires custom
/// type-checking for some of its arguments but not necessarily all of
/// them.
///
/// Returns true on error.
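///
/// For example, SemaBuiltinVAStart below uses this to type-check the va_list
/// argument of __builtin_va_start against the builtin's own parameter type.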
static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
  FunctionDecl *Fn = E->getDirectCallee();
  assert(Fn && "builtin call without direct callee!");

  ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, Param);

  ExprResult Arg = E->getArg(ArgIndex);
  Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
  if (Arg.isInvalid())
    return true;

  E->setArg(ArgIndex, Arg.get());
  return false;
}
bool Sema::BuiltinWasmRefNullExtern(CallExpr *TheCall) {
  if (TheCall->getNumArgs() != 0)
    return Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_many_args)
           << 0 /*function call*/ << 0 << TheCall->getNumArgs();

  TheCall->setType(Context.getWebAssemblyExternrefType());

  return false;
}
bool Sema::BuiltinWasmRefNullFunc(CallExpr *TheCall) {
  if (TheCall->getNumArgs() != 0) {
    Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_many_args)
        << 0 /*function call*/ << 0 << TheCall->getNumArgs();
    return true;
  }

  // This custom type checking code ensures that the nodes are as expected
  // in order to later on generate the necessary builtin.
  QualType Pointee = Context.getFunctionType(Context.VoidTy, {}, {});
  QualType Type = Context.getPointerType(Pointee);
  Pointee = Context.getAddrSpaceQualType(Pointee, LangAS::wasm_funcref);
  Type = Context.getAttributedType(attr::WebAssemblyFuncref, Type,
                                   Context.getPointerType(Pointee));
  TheCall->setType(Type);

  return false;
}
/// We have a call to a function like __sync_fetch_and_add, which is an
/// overloaded function based on the pointer type of its first argument.
/// The main BuildCallExpr routines have already promoted the types of
/// arguments because all of these calls are prototyped as void(...).
///
/// This function goes through and does final semantic checking for these
/// builtins, as well as generating any warnings.
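///
/// For example, 'int i; __sync_fetch_and_add(&i, 1);' is checked here and
/// rebound to the size-specific builtin (__sync_fetch_and_add_4 when int is
/// four bytes wide).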
ExprResult
Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
  Expr *Callee = TheCall->getCallee();
  DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // Ensure that we have at least one argument to do type inference from.
  if (TheCall->getNumArgs() < 1) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange();
    return ExprError();
  }

  // Inspect the first argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  // FIXME: We don't allow floating point scalars as input.
  Expr *FirstArg = TheCall->getArg(0);
  ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
  if (FirstArgResult.isInvalid())
    return ExprError();
  FirstArg = FirstArgResult.get();
  TheCall->setArg(0, FirstArg);

  const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  QualType ValType = pointerType->getPointeeType();
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  if (ValType.isConstQualified()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << FirstArg->getSourceRange();
    return ExprError();
  }

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();

  // The majority of builtins return a value, but a few have special return
  // types, so allow them to override appropriately below.
  QualType ResultType = ValType;

  // We need to figure out which concrete builtin this maps onto. For example,
  // __sync_fetch_and_add with a 2 byte object turns into
  // __sync_fetch_and_add_2.
#define BUILTIN_ROW(x) \
  { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
    Builtin::BI##x##_8, Builtin::BI##x##_16 }
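  // For example, BUILTIN_ROW(__sync_fetch_and_add) expands to the builtin IDs
  // of __sync_fetch_and_add_1 through __sync_fetch_and_add_16.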
  static const unsigned BuiltinIndices[][5] = {
    BUILTIN_ROW(__sync_fetch_and_add),
    BUILTIN_ROW(__sync_fetch_and_sub),
    BUILTIN_ROW(__sync_fetch_and_or),
    BUILTIN_ROW(__sync_fetch_and_and),
    BUILTIN_ROW(__sync_fetch_and_xor),
    BUILTIN_ROW(__sync_fetch_and_nand),

    BUILTIN_ROW(__sync_add_and_fetch),
    BUILTIN_ROW(__sync_sub_and_fetch),
    BUILTIN_ROW(__sync_and_and_fetch),
    BUILTIN_ROW(__sync_or_and_fetch),
    BUILTIN_ROW(__sync_xor_and_fetch),
    BUILTIN_ROW(__sync_nand_and_fetch),

    BUILTIN_ROW(__sync_val_compare_and_swap),
    BUILTIN_ROW(__sync_bool_compare_and_swap),
    BUILTIN_ROW(__sync_lock_test_and_set),
    BUILTIN_ROW(__sync_lock_release),
    BUILTIN_ROW(__sync_swap)
  };
#undef BUILTIN_ROW

  // Determine the index of the size.
  unsigned SizeIndex;
  switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
  case 1: SizeIndex = 0; break;
  case 2: SizeIndex = 1; break;
  case 4: SizeIndex = 2; break;
  case 8: SizeIndex = 3; break;
  case 16: SizeIndex = 4; break;
  default:
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Each of these builtins has one pointer argument, followed by some number of
  // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
  // that we ignore. Find out which row of BuiltinIndices to read from as well
  // as the number of fixed args.
  unsigned BuiltinID = FDecl->getBuiltinID();
  unsigned BuiltinIndex, NumFixed = 1;
  bool WarnAboutSemanticsChange = false;
  switch (BuiltinID) {
  default: llvm_unreachable("Unknown overloaded atomic builtin!");
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    BuiltinIndex = 0;
    break;

  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    BuiltinIndex = 1;
    break;

  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    BuiltinIndex = 2;
    break;

  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    BuiltinIndex = 3;
    break;

  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    BuiltinIndex = 4;
    break;

  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    BuiltinIndex = 5;
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    BuiltinIndex = 6;
    break;

  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    BuiltinIndex = 7;
    break;

  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    BuiltinIndex = 8;
    break;

  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    BuiltinIndex = 9;
    break;

  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    BuiltinIndex = 10;
    break;

  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    BuiltinIndex = 11;
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
    BuiltinIndex = 12;
    NumFixed = 2;
    break;

  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
    BuiltinIndex = 13;
    NumFixed = 2;
    ResultType = Context.BoolTy;
    break;

  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    BuiltinIndex = 14;
    break;

  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
    BuiltinIndex = 15;
    NumFixed = 0;
    ResultType = Context.VoidTy;
    break;

  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    BuiltinIndex = 16;
    break;
  }
  // Now that we know how many fixed arguments we expect, first check that we
  // have at least that many.
  if (TheCall->getNumArgs() < 1 + NumFixed) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 + NumFixed << TheCall->getNumArgs()
        << Callee->getSourceRange();
    return ExprError();
  }

  Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst)
      << Callee->getSourceRange();

  if (WarnAboutSemanticsChange) {
    Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change)
        << Callee->getSourceRange();
  }

  // Get the decl for the concrete builtin from this, we can tell what the
  // concrete integer type we should convert to is.
  unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
  StringRef NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
  FunctionDecl *NewBuiltinDecl;
  if (NewBuiltinID == BuiltinID)
    NewBuiltinDecl = FDecl;
  else {
    // Perform builtin lookup to avoid redeclaring it.
    DeclarationName DN(&Context.Idents.get(NewBuiltinName));
    LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
    LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
    assert(Res.getFoundDecl());
    NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
    if (!NewBuiltinDecl)
      return ExprError();
  }

  // The first argument --- the pointer --- has a fixed type; we
  // deduce the types of the rest of the arguments accordingly.  Walk
  // the remaining arguments, converting them to the deduced value type.
  for (unsigned i = 0; i != NumFixed; ++i) {
    ExprResult Arg = TheCall->getArg(i + 1);

    // GCC does an implicit conversion to the pointer or integer ValType. This
    // can fail in some cases (1i -> int**), check for this error case now.
    // Initialize the argument.
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                   ValType, /*consume*/ false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();

    // Okay, we have something that *can* be converted to the right type. Check
    // to see if there is a potentially weird extension going on here. This can
    // happen when you do an atomic operation on something like a char* and
    // pass in 42. The 42 gets converted to char. This is even more strange
    // for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    TheCall->setArg(i + 1, Arg.get());
  }

  // Create a new DeclRefExpr to refer to the new decl.
  DeclRefExpr *NewDRE = DeclRefExpr::Create(
      Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
      /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
      DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());

  // Set the callee in the CallExpr.
  // FIXME: This loses syntactic information.
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  // Prohibit problematic uses of bit-precise integer types with atomic
  // builtins. The arguments would have already been converted to the first
  // argument's type, so only need to check the first argument.
  const auto *BitIntValType = ValType->getAs<BitIntType>();
  if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) {
    Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
    return ExprError();
  }

  return TheCallResult;
}
/// SemaBuiltinNontemporalOverloaded - We have a call to
/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
/// overloaded function based on the pointer type of its last argument.
///
/// This function goes through and does final semantic checking for these
/// builtins.
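///
/// For example, '__builtin_nontemporal_store(1.0f, p)' with 'float *p' is
/// given type void here, while '__builtin_nontemporal_load(p)' gets type
/// float, both deduced from the pointee type of p.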
ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
  unsigned BuiltinID = FDecl->getBuiltinID();
  assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
          BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
         "Unexpected nontemporal load/store builtin!");
  bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
  unsigned numArgs = isStore ? 2 : 1;

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, numArgs))
    return ExprError();

  // Inspect the last argument of the nontemporal builtin. This should always
  // be a pointer type, from which we imply the type of the memory access.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(numArgs - 1);
  ExprResult PointerArgResult =
      DefaultFunctionArrayLvalueConversion(PointerArg);

  if (PointerArgResult.isInvalid())
    return ExprError();
  PointerArg = PointerArgResult.get();
  TheCall->setArg(numArgs - 1, PointerArg);

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return ExprError();
  }

  QualType ValType = pointerType->getPointeeType();

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
      !ValType->isVectorType()) {
    Diag(DRE->getBeginLoc(),
         diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return ExprError();
  }

  if (!isStore) {
    TheCall->setType(ValType);
    return TheCallResult;
  }

  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return ExprError();

  TheCall->setArg(0, ValArg.get());
  TheCall->setType(Context.VoidTy);
  return TheCallResult;
}
/// CheckObjCString - Checks that the argument to the builtin
/// CFString constructor is correct.
/// Note: It might also make sense to do the UTF-16 conversion here (would
/// simplify the backend).
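///
/// For example, the literal in __builtin___CFStringMakeConstantString("msg")
/// is validated here.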
bool Sema::CheckObjCString(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);

  if (!Literal || !Literal->isOrdinary()) {
    Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
        << Arg->getSourceRange();
    return true;
  }

  if (Literal->containsNonAsciiOrNull()) {
    StringRef String = Literal->getString();
    unsigned NumBytes = String.size();
    SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
    const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
    llvm::UTF16 *ToPtr = &ToBuf[0];

    llvm::ConversionResult Result =
        llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
                                 ToPtr + NumBytes, llvm::strictConversion);
    // Check for conversion failure.
    if (Result != llvm::conversionOK)
      Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
          << Arg->getSourceRange();
  }
  return false;
}
/// CheckOSLogFormatStringArg - Checks that the format string argument to the
/// os_log() and os_trace() functions is correct, and converts it to
/// const char *.
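///
/// For example, the format argument of __builtin_os_log_format(buf, "%d", x)
/// is checked and converted here.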
ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  auto *Literal = dyn_cast<StringLiteral>(Arg);
  if (!Literal) {
    if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
      Literal = ObjcLiteral->getString();
    }
  }

  if (!Literal || (!Literal->isOrdinary() && !Literal->isUTF8())) {
    return ExprError(
        Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
        << Arg->getSourceRange());
  }

  ExprResult Result(Literal);
  QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(Context, ResultTy, false);
  Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
  return Result;
}
/// Check that the user is calling the appropriate va_start builtin for the
/// target and calling convention.
static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
  const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
  bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
  bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
                    TT.getArch() == llvm::Triple::aarch64_32);
  bool IsWindows = TT.isOSWindows();
  bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
  if (IsX64 || IsAArch64) {
    CallingConv CC = CC_C;
    if (const FunctionDecl *FD = S.getCurFunctionDecl())
      CC = FD->getType()->castAs<FunctionType>()->getCallConv();
    if (IsMSVAStart) {
      // Don't allow this in System V ABI functions.
      if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_ms_va_start_used_in_sysv_function);
    } else {
      // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
      // On x64 Windows, don't allow this in System V ABI functions.
      // (Yes, that means there's no corresponding way to support variadic
      // System V ABI functions on Windows.)
      if ((IsWindows && CC == CC_X86_64SysV) ||
          (!IsWindows && CC == CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_va_start_used_in_wrong_abi_function)
               << !IsWindows;
    }
    return false;
  }

  if (IsMSVAStart)
    return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
  return false;
}
static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
                                             ParmVarDecl **LastParam = nullptr) {
  // Determine whether the current function, block, or obj-c method is variadic
  // and get its parameter list.
  bool IsVariadic = false;
  ArrayRef<ParmVarDecl *> Params;
  DeclContext *Caller = S.CurContext;
  if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
    IsVariadic = Block->isVariadic();
    Params = Block->parameters();
  } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
    IsVariadic = FD->isVariadic();
    Params = FD->parameters();
  } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
    IsVariadic = MD->isVariadic();
    // FIXME: This isn't correct for methods (results in bogus warning).
    Params = MD->parameters();
  } else if (isa<CapturedDecl>(Caller)) {
    // We don't support va_start in a CapturedDecl.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
    return true;
  } else {
    // This must be some other declcontext that parses exprs.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
    return true;
  }

  if (!IsVariadic) {
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
    return true;
  }

  if (LastParam)
    *LastParam = Params.empty() ? nullptr : Params.back();

  return false;
}
/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
/// for validity. Emit an error and return true on failure; return false
/// on success.
bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
  Expr *Fn = TheCall->getCallee();

  if (checkVAStartABI(*this, BuiltinID, Fn))
    return true;

  // In C2x mode, va_start only needs one argument. However, the builtin still
  // requires two arguments (which matches the behavior of the GCC builtin);
  // <stdarg.h> passes `0` as the second argument in C2x mode.
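  // For example, in C2x the library's 'va_start(ap)' can expand to
  // '__builtin_va_start(ap, 0)', which is accepted by the check below.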
  if (checkArgCount(*this, TheCall, 2))
    return true;

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, TheCall, 0))
    return true;

  // Check that the current function is variadic, and get its last parameter.
  ParmVarDecl *LastParam;
  if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
    return true;

  // Verify that the second argument to the builtin is the last argument of the
  // current function or method. In C2x mode, if the second argument is an
  // integer constant expression with value 0, then we don't bother with this
  // check.
  bool SecondArgIsLastNamedArgument = false;
  const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
  if (std::optional<llvm::APSInt> Val =
          TheCall->getArg(1)->getIntegerConstantExpr(Context);
      Val && LangOpts.C2x && *Val == 0)
    return false;

  // These are valid if SecondArgIsLastNamedArgument is false after the next
  // block.
  QualType Type;
  SourceLocation ParamLoc;
  bool IsCRegister = false;

  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
    if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
      SecondArgIsLastNamedArgument = PV == LastParam;

      Type = PV->getType();
      ParamLoc = PV->getLocation();
      IsCRegister =
          PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
    }
  }

  if (!SecondArgIsLastNamedArgument)
    Diag(TheCall->getArg(1)->getBeginLoc(),
         diag::warn_second_arg_of_va_start_not_last_named_param);
  else if (IsCRegister || Type->isReferenceType() ||
           Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
             // Promotable integers are UB, but enumerations need a bit of
             // extra checking to see what their promotable type actually is.
             if (!Context.isPromotableIntegerType(Type))
               return false;
             if (!Type->isEnumeralType())
               return true;
             const EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
             return !(ED &&
                      Context.typesAreCompatible(ED->getPromotionType(), Type));
           }()) {
    unsigned Reason = 0;
    if (Type->isReferenceType())
      Reason = 1;
    else if (IsCRegister)
      Reason = 2;
    Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
    Diag(ParamLoc, diag::note_parameter_type) << Type;
  }

  return false;
}
bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
  auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool {
    const LangOptions &LO = getLangOpts();

    if (LO.CPlusPlus)
      return Arg->getType()
                 .getCanonicalType()
                 .getTypePtr()
                 ->getPointeeType()
                 .withoutLocalFastQualifiers() == Context.CharTy;

    // In C, allow aliasing through `char *`, this is required for AArch64 at
    // least.
    return true;
  };

  // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
  //                 const char *named_addr);

  Expr *Func = Call->getCallee();

  if (Call->getNumArgs() < 3)
    return Diag(Call->getEndLoc(),
                diag::err_typecheck_call_too_few_args_at_least)
           << 0 /*function call*/ << 3 << Call->getNumArgs();

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, Call, 0))
    return true;

  // Check that the current function is variadic.
  if (checkVAStartIsInVariadicFunction(*this, Func))
    return true;

  // __va_start on Windows does not validate the parameter qualifiers

  const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
  const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();

  const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
  const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();

  const QualType &ConstCharPtrTy =
      Context.getPointerType(Context.CharTy.withConst());
  if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1))
    Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
        << 0                                      /* qualifier difference */
        << 3                                      /* parameter mismatch */
        << 2 << Arg1->getType() << ConstCharPtrTy;

  const QualType SizeTy = Context.getSizeType();
  if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
    Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg2->getType() << SizeTy << 1 /* different class */
        << 0                              /* qualifier difference */
        << 3                              /* parameter mismatch */
        << 3 << Arg2->getType() << SizeTy;

  return false;
}
/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends. This is declared to take (...), so we have to check everything.
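///
/// For example, __builtin_isgreater(x, y) requires both arguments to promote
/// to a real floating type.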
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(
      OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}
/// SemaBuiltinFPClassification - Handle functions like
/// __builtin_isnan and friends. This is declared to take (...), so we have
/// to check everything. We expect the last argument to be a floating point
/// value.
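///
/// For example, __builtin_isnan(x) arrives here with NumArgs == 1, while
/// __builtin_isfpclass(x, mask) arrives with NumArgs == 2 and has its mask
/// argument range-checked below.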
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  if (checkArgCount(*this, TheCall, NumArgs))
    return true;

  // Find out position of floating-point argument.
  unsigned FPArgNo = (NumArgs == 2) ? 0 : NumArgs - 1;

  // We can count on all parameters preceding the floating-point just being int.
  // Try all of those.
  for (unsigned i = 0; i < FPArgNo; ++i) {
    Expr *Arg = TheCall->getArg(i);

    if (Arg->isTypeDependent())
      return false;

    ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);

    if (Res.isInvalid())
      return true;
    TheCall->setArg(i, Res.get());
  }

  Expr *OrigArg = TheCall->getArg(FPArgNo);

  if (OrigArg->isTypeDependent())
    return false;

  // Usual Unary Conversions will convert half to float, which we want for
  // machines that use fp16 conversion intrinsics. Else, we want to leave the
  // type how it is, but do normal L->Rvalue conversions.
  if (Context.getTargetInfo().useFP16ConversionIntrinsics())
    OrigArg = UsualUnaryConversions(OrigArg).get();
  else
    OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
  TheCall->setArg(FPArgNo, OrigArg);

  // This operation requires a non-_Complex floating-point number.
  if (!OrigArg->getType()->isRealFloatingType())
    return Diag(OrigArg->getBeginLoc(),
                diag::err_typecheck_call_invalid_unary_fp)
           << OrigArg->getType() << OrigArg->getSourceRange();

  // __builtin_isfpclass has an integer parameter that specifies the test mask.
  // It is passed in (...), so it should be analyzed completely here.
  if (NumArgs == 2)
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, llvm::fcAllFlags))
      return true;

  return false;
}
/// Perform semantic analysis for a call to __builtin_complex.
bool Sema::SemaBuiltinComplex(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  bool Dependent = false;
  for (unsigned I = 0; I != 2; ++I) {
    Expr *Arg = TheCall->getArg(I);
    QualType T = Arg->getType();
    if (T->isDependentType()) {
      Dependent = true;
      continue;
    }

    // Despite supporting _Complex int, GCC requires a real floating point type
    // for the operands of __builtin_complex.
    if (!T->isRealFloatingType()) {
      return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp)
             << Arg->getType() << Arg->getSourceRange();
    }

    ExprResult Converted = DefaultLvalueConversion(Arg);
    if (Converted.isInvalid())
      return true;
    TheCall->setArg(I, Converted.get());
  }

  if (Dependent) {
    TheCall->setType(Context.DependentTy);
    return false;
  }

  Expr *Real = TheCall->getArg(0);
  Expr *Imag = TheCall->getArg(1);
  if (!Context.hasSameType(Real->getType(), Imag->getType())) {
    return Diag(Real->getBeginLoc(),
                diag::err_typecheck_call_different_arg_types)
           << Real->getType() << Imag->getType()
           << Real->getSourceRange() << Imag->getSourceRange();
  }

  // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers;
  // don't allow this builtin to form those types either.
  // FIXME: Should we allow these types?
  if (Real->getType()->isFloat16Type())
    return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
           << "_Float16";
  if (Real->getType()->isHalfType())
    return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
           << "__fp16";

  TheCall->setType(Context.getComplexType(Real->getType()));
  return false;
}
// Customized Sema Checking for VSX builtins that have the following signature:
// vector [...] builtinName(vector [...], vector [...], const int);
// Which takes the same type of vectors (any legal vector type) for the first
// two arguments and takes a compile time constant for the third argument.
// Example builtins are:
// vector double vec_xxpermdi(vector double, vector double, int);
// vector short vec_xxsldwi(vector short, vector short, int);
bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
  unsigned ExpectedNumArgs = 3;
  if (checkArgCount(*this, TheCall, ExpectedNumArgs))
    return true;

  // Check the third argument is a compile time constant
  if (!TheCall->getArg(2)->isIntegerConstantExpr(Context))
    return Diag(TheCall->getBeginLoc(),
                diag::err_vsx_builtin_nonconstant_argument)
           << 3 /* argument index */ << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(2)->getBeginLoc(),
                          TheCall->getArg(2)->getEndLoc());

  QualType Arg1Ty = TheCall->getArg(0)->getType();
  QualType Arg2Ty = TheCall->getArg(1)->getType();

  // Check the type of argument 1 and argument 2 are vectors.
  SourceLocation BuiltinLoc = TheCall->getBeginLoc();
  if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
      (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
    return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
           << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                          TheCall->getArg(1)->getEndLoc());
  }

  // Check the first two arguments are the same type.
  if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
    return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
           << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                          TheCall->getArg(1)->getEndLoc());
  }

  // When default clang type checking is turned off and the customized type
  // checking is used, the returning type of the function must be explicitly
  // set. Otherwise it is _Bool by default.
  TheCall->setType(Arg1Ty);

  return false;
}
/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
// This is declared to take (...), so we have to check everything.
ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
  if (TheCall->getNumArgs() < 2)
    return ExprError(Diag(TheCall->getEndLoc(),
                          diag::err_typecheck_call_too_few_args_at_least)
                     << 0 /*function call*/ << 2 << TheCall->getNumArgs()
                     << TheCall->getSourceRange());

  // Determine which of the following types of shufflevector we're checking:
  // 1) unary, vector mask: (lhs, mask)
  // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
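  // For example, __builtin_shufflevector(a, b, 0, 4, 1, 5) uses the binary,
  // scalar-mask form to interleave the low elements of two 4-element vectors.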
  QualType resType = TheCall->getArg(0)->getType();
  unsigned numElements = 0;

  if (!TheCall->getArg(0)->isTypeDependent() &&
      !TheCall->getArg(1)->isTypeDependent()) {
    QualType LHSType = TheCall->getArg(0)->getType();
    QualType RHSType = TheCall->getArg(1)->getType();

    if (!LHSType->isVectorType() || !RHSType->isVectorType())
      return ExprError(
          Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
          << TheCall->getDirectCallee()
          << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                         TheCall->getArg(1)->getEndLoc()));

    numElements = LHSType->castAs<VectorType>()->getNumElements();
    unsigned numResElements = TheCall->getNumArgs() - 2;

    // Check to see if we have a call with 2 vector arguments, the unary shuffle
    // with mask. If so, verify that RHS is an integer vector type with the
    // same number of elts as lhs.
    if (TheCall->getNumArgs() == 2) {
      if (!RHSType->hasIntegerRepresentation() ||
          RHSType->castAs<VectorType>()->getNumElements() != numElements)
        return ExprError(Diag(TheCall->getBeginLoc(),
                              diag::err_vec_builtin_incompatible_vector)
                         << TheCall->getDirectCallee()
                         << SourceRange(TheCall->getArg(1)->getBeginLoc(),
                                        TheCall->getArg(1)->getEndLoc()));
    } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_vec_builtin_incompatible_vector)
                       << TheCall->getDirectCallee()
                       << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                                      TheCall->getArg(1)->getEndLoc()));
    } else if (numElements != numResElements) {
      QualType eltType = LHSType->castAs<VectorType>()->getElementType();
      resType = Context.getVectorType(eltType, numResElements,
                                      VectorType::GenericVector);
    }
  }

  for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
    if (TheCall->getArg(i)->isTypeDependent() ||
        TheCall->getArg(i)->isValueDependent())
      continue;

    std::optional<llvm::APSInt> Result;
    if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context)))
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_nonconstant_argument)
                       << TheCall->getArg(i)->getSourceRange());

    // Allow -1 which will be translated to undef in the IR.
    if (Result->isSigned() && Result->isAllOnes())
      continue;

    if (Result->getActiveBits() > 64 ||
        Result->getZExtValue() >= numElements * 2)
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_argument_too_large)
                       << TheCall->getArg(i)->getSourceRange());
  }

  SmallVector<Expr *, 32> exprs;

  for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
    exprs.push_back(TheCall->getArg(i));
    TheCall->setArg(i, nullptr);
  }

  return new (Context) ShuffleVectorExpr(Context, exprs, resType,
                                         TheCall->getCallee()->getBeginLoc(),
                                         TheCall->getRParenLoc());
}
/// SemaConvertVectorExpr - Handle __builtin_convertvector
ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
                                       SourceLocation BuiltinLoc,
                                       SourceLocation RParenLoc) {
  ExprValueKind VK = VK_PRValue;
  ExprObjectKind OK = OK_Ordinary;
  QualType DstTy = TInfo->getType();
  QualType SrcTy = E->getType();

  if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
    return ExprError(Diag(BuiltinLoc, diag::err_convertvector_non_vector)
                     << E->getSourceRange());
  if (!DstTy->isVectorType() && !DstTy->isDependentType())
    return ExprError(Diag(BuiltinLoc, diag::err_convertvector_non_vector_type));

  if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
    unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements();
    unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements();
    if (SrcElts != DstElts)
      return ExprError(Diag(BuiltinLoc,
                            diag::err_convertvector_incompatible_vector)
                       << E->getSourceRange());
  }

  return new (Context)
      ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
}
/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
// This is declared to take (const void*, ...) and can take two
// optional constant int args.
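// For example, __builtin_prefetch(p, 1, 3) prefetches p for write with
// maximal temporal locality; the second argument must be 0 or 1 and the
// third 0..3, as enforced below.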
bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs > 3)
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();

  // Argument 0 is checked for us and the remaining arguments must be
  // constant integers.
  for (unsigned i = 1; i != NumArgs; ++i)
    if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3))
      return true;

  return false;
}
/// SemaBuiltinArithmeticFence - Handle __arithmetic_fence.
bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) {
  if (!Context.getTargetInfo().checkArithmeticFenceSupported())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
  if (checkArgCount(*this, TheCall, 1))
    return true;
  Expr *Arg = TheCall->getArg(0);
  if (Arg->isInstantiationDependent())
    return false;

  QualType ArgTy = Arg->getType();
  if (!ArgTy->hasFloatingRepresentation())
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector)
           << ArgTy;
  if (Arg->isLValue()) {
    ExprResult FirstArg = DefaultLvalueConversion(Arg);
    TheCall->setArg(0, FirstArg.get());
  }
  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}
/// SemaBuiltinAssume - Handle __assume (MS Extension).
// __assume does not evaluate its arguments, and should warn if its argument
// has side effects.
bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
  Expr *Arg = TheCall->getArg(0);
  if (Arg->isInstantiationDependent()) return false;

  if (Arg->HasSideEffects(Context))
    Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects)
        << Arg->getSourceRange()
        << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();

  return false;
}
/// Handle __builtin_alloca_with_align. This is declared
/// as (size_t, size_t) where the second size_t must be a power of 2 greater
/// than 8.
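///
/// For example, __builtin_alloca_with_align(n, 32) requests a 32-bit
/// (4-byte) aligned allocation; the alignment argument is given in bits.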
bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
  // The alignment must be a constant integer.
  Expr *Arg = TheCall->getArg(1);

  // We can't check the value of a dependent argument.
  if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
    if (const auto *UE =
            dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts()))
      if (UE->getKind() == UETT_AlignOf ||
          UE->getKind() == UETT_PreferredAlignOf)
        Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof)
            << Arg->getSourceRange();

    llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context);

    if (!Result.isPowerOf2())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
             << Arg->getSourceRange();

    if (Result < Context.getCharWidth())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small)
             << (unsigned)Context.getCharWidth() << Arg->getSourceRange();

    if (Result > std::numeric_limits<int32_t>::max())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big)
             << std::numeric_limits<int32_t>::max() << Arg->getSourceRange();
  }

  return false;
}
/// Handle __builtin_assume_aligned. This is declared
/// as (const void*, size_t, ...) and can take one optional constant int arg.
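///
/// For example, 'p = __builtin_assume_aligned(p, 64);', or with the optional
/// offset argument, '__builtin_assume_aligned(p, 32, 4)'.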
bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
  if (checkArgCountRange(*this, TheCall, 2, 3))
    return true;

  unsigned NumArgs = TheCall->getNumArgs();
  Expr *FirstArg = TheCall->getArg(0);

  {
    ExprResult FirstArgResult =
        DefaultFunctionArrayLvalueConversion(FirstArg);
    if (checkBuiltinArgument(*this, TheCall, 0))
      return true;
    /// The in-place update of FirstArg by checkBuiltinArgument is ignored.
    TheCall->setArg(0, FirstArgResult.get());
  }

  // The alignment must be a constant integer.
  Expr *SecondArg = TheCall->getArg(1);

  // We can't check the value of a dependent argument.
  if (!SecondArg->isValueDependent()) {
    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, 1, Result))
      return true;

    if (!Result.isPowerOf2())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
             << SecondArg->getSourceRange();

    if (Result > Sema::MaximumAlignment)
      Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
          << SecondArg->getSourceRange() << Sema::MaximumAlignment;
  }

  if (NumArgs > 2) {
    Expr *ThirdArg = TheCall->getArg(2);
    if (convertArgumentToType(*this, ThirdArg, Context.getSizeType()))
      return true;
    TheCall->setArg(2, ThirdArg);
  }

  return false;
}
bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
  unsigned BuiltinID =
      cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
  bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;

  unsigned NumArgs = TheCall->getNumArgs();
  unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
  if (NumArgs < NumRequiredArgs) {
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /* function call */ << NumRequiredArgs << NumArgs
           << TheCall->getSourceRange();
  }
  if (NumArgs >= NumRequiredArgs + 0x100) {
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
           << TheCall->getSourceRange();
  }
  unsigned i = 0;

  // For formatting call, check buffer arg.
  if (!IsSizeCall) {
    ExprResult Arg(TheCall->getArg(i));
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        Context, Context.VoidPtrTy, false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check string literal arg.
  unsigned FormatIdx = i;
  {
    ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Make sure variadic args are scalar.
  unsigned FirstDataArg = i;
  while (i < NumArgs) {
    ExprResult Arg = DefaultVariadicArgumentPromotion(
        TheCall->getArg(i), VariadicFunction, nullptr);
    if (Arg.isInvalid())
      return true;
    CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
    if (ArgSize.getQuantity() >= 0x100) {
      return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
             << i << (int)ArgSize.getQuantity() << 0xff
             << TheCall->getSourceRange();
    }
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check formatting specifiers. NOTE: We're only doing this for the non-size
  // call to avoid duplicate diagnostics.
  if (!IsSizeCall) {
    llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
    ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
    bool Success = CheckFormatArguments(
        Args, FAPK_Variadic, FormatIdx, FirstDataArg, FST_OSLog,
        VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
        CheckedVarArgs);
    if (!Success)
      return true;
  }

  if (IsSizeCall) {
    TheCall->setType(Context.getSizeType());
  } else {
    TheCall->setType(Context.VoidPtrTy);
  }
  return false;
}
/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression.
bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                                  llvm::APSInt &Result) {
  Expr *Arg = TheCall->getArg(ArgNum);
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;

  std::optional<llvm::APSInt> R;
  if (!(R = Arg->getIntegerConstantExpr(Context)))
    return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
           << FDecl->getDeclName() << Arg->getSourceRange();
  Result = *R;
  return false;
}
/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression in the range [Low, High].
bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                       int Low, int High, bool RangeIsError) {
  if (isConstantEvaluated())
    return false;
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
    if (RangeIsError)
      return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
             << toString(Result, 10) << Low << High << Arg->getSourceRange();
    else
      // Defer the warning until we know if the code will be emitted so that
      // dead code can ignore this.
      DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                          PDiag(diag::warn_argument_invalid_range)
                              << toString(Result, 10) << Low << High
                              << Arg->getSourceRange());
  }

  return false;
}
/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of
/// CallExpr TheCall is a constant expression that is a multiple of Num.
bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
                                          unsigned Num) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() % Num != 0)
    return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
           << Num << Arg->getSourceRange();

  return false;
}
/// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
/// constant expression representing a power of 2.
bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
  // and only if x is a power of 2.
  if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
         << Arg->getSourceRange();
}
static bool IsShiftedByte(llvm::APSInt Value) {
  if (Value.isNegative())
    return false;

  // Check if it's a shifted byte, by shifting it down
  while (true) {
    // If the value fits in the bottom byte, the check passes.
    if (Value < 0x100)
      return true;

    // Otherwise, if the value has _any_ bits in the bottom byte, the check
    // fails.
    if ((Value & 0xFF) != 0)
      return false;

    // If the bottom 8 bits are all 0, but something above that is nonzero,
    // then shifting the value right by 8 bits won't affect whether it's a
    // shifted byte or not. So do that, and go round again.
    Value >>= 8;
  }
}
/// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
/// a constant expression representing an arbitrary byte value shifted left by
/// a multiple of 8 bits.
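///
/// For example, 0x1200 (0x12 << 8) is accepted, while 0x1201 is rejected.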
bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
                                             unsigned ArgBits) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Truncate to the given size.
  Result = Result.getLoBits(ArgBits);
  Result.setIsUnsigned(true);

  if (IsShiftedByte(Result))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte)
         << Arg->getSourceRange();
}
/// SemaBuiltinConstantArgShiftedByteOrXXFF - Check if argument ArgNum of
/// TheCall is a constant expression representing either a shifted byte value,
/// or a value of the form 0x??FF (i.e. a member of the arithmetic progression
/// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
/// Arm MVE intrinsics.
bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
                                                   int ArgNum,
                                                   unsigned ArgBits) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Truncate to the given size.
  Result = Result.getLoBits(ArgBits);
  Result.setIsUnsigned(true);

  // Check to see if it's in either of the required forms.
  if (IsShiftedByte(Result) ||
      (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF))
    return false;

  return Diag(TheCall->getBeginLoc(),
              diag::err_argument_not_shifted_byte_or_xxff)
         << Arg->getSourceRange();
}
/// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
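///
/// For example, __builtin_arm_irg(p, mask), __builtin_arm_addg(p, 15) and
/// __builtin_arm_subp(p, q) are all checked here.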
bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
  if (BuiltinID == AArch64::BI__builtin_arm_irg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    ExprResult SecArg = DefaultLvalueConversion(Arg1);
    if (SecArg.isInvalid())
      return true;
    QualType SecArgType = SecArg.get()->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);
    return false;
  }

  if (BuiltinID == AArch64::BI__builtin_arm_addg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;

    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);

    // Second arg must be a constant in range [0,15]
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();

    QualType SecArgType = Arg1->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();
    TheCall->setType(Context.IntTy);
    return false;
  }

  if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg) {
    if (checkArgCount(*this, TheCall, 1))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;

    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    if (BuiltinID == AArch64::BI__builtin_arm_ldg)
      TheCall->setType(FirstArgType);
    return false;
  }

  if (BuiltinID == AArch64::BI__builtin_arm_subp) {
    Expr *ArgA = TheCall->getArg(0);
    Expr *ArgB = TheCall->getArg(1);

    ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA);
    ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB);

    if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
      return true;

    QualType ArgTypeA = ArgExprA.get()->getType();
    QualType ArgTypeB = ArgExprB.get()->getType();

    auto isNull = [&](Expr *E) -> bool {
      return E->isNullPointerConstant(
          Context, Expr::NPC_ValueDependentIsNotNull);
    };

    // argument should be either a pointer or null
    if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
             << "first" << ArgTypeA << ArgA->getSourceRange();

    if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
             << "second" << ArgTypeB << ArgB->getSourceRange();

    // Ensure Pointee types are compatible
    if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
        ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
      QualType pointeeA = ArgTypeA->getPointeeType();
      QualType pointeeB = ArgTypeB->getPointeeType();
      if (!Context.typesAreCompatible(
              Context.getCanonicalType(pointeeA).getUnqualifiedType(),
              Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
        return Diag(TheCall->getBeginLoc(),
                    diag::err_typecheck_sub_ptr_compatible)
               << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
               << ArgB->getSourceRange();
      }
    }

    // at least one argument should be pointer type
    if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
             << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
9112 if (isNull(ArgA
)) // adopt type of the other pointer
9113 ArgExprA
= ImpCastExprToType(ArgExprA
.get(), ArgTypeB
, CK_NullToPointer
);
9116 ArgExprB
= ImpCastExprToType(ArgExprB
.get(), ArgTypeA
, CK_NullToPointer
);
9118 TheCall
->setArg(0, ArgExprA
.get());
9119 TheCall
->setArg(1, ArgExprB
.get());
9120 TheCall
->setType(Context
.LongLongTy
);
9123 assert(false && "Unhandled ARM MTE intrinsic");
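
// Illustrative use of the checks above: for `T *q = __builtin_arm_irg(p, m)`
// the result adopts the type of the pointer operand `p`, while
// `__builtin_arm_subp(a, b)` is given type `long long`; a non-pointer first
// operand to __builtin_arm_irg is rejected with
// err_memtag_arg_must_be_pointer.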

/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                    int ArgNum, unsigned ExpectedFieldNum,
                                    bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(Fields, ":");

  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one of the forms described in ACLE then
  // we can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      ValidString &= Fields[0].starts_with_insensitive("cp") ||
                     Fields[0].starts_with_insensitive("p");
      if (ValidString)
        Fields[0] = Fields[0].drop_front(
            Fields[0].starts_with_insensitive("cp") ? 2 : 1);

      ValidString &= Fields[2].starts_with_insensitive("c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(1);

      if (FiveFields) {
        ValidString &= Fields[3].starts_with_insensitive("c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(1);
      }
    }

    SmallVector<int, 5> Ranges;
    if (FiveFields)
      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
    else
      Ranges.append({15, 7, 15});

    for (unsigned i = 0; i < Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(10, IntField);
      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
    }

    if (!ValidString)
      return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // This code validates writes to PSTATE registers.

    // Not a write.
    if (TheCall->getNumArgs() != 2)
      return false;

    // The 128-bit system register accesses do not touch PSTATE.
    if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
        BuiltinID == AArch64::BI__builtin_arm_wsr128)
      return false;

    // These are the named PSTATE accesses using "MSR (immediate)"
    // instructions, along with the upper limit on the immediates allowed.
    auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
                        .CaseLower("spsel", 15)
                        .CaseLower("daifclr", 15)
                        .CaseLower("daifset", 15)
                        .CaseLower("pan", 15)
                        .CaseLower("uao", 15)
                        .CaseLower("dit", 15)
                        .CaseLower("ssbs", 15)
                        .CaseLower("tco", 15)
                        .CaseLower("allint", 1)
                        .Default(std::nullopt);

    // If this is not a named PSTATE, just continue without validating, as this
    // will be lowered to an "MSR (register)" instruction directly.
    if (!MaxLimit)
      return false;

    // Here we only allow constants in the range for that pstate, as required
    // by the ACLE.
    //
    // While clang also accepts the names of system registers in its ACLE
    // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
    // as the value written via a register is different to the value used as an
    // immediate to have the same effect. e.g., for the instruction `msr tco,
    // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
    // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
    //
    // If a programmer wants to codegen the MSR (register) form of `msr tco,
    // xN`, they can still do so by specifying the register using five
    // colon-separated numbers in a string.
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, *MaxLimit);
  }

  return false;
}
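
// Illustrative register strings for the validation above: a five-field ARM
// coprocessor form such as "cp15:0:c13:c0:3", a three-field form such as
// "p7:1:c15", or (when AllowName permits) a plain register name, which is only
// range-checked when it names one of the PSTATE accesses listed above.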

/// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
/// Emit an error and return true on failure; return false on success.
/// TypeStr is a string containing the type descriptor of the value returned by
/// the builtin and the descriptors of the expected type of the arguments.
bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
                                 const char *TypeStr) {

  assert((TypeStr[0] != '\0') &&
         "Invalid types in PPC MMA builtin declaration");

  unsigned Mask = 0;
  unsigned ArgNum = 0;

  // The first type in TypeStr is the type of the value returned by the
  // builtin. So we first read that type and change the type of TheCall.
  QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
  TheCall->setType(type);

  while (*TypeStr != '\0') {
    Mask = 0;
    QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    if (ArgNum >= TheCall->getNumArgs()) {
      ArgNum++;
      break;
    }

    Expr *Arg = TheCall->getArg(ArgNum);
    QualType PassedType = Arg->getType();
    QualType StrippedRVType = PassedType.getCanonicalType();

    // Strip Restrict/Volatile qualifiers.
    if (StrippedRVType.isRestrictQualified() ||
        StrippedRVType.isVolatileQualified())
      StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();

    // The only case where the argument type and expected type are allowed to
    // mismatch is if the argument type is a non-void pointer (or array) and
    // expected type is a void pointer.
    if (StrippedRVType != ExpectedType)
      if (!(ExpectedType->isVoidPointerType() &&
            (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
        return Diag(Arg->getBeginLoc(),
                    diag::err_typecheck_convert_incompatible)
               << PassedType << ExpectedType << 1 << 0 << 0;

    // If the value of the Mask is not 0, we have a constraint in the size of
    // the integer argument so here we ensure the argument is a constant that
    // is in the valid range.
    if (Mask != 0 &&
        SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
      return true;

    ArgNum++;
  }

  // In case we exited early from the previous loop, there are other types to
  // read from TypeStr. So we need to read them all to ensure we have the right
  // number of arguments in TheCall and if it is not the case, to display a
  // better error message.
  while (*TypeStr != '\0') {
    (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    ArgNum++;
  }
  if (checkArgCount(*this, TheCall, ArgNum))
    return true;

  return false;
}

/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
/// This checks that the target supports __builtin_longjmp and
/// that val is a constant 1.
bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());

  Expr *Arg = TheCall->getArg(1);
  llvm::APSInt Result;

  // TODO: This is less than ideal. Overload this to take a value.
  if (SemaBuiltinConstantArg(TheCall, 1, Result))
    return true;

  if (Result != 1)
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val)
           << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc());

  return false;
}
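
// For example, `__builtin_longjmp(env, 1)` is accepted here, while
// `__builtin_longjmp(env, 2)` is diagnosed with
// err_builtin_longjmp_invalid_val because only a constant value of 1 is
// supported.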

/// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
/// This checks that the target supports __builtin_setjmp.
bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
  return false;
}

class UncoveredArgHandler {
  enum { Unknown = -1, AllCovered = -2 };

  signed FirstUncoveredArg = Unknown;
  SmallVector<const Expr *, 4> DiagnosticExprs;

public:
  UncoveredArgHandler() = default;

  bool hasUncoveredArg() const {
    return (FirstUncoveredArg >= 0);
  }

  unsigned getUncoveredArg() const {
    assert(hasUncoveredArg() && "no uncovered argument");
    return FirstUncoveredArg;
  }

  void setAllCovered() {
    // A string has been found with all arguments covered, so clear out
    // the diagnostics.
    DiagnosticExprs.clear();
    FirstUncoveredArg = AllCovered;
  }

  void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
    assert(NewFirstUncoveredArg >= 0 && "Outside range");

    // Don't update if a previous string covers all arguments.
    if (FirstUncoveredArg == AllCovered)
      return;

    // UncoveredArgHandler tracks the highest uncovered argument index
    // and with it all the strings that match this index.
    if (NewFirstUncoveredArg == FirstUncoveredArg)
      DiagnosticExprs.push_back(StrExpr);
    else if (NewFirstUncoveredArg > FirstUncoveredArg) {
      DiagnosticExprs.clear();
      DiagnosticExprs.push_back(StrExpr);
      FirstUncoveredArg = NewFirstUncoveredArg;
    }
  }

  void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr);
};
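
// For example, given `printf("%d\n", x, y)`, the second data argument `y` is
// never consumed by the format string; UncoveredArgHandler records its index
// so that Diagnose() can emit warn_printf_data_arg_not_used for it.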

enum StringLiteralCheckType {
  SLCT_NotALiteral,
  SLCT_UncheckedLiteral,
  SLCT_CheckedLiteral
};

static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
                       BinaryOperatorKind BinOpKind,
                       bool AddendIsRight) {
  unsigned BitWidth = Offset.getBitWidth();
  unsigned AddendBitWidth = Addend.getBitWidth();
  // There might be negative interim results.
  if (Addend.isUnsigned()) {
    Addend = Addend.zext(++AddendBitWidth);
    Addend.setIsSigned(true);
  }
  // Adjust the bit width of the APSInts.
  if (AddendBitWidth > BitWidth) {
    Offset = Offset.sext(AddendBitWidth);
    BitWidth = AddendBitWidth;
  } else if (BitWidth > AddendBitWidth) {
    Addend = Addend.sext(BitWidth);
  }

  bool Ov = false;
  llvm::APSInt ResOffset = Offset;
  if (BinOpKind == BO_Add)
    ResOffset = Offset.sadd_ov(Addend, Ov);
  else {
    assert(AddendIsRight && BinOpKind == BO_Sub &&
           "operator must be add or sub with addend on the right");
    ResOffset = Offset.ssub_ov(Addend, Ov);
  }

  // We add an offset to a pointer here so we should support an offset as big
  // as possible.
  if (Ov) {
    assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
           "index (intermediate) result too big");
    Offset = Offset.sext(2 * BitWidth);
    sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
    return;
  }

  Offset = ResOffset;
}
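
// Illustrative behaviour of sumOffsets: while walking an expression such as
// `fmt + 2 + 3`, the constant addends 2 and 3 are folded into Offset one at a
// time; if the signed addition would overflow, Offset is widened to twice its
// bit width and the addition is retried.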

// This is a wrapper class around StringLiteral to support offsetted string
// literals as format strings. It takes the offset into account when returning
// the string and its length or the source locations to display notes
// correctly.
class FormatStringLiteral {
  const StringLiteral *FExpr;
  int64_t Offset;

public:
  FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
      : FExpr(fexpr), Offset(Offset) {}

  StringRef getString() const {
    return FExpr->getString().drop_front(Offset);
  }

  unsigned getByteLength() const {
    return FExpr->getByteLength() - getCharByteWidth() * Offset;
  }

  unsigned getLength() const { return FExpr->getLength() - Offset; }
  unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }

  StringLiteral::StringKind getKind() const { return FExpr->getKind(); }

  QualType getType() const { return FExpr->getType(); }

  bool isAscii() const { return FExpr->isOrdinary(); }
  bool isWide() const { return FExpr->isWide(); }
  bool isUTF8() const { return FExpr->isUTF8(); }
  bool isUTF16() const { return FExpr->isUTF16(); }
  bool isUTF32() const { return FExpr->isUTF32(); }
  bool isPascal() const { return FExpr->isPascal(); }

  SourceLocation getLocationOfByte(
      unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
      const TargetInfo &Target, unsigned *StartToken = nullptr,
      unsigned *StartTokenByteOffset = nullptr) const {
    return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
                                    StartToken, StartTokenByteOffset);
  }

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return FExpr->getBeginLoc().getLocWithOffset(Offset);
  }

  SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
};
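
// For example, when checking `printf("%d %s" + 3, p)`, the wrapper above
// presents only the tail "%s" to the format-string checker and shifts byte
// locations so that notes still point into the original literal.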

static void CheckFormatString(
    Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
    ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
    unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
    bool inFunctionCall, Sema::VariadicCallType CallType,
    llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
    bool IgnoreStringsWithoutSpecifiers);

static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
                                               const Expr *E);

// Determine if an expression is a string literal or constant string.
// If this function returns false on the arguments to a function expecting a
// format string, we will usually need to emit a warning.
// True string literals are then checked by CheckFormatString.
static StringLiteralCheckType
checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
                      Sema::FormatArgumentPassingKind APK, unsigned format_idx,
                      unsigned firstDataArg, Sema::FormatStringType Type,
                      Sema::VariadicCallType CallType, bool InFunctionCall,
                      llvm::SmallBitVector &CheckedVarArgs,
                      UncoveredArgHandler &UncoveredArg, llvm::APSInt Offset,
                      bool IgnoreStringsWithoutSpecifiers = false) {
  if (S.isConstantEvaluated())
    return SLCT_NotALiteral;
tryAgain:
  assert(Offset.isSigned() && "invalid offset");

  if (E->isTypeDependent() || E->isValueDependent())
    return SLCT_NotALiteral;

  E = E->IgnoreParenCasts();

  if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
    // Technically -Wformat-nonliteral does not warn about this case.
    // The behavior of printf and friends in this case is implementation
    // dependent. Ideally if the format string cannot be null then
    // it should have a 'nonnull' attribute in the function prototype.
    return SLCT_UncheckedLiteral;

  switch (E->getStmtClass()) {
  case Stmt::InitListExprClass:
    // Handle expressions like {"foobar"}.
    if (const clang::Expr *SLE = maybeConstEvalStringLiteral(S.Context, E)) {
      return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg,
                                   Type, CallType, /*InFunctionCall*/ false,
                                   CheckedVarArgs, UncoveredArg, Offset,
                                   IgnoreStringsWithoutSpecifiers);
    }
    return SLCT_NotALiteral;
  case Stmt::BinaryConditionalOperatorClass:
  case Stmt::ConditionalOperatorClass: {
    // The expression is a literal if both sub-expressions were, and it was
    // completely checked only if both sub-expressions were checked.
    const AbstractConditionalOperator *C =
        cast<AbstractConditionalOperator>(E);

    // Determine whether it is necessary to check both sub-expressions, for
    // example, because the condition expression is a constant that can be
    // evaluated at compile time.
    bool CheckLeft = true, CheckRight = true;

    bool Cond;
    if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(),
                                                 S.isConstantEvaluated())) {
      if (Cond)
        CheckRight = false;
      else
        CheckLeft = false;
    }

    // We need to maintain the offsets for the right and the left hand side
    // separately to check if every possible indexed expression is a valid
    // string literal. They might have different offsets for different string
    // literals in the end.
    StringLiteralCheckType Left;
    if (!CheckLeft)
      Left = SLCT_UncheckedLiteral;
    else {
      Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, APK, format_idx,
                                   firstDataArg, Type, CallType, InFunctionCall,
                                   CheckedVarArgs, UncoveredArg, Offset,
                                   IgnoreStringsWithoutSpecifiers);
      if (Left == SLCT_NotALiteral || !CheckRight) {
        return Left;
      }
    }

    StringLiteralCheckType Right = checkFormatStringExpr(
        S, C->getFalseExpr(), Args, APK, format_idx, firstDataArg, Type,
        CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
        IgnoreStringsWithoutSpecifiers);

    return (CheckLeft && Left < Right) ? Left : Right;
  }

  case Stmt::ImplicitCastExprClass:
    E = cast<ImplicitCastExpr>(E)->getSubExpr();
    goto tryAgain;

  case Stmt::OpaqueValueExprClass:
    if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
      E = src;
      goto tryAgain;
    }
    return SLCT_NotALiteral;

  case Stmt::PredefinedExprClass:
    // While __func__, etc., are technically not string literals, they
    // cannot contain format specifiers and thus are not a security
    // liability.
    return SLCT_UncheckedLiteral;

  case Stmt::DeclRefExprClass: {
    const DeclRefExpr *DR = cast<DeclRefExpr>(E);

    // As an exception, do not flag errors for variables binding to
    // const string literals.
    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
      bool isConstant = false;
      QualType T = DR->getType();

      if (const ArrayType *AT = S.Context.getAsArrayType(T)) {
        isConstant = AT->getElementType().isConstant(S.Context);
      } else if (const PointerType *PT = T->getAs<PointerType>()) {
        isConstant = T.isConstant(S.Context) &&
                     PT->getPointeeType().isConstant(S.Context);
      } else if (T->isObjCObjectPointerType()) {
        // In ObjC, there is usually no "const ObjectPointer" type,
        // so don't check if the pointee type is constant.
        isConstant = T.isConstant(S.Context);
      }

      if (isConstant) {
        if (const Expr *Init = VD->getAnyInitializer()) {
          // Look through initializers like const char c[] = { "foo" }
          if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
            if (InitList->isStringLiteralInit())
              Init = InitList->getInit(0)->IgnoreParenImpCasts();
          }
          return checkFormatStringExpr(
              S, Init, Args, APK, format_idx, firstDataArg, Type, CallType,
              /*InFunctionCall*/ false, CheckedVarArgs, UncoveredArg, Offset);
        }
      }

      // When the format argument is an argument of this function, and this
      // function also has the format attribute, there are several interactions
      // for which there shouldn't be a warning. For instance, when calling
      // v*printf from a function that has the printf format attribute, we
      // should not emit a warning about using `fmt`, even though it's not
      // constant, because the arguments have already been checked for the
      // caller of `logmessage`:
      //
      //  __attribute__((format(printf, 1, 2)))
      //  void logmessage(char const *fmt, ...) {
      //    va_list ap;
      //    va_start(ap, fmt);
      //    vprintf(fmt, ap);  /* do not emit a warning about "fmt" */
      //    ...
      //  }
      //
      // Another interaction that we need to support is calling a variadic
      // format function from a format function that has fixed arguments. For
      // instance:
      //
      //  __attribute__((format(printf, 1, 2)))
      //  void logstring(char const *fmt, char const *str) {
      //    printf(fmt, str);  /* do not emit a warning about "fmt" */
      //  }
      //
      // Same (and perhaps more relatably) for the variadic template case:
      //
      //  template<typename... Args>
      //  __attribute__((format(printf, 1, 2)))
      //  void log(const char *fmt, Args&&... args) {
      //    printf(fmt, forward<Args>(args)...);
      //    /* do not emit a warning about "fmt" */
      //  }
      //
      // Due to implementation difficulty, we only check the format, not the
      // format arguments, in all cases.
      //
      if (const auto *PV = dyn_cast<ParmVarDecl>(VD)) {
        if (const auto *D = dyn_cast<Decl>(PV->getDeclContext())) {
          for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) {
            bool IsCXXMember = false;
            if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
              IsCXXMember = MD->isInstance();

            bool IsVariadic = false;
            if (const FunctionType *FnTy = D->getFunctionType())
              IsVariadic = cast<FunctionProtoType>(FnTy)->isVariadic();
            else if (const auto *BD = dyn_cast<BlockDecl>(D))
              IsVariadic = BD->isVariadic();
            else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(D))
              IsVariadic = OMD->isVariadic();

            Sema::FormatStringInfo CallerFSI;
            if (Sema::getFormatStringInfo(PVFormat, IsCXXMember, IsVariadic,
                                          &CallerFSI)) {
              // We also check if the formats are compatible.
              // We can't pass a 'scanf' string to a 'printf' function.
              if (PV->getFunctionScopeIndex() == CallerFSI.FormatIdx &&
                  Type == S.GetFormatStringType(PVFormat)) {
                // Lastly, check that argument passing kinds transition in a
                // way that makes sense:
                // from a caller with FAPK_VAList, allow FAPK_VAList
                // from a caller with FAPK_Fixed, allow FAPK_Fixed
                // from a caller with FAPK_Fixed, allow FAPK_Variadic
                // from a caller with FAPK_Variadic, allow FAPK_VAList
                switch (combineFAPK(CallerFSI.ArgPassingKind, APK)) {
                case combineFAPK(Sema::FAPK_VAList, Sema::FAPK_VAList):
                case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Fixed):
                case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Variadic):
                case combineFAPK(Sema::FAPK_Variadic, Sema::FAPK_VAList):
                  return SLCT_UncheckedLiteral;
                }
              }
            }
          }
        }
      }
    }

    return SLCT_NotALiteral;
  }

  case Stmt::CallExprClass:
  case Stmt::CXXMemberCallExprClass: {
    const CallExpr *CE = cast<CallExpr>(E);
    if (const NamedDecl *ND =
            dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
      bool IsFirst = true;
      StringLiteralCheckType CommonResult;
      for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
        const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
        StringLiteralCheckType Result = checkFormatStringExpr(
            S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
            InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
            IgnoreStringsWithoutSpecifiers);
        if (IsFirst) {
          CommonResult = Result;
          IsFirst = false;
        }
      }
      if (!IsFirst)
        return CommonResult;

      if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
        unsigned BuiltinID = FD->getBuiltinID();
        if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
            BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
          const Expr *Arg = CE->getArg(0);
          return checkFormatStringExpr(
              S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
              InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
              IgnoreStringsWithoutSpecifiers);
        }
      }
    }
    if (const Expr *SLE = maybeConstEvalStringLiteral(S.Context, E))
      return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg,
                                   Type, CallType, /*InFunctionCall*/ false,
                                   CheckedVarArgs, UncoveredArg, Offset,
                                   IgnoreStringsWithoutSpecifiers);
    return SLCT_NotALiteral;
  }
  case Stmt::ObjCMessageExprClass: {
    const auto *ME = cast<ObjCMessageExpr>(E);
    if (const auto *MD = ME->getMethodDecl()) {
      if (const auto *FA = MD->getAttr<FormatArgAttr>()) {
        // As a special case heuristic, if we're using the method -[NSBundle
        // localizedStringForKey:value:table:], ignore any key strings that
        // lack format specifiers. The idea is that if the key doesn't have any
        // format specifiers then it's probably just a key to map to the
        // localized strings. If it does have format specifiers though, then
        // it's likely that the text of the key is the format string in the
        // programmer's language, and should be checked.
        const ObjCInterfaceDecl *IFace;
        if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
            IFace->getIdentifier()->isStr("NSBundle") &&
            MD->getSelector().isKeywordSelector(
                {"localizedStringForKey", "value", "table"})) {
          IgnoreStringsWithoutSpecifiers = true;
        }

        const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
        return checkFormatStringExpr(
            S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
            InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
            IgnoreStringsWithoutSpecifiers);
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::ObjCStringLiteralClass:
  case Stmt::StringLiteralClass: {
    const StringLiteral *StrE = nullptr;

    if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
      StrE = ObjCFExpr->getString();
    else
      StrE = cast<StringLiteral>(E);

    if (StrE) {
      if (Offset.isNegative() || Offset > StrE->getLength()) {
        // TODO: It would be better to have an explicit warning for out of
        // bounds literals.
        return SLCT_NotALiteral;
      }
      FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
      CheckFormatString(S, &FStr, E, Args, APK, format_idx, firstDataArg, Type,
                        InFunctionCall, CallType, CheckedVarArgs, UncoveredArg,
                        IgnoreStringsWithoutSpecifiers);
      return SLCT_CheckedLiteral;
    }

    return SLCT_NotALiteral;
  }
  case Stmt::BinaryOperatorClass: {
    const BinaryOperator *BinOp = cast<BinaryOperator>(E);

    // A string literal + an int offset is still a string literal.
    if (BinOp->isAdditiveOp()) {
      Expr::EvalResult LResult, RResult;

      bool LIsInt = BinOp->getLHS()->EvaluateAsInt(
          LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());
      bool RIsInt = BinOp->getRHS()->EvaluateAsInt(
          RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());

      if (LIsInt != RIsInt) {
        BinaryOperatorKind BinOpKind = BinOp->getOpcode();

        if (LIsInt) {
          if (BinOpKind == BO_Add) {
            sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt);
            E = BinOp->getRHS();
            goto tryAgain;
          }
        } else {
          sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt);
          E = BinOp->getLHS();
          goto tryAgain;
        }
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::UnaryOperatorClass: {
    const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
    auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
    if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
      Expr::EvalResult IndexResult;
      if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context,
                                       Expr::SE_NoSideEffects,
                                       S.isConstantEvaluated())) {
        sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add,
                   /*RHS is int*/ true);
        E = ASE->getBase();
        goto tryAgain;
      }
    }

    return SLCT_NotALiteral;
  }

  default:
    return SLCT_NotALiteral;
  }
}
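
// For example, `printf(ok ? "%d\n" : "%s\n", v)` is handled by the
// conditional-operator case above: each arm is checked as its own format
// string unless the condition folds to a constant, in which case only the
// reachable arm is checked.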

// If this expression can be evaluated at compile-time,
// check if the result is a StringLiteral and return it
// otherwise return nullptr
static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
                                               const Expr *E) {
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, Context) && Result.Val.isLValue()) {
    const auto *LVE = Result.Val.getLValueBase().dyn_cast<const Expr *>();
    if (isa_and_nonnull<StringLiteral>(LVE))
      return LVE;
  }
  return nullptr;
}

Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
  return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
      .Case("scanf", FST_Scanf)
      .Cases("printf", "printf0", FST_Printf)
      .Cases("NSString", "CFString", FST_NSString)
      .Case("strftime", FST_Strftime)
      .Case("strfmon", FST_Strfmon)
      .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
      .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
      .Case("os_trace", FST_OSLog)
      .Case("os_log", FST_OSLog)
      .Default(FST_Unknown);
}
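
// For example, a declaration carrying __attribute__((format(printf, 1, 2)))
// maps to FST_Printf above, while format(os_log, ...) and format(os_trace,
// ...) both map to FST_OSLog.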

/// CheckFormatArguments - Check calls to printf and scanf (and similar
/// functions) for correct use of format strings.
/// Returns true if a format string has been fully checked.
bool Sema::CheckFormatArguments(const FormatAttr *Format,
                                ArrayRef<const Expr *> Args, bool IsCXXMember,
                                VariadicCallType CallType, SourceLocation Loc,
                                SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  FormatStringInfo FSI;
  if (getFormatStringInfo(Format, IsCXXMember,
                          CallType != VariadicDoesNotApply, &FSI))
    return CheckFormatArguments(Args, FSI.ArgPassingKind, FSI.FormatIdx,
                                FSI.FirstDataArg, GetFormatStringType(Format),
                                CallType, Loc, Range, CheckedVarArgs);
  return false;
}

bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
                                Sema::FormatArgumentPassingKind APK,
                                unsigned format_idx, unsigned firstDataArg,
                                FormatStringType Type,
                                VariadicCallType CallType, SourceLocation Loc,
                                SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  // CHECK: printf/scanf-like function is called with no format string.
  if (format_idx >= Args.size()) {
    Diag(Loc, diag::warn_missing_format_string) << Range;
    return false;
  }

  const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();

  // CHECK: format string is not a string literal.
  //
  // Dynamically generated format strings are difficult to
  // automatically vet at compile time. Requiring that format strings
  // are string literals: (1) permits the checking of format strings by
  // the compiler and thereby (2) can practically remove the source of
  // many format string exploits.

  // Format string can be either ObjC string (e.g. @"%d") or
  // C string (e.g. "%d")
  // ObjC string uses the same format specifiers as C string, so we can use
  // the same format string checking logic for both ObjC and C strings.
  UncoveredArgHandler UncoveredArg;
  StringLiteralCheckType CT = checkFormatStringExpr(
      *this, OrigFormatExpr, Args, APK, format_idx, firstDataArg, Type,
      CallType,
      /*IsFunctionCall*/ true, CheckedVarArgs, UncoveredArg,
      /*no string offset*/ llvm::APSInt(64, false) = 0);

  // Generate a diagnostic where an uncovered argument is detected.
  if (UncoveredArg.hasUncoveredArg()) {
    unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg;
    assert(ArgIdx < Args.size() && "ArgIdx outside bounds");
    UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]);
  }

  if (CT != SLCT_NotALiteral)
    // Literal format string found, check done!
    return CT == SLCT_CheckedLiteral;

  // Strftime is particular as it always uses a single 'time' argument,
  // so it is safe to pass a non-literal string.
  if (Type == FST_Strftime)
    return false;

  // Do not emit diag when the string param is a macro expansion and the
  // format is either NSString or CFString. This is a hack to prevent
  // diag when using the NSLocalizedString and CFCopyLocalizedString macros
  // which are usually used in place of NS and CF string literals.
  SourceLocation FormatLoc = Args[format_idx]->getBeginLoc();
  if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc))
    return false;

  // If there are no arguments specified, warn with -Wformat-security,
  // otherwise warn only with -Wformat-nonliteral.
  if (Args.size() == firstDataArg) {
    Diag(FormatLoc, diag::warn_format_nonliteral_noargs)
        << OrigFormatExpr->getSourceRange();
    switch (Type) {
    default:
      break;
    case FST_Kprintf:
    case FST_FreeBSDKPrintf:
    case FST_Printf:
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
      break;
    case FST_NSString:
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", ");
      break;
    }
  } else {
    Diag(FormatLoc, diag::warn_format_nonliteral)
        << OrigFormatExpr->getSourceRange();
  }
  return false;
}
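
// For example, with a non-literal format such as `printf(fmt)` and no data
// arguments, the code above emits warn_format_nonliteral_noargs (the
// -Wformat-security path) plus a "%s" fix-it for printf-family functions;
// `printf(fmt, x)` instead emits warn_format_nonliteral.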

class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
protected:
  Sema &S;
  const FormatStringLiteral *FExpr;
  const Expr *OrigFormatExpr;
  const Sema::FormatStringType FSType;
  const unsigned FirstDataArg;
  const unsigned NumDataArgs;
  const char *Beg; // Start of format string.
  const Sema::FormatArgumentPassingKind ArgPassingKind;
  ArrayRef<const Expr *> Args;
  unsigned FormatIdx;
  llvm::SmallBitVector CoveredArgs;
  bool usesPositionalArgs = false;
  bool atFirstArg = true;
  bool inFunctionCall;
  Sema::VariadicCallType CallType;
  llvm::SmallBitVector &CheckedVarArgs;
  UncoveredArgHandler &UncoveredArg;

public:
  CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, const char *beg,
                     Sema::FormatArgumentPassingKind APK,
                     ArrayRef<const Expr *> Args, unsigned formatIdx,
                     bool inFunctionCall, Sema::VariadicCallType callType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
        FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
        ArgPassingKind(APK), Args(Args), FormatIdx(formatIdx),
        inFunctionCall(inFunctionCall), CallType(callType),
        CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
    CoveredArgs.resize(numDataArgs);
    CoveredArgs.reset();
  }

  void DoneProcessing();

  void HandleIncompleteSpecifier(const char *startSpecifier,
                                 unsigned specifierLen) override;

  void HandleInvalidLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen,
      unsigned DiagID);

  void HandleNonStandardLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const char *startSpecifier, unsigned specifierLen);

  void HandleNonStandardConversionSpecifier(
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen);

  void HandlePosition(const char *startPos, unsigned posLen) override;

  void HandleInvalidPosition(const char *startSpecifier,
                             unsigned specifierLen,
                             analyze_format_string::PositionContext p) override;

  void HandleZeroPosition(const char *startPos, unsigned posLen) override;

  void HandleNullChar(const char *nullCharacter) override;

  template <typename Range>
  static void
  EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr,
                       const PartialDiagnostic &PDiag, SourceLocation StringLoc,
                       bool IsStringLocation, Range StringRange,
                       ArrayRef<FixItHint> Fixit = std::nullopt);

protected:
  bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
                                        const char *startSpec,
                                        unsigned specifierLen,
                                        const char *csStart, unsigned csLen);

  void HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                         const char *startSpec,
                                         unsigned specifierLen);

  SourceRange getFormatStringRange();
  CharSourceRange getSpecifierRange(const char *startSpecifier,
                                    unsigned specifierLen);
  SourceLocation getLocationOfByte(const char *x);

  const Expr *getDataArg(unsigned i) const;

  bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS,
                    const analyze_format_string::ConversionSpecifier &CS,
                    const char *startSpecifier, unsigned specifierLen,
                    unsigned argIndex);

  template <typename Range>
  void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
                            bool IsStringLocation, Range StringRange,
                            ArrayRef<FixItHint> Fixit = std::nullopt);
};

SourceRange CheckFormatHandler::getFormatStringRange() {
  return OrigFormatExpr->getSourceRange();
}

CharSourceRange CheckFormatHandler::
getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
  SourceLocation Start = getLocationOfByte(startSpecifier);
  SourceLocation End   = getLocationOfByte(startSpecifier + specifierLen - 1);

  // Advance the end SourceLocation by one due to half-open ranges.
  End = End.getLocWithOffset(1);

  return CharSourceRange::getCharRange(Start, End);
}

SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
  return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(),
                                  S.getLangOpts(), S.Context.getTargetInfo());
}

void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
                                                   unsigned specifierLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
                       getLocationOfByte(startSpecifier),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen));
}

void CheckFormatHandler::HandleInvalidLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
  if (FixedLM) {
    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
  } else {
    FixItHint Hint;
    if (DiagID == diag::warn_format_nonsensical_length)
      Hint = FixItHint::CreateRemoval(LMRange);

    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen),
                         Hint);
  }
}

void CheckFormatHandler::HandleNonStandardLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
  if (FixedLM) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
  } else {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
  }
}

void CheckFormatHandler::HandleNonStandardConversionSpecifier(
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  // See if we know how to fix this conversion specifier.
  std::optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
  if (FixedCS) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                          << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength());
    S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
      << FixedCS->toString()
      << FixItHint::CreateReplacement(CSRange, FixedCS->toString());
  } else {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                          << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
  }
}

void CheckFormatHandler::HandlePosition(const char *startPos,
                                        unsigned posLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
                               getLocationOfByte(startPos),
                               /*IsStringLocation*/true,
                               getSpecifierRange(startPos, posLen));
}

void CheckFormatHandler::HandleInvalidPosition(
    const char *startSpecifier, unsigned specifierLen,
    analyze_format_string::PositionContext p) {
  EmitFormatDiagnostic(
      S.PDiag(diag::warn_format_invalid_positional_specifier) << (unsigned)p,
      getLocationOfByte(startSpecifier), /*IsStringLocation*/ true,
      getSpecifierRange(startSpecifier, specifierLen));
}

void CheckFormatHandler::HandleZeroPosition(const char *startPos,
                                            unsigned posLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
                               getLocationOfByte(startPos),
                               /*IsStringLocation*/true,
                               getSpecifierRange(startPos, posLen));
}

void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
  if (!isa<ObjCStringLiteral>(OrigFormatExpr)) {
    // The presence of a null character is likely an error.
    EmitFormatDiagnostic(
      S.PDiag(diag::warn_printf_format_string_contains_null_char),
      getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
      getFormatStringRange());
  }
}

// Note that this may return NULL if there was an error parsing or building
// one of the argument expressions.
const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
  return Args[FirstDataArg + i];
}

void CheckFormatHandler::DoneProcessing() {
  // Does the number of data arguments exceed the number of
  // format conversions in the format string?
  if (ArgPassingKind != Sema::FAPK_VAList) {
    // Find any arguments that weren't covered.
    CoveredArgs.flip();
    signed notCoveredArg = CoveredArgs.find_first();
    if (notCoveredArg >= 0) {
      assert((unsigned)notCoveredArg < NumDataArgs);
      UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
    } else
      UncoveredArg.setAllCovered();
  }
}

void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
                                   const Expr *ArgExpr) {
  assert(hasUncoveredArg() && !DiagnosticExprs.empty() &&
         "Invalid state");

  SourceLocation Loc = ArgExpr->getBeginLoc();

  if (S.getSourceManager().isInSystemMacro(Loc))
    return;

  PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
  for (auto E : DiagnosticExprs)
    PDiag << E->getSourceRange();

  CheckFormatHandler::EmitFormatDiagnostic(
                                  S, IsFunctionCall, DiagnosticExprs[0],
                                  PDiag, Loc, /*IsStringLocation*/false,
                                  DiagnosticExprs[0]->getSourceRange());
}

bool
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     SourceLocation Loc,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
                                                     unsigned csLen) {
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // make sense.
    CoveredArgs.set(argIndex);
  } else {
    // If argIndex exceeds the number of data arguments we
    // don't issue a warning because that is just a cascade of warnings (and
    // they may have intended '%%' anyway). We don't want to continue
    // processing the format string after this point, however, as we will
    // likely just get gibberish when trying to match arguments.
    keepGoing = false;
  }

  StringRef Specifier(csStart, csLen);

  // If the specifier is non-printable, it could be the first byte of a UTF-8
  // sequence. In that case, print the UTF-8 code point. If not, print the byte
  // as a hex value.
  std::string CodePointStr;
  if (!llvm::sys::locale::isPrint(*csStart)) {
    llvm::UTF32 CodePoint;
    const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
    const llvm::UTF8 *E =
        reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
    llvm::ConversionResult Result =
        llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion);

    if (Result != llvm::conversionOK) {
      unsigned char FirstChar = *csStart;
      CodePoint = (llvm::UTF32)FirstChar;
    }

    llvm::raw_string_ostream OS(CodePointStr);
    if (CodePoint < 256)
      OS << "\\x" << llvm::format("%02x", CodePoint);
    else if (CodePoint <= 0xFFFF)
      OS << "\\u" << llvm::format("%04x", CodePoint);
    else
      OS << "\\U" << llvm::format("%08x", CodePoint);
    Specifier = CodePointStr;
  }

  EmitFormatDiagnostic(
      S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
      /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));

  return keepGoing;
}

void
CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                                      const char *startSpec,
                                                      unsigned specifierLen) {
  EmitFormatDiagnostic(
    S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
    Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
}

bool
CheckFormatHandler::CheckNumArgs(
  const analyze_format_string::FormatSpecifier &FS,
  const analyze_format_string::ConversionSpecifier &CS,
  const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {

  if (argIndex >= NumDataArgs) {
    PartialDiagnostic PDiag = FS.usesPositionalArg()
      ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
           << (argIndex+1) << NumDataArgs)
      : S.PDiag(diag::warn_printf_insufficient_data_args);
    EmitFormatDiagnostic(
      PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
      getSpecifierRange(startSpecifier, specifierLen));

    // Since more arguments than conversion tokens are given, by extension
    // all arguments are covered, so mark this as so.
    UncoveredArg.setAllCovered();
    return false;
  }

  return true;
}

template<typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
                                              SourceLocation Loc,
                                              bool IsStringLocation,
                                              Range StringRange,
                                              ArrayRef<FixItHint> FixIt) {
  EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
                       Loc, IsStringLocation, StringRange, FixIt);
}

/// If the format string is not within the function call, emit a note
/// so that the function call and string are in diagnostic messages.
///
/// \param InFunctionCall if true, the format string is within the function
/// call and only one diagnostic message will be produced. Otherwise, an
/// extra note will be emitted pointing to location of the format string.
///
/// \param ArgumentExpr the expression that is passed as the format string
/// argument in the function call. Used for getting locations when two
/// diagnostics are emitted.
///
/// \param PDiag the callee should already have provided any strings for the
/// diagnostic message. This function only adds locations and fixits
/// to diagnostics.
///
/// \param Loc primary location for diagnostic. If two diagnostics are
/// required, one will be at Loc and a new SourceLocation will be created for
/// the other one.
///
/// \param IsStringLocation if true, Loc points to the format string and should
/// be used for the note. Otherwise, Loc points to the argument list and will
/// be used with PDiag.
///
/// \param StringRange some or all of the string to highlight. This is
/// templated so it can accept either a CharSourceRange or a SourceRange.
///
/// \param FixIt optional fix it hint for the format string.
template <typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(
    Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
    const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
    Range StringRange, ArrayRef<FixItHint> FixIt) {
  if (InFunctionCall) {
    const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
    D << StringRange;
    D << FixIt;
  } else {
    S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
      << ArgumentExpr->getSourceRange();

    const Sema::SemaDiagnosticBuilder &Note =
      S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
             diag::note_format_string_defined);

    Note << StringRange;
    Note << FixIt;
  }
}

//===--- CHECK: Printf format string checking -----------------------------===//

class CheckPrintfHandler : public CheckFormatHandler {
public:
  CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, bool isObjC, const char *beg,
                     Sema::FormatArgumentPassingKind APK,
                     ArrayRef<const Expr *> Args, unsigned formatIdx,
                     bool inFunctionCall, Sema::VariadicCallType CallType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, APK, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  bool isObjCContext() const { return FSType == Sema::FST_NSString; }

  /// Returns true if '%@' specifiers are allowed in the format string.
  bool allowsObjCArg() const {
    return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog ||
           FSType == Sema::FST_OSTrace;
  }

  bool HandleInvalidPrintfConversionSpecifier(
      const analyze_printf::PrintfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  void handleInvalidMaskType(StringRef MaskType) override;

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *startSpecifier, unsigned specifierLen,
                             const TargetInfo &Target) override;
  bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
                       const char *StartSpecifier,
                       unsigned SpecifierLen,
                       const Expr *E);

  bool HandleAmount(const analyze_format_string::OptionalAmount &Amt,
                    unsigned k,
                    const char *startSpecifier, unsigned specifierLen);
  void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
                           const analyze_printf::OptionalAmount &Amt,
                           unsigned type,
                           const char *startSpecifier, unsigned specifierLen);
  void HandleFlag(const analyze_printf::PrintfSpecifier &FS,
                  const analyze_printf::OptionalFlag &flag,
                  const char *startSpecifier, unsigned specifierLen);
  void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS,
                         const analyze_printf::OptionalFlag &ignoredFlag,
                         const analyze_printf::OptionalFlag &flag,
                         const char *startSpecifier, unsigned specifierLen);
  bool checkForCStrMembers(const analyze_printf::ArgType &AT,
                           const Expr *E);

  void HandleEmptyObjCModifierFlag(const char *startFlag,
                                   unsigned flagLen) override;

  void HandleInvalidObjCModifierFlag(const char *startFlag,
                                     unsigned flagLen) override;

  void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart,
                                            const char *flagsEnd,
                                            const char *conversionPosition)
                                            override;
};

bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
                                      const analyze_printf::PrintfSpecifier &FS,
                                      const char *startSpecifier,
                                      unsigned specifierLen) {
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();

  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
  S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
}
bool CheckPrintfHandler::HandleAmount(
    const analyze_format_string::OptionalAmount &Amt, unsigned k,
    const char *startSpecifier, unsigned specifierLen) {
  if (Amt.hasDataArgument()) {
    if (ArgPassingKind != Sema::FAPK_VAList) {
      unsigned argIndex = Amt.getArgIndex();
      if (argIndex >= NumDataArgs) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
                                 << k,
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/ true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking.  We will just emit
        // spurious errors.
        return false;
      }

      // Type check the data argument.  It should be an 'int'.
      // Although not in conformance with C99, we also allow the argument to be
      // an 'unsigned int' as that is a reasonably safe case.  GCC also
      // doesn't emit a warning for that case.
      CoveredArgs.set(argIndex);
      const Expr *Arg = getDataArg(argIndex);
      QualType T = Arg->getType();

      const analyze_printf::ArgType &AT = Amt.getArgType(S.Context);
      assert(AT.isValid());

      if (!AT.matchesType(S.Context, T)) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
                                 << k << AT.getRepresentativeTypeName(S.Context)
                                 << T << Arg->getSourceRange(),
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/ true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking.  We will just emit
        // spurious errors.
        return false;
      }
    }
  }
  return true;
}
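// Illustrative sketch (not part of the original source): the kind of call the
// '*' width/precision check above is aimed at.  The starred amount must be
// supplied by an extra 'int' argument:
//
//   printf("%*d", 10, value);    // OK: width argument is an 'int'
//   printf("%*d", 10.0, value);  // -Wformat: field width should have type
//                                // 'int', but the argument has type 'double'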
void CheckPrintfHandler::HandleInvalidAmount(
    const analyze_printf::PrintfSpecifier &FS,
    const analyze_printf::OptionalAmount &Amt, unsigned type,
    const char *startSpecifier, unsigned specifierLen) {
  const analyze_printf::PrintfConversionSpecifier &CS =
      FS.getConversionSpecifier();

  FixItHint fixit =
      Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
          ? FixItHint::CreateRemoval(
                getSpecifierRange(Amt.getStart(), Amt.getConstantLength()))
          : FixItHint();

  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
                           << type << CS.toString(),
                       getLocationOfByte(Amt.getStart()),
                       /*IsStringLocation*/ true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       fixit);
}
void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
                                    const analyze_printf::OptionalFlag &flag,
                                    const char *startSpecifier,
                                    unsigned specifierLen) {
  // Warn about pointless flag with a fixit removal.
  const analyze_printf::PrintfConversionSpecifier &CS =
      FS.getConversionSpecifier();
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
                           << flag.toString() << CS.toString(),
                       getLocationOfByte(flag.getPosition()),
                       /*IsStringLocation*/ true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       FixItHint::CreateRemoval(
                           getSpecifierRange(flag.getPosition(), 1)));
}
void CheckPrintfHandler::HandleIgnoredFlag(
    const analyze_printf::PrintfSpecifier &FS,
    const analyze_printf::OptionalFlag &ignoredFlag,
    const analyze_printf::OptionalFlag &flag,
    const char *startSpecifier, unsigned specifierLen) {
  // Warn about ignored flag with a fixit removal.
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
                           << ignoredFlag.toString() << flag.toString(),
                       getLocationOfByte(ignoredFlag.getPosition()),
                       /*IsStringLocation*/ true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       FixItHint::CreateRemoval(
                           getSpecifierRange(ignoredFlag.getPosition(), 1)));
}
void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag,
                                                     unsigned flagLen) {
  // Warn about an empty flag.
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag),
                       getLocationOfByte(startFlag),
                       /*IsStringLocation*/ true,
                       getSpecifierRange(startFlag, flagLen));
}
void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag,
                                                       unsigned flagLen) {
  // Warn about an invalid flag.
  auto Range = getSpecifierRange(startFlag, flagLen);
  StringRef flag(startFlag, flagLen);
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
                       getLocationOfByte(startFlag),
                       /*IsStringLocation*/ true,
                       Range, FixItHint::CreateRemoval(Range));
}
void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
    const char *flagsStart, const char *flagsEnd,
    const char *conversionPosition) {
  // Warn about using '[...]' without a '@' conversion.
  auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
  auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
  EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
                       getLocationOfByte(conversionPosition),
                       /*IsStringLocation*/ true,
                       Range, FixItHint::CreateRemoval(Range));
}
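// Illustrative sketch (assumed example, not from the original source): the
// '[...]' Objective-C modifier flags only make sense in front of a '%@'
// conversion, so a format such as
//
//   NSLog(@"%[flags]d", x);
//
// is diagnosed just above and the bracketed range is offered for removal.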
// Determines if the specified type is a C++ class or struct containing
// a member with the specified name and kind (e.g. a CXXMethodDecl named
// "c_str()").
template <typename MemberKind>
static llvm::SmallPtrSet<MemberKind *, 1>
CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  llvm::SmallPtrSet<MemberKind *, 1> Results;

  if (!RT)
    return Results;
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD || !RD->getDefinition())
    return Results;

  LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
                 Sema::LookupMemberName);
  R.suppressDiagnostics();

  // We just need to include all members of the right kind turned up by the
  // filter, at this point.
  if (S.LookupQualifiedName(R, RT->getDecl()))
    for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
      NamedDecl *decl = (*I)->getUnderlyingDecl();
      if (MemberKind *FK = dyn_cast<MemberKind>(decl))
        Results.insert(FK);
    }

  return Results;
}
/// Check if we could call '.c_str()' on an object.
///
/// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
/// allow the call, or if it would be ambiguous).
bool Sema::hasCStrMethod(const Expr *E) {
  using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;

  MethodSet Results =
      CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
  for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
       MI != ME; ++MI)
    if ((*MI)->getMinRequiredArguments() == 0)
      return true;
  return false;
}
// Check if a (w)string was passed when a (w)char* was needed, and offer a
// better diagnostic if so. AT is assumed to be valid.
// Returns true when a c_str() conversion method is found.
bool CheckPrintfHandler::checkForCStrMembers(
    const analyze_printf::ArgType &AT, const Expr *E) {
  using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;

  MethodSet Results =
      CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());

  for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
       MI != ME; ++MI) {
    const CXXMethodDecl *Method = *MI;
    if (Method->getMinRequiredArguments() == 0 &&
        AT.matchesType(S.Context, Method->getReturnType())) {
      // FIXME: Suggest parens if the expression needs them.
      SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc());
      S.Diag(E->getBeginLoc(), diag::note_printf_c_str)
          << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()");
      return true;
    }
  }

  return false;
}
bool CheckPrintfHandler::HandlePrintfSpecifier(
    const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier,
    unsigned specifierLen, const TargetInfo &Target) {
  using namespace analyze_format_string;
  using namespace analyze_printf;

  const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();

  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    } else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // First check if the field width, precision, and conversion specifier
  // have matching data arguments.
  if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!CS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense.  Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // FreeBSD kernel extensions.
  if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
      CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
    // We need at least two arguments.
    if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1))
      return false;

    // Claim the second argument.
    CoveredArgs.set(argIndex + 1);

    // Type check the first argument (int for %b, pointer for %D)
    const Expr *Ex = getDataArg(argIndex);
    const analyze_printf::ArgType &AT =
        (CS.getKind() == ConversionSpecifier::FreeBSDbArg)
            ? ArgType(S.Context.IntTy)
            : ArgType::CPointerTy;
    if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
              << false << Ex->getSourceRange(),
          Ex->getBeginLoc(), /*IsStringLocation*/ false,
          getSpecifierRange(startSpecifier, specifierLen));

    // Type check the second argument (char * for both %b and %D)
    Ex = getDataArg(argIndex + 1);
    const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
    if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
              << false << Ex->getSourceRange(),
          Ex->getBeginLoc(), /*IsStringLocation*/ false,
          getSpecifierRange(startSpecifier, specifierLen));

    return true;
  }

  // Check for using an Objective-C specific conversion specifier
  // in a non-ObjC literal.
  if (!allowsObjCArg() && CS.isObjCArg()) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // %P can only be used with os_log.
  if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // %n is not allowed with os_log.
  if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg),
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
    return true;
  }

  // Only scalars are allowed for os_trace.
  if (FSType == Sema::FST_OSTrace &&
      (CS.getKind() == ConversionSpecifier::PArg ||
       CS.getKind() == ConversionSpecifier::sArg ||
       CS.getKind() == ConversionSpecifier::ObjCObjArg)) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // Check for use of public/private annotation outside of os_log().
  if (FSType != Sema::FST_OSLog) {
    if (FS.isPublic().isSet()) {
      EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
                               << "public",
                           getLocationOfByte(FS.isPublic().getPosition()),
                           /*IsStringLocation*/ false,
                           getSpecifierRange(startSpecifier, specifierLen));
    }
    if (FS.isPrivate().isSet()) {
      EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
                               << "private",
                           getLocationOfByte(FS.isPrivate().getPosition()),
                           /*IsStringLocation*/ false,
                           getSpecifierRange(startSpecifier, specifierLen));
    }
  }

  const llvm::Triple &Triple = Target.getTriple();
  if (CS.getKind() == ConversionSpecifier::nArg &&
      (Triple.isAndroid() || Triple.isOSFuchsia())) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported),
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  // Check for invalid use of field width
  if (!FS.hasValidFieldWidth()) {
    HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
                        startSpecifier, specifierLen);
  }

  // Check for invalid use of precision
  if (!FS.hasValidPrecision()) {
    HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
                        startSpecifier, specifierLen);
  }

  // Precision is mandatory for %P specifier.
  if (CS.getKind() == ConversionSpecifier::PArg &&
      FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision),
                         getLocationOfByte(startSpecifier),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  // Check each flag does not conflict with any other component.
  if (!FS.hasValidThousandsGroupingPrefix())
    HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
  if (!FS.hasValidLeadingZeros())
    HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
  if (!FS.hasValidPlusPrefix())
    HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidSpacePrefix())
    HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidAlternativeForm())
    HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
  if (!FS.hasValidLeftJustified())
    HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);

  // Check that flags are not ignored by another flag
  if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
    HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
                      startSpecifier, specifierLen);
  if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
    HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
                      startSpecifier, specifierLen);

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (ArgPassingKind == Sema::FAPK_VAList)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  const Expr *Arg = getDataArg(argIndex);
  if (!Arg)
    return true;

  return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
}
static bool requiresParensToAddCast(const Expr *E) {
  // FIXME: We should have a general way to reason about operator
  // precedence and whether parens are actually needed here.
  // Take care of a few common cases where they aren't.
  const Expr *Inside = E->IgnoreImpCasts();
  if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside))
    Inside = POE->getSyntacticForm()->IgnoreImpCasts();

  switch (Inside->getStmtClass()) {
  case Stmt::ArraySubscriptExprClass:
  case Stmt::CallExprClass:
  case Stmt::CharacterLiteralClass:
  case Stmt::CXXBoolLiteralExprClass:
  case Stmt::DeclRefExprClass:
  case Stmt::FloatingLiteralClass:
  case Stmt::IntegerLiteralClass:
  case Stmt::MemberExprClass:
  case Stmt::ObjCArrayLiteralClass:
  case Stmt::ObjCBoolLiteralExprClass:
  case Stmt::ObjCBoxedExprClass:
  case Stmt::ObjCDictionaryLiteralClass:
  case Stmt::ObjCEncodeExprClass:
  case Stmt::ObjCIvarRefExprClass:
  case Stmt::ObjCMessageExprClass:
  case Stmt::ObjCPropertyRefExprClass:
  case Stmt::ObjCStringLiteralClass:
  case Stmt::ObjCSubscriptRefExprClass:
  case Stmt::ParenExprClass:
  case Stmt::StringLiteralClass:
  case Stmt::UnaryOperatorClass:
    return false;
  default:
    return true;
  }
}
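// Illustrative sketch (not part of the original source): how this helper
// shapes the cast fix-its below.  A DeclRefExpr binds tightly enough that
// "(long)x" is fine, while a binary expression needs extra parentheses:
//
//   printf("%ld", x);      // fix-it inserts:  (long)x
//   printf("%ld", x + y);  // fix-it inserts:  (long)(x + y)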
static std::pair<QualType, StringRef>
shouldNotPrintDirectly(const ASTContext &Context,
                       QualType IntendedTy,
                       const Expr *E) {
  // Use a 'while' to peel off layers of typedefs.
  QualType TyTy = IntendedTy;
  while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
    StringRef Name = UserTy->getDecl()->getName();
    QualType CastTy = llvm::StringSwitch<QualType>(Name)
                          .Case("CFIndex", Context.getNSIntegerType())
                          .Case("NSInteger", Context.getNSIntegerType())
                          .Case("NSUInteger", Context.getNSUIntegerType())
                          .Case("SInt32", Context.IntTy)
                          .Case("UInt32", Context.UnsignedIntTy)
                          .Default(QualType());

    if (!CastTy.isNull())
      return std::make_pair(CastTy, Name);

    TyTy = UserTy->desugar();
  }

  // Strip parens if necessary.
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
    return shouldNotPrintDirectly(Context,
                                  PE->getSubExpr()->getType(),
                                  PE->getSubExpr());

  // If this is a conditional expression, then its result type is constructed
  // via usual arithmetic conversions and thus there might be no necessary
  // typedef sugar there.  Recurse to operands to check for NSInteger &
  // Co. usage condition.
  if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
    QualType TrueTy, FalseTy;
    StringRef TrueName, FalseName;

    std::tie(TrueTy, TrueName) =
        shouldNotPrintDirectly(Context,
                               CO->getTrueExpr()->getType(),
                               CO->getTrueExpr());
    std::tie(FalseTy, FalseName) =
        shouldNotPrintDirectly(Context,
                               CO->getFalseExpr()->getType(),
                               CO->getFalseExpr());

    if (TrueTy == FalseTy)
      return std::make_pair(TrueTy, TrueName);
    else if (TrueTy.isNull())
      return std::make_pair(FalseTy, FalseName);
    else if (FalseTy.isNull())
      return std::make_pair(TrueTy, TrueName);
  }

  return std::make_pair(QualType(), StringRef());
}
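// Illustrative sketch (not part of the original source): on Darwin, NSInteger
// is 'int' on 32-bit targets and 'long' on 64-bit ones, so printing it
// directly is not portable.  The pair returned above drives a cast
// suggestion rather than a specifier change:
//
//   NSInteger n = 7;
//   printf("%d", n);         // warning: argument of type 'NSInteger' should
//                            // be cast before being printed
//   printf("%ld", (long)n);  // suggested form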
/// Return true if \p ICE is an implicit argument promotion of an arithmetic
/// type. Bit-field 'promotions' from a higher ranked type to a lower ranked
/// type do not count.
static bool
isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
  QualType From = ICE->getSubExpr()->getType();
  QualType To = ICE->getType();
  // It's an integer promotion if the destination type is the promoted
  // source type.
  if (ICE->getCastKind() == CK_IntegralCast &&
      S.Context.isPromotableIntegerType(From) &&
      S.Context.getPromotedIntegerType(From) == To)
    return true;
  // Look through vector types, since we do default argument promotion for
  // those in OpenCL.
  if (const auto *VecTy = From->getAs<ExtVectorType>())
    From = VecTy->getElementType();
  if (const auto *VecTy = To->getAs<ExtVectorType>())
    To = VecTy->getElementType();
  // It's a floating promotion if the source type is a lower rank.
  return ICE->getCastKind() == CK_FloatingCast &&
         S.Context.getFloatingTypeOrder(From, To) < 0;
}
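// Illustrative sketch (not part of the original source): default argument
// promotion means a 'short' argument reaches printf as an 'int'; the helper
// above lets the caller report the pre-promotion type instead:
//
//   short s = 3;
//   printf("%hd", s);  // the short -> int ImplicitCastExpr is an arithmetic
//                      // promotion, so diagnostics talk about 'short'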
bool CheckPrintfHandler::checkFormatExpr(
    const analyze_printf::PrintfSpecifier &FS, const char *StartSpecifier,
    unsigned SpecifierLen, const Expr *E) {
  using namespace analyze_format_string;
  using namespace analyze_printf;

  // Now type check the data expression that matches the
  // format specifier.
  const analyze_printf::ArgType &AT =
      FS.getArgType(S.Context, isObjCContext());
  if (!AT.isValid())
    return true;

  QualType ExprTy = E->getType();
  while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) {
    ExprTy = TET->getUnderlyingExpr()->getType();
  }

  // When using the format attribute in C++, you can receive a function or an
  // array that will necessarily decay to a pointer when passed to the final
  // format consumer. Apply decay before type comparison.
  if (ExprTy->canDecayToPointerType())
    ExprTy = S.Context.getDecayedType(ExprTy);

  // Diagnose attempts to print a boolean value as a character. Unlike other
  // -Wformat diagnostics, this is fine from a type perspective, but it still
  // doesn't make sense.
  if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg &&
      E->isKnownToHaveBooleanValue()) {
    const CharSourceRange &CSR =
        getSpecifierRange(StartSpecifier, SpecifierLen);
    SmallString<4> FSString;
    llvm::raw_svector_ostream os(FSString);
    FS.toString(os);
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character)
                             << FSString,
                         E->getExprLoc(), false, CSR);
    return true;
  }

  ArgType::MatchKind ImplicitMatch = ArgType::NoMatch;
  ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy);
  if (Match == ArgType::Match)
    return true;

  // NoMatchPromotionTypeConfusion should be only returned in ImplicitCastExpr
  assert(Match != ArgType::NoMatchPromotionTypeConfusion);

  // Look through argument promotions for our error message's reported type.
  // This includes the integral and floating promotions, but excludes array
  // and function pointer decay (seeing that an argument intended to be a
  // string has type 'char [6]' is probably more confusing than 'char *') and
  // certain bitfield promotions (bitfields can be 'demoted' to a lesser type).
  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (isArithmeticArgumentPromotion(S, ICE)) {
      E = ICE->getSubExpr();
      ExprTy = E->getType();

      // Check if we didn't match because of an implicit cast from a 'char'
      // or 'short' to an 'int'.  This is done because printf is a varargs
      // function.
      if (ICE->getType() == S.Context.IntTy ||
          ICE->getType() == S.Context.UnsignedIntTy) {
        // All further checking is done on the subexpression
        ImplicitMatch = AT.matchesType(S.Context, ExprTy);
        if (ImplicitMatch == ArgType::Match)
          return true;
      }
    }
  } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) {
    // Special case for 'a', which has type 'int' in C.
    // Note, however, that we do /not/ want to treat multibyte constants like
    // 'MooV' as characters! This form is deprecated but still exists. In
    // addition, don't treat expressions as of type 'char' if one byte length
    // modifier is provided.
    if (ExprTy == S.Context.IntTy &&
        FS.getLengthModifier().getKind() != LengthModifier::AsChar)
      if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) {
        ExprTy = S.Context.CharTy;
        // To improve check results, we consider a character literal in C
        // to be a 'char' rather than an 'int'. 'printf("%hd", 'a');' is
        // more likely a type confusion situation, so we will suggest to
        // use '%hhd' instead by discarding the MatchPromotion.
        if (Match == ArgType::MatchPromotion)
          Match = ArgType::NoMatch;
      }
  }
  if (Match == ArgType::MatchPromotion) {
    // WG14 N2562 only clarified promotions in *printf
    // For NSLog in ObjC, just preserve -Wformat behavior
    if (!S.getLangOpts().ObjC &&
        ImplicitMatch != ArgType::NoMatchPromotionTypeConfusion &&
        ImplicitMatch != ArgType::NoMatchTypeConfusion)
      return true;
    Match = ArgType::NoMatch;
  }
  if (ImplicitMatch == ArgType::NoMatchPedantic ||
      ImplicitMatch == ArgType::NoMatchTypeConfusion)
    Match = ImplicitMatch;
  assert(Match != ArgType::MatchPromotion);

  // Look through unscoped enums to their underlying type.
  bool IsEnum = false;
  bool IsScopedEnum = false;
  if (auto EnumTy = ExprTy->getAs<EnumType>()) {
    if (EnumTy->isUnscopedEnumerationType()) {
      ExprTy = EnumTy->getDecl()->getIntegerType();
      // This controls whether we're talking about the underlying type or not,
      // which we only want to do when it's an unscoped enum.
      IsEnum = true;
    } else {
      IsScopedEnum = true;
    }
  }

  // %C in an Objective-C context prints a unichar, not a wchar_t.
  // If the argument is an integer of some kind, believe the %C and suggest
  // a cast instead of changing the conversion specifier.
  QualType IntendedTy = ExprTy;
  if (isObjCContext() &&
      FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
    if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
        !ExprTy->isCharType()) {
      // 'unichar' is defined as a typedef of unsigned short, but we should
      // prefer using the typedef if it is visible.
      IntendedTy = S.Context.UnsignedShortTy;

      // While we are here, check if the value is an IntegerLiteral that happens
      // to be within the valid range.
      if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) {
        const llvm::APInt &V = IL->getValue();
        if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy))
          return true;
      }

      LookupResult Result(S, &S.Context.Idents.get("unichar"),
                          E->getBeginLoc(), Sema::LookupOrdinaryName);
      if (S.LookupName(Result, S.getCurScope())) {
        NamedDecl *ND = Result.getFoundDecl();
        if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
          if (TD->getUnderlyingType() == IntendedTy)
            IntendedTy = S.Context.getTypedefType(TD);
      }
    }
  }

  // Special-case some of Darwin's platform-independence types by suggesting
  // casts to primitive types that are known to be large enough.
  bool ShouldNotPrintDirectly = false; StringRef CastTyName;
  if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
    QualType CastTy;
    std::tie(CastTy, CastTyName) =
        shouldNotPrintDirectly(S.Context, IntendedTy, E);
    if (!CastTy.isNull()) {
      // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int
      // (long in ASTContext). Only complain to pedants.
      if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
          (AT.isSizeT() || AT.isPtrdiffT()) &&
          AT.matchesType(S.Context, CastTy))
        Match = ArgType::NoMatchPedantic;
      IntendedTy = CastTy;
      ShouldNotPrintDirectly = true;
    }
  }

  // We may be able to offer a FixItHint if it is a supported type.
  PrintfSpecifier fixedFS = FS;
  bool Success =
      fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext());

  if (Success) {
    // Get the fix string from the fixed format specifier
    SmallString<16> buf;
    llvm::raw_svector_ostream os(buf);
    fixedFS.toString(os);

    CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);

    if (IntendedTy == ExprTy && !ShouldNotPrintDirectly && !IsScopedEnum) {
      unsigned Diag;
      switch (Match) {
      case ArgType::Match:
      case ArgType::MatchPromotion:
      case ArgType::NoMatchPromotionTypeConfusion:
        llvm_unreachable("expected non-matching");
      case ArgType::NoMatchPedantic:
        Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
        break;
      case ArgType::NoMatchTypeConfusion:
        Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
        break;
      case ArgType::NoMatch:
        Diag = diag::warn_format_conversion_argument_type_mismatch;
        break;
      }

      // In this case, the specifier is wrong and should be changed to match
      // the argument.
      EmitFormatDiagnostic(S.PDiag(Diag)
                               << AT.getRepresentativeTypeName(S.Context)
                               << IntendedTy << IsEnum << E->getSourceRange(),
                           E->getBeginLoc(),
                           /*IsStringLocation*/ false, SpecRange,
                           FixItHint::CreateReplacement(SpecRange, os.str()));
    } else {
      // The canonical type for formatting this value is different from the
      // actual type of the expression. (This occurs, for example, with Darwin's
      // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but
      // should be printed as 'long' for 64-bit compatibility.)
      // Rather than emitting a normal format/argument mismatch, we want to
      // add a cast to the recommended type (and correct the format string
      // if necessary).
      SmallString<16> CastBuf;
      llvm::raw_svector_ostream CastFix(CastBuf);
      CastFix << (S.LangOpts.CPlusPlus ? "static_cast<" : "(");
      if (IsScopedEnum) {
        CastFix << AT.getRepresentativeType(S.Context).getAsString(
            S.Context.getPrintingPolicy());
      } else {
        IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
      }
      CastFix << (S.LangOpts.CPlusPlus ? ">" : ")");

      SmallVector<FixItHint, 4> Hints;
      if ((!AT.matchesType(S.Context, IntendedTy) && !IsScopedEnum) ||
          ShouldNotPrintDirectly)
        Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));

      if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
        // If there's already a cast present, just replace it.
        SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
        Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));
      } else if (!requiresParensToAddCast(E) && !S.LangOpts.CPlusPlus) {
        // If the expression has high enough precedence,
        // just write the C-style cast.
        Hints.push_back(
            FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
      } else {
        // Otherwise, add parens around the expression as well as the cast.
        CastFix << "(";
        Hints.push_back(
            FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));

        SourceLocation After = S.getLocForEndOfToken(E->getEndLoc());
        Hints.push_back(FixItHint::CreateInsertion(After, ")"));
      }

      if (ShouldNotPrintDirectly) {
        // The expression has a type that should not be printed directly.
        // We extract the name from the typedef because we don't want to show
        // the underlying type in the diagnostic.
        StringRef Name;
        if (const auto *TypedefTy = ExprTy->getAs<TypedefType>())
          Name = TypedefTy->getDecl()->getName();
        else
          Name = CastTyName;
        unsigned Diag = Match == ArgType::NoMatchPedantic
                            ? diag::warn_format_argument_needs_cast_pedantic
                            : diag::warn_format_argument_needs_cast;
        EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum
                                 << E->getSourceRange(),
                             E->getBeginLoc(), /*IsStringLocation=*/false,
                             SpecRange, Hints);
      } else {
        // In this case, the expression could be printed using a different
        // specifier, but we've decided that the specifier is probably correct
        // and we should cast instead. Just use the normal warning message.
        EmitFormatDiagnostic(
            S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
                << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
                << E->getSourceRange(),
            E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints);
      }
    }
  } else {
    const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
                                                   SpecifierLen);
    // Since the warning for passing non-POD types to variadic functions
    // was deferred until now, we emit a warning for non-POD
    // arguments here.
    bool EmitTypeMismatch = false;
    switch (S.isValidVarArgType(ExprTy)) {
    case Sema::VAK_Valid:
    case Sema::VAK_ValidInCXX11: {
      unsigned Diag;
      switch (Match) {
      case ArgType::Match:
      case ArgType::MatchPromotion:
      case ArgType::NoMatchPromotionTypeConfusion:
        llvm_unreachable("expected non-matching");
      case ArgType::NoMatchPedantic:
        Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
        break;
      case ArgType::NoMatchTypeConfusion:
        Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
        break;
      case ArgType::NoMatch:
        Diag = diag::warn_format_conversion_argument_type_mismatch;
        break;
      }

      EmitFormatDiagnostic(
          S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
                        << IsEnum << CSR << E->getSourceRange(),
          E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
      break;
    }
    case Sema::VAK_Undefined:
    case Sema::VAK_MSVCUndefined:
      if (CallType == Sema::VariadicDoesNotApply) {
        EmitTypeMismatch = true;
      } else {
        EmitFormatDiagnostic(
            S.PDiag(diag::warn_non_pod_vararg_with_format_string)
                << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
                << AT.getRepresentativeTypeName(S.Context) << CSR
                << E->getSourceRange(),
            E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
        checkForCStrMembers(AT, E);
      }
      break;

    case Sema::VAK_Invalid:
      if (CallType == Sema::VariadicDoesNotApply)
        EmitTypeMismatch = true;
      else if (ExprTy->isObjCObjectType())
        EmitFormatDiagnostic(
            S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
                << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
                << AT.getRepresentativeTypeName(S.Context) << CSR
                << E->getSourceRange(),
            E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
      else
        // FIXME: If this is an initializer list, suggest removing the braces
        // or inserting a cast to the target type.
        S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
            << isa<InitListExpr>(E) << ExprTy << CallType
            << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
      break;
    }

    if (EmitTypeMismatch) {
      // The function is not variadic, so we do not generate warnings about
      // being allowed to pass that object as a variadic argument. Instead,
      // since there are inherently no printf specifiers for types which cannot
      // be passed as variadic arguments, emit a plain old specifier mismatch
      // argument.
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT.getRepresentativeTypeName(S.Context) << ExprTy << false
              << E->getSourceRange(),
          E->getBeginLoc(), false, CSR);
    }

    assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
           "format string specifier index out of range");
    CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
  }

  return true;
}
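// Illustrative sketch (not part of the original source): common outcomes of
// the specifier/argument reconciliation implemented above:
//
//   printf("%d", 3.14);       // mismatch; the fix-it rewrites "%d" -> "%f"
//   printf("%c", flag == 0);  // boolean printed with %c; dedicated warning
//   printf("%hd", 'a');       // in C the literal is treated as 'char', so
//                             // the fix-it suggests "%hhd"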
//===--- CHECK: Scanf format string checking ------------------------------===//

namespace {

class CheckScanfHandler : public CheckFormatHandler {
public:
  CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
                    const Expr *origFormatExpr, Sema::FormatStringType type,
                    unsigned firstDataArg, unsigned numDataArgs,
                    const char *beg, Sema::FormatArgumentPassingKind APK,
                    ArrayRef<const Expr *> Args, unsigned formatIdx,
                    bool inFunctionCall, Sema::VariadicCallType CallType,
                    llvm::SmallBitVector &CheckedVarArgs,
                    UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, APK, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *startSpecifier,
                            unsigned specifierLen) override;

  bool HandleInvalidScanfConversionSpecifier(
      const analyze_scanf::ScanfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  void HandleIncompleteScanList(const char *start, const char *end) override;
};

} // namespace
void CheckScanfHandler::HandleIncompleteScanList(const char *start,
                                                 const char *end) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
                       getLocationOfByte(end), /*IsStringLocation*/ true,
                       getSpecifierRange(start, end - start));
}
bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
    const analyze_scanf::ScanfSpecifier &FS, const char *startSpecifier,
    unsigned specifierLen) {
  const analyze_scanf::ScanfConversionSpecifier &CS =
      FS.getConversionSpecifier();

  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}
bool CheckScanfHandler::HandleScanfSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  using namespace analyze_scanf;
  using namespace analyze_format_string;

  const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();

  // Handle case where '%' and '*' don't consume an argument.  These shouldn't
  // be used to decide if we are using positional arguments consistently.
  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    } else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // Check if the field width is non-zero.
  const OptionalAmount &Amt = FS.getFieldWidth();
  if (Amt.getHowSpecified() == OptionalAmount::Constant) {
    if (Amt.getConstantAmount() == 0) {
      const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
                                                   Amt.getConstantLength());
      EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
                           getLocationOfByte(Amt.getStart()),
                           /*IsStringLocation*/ true, R,
                           FixItHint::CreateRemoval(R));
    }
  }

  if (!FS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense.  Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (ArgPassingKind == Sema::FAPK_VAList)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  // Check that the argument type matches the format specifier.
  const Expr *Ex = getDataArg(argIndex);
  if (!Ex)
    return true;

  const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);

  if (!AT.isValid()) {
    return true;
  }

  analyze_format_string::ArgType::MatchKind Match =
      AT.matchesType(S.Context, Ex->getType());
  bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
  if (Match == analyze_format_string::ArgType::Match)
    return true;

  ScanfSpecifier fixedFS = FS;
  bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
                                 S.getLangOpts(), S.Context);

  unsigned Diag =
      Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
               : diag::warn_format_conversion_argument_type_mismatch;

  if (Success) {
    // Get the fix string from the fixed format specifier.
    SmallString<128> buf;
    llvm::raw_svector_ostream os(buf);
    fixedFS.toString(os);

    EmitFormatDiagnostic(
        S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
                      << Ex->getType() << false << Ex->getSourceRange(),
        Ex->getBeginLoc(),
        /*IsStringLocation*/ false,
        getSpecifierRange(startSpecifier, specifierLen),
        FixItHint::CreateReplacement(
            getSpecifierRange(startSpecifier, specifierLen), os.str()));
  } else {
    EmitFormatDiagnostic(S.PDiag(Diag)
                             << AT.getRepresentativeTypeName(S.Context)
                             << Ex->getType() << false << Ex->getSourceRange(),
                         Ex->getBeginLoc(),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  return true;
}
static void CheckFormatString(
    Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
    ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
    unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
    bool inFunctionCall, Sema::VariadicCallType CallType,
    llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
    bool IgnoreStringsWithoutSpecifiers) {
  // CHECK: is the format string a wide literal?
  if (!FExpr->isAscii() && !FExpr->isUTF8()) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  // Str - The format string.  NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T =
      S.Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  const unsigned numDataArgs = Args.size() - firstDataArg;

  if (IgnoreStringsWithoutSpecifiers &&
      !analyze_format_string::parseFormatStringHasFormattingSpecifiers(
          Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
    return;

  // Emit a warning if the string literal is truncated and does not contain an
  // embedded null character.
  if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_printf_format_string_not_null_terminated),
        FExpr->getBeginLoc(),
        /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
    return;
  }

  // CHECK: empty format string?
  if (StrLen == 0 && numDataArgs > 0) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
      Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
      Type == Sema::FST_OSTrace) {
    CheckPrintfHandler H(
        S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
        (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, APK,
        Args, format_idx, inFunctionCall, CallType, CheckedVarArgs,
        UncoveredArg);

    if (!analyze_format_string::ParsePrintfString(
            H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo(),
            Type == Sema::FST_FreeBSDKPrintf))
      H.DoneProcessing();
  } else if (Type == Sema::FST_Scanf) {
    CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
                        numDataArgs, Str, APK, Args, format_idx, inFunctionCall,
                        CallType, CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParseScanfString(
            H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
      H.DoneProcessing();
  } // TODO: handle other formats
}
bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
  // Str - The format string.  NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  return analyze_format_string::ParseFormatStringHasSArg(
      Str, Str + StrLen, getLangOpts(), Context.getTargetInfo());
}
//===--- CHECK: Warn on use of wrong absolute value function. -------------===//

// Returns the related absolute value function that is larger, or 0 if one
// does not exist.
static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
  switch (AbsFunction) {
  default:
    return 0;

  case Builtin::BI__builtin_abs:
    return Builtin::BI__builtin_labs;
  case Builtin::BI__builtin_labs:
    return Builtin::BI__builtin_llabs;
  case Builtin::BI__builtin_llabs:
    return 0;

  case Builtin::BI__builtin_fabsf:
    return Builtin::BI__builtin_fabs;
  case Builtin::BI__builtin_fabs:
    return Builtin::BI__builtin_fabsl;
  case Builtin::BI__builtin_fabsl:
    return 0;

  case Builtin::BI__builtin_cabsf:
    return Builtin::BI__builtin_cabs;
  case Builtin::BI__builtin_cabs:
    return Builtin::BI__builtin_cabsl;
  case Builtin::BI__builtin_cabsl:
    return 0;

  case Builtin::BIabs:
    return Builtin::BIlabs;
  case Builtin::BIlabs:
    return Builtin::BIllabs;
  case Builtin::BIllabs:
    return 0;

  case Builtin::BIfabsf:
    return Builtin::BIfabs;
  case Builtin::BIfabs:
    return Builtin::BIfabsl;
  case Builtin::BIfabsl:
    return 0;

  case Builtin::BIcabsf:
    return Builtin::BIcabs;
  case Builtin::BIcabs:
    return Builtin::BIcabsl;
  case Builtin::BIcabsl:
    return 0;
  }
}
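// Illustrative sketch (not part of the original source): the widening ladders
// encoded above, used to step to a larger variant of the same family:
//
//   abs   -> labs -> llabs    (integer)
//   fabsf -> fabs -> fabsl    (floating point)
//   cabsf -> cabs -> cabsl    (complex)
//
// e.g. getLargerAbsoluteValueFunction(Builtin::BIabs) yields Builtin::BIlabs.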
// Returns the argument type of the absolute value function.
static QualType getAbsoluteValueArgumentType(ASTContext &Context,
                                             unsigned AbsType) {
  if (AbsType == 0)
    return QualType();

  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  QualType BuiltinType = Context.GetBuiltinType(AbsType, Error);
  if (Error != ASTContext::GE_None)
    return QualType();

  const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>();
  if (!FT)
    return QualType();

  if (FT->getNumParams() != 1)
    return QualType();

  return FT->getParamType(0);
}
// Returns the best absolute value function, or zero, based on type and
// current absolute value function.
static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType,
                                   unsigned AbsFunctionKind) {
  unsigned BestKind = 0;
  uint64_t ArgSize = Context.getTypeSize(ArgType);
  for (unsigned Kind = AbsFunctionKind; Kind != 0;
       Kind = getLargerAbsoluteValueFunction(Kind)) {
    QualType ParamType = getAbsoluteValueArgumentType(Context, Kind);
    if (Context.getTypeSize(ParamType) >= ArgSize) {
      if (BestKind == 0)
        BestKind = Kind;
      else if (Context.hasSameType(ParamType, ArgType)) {
        BestKind = Kind;
        break;
      }
    }
  }
  return BestKind;
}
enum AbsoluteValueKind {
  AVK_Integer,
  AVK_Floating,
  AVK_Complex
};

static AbsoluteValueKind getAbsoluteValueKind(QualType T) {
  if (T->isIntegralOrEnumerationType())
    return AVK_Integer;
  if (T->isRealFloatingType())
    return AVK_Floating;
  if (T->isAnyComplexType())
    return AVK_Complex;

  llvm_unreachable("Type not integer, floating, or complex");
}
// Changes the absolute value function to a different type.  Preserves whether
// the function is a builtin.
static unsigned changeAbsFunction(unsigned AbsKind,
                                  AbsoluteValueKind ValueKind) {
  switch (ValueKind) {
  case AVK_Integer:
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsl:
    case Builtin::BI__builtin_cabsf:
    case Builtin::BI__builtin_cabs:
    case Builtin::BI__builtin_cabsl:
      return Builtin::BI__builtin_abs;
    case Builtin::BIfabsf:
    case Builtin::BIfabs:
    case Builtin::BIfabsl:
    case Builtin::BIcabsf:
    case Builtin::BIcabs:
    case Builtin::BIcabsl:
      return Builtin::BIabs;
    }
  case AVK_Floating:
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_abs:
    case Builtin::BI__builtin_labs:
    case Builtin::BI__builtin_llabs:
    case Builtin::BI__builtin_cabsf:
    case Builtin::BI__builtin_cabs:
    case Builtin::BI__builtin_cabsl:
      return Builtin::BI__builtin_fabsf;
    case Builtin::BIabs:
    case Builtin::BIlabs:
    case Builtin::BIllabs:
    case Builtin::BIcabsf:
    case Builtin::BIcabs:
    case Builtin::BIcabsl:
      return Builtin::BIfabsf;
    }
  case AVK_Complex:
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_abs:
    case Builtin::BI__builtin_labs:
    case Builtin::BI__builtin_llabs:
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsl:
      return Builtin::BI__builtin_cabsf;
    case Builtin::BIabs:
    case Builtin::BIlabs:
    case Builtin::BIllabs:
    case Builtin::BIfabsf:
    case Builtin::BIfabs:
    case Builtin::BIfabsl:
      return Builtin::BIcabsf;
    }
  }
  llvm_unreachable("Unable to convert function");
}
static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) {
  const IdentifierInfo *FnInfo = FDecl->getIdentifier();
  if (!FnInfo)
    return 0;

  switch (FDecl->getBuiltinID()) {
  default:
    return 0;
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs:
  case Builtin::BI__builtin_cabs:
  case Builtin::BI__builtin_cabsf:
  case Builtin::BI__builtin_cabsl:
  case Builtin::BIabs:
  case Builtin::BIlabs:
  case Builtin::BIllabs:
  case Builtin::BIfabs:
  case Builtin::BIfabsf:
  case Builtin::BIfabsl:
  case Builtin::BIcabs:
  case Builtin::BIcabsf:
  case Builtin::BIcabsl:
    return FDecl->getBuiltinID();
  }
  llvm_unreachable("Unknown Builtin type");
}
// If the replacement is valid, emit a note with replacement function.
// Additionally, suggest including the proper header if not already included.
static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
                            unsigned AbsKind, QualType ArgType) {
  bool EmitHeaderHint = true;
  const char *HeaderName = nullptr;
  StringRef FunctionName;
  if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
    FunctionName = "std::abs";
    if (ArgType->isIntegralOrEnumerationType()) {
      HeaderName = "cstdlib";
    } else if (ArgType->isRealFloatingType()) {
      HeaderName = "cmath";
    } else {
      llvm_unreachable("Invalid Type");
    }

    // Lookup all std::abs
    if (NamespaceDecl *Std = S.getStdNamespace()) {
      LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupQualifiedName(R, Std);

      for (const auto *I : R) {
        const FunctionDecl *FDecl = nullptr;
        if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) {
          FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl());
        } else {
          FDecl = dyn_cast<FunctionDecl>(I);
        }
        if (!FDecl)
          continue;

        // Found std::abs(), check that they are the right ones.
        if (FDecl->getNumParams() != 1)
          continue;

        // Check that the parameter type can handle the argument.
        QualType ParamType = FDecl->getParamDecl(0)->getType();
        if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) &&
            S.Context.getTypeSize(ArgType) <=
                S.Context.getTypeSize(ParamType)) {
          // Found a function, don't need the header hint.
          EmitHeaderHint = false;
          break;
        }
      }
    }
  } else {
    FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
    HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);

    if (HeaderName) {
      DeclarationName DN(&S.Context.Idents.get(FunctionName));
      LookupResult R(S, DN, Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupName(R, S.getCurScope());

      if (R.isSingleResult()) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
        if (FD && FD->getBuiltinID() == AbsKind) {
          EmitHeaderHint = false;
        } else {
          return;
        }
      } else if (!R.empty()) {
        return;
      }
    }
  }

  S.Diag(Loc, diag::note_replace_abs_function)
      << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);

  if (!HeaderName)
    return;

  if (!EmitHeaderHint)
    return;

  S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
                                                    << FunctionName;
}
template <std::size_t StrLen>
static bool IsStdFunction(const FunctionDecl *FDecl,
                          const char (&Str)[StrLen]) {
  if (!FDecl)
    return false;
  if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
    return false;
  if (!FDecl->isInStdNamespace())
    return false;

  return true;
}
// Warn when using the wrong abs() function.
void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
                                      const FunctionDecl *FDecl) {
  if (Call->getNumArgs() != 1)
    return;

  unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
  bool IsStdAbs = IsStdFunction(FDecl, "abs");
  if (AbsKind == 0 && !IsStdAbs)
    return;

  QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  QualType ParamType = Call->getArg(0)->getType();

  // Unsigned types cannot be negative.  Suggest removing the absolute value
  // function call.
  if (ArgType->isUnsignedIntegerType()) {
    StringRef FunctionName =
        IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
    Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
    Diag(Call->getExprLoc(), diag::note_remove_abs)
        << FunctionName
        << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
    return;
  }

  // Taking the absolute value of a pointer is very suspicious, they probably
  // wanted to index into an array, dereference a pointer, call a function, etc.
  if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
    unsigned DiagType = 0;
    if (ArgType->isFunctionType())
      DiagType = 1;
    else if (ArgType->isArrayType())
      DiagType = 2;

    Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
    return;
  }

  // std::abs has overloads which prevent most of the absolute value problems
  // from occurring.
  if (IsStdAbs)
    return;

  AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
  AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);

  // The argument and parameter are the same kind.  Check if they are the right
  // size.
  if (ArgValueKind == ParamValueKind) {
    if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
      return;

    unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
    Diag(Call->getExprLoc(), diag::warn_abs_too_small)
        << FDecl << ArgType << ParamType;

    if (NewAbsKind == 0)
      return;

    emitReplacement(*this, Call->getExprLoc(),
                    Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
    return;
  }

  // ArgValueKind != ParamValueKind
  // The wrong type of absolute value function was used.  Attempt to find the
  // proper one.
  unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
  NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
  if (NewAbsKind == 0)
    return;

  Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
      << FDecl << ParamValueKind << ArgValueKind;

  emitReplacement(*this, Call->getExprLoc(),
                  Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
}
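// Illustrative sketch (not part of the original source): typical diagnostics
// produced by the routine above.
//
//   unsigned u = get();   abs(u);  // warn_unsigned_abs + note removing 'abs'
//   long long v = get();  abs(v);  // warn_abs_too_small, suggests 'llabs'
//   double d = get();     abs(d);  // (in C) wrong value kind, suggests 'fabs'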
//===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//

void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
                                const FunctionDecl *FDecl) {
  if (!Call || !FDecl) return;

  // Ignore template specializations and macros.
  if (inTemplateInstantiation()) return;
  if (Call->getExprLoc().isMacroID()) return;

  // Only care about the one template argument, two function parameter std::max
  if (Call->getNumArgs() != 2) return;
  if (!IsStdFunction(FDecl, "max")) return;
  const auto *ArgList = FDecl->getTemplateSpecializationArgs();
  if (!ArgList) return;
  if (ArgList->size() != 1) return;

  // Check that template type argument is unsigned integer.
  const auto &TA = ArgList->get(0);
  if (TA.getKind() != TemplateArgument::Type) return;
  QualType ArgType = TA.getAsType();
  if (!ArgType->isUnsignedIntegerType()) return;

  // See if either argument is a literal zero.
  auto IsLiteralZeroArg = [](const Expr *E) -> bool {
    const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
    if (!MTE) return false;
    const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr());
    if (!Num) return false;
    if (Num->getValue() != 0) return false;
    return true;
  };

  const Expr *FirstArg = Call->getArg(0);
  const Expr *SecondArg = Call->getArg(1);
  const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
  const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);

  // Only warn when exactly one argument is zero.
  if (IsFirstArgZero == IsSecondArgZero) return;

  SourceRange FirstRange = FirstArg->getSourceRange();
  SourceRange SecondRange = SecondArg->getSourceRange();

  SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;

  Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
      << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;

  // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
  SourceRange RemovalRange;
  if (IsFirstArgZero) {
    RemovalRange = SourceRange(FirstRange.getBegin(),
                               SecondRange.getBegin().getLocWithOffset(-1));
  } else {
    RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()),
                               SecondRange.getEnd());
  }

  Diag(Call->getExprLoc(), diag::note_remove_max_call)
      << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
      << FixItHint::CreateRemoval(RemovalRange);
}
//===--- CHECK: Standard memory functions ---------------------------------===//

/// Takes the expression passed to the size_t parameter of functions
/// such as memcmp, strncat, etc., and warns if it's a comparison.
///
/// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
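// Illustrative note: in the typo above, the whole comparison "sizeof(a) > 0"
// (which evaluates to 0 or 1) becomes the size argument; the intended call was
// almost certainly "if (memcmp(&a, &b, sizeof(a)) > 0)".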
static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
                                           IdentifierInfo *FnName,
                                           SourceLocation FnLoc,
                                           SourceLocation RParenLoc) {
  const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
  if (!Size)
    return false;

  // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||:
  if (!Size->isComparisonOp() && !Size->isLogicalOp())
    return false;

  SourceRange SizeRange = Size->getSourceRange();
  S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
      << SizeRange << FnName;
  S.Diag(FnLoc, diag::note_memsize_comparison_paren)
      << FixItHint::CreateInsertion(
             S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
      << FixItHint::CreateRemoval(RParenLoc);
  S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
      << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
      << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
                                    ")");

  return true;
}
/// Determine whether the given type is or contains a dynamic class type
/// (e.g., whether it has a vtable).
static const CXXRecordDecl *getContainedDynamicClass(QualType T,
                                                     bool &IsContained) {
  // Look through array types while ignoring qualifiers.
  const Type *Ty = T->getBaseElementTypeUnsafe();
  IsContained = false;

  const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  RD = RD ? RD->getDefinition() : nullptr;
  if (!RD || RD->isInvalidDecl())
    return nullptr;

  if (RD->isDynamicClass())
    return RD;

  // Check all the fields. If any bases were dynamic, the class is dynamic.
  // It's impossible for a class to transitively contain itself by value, so
  // infinite recursion is impossible.
  for (auto *FD : RD->fields()) {
    bool SubContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(FD->getType(), SubContained)) {
      IsContained = true;
      return ContainedRD;
    }
  }

  return nullptr;
}
static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) {
  if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E))
    if (Unary->getKind() == UETT_SizeOf)
      return Unary;
  return nullptr;
}

/// If E is a sizeof expression, returns its argument expression,
/// otherwise returns NULL.
static const Expr *getSizeOfExprArg(const Expr *E) {
  if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
    if (!SizeOf->isArgumentType())
      return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
  return nullptr;
}

/// If E is a sizeof expression, returns its argument type.
static QualType getSizeOfArgType(const Expr *E) {
  if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
    return SizeOf->getTypeOfArgument();
  return QualType();
}
struct SearchNonTrivialToInitializeField
    : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
  using Super =
      DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;

  SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}

  void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
                     SourceLocation SL) {
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PDIK, AT, SL);
      return;
    }

    Super::visitWithKind(PDIK, FT, SL);
  }

  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
                  const ArrayType *AT, SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void visitTrivial(QualType FT, SourceLocation SL) {}

  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;
  Sema &S;
};
struct SearchNonTrivialToCopyField
    : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
  using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;

  SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}

  void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
                     SourceLocation SL) {
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PCK, AT, SL);
      return;
    }

    Super::visitWithKind(PCK, FT, SL);
  }

  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
                  SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
                SourceLocation SL) {}
  void visitTrivial(QualType FT, SourceLocation SL) {}
  void visitVolatileTrivial(QualType FT, SourceLocation SL) {}

  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;
  Sema &S;
};
/// Detect if \c SizeofExpr is likely to calculate the sizeof an object.
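// Illustrative (hypothetical) examples: "sizeof(buf)", "N * sizeof(T)", and
// "sizeof(A) + sizeof(B)" are all treated as likely size computations below,
// while an unrelated constant such as "42" is not.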
static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
  SizeofExpr = SizeofExpr->IgnoreParenImpCasts();

  if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) {
    if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
      return false;

    return doesExprLikelyComputeSize(BO->getLHS()) ||
           doesExprLikelyComputeSize(BO->getRHS());
  }

  return getAsSizeOfExpr(SizeofExpr) != nullptr;
}
/// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
///
/// \code
///   #define MACRO 0
///   foo(MACRO);
///   foo(0);
/// \endcode
///
/// This should return true for the first call to foo, but not for the second
/// (regardless of whether foo is a macro or function).
static bool isArgumentExpandedFromMacro(SourceManager &SM,
                                        SourceLocation CallLoc,
                                        SourceLocation ArgLoc) {
  if (!CallLoc.isMacroID())
    return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc);

  return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) !=
         SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc));
}
/// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
/// last two arguments transposed.
static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
  if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
    return;

  const Expr *SizeArg =
      Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();

  auto isLiteralZero = [](const Expr *E) {
    return (isa<IntegerLiteral>(E) &&
            cast<IntegerLiteral>(E)->getValue() == 0) ||
           (isa<CharacterLiteral>(E) &&
            cast<CharacterLiteral>(E)->getValue() == 0);
  };

  // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
  SourceLocation CallLoc = Call->getRParenLoc();
  SourceManager &SM = S.getSourceManager();
  if (isLiteralZero(SizeArg) &&
      !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) {

    SourceLocation DiagLoc = SizeArg->getExprLoc();

    // Some platforms #define bzero to __builtin_memset. See if this is the
    // case, and if so, emit a better diagnostic.
    if (BId == Builtin::BIbzero ||
        (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
                                    CallLoc, SM, S.getLangOpts()) == "bzero")) {
      S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
      S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
    } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) {
      S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
      S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
    }
  }

  // If the second argument to a memset is a sizeof expression and the third
  // isn't, this is also likely an error. This should catch
  // 'memset(buf, sizeof(buf), 0xff)'.
  if (BId == Builtin::BImemset &&
      doesExprLikelyComputeSize(Call->getArg(1)) &&
      !doesExprLikelyComputeSize(Call->getArg(2))) {
    SourceLocation DiagLoc = Call->getArg(1)->getExprLoc();
    S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
    S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
  }
}
/// Check for dangerous or invalid arguments to memset().
///
/// This issues warnings on known problematic, dangerous or unspecified
/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
/// function calls.
///
/// \param Call The call expression to diagnose.
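// Illustrative (hypothetical) patterns diagnosed below: "memset(p, 0,
// sizeof(p))" where the size is that of the pointer rather than the pointee,
// memcpy/memset over an object that has a vtable, and memset/memcpy over C
// structs whose fields are non-trivial to default-initialize or copy.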
void Sema::CheckMemaccessArguments(const CallExpr *Call,
                                   unsigned BId,
                                   IdentifierInfo *FnName) {
  assert(BId != 0);

  // It is possible to have a non-standard definition of memset. Validate
  // we have enough arguments, and if not, abort further checking.
  unsigned ExpectedNumArgs =
      (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
  if (Call->getNumArgs() < ExpectedNumArgs)
    return;

  unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
                      BId == Builtin::BIstrndup ? 1 : 2);
  unsigned LenArg =
      (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
  const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();

  if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Catch cases like 'memset(buf, sizeof(buf), 0)'.
  CheckMemaccessSize(*this, BId, Call);

  // We have special checking when the length is a sizeof expression.
  QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
  const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
  llvm::FoldingSetNodeID SizeOfArgID;

  // Although widely used, 'bzero' is not a standard function. Be more strict
  // with the argument types before allowing diagnostics and only allow the
  // form bzero(ptr, sizeof(...)).
  QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
    return;

  for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
    const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
    SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();

    QualType DestTy = Dest->getType();
    QualType PointeeTy;
    if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
      PointeeTy = DestPtrTy->getPointeeType();

      // Never warn about void type pointers. This can be used to suppress
      // false positives.
      if (PointeeTy->isVoidType())
        continue;

      // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
      // actually comparing the expressions for equality. Because computing the
      // expression IDs can be expensive, we only do this if the diagnostic is
      // enabled.
      if (SizeOfArg &&
          !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
                           SizeOfArg->getExprLoc())) {
        // We only compute IDs for expressions if the warning is enabled, and
        // cache the sizeof arg's ID.
        if (SizeOfArgID == llvm::FoldingSetNodeID())
          SizeOfArg->Profile(SizeOfArgID, Context, true);
        llvm::FoldingSetNodeID DestID;
        Dest->Profile(DestID, Context, true);
        if (DestID == SizeOfArgID) {
          // TODO: For strncpy() and friends, this could suggest sizeof(dst)
          //       over sizeof(src) as well.
          unsigned ActionIdx = 0; // Default is to suggest dereferencing.
          StringRef ReadableName = FnName->getName();

          if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
            if (UnaryOp->getOpcode() == UO_AddrOf)
              ActionIdx = 1; // If it's an address-of operator, just remove it.
          if (!PointeeTy->isIncompleteType() &&
              (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
            ActionIdx = 2; // If the pointee's size is sizeof(char),
                           // suggest an explicit length.

          // If the function is defined as a builtin macro, do not show macro
          // expansion.
          SourceLocation SL = SizeOfArg->getExprLoc();
          SourceRange DSR = Dest->getSourceRange();
          SourceRange SSR = SizeOfArg->getSourceRange();
          SourceManager &SM = getSourceManager();

          if (SM.isMacroArgExpansion(SL)) {
            ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
            SL = SM.getSpellingLoc(SL);
            DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
                              SM.getSpellingLoc(DSR.getEnd()));
            SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
                              SM.getSpellingLoc(SSR.getEnd()));
          }

          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess)
                                  << ReadableName << PointeeTy << DestTy << DSR
                                  << SSR);
          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
                                  << ActionIdx << SSR);

          break;
        }
      }

      // Also check for cases where the sizeof argument is the exact same
      // type as the memory argument, and where it points to a user-defined
      // record type.
      if (SizeOfArgTy != QualType()) {
        if (PointeeTy->isRecordType() &&
            Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
          DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
                              PDiag(diag::warn_sizeof_pointer_type_memaccess)
                                  << FnName << SizeOfArgTy << ArgIdx
                                  << PointeeTy << Dest->getSourceRange()
                                  << LenExpr->getSourceRange());
          break;
        }
      }
    } else if (DestTy->isArrayType()) {
      PointeeTy = DestTy;
    }

    if (PointeeTy == QualType())
      continue;

    // Always complain about dynamic classes.
    bool IsContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(PointeeTy, IsContained)) {

      unsigned OperationType = 0;
      const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
      // "overwritten" if we're warning about the destination for any call
      // but memcmp; otherwise a verb appropriate to the call.
      if (ArgIdx != 0 || IsCmp) {
        if (BId == Builtin::BImemcpy)
          OperationType = 1;
        else if (BId == Builtin::BImemmove)
          OperationType = 2;
        else if (IsCmp)
          OperationType = 3;
      }

      DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                          PDiag(diag::warn_dyn_class_memaccess)
                              << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName
                              << IsContained << ContainedRD << OperationType
                              << Call->getCallee()->getSourceRange());
    } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
               BId != Builtin::BImemset)
      DiagRuntimeBehavior(
          Dest->getExprLoc(), Dest,
          PDiag(diag::warn_arc_object_memaccess)
              << ArgIdx << FnName << PointeeTy
              << Call->getCallee()->getSourceRange());
    else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
      if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
          RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 0);
        SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this);
      } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
                 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 1);
        SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this);
      } else {
        continue;
      }
    } else
      continue;

    DiagRuntimeBehavior(
        Dest->getExprLoc(), Dest,
        PDiag(diag::note_bad_memaccess_silence)
            << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
    break;
  }
}
// A little helper routine: ignore addition and subtraction of integer literals.
// This intentionally does not ignore all integer constant expressions because
// we don't want to remove sizeof().
static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) {
  Ex = Ex->IgnoreParenCasts();

  while (true) {
    const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex);
    if (!BO || !BO->isAdditiveOp())
      break;

    const Expr *RHS = BO->getRHS()->IgnoreParenCasts();
    const Expr *LHS = BO->getLHS()->IgnoreParenCasts();

    if (isa<IntegerLiteral>(RHS))
      Ex = LHS;
    else if (isa<IntegerLiteral>(LHS))
      Ex = RHS;
    else
      break;
  }

  return Ex;
}
static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
                                                      ASTContext &Context) {
  // Only handle constant-sized or VLAs, but not flexible members.
  if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) {
    // Only issue the FIXIT for arrays of size > 1.
    if (CAT->getSize().getSExtValue() <= 1)
      return false;
  } else if (!Ty->isVariableArrayType()) {
    return false;
  }
  return true;
}
// Warn if the user has made the 'size' argument to strlcpy or strlcat
// the size of the source, instead of the destination.
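// Illustrative (hypothetical) example: "strlcpy(dst, src, sizeof(src))" is
// flagged; when dst is a constant-size array, a fix-it below suggests using
// "sizeof(dst)" as the size argument instead.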
void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
                                    IdentifierInfo *FnName) {

  // Don't crash if the user has the wrong number of arguments
  unsigned NumArgs = Call->getNumArgs();
  if ((NumArgs != 3) && (NumArgs != 4))
    return;

  const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context);
  const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context);
  const Expr *CompareWithSrc = nullptr;

  if (CheckMemorySizeofForComparison(*this, SizeArg, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Look for 'strlcpy(dst, x, sizeof(x))'
  if (const Expr *Ex = getSizeOfExprArg(SizeArg))
    CompareWithSrc = Ex;
  else {
    // Look for 'strlcpy(dst, x, strlen(x))'
    if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) {
      if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen &&
          SizeCall->getNumArgs() == 1)
        CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context);
    }
  }

  if (!CompareWithSrc)
    return;

  // Determine if the argument to sizeof/strlen is equal to the source
  // argument. In principle there's all kinds of things you could do
  // here, for instance creating an == expression and evaluating it with
  // EvaluateAsBooleanCondition, but this uses a more direct technique:
  const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg);
  if (!SrcArgDRE)
    return;

  const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc);
  if (!CompareWithSrcDRE ||
      SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl())
    return;

  const Expr *OriginalSizeArg = Call->getArg(2);
  Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size)
      << OriginalSizeArg->getSourceRange() << FnName;

  // Output a FIXIT hint if the destination is an array (rather than a
  // pointer to an array). This could be enhanced to handle some
  // pointers if we know the actual size, like if DstArg is 'array+2'
  // we could say 'sizeof(array)-2'.
  const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts();
  if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context))
    return;

  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ")";

  Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size)
      << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
                                      OS.str());
}
/// Check if two expressions refer to the same declaration.
static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
  if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
    if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
      return D1->getDecl() == D2->getDecl();
  return false;
}
static const Expr *getStrlenExprArg(const Expr *E) {
  if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
    const FunctionDecl *FD = CE->getDirectCallee();
    if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
      return nullptr;
    return CE->getArg(0)->IgnoreParenCasts();
  }
  return nullptr;
}
// Warn on anti-patterns as the 'size' argument to strncat.
// The correct size argument should look like the following:
//   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
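// Illustrative (hypothetical) anti-patterns flagged below: "sizeof(dst)" or
// "sizeof(dst) - strlen(dst)" (neither reserves space for dst's existing
// contents plus the trailing null byte) and any size derived from the source,
// such as "sizeof(src)".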
void Sema::CheckStrncatArguments(const CallExpr *CE,
                                 IdentifierInfo *FnName) {
  // Don't crash if the user has the wrong number of arguments.
  if (CE->getNumArgs() < 3)
    return;
  const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
  const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
  const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();

  if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
                                     CE->getRParenLoc()))
    return;

  // Identify common expressions, which are wrongly used as the size argument
  // to strncat and may lead to buffer overflows.
  unsigned PatternType = 0;
  if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
    // - sizeof(dst)
    if (referToTheSameDecl(SizeOfArg, DstArg))
      PatternType = 1;
    // - sizeof(src)
    else if (referToTheSameDecl(SizeOfArg, SrcArg))
      PatternType = 2;
  } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
    if (BE->getOpcode() == BO_Sub) {
      const Expr *L = BE->getLHS()->IgnoreParenCasts();
      const Expr *R = BE->getRHS()->IgnoreParenCasts();
      // - sizeof(dst) - strlen(dst)
      if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
          referToTheSameDecl(DstArg, getStrlenExprArg(R)))
        PatternType = 1;
      // - sizeof(src) - (anything)
      else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
        PatternType = 2;
    }
  }

  if (PatternType == 0)
    return;

  // Generate the diagnostic.
  SourceLocation SL = LenArg->getBeginLoc();
  SourceRange SR = LenArg->getSourceRange();
  SourceManager &SM = getSourceManager();

  // If the function is defined as a builtin macro, do not show macro expansion.
  if (SM.isMacroArgExpansion(SL)) {
    SL = SM.getSpellingLoc(SL);
    SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
                     SM.getSpellingLoc(SR.getEnd()));
  }

  // Check if the destination is an array (rather than a pointer to an array).
  QualType DstTy = DstArg->getType();
  bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy,
                                                                    Context);
  if (!isKnownSizeArray) {
    if (PatternType == 1)
      Diag(SL, diag::warn_strncat_wrong_size) << SR;
    else
      Diag(SL, diag::warn_strncat_src_size) << SR;
    return;
  }

  if (PatternType == 1)
    Diag(SL, diag::warn_strncat_large_size) << SR;
  else
    Diag(SL, diag::warn_strncat_src_size) << SR;

  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - ";
  OS << "strlen(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - 1";

  Diag(SL, diag::note_strncat_wrong_size)
      << FixItHint::CreateReplacement(SR, OS.str());
}
void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName,
                                const UnaryOperator *UnaryExpr, const Decl *D) {
  if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) {
    S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object)
        << CalleeName << 0 /*object: */ << cast<NamedDecl>(D);
    return;
  }
}

void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName,
                                 const UnaryOperator *UnaryExpr) {
  if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) {
    const Decl *D = Lvalue->getDecl();
    if (isa<DeclaratorDecl>(D))
      if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType())
        return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D);
  }

  if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr()))
    return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr,
                                      Lvalue->getMemberDecl());
}

void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName,
                            const UnaryOperator *UnaryExpr) {
  const auto *Lambda = dyn_cast<LambdaExpr>(
      UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens());
  if (!Lambda)
    return;

  S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object)
      << CalleeName << 2 /*object: lambda expression*/;
}

void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName,
                                  const DeclRefExpr *Lvalue) {
  const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl());
  if (Var == nullptr)
    return;

  S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object)
      << CalleeName << 0 /*object: */ << Var;
}

void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName,
                            const CastExpr *Cast) {
  SmallString<128> SizeString;
  llvm::raw_svector_ostream OS(SizeString);

  clang::CastKind Kind = Cast->getCastKind();
  if (Kind == clang::CK_BitCast &&
      !Cast->getSubExpr()->getType()->isFunctionPointerType())
    return;
  if (Kind == clang::CK_IntegralToPointer &&
      !isa<IntegerLiteral>(
          Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens()))
    return;

  switch (Cast->getCastKind()) {
  case clang::CK_BitCast:
  case clang::CK_IntegralToPointer:
  case clang::CK_FunctionToPointerDecay:
    OS << '\'';
    Cast->printPretty(OS, nullptr, S.getPrintingPolicy());
    OS << '\'';
    break;
  default:
    return;
  }

  S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object)
      << CalleeName << 0 /*object: */ << OS.str();
}
/// Alerts the user that they are attempting to free a non-malloc'd object.
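// Illustrative (hypothetical) calls this diagnoses: "free(&LocalVar)",
// "free(StackArray)", "free(&&SomeLabel)", and freeing a block or a lambda.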
void Sema::CheckFreeArguments(const CallExpr *E) {
  const std::string CalleeName =
      cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();

  { // Prefer something that doesn't involve a cast to make things simpler.
    const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
    if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg))
      switch (UnaryExpr->getOpcode()) {
      case UnaryOperator::Opcode::UO_AddrOf:
        return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr);
      case UnaryOperator::Opcode::UO_Plus:
        return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr);
      default:
        break;
      }

    if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg))
      if (Lvalue->getType()->isArrayType())
        return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue);

    if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) {
      Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object)
          << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier();
      return;
    }

    if (isa<BlockExpr>(Arg)) {
      Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object)
          << CalleeName << 1 /*object: block*/;
      return;
    }
  }
  // Maybe the cast was important, check after the other cases.
  if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0)))
    return CheckFreeArgumentsCast(*this, CalleeName, Cast);
}
void Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                              SourceLocation ReturnLoc,
                              bool isObjCMethod,
                              const AttrVec *Attrs,
                              const FunctionDecl *FD) {
  // Check if the return value is null but should not be.
  if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
       (!isObjCMethod && isNonNullType(lhsType))) &&
      CheckNonNullExpr(*this, RetValExp))
    Diag(ReturnLoc, diag::warn_null_ret)
        << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();

  // C++11 [basic.stc.dynamic.allocation]p4:
  //   If an allocation function declared with a non-throwing
  //   exception-specification fails to allocate storage, it shall return
  //   a null pointer. Any other allocation function that fails to allocate
  //   storage shall indicate failure only by throwing an exception [...]
  if (FD) {
    OverloadedOperatorKind Op = FD->getOverloadedOperator();
    if (Op == OO_New || Op == OO_Array_New) {
      const FunctionProtoType *Proto
        = FD->getType()->castAs<FunctionProtoType>();
      if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
          CheckNonNullExpr(*this, RetValExp))
        Diag(ReturnLoc, diag::warn_operator_new_returns_null)
            << FD << getLangOpts().CPlusPlus11;
    }
  }

  if (RetValExp && RetValExp->getType()->isWebAssemblyTableType()) {
    Diag(ReturnLoc, diag::err_wasm_table_art) << 1;
    return;
  }

  // PPC MMA non-pointer types are not allowed as return type. Checking the type
  // here prevents the user from using a PPC MMA type as trailing return type.
  if (Context.getTargetInfo().getTriple().isPPC64())
    CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
}
/// Check for comparisons of floating-point values using == and !=. Issue a
/// warning if the comparison is not likely to do what the programmer intended.
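// Illustrative (hypothetical) example: with "float F;", the test "F == 0.1"
// can never be true because the double literal 0.1 has no exact float
// representation; more generally "F1 == F2" is flagged under -Wfloat-equal
// unless it is a self-comparison or a comparison against an exact literal.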
void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS,
                                BinaryOperatorKind Opcode) {
  if (!BinaryOperator::isEqualityOp(Opcode))
    return;

  // Match and capture subexpressions such as "(float) X == 0.1".
  FloatingLiteral *FPLiteral;
  CastExpr *FPCast;
  auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) {
    FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens());
    FPCast = dyn_cast<CastExpr>(R->IgnoreParens());
    return FPLiteral && FPCast;
  };

  if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) {
    auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>();
    auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>();
    if (SourceTy && TargetTy && SourceTy->isFloatingPoint() &&
        TargetTy->isFloatingPoint()) {
      bool Lossy;
      llvm::APFloat TargetC = FPLiteral->getValue();
      TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)),
                      llvm::APFloat::rmNearestTiesToEven, &Lossy);
      if (Lossy) {
        // If the literal cannot be represented in the source type, then a
        // check for == is always false and check for != is always true.
        Diag(Loc, diag::warn_float_compare_literal)
            << (Opcode == BO_EQ) << QualType(SourceTy, 0)
            << LHS->getSourceRange() << RHS->getSourceRange();
        return;
      }
    }
  }

  // Match a more general floating-point equality comparison (-Wfloat-equal).
  Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
  Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();

  // Special case: check for x == x (which is OK).
  // Do not emit warnings for such cases.
  if (auto *DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
    if (auto *DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
      if (DRL->getDecl() == DRR->getDecl())
        return;

  // Special case: check for comparisons against literals that can be exactly
  // represented by APFloat. In such cases, do not emit a warning. This
  // is a heuristic: often comparison against such literals are used to
  // detect if a value in a variable has not changed. This clearly can
  // lead to false negatives.
  if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
    if (FLL->isExact())
      return;
  } else
    if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
      if (FLR->isExact())
        return;

  // Check for comparisons with builtin types.
  if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
    if (CL->getBuiltinCallee())
      return;

  if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
    if (CR->getBuiltinCallee())
      return;

  // Emit the diagnostic.
  Diag(Loc, diag::warn_floatingpoint_eq)
      << LHS->getSourceRange() << RHS->getSourceRange();
}
//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
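// Illustrative (hypothetical) examples of what this machinery feeds: with
// "int I; unsigned U;", the test "I < U" is a mixed-sign comparison
// (-Wsign-compare), and implicitly converting a wider integer into a narrower
// one may lose value (-Wconversion). The IntRange helpers below estimate how
// many bits an expression's value can actually occupy so those warnings can
// avoid obvious false positives.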
/// Structure recording the 'active' range of an integer-valued
/// expression.
struct IntRange {
  /// The number of bits active in the int. Note that this includes exactly one
  /// sign bit if !NonNegative.
  unsigned Width;

  /// True if the int is known not to have negative values. If so, all leading
  /// bits before Width are known zero, otherwise they are known to be the
  /// same as the MSB within Width.
  bool NonNegative;

  IntRange(unsigned Width, bool NonNegative)
      : Width(Width), NonNegative(NonNegative) {}

  /// Number of bits excluding the sign bit.
  unsigned valueBits() const {
    return NonNegative ? Width : Width - 1;
  }

  /// Returns the range of the bool type.
  static IntRange forBoolType() {
    return IntRange(1, true);
  }

  /// Returns the range of an opaque value of the given integral type.
  static IntRange forValueOfType(ASTContext &C, QualType T) {
    return forValueOfCanonicalType(C,
                          T->getCanonicalTypeInternal().getTypePtr());
  }

  /// Returns the range of an opaque value of a canonical integral type.
  static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();

    if (!C.getLangOpts().CPlusPlus) {
      // For enum types in C code, use the underlying datatype.
      if (const EnumType *ET = dyn_cast<EnumType>(T))
        T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr();
    } else if (const EnumType *ET = dyn_cast<EnumType>(T)) {
      // For enum types in C++, use the known bit width of the enumerators.
      EnumDecl *Enum = ET->getDecl();
      // In C++11, enums can have a fixed underlying type. Use this type to
      // compute the range.
      if (Enum->isFixed()) {
        return IntRange(C.getIntWidth(QualType(T, 0)),
                        !ET->isSignedIntegerOrEnumerationType());
      }

      unsigned NumPositive = Enum->getNumPositiveBits();
      unsigned NumNegative = Enum->getNumNegativeBits();

      if (NumNegative == 0)
        return IntRange(NumPositive, true/*NonNegative*/);
      else
        return IntRange(std::max(NumPositive + 1, NumNegative),
                        false/*NonNegative*/);
    }

    if (const auto *EIT = dyn_cast<BitIntType>(T))
      return IntRange(EIT->getNumBits(), EIT->isUnsigned());

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the "target" range of a canonical integral type, i.e.
  /// the range of values expressible in the type.
  ///
  /// This matches forValueOfCanonicalType except that enums have the
  /// full range of their type, not the range of their enumerators.
  static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();
    if (const EnumType *ET = dyn_cast<EnumType>(T))
      T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();

    if (const auto *EIT = dyn_cast<BitIntType>(T))
      return IntRange(EIT->getNumBits(), EIT->isUnsigned());

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the supremum of two ranges: i.e. their conservative merge.
  static IntRange join(IntRange L, IntRange R) {
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned,
                    L.NonNegative && R.NonNegative);
  }

  /// Return the range of a bitwise-AND of the two ranges.
  static IntRange bit_and(IntRange L, IntRange R) {
    unsigned Bits = std::max(L.Width, R.Width);
    bool NonNegative = false;
    if (L.NonNegative) {
      Bits = std::min(Bits, L.Width);
      NonNegative = true;
    }
    if (R.NonNegative) {
      Bits = std::min(Bits, R.Width);
      NonNegative = true;
    }
    return IntRange(Bits, NonNegative);
  }

  /// Return the range of a sum of the two ranges.
  static IntRange sum(IntRange L, IntRange R) {
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned,
                    Unsigned);
  }

  /// Return the range of a difference of the two ranges.
  static IntRange difference(IntRange L, IntRange R) {
    // We need a 1-bit-wider range if:
    //   1) LHS can be negative: least value can be reduced.
    //   2) RHS can be negative: greatest value can be increased.
    bool CanWiden = !L.NonNegative || !R.NonNegative;
    bool Unsigned = L.NonNegative && R.Width == 0;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden +
                        !Unsigned,
                    Unsigned);
  }

  /// Return the range of a product of the two ranges.
  static IntRange product(IntRange L, IntRange R) {
    // If both LHS and RHS can be negative, we can form
    //   -2^L * -2^R = 2^(L + R)
    // which requires L + R + 1 value bits to represent.
    bool CanWiden = !L.NonNegative && !R.NonNegative;
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned,
                    Unsigned);
  }

  /// Return the range of a remainder operation between the two ranges.
  static IntRange rem(IntRange L, IntRange R) {
    // The result of a remainder can't be larger than the result of
    // either side. The sign of the result is the sign of the LHS.
    bool Unsigned = L.NonNegative;
    return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned,
                    Unsigned);
  }
};
static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
                              unsigned MaxWidth) {
  if (value.isSigned() && value.isNegative())
    return IntRange(value.getSignificantBits(), false);

  if (value.getBitWidth() > MaxWidth)
    value = value.trunc(MaxWidth);

  // isNonNegative() just checks the sign bit without considering
  // signedness.
  return IntRange(value.getActiveBits(), true);
}

static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
                              unsigned MaxWidth) {
  if (result.isInt())
    return GetValueRange(C, result.getInt(), MaxWidth);

  if (result.isVector()) {
    IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
    for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
      IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
      R = IntRange::join(R, El);
    }
    return R;
  }

  if (result.isComplexInt()) {
    IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
    IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
    return IntRange::join(R, I);
  }

  // This can happen with lossless casts to intptr_t of "based" lvalues.
  // Assume it might use arbitrary bits.
  // FIXME: The only reason we need to pass the type in here is to get
  // the sign right on this one case. It would be nice if APValue
  // preserved this.
  assert(result.isLValue() || result.isAddrLabelDiff());
  return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
}
static QualType GetExprType(const Expr *E) {
  QualType Ty = E->getType();
  if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
    Ty = AtomicRHS->getValueType();
  return Ty;
}
/// Pseudo-evaluate the given integer expression, estimating the
/// range of values it might take.
///
/// \param MaxWidth The width to which the value will be truncated.
/// \param Approximate If \c true, return a likely range for the result: in
///        particular, assume that arithmetic on narrower types doesn't leave
///        those types. If \c false, return a range including all possible
///        result values.
static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
                             bool InConstantContext, bool Approximate) {
  E = E->IgnoreParens();

  // Try a full evaluation first.
  Expr::EvalResult result;
  if (E->EvaluateAsRValue(result, C, InConstantContext))
    return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);

  // I think we only want to look through implicit casts here; if the
  // user has an explicit widening cast, we should treat the value as
  // being of the new, wider type.
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
    if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
      return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
                          Approximate);

    IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));

    bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
                         CE->getCastKind() == CK_BooleanToSignedIntegral;

    // Assume that non-integer casts can span the full range of the type.
    if (!isIntegerCast)
      return OutputTypeRange;

    IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
                                     std::min(MaxWidth, OutputTypeRange.Width),
                                     InConstantContext, Approximate);

    // Bail out if the subexpr's range is as wide as the cast type.
    if (SubRange.Width >= OutputTypeRange.Width)
      return OutputTypeRange;

    // Otherwise, we take the smaller width, and we're non-negative if
    // either the output type or the subexpr is.
    return IntRange(SubRange.Width,
                    SubRange.NonNegative || OutputTypeRange.NonNegative);
  }

  if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
    // If we can fold the condition, just take that operand.
    bool CondResult;
    if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
      return GetExprRange(C,
                          CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
                          MaxWidth, InConstantContext, Approximate);

    // Otherwise, conservatively merge.
    // GetExprRange requires an integer expression, but a throw expression
    // results in a void type.
    Expr *E = CO->getTrueExpr();
    IntRange L = E->getType()->isVoidType()
                     ? IntRange{0, true}
                     : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
    E = CO->getFalseExpr();
    IntRange R = E->getType()->isVoidType()
                     ? IntRange{0, true}
                     : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
    return IntRange::join(L, R);
  }

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    IntRange (*Combine)(IntRange, IntRange) = IntRange::join;

    switch (BO->getOpcode()) {
    case BO_Cmp:
      llvm_unreachable("builtin <=> should have class type");

    // Boolean-valued operations are single-bit and positive.
    case BO_LAnd:
    case BO_LOr:
    case BO_LT:
    case BO_GT:
    case BO_LE:
    case BO_GE:
    case BO_EQ:
    case BO_NE:
      return IntRange::forBoolType();

    // The type of the assignments is the type of the LHS, so the RHS
    // is not necessarily the same type.
    case BO_MulAssign:
    case BO_DivAssign:
    case BO_RemAssign:
    case BO_AddAssign:
    case BO_SubAssign:
    case BO_XorAssign:
    case BO_OrAssign:
      // TODO: bitfields?
      return IntRange::forValueOfType(C, GetExprType(E));

    // Simple assignments just pass through the RHS, which will have
    // been coerced to the LHS type.
    case BO_Assign:
      // TODO: bitfields?
      return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
                          Approximate);

    // Operations with opaque sources are black-listed.
    case BO_PtrMemD:
    case BO_PtrMemI:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Bitwise-and uses the *infinum* of the two source ranges.
    case BO_And:
    case BO_AndAssign:
      Combine = IntRange::bit_and;
      break;

    // Left shift gets black-listed based on a judgement call.
    case BO_Shl:
      // ...except that we want to treat '1 << (blah)' as logically
      // positive. It's an important idiom.
      if (IntegerLiteral *I
            = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
        if (I->getValue() == 1) {
          IntRange R = IntRange::forValueOfType(C, GetExprType(E));
          return IntRange(R.Width, /*NonNegative*/ true);
        }
      }
      [[fallthrough]];

    case BO_ShlAssign:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Right shift by a constant can narrow its left argument.
    case BO_Shr:
    case BO_ShrAssign: {
      IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext,
                                Approximate);

      // If the shift amount is a positive constant, drop the width by
      // that much.
      if (std::optional<llvm::APSInt> shift =
              BO->getRHS()->getIntegerConstantExpr(C)) {
        if (shift->isNonNegative()) {
          unsigned zext = shift->getZExtValue();
          if (zext >= L.Width)
            L.Width = (L.NonNegative ? 0 : 1);
          else
            L.Width -= zext;
        }
      }

      return L;
    }

    // Comma acts as its right operand.
    case BO_Comma:
      return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
                          Approximate);

    case BO_Add:
      if (!Approximate)
        Combine = IntRange::sum;
      break;

    case BO_Sub:
      if (BO->getLHS()->getType()->isPointerType())
        return IntRange::forValueOfType(C, GetExprType(E));
      if (!Approximate)
        Combine = IntRange::difference;
      break;

    case BO_Mul:
      if (!Approximate)
        Combine = IntRange::product;
      break;

    // The width of a division result is mostly determined by the size
    // of the LHS.
    case BO_Div: {
      // Don't 'pre-truncate' the operands.
      unsigned opWidth = C.getIntWidth(GetExprType(E));
      IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext,
                                Approximate);

      // If the divisor is constant, use that.
      if (std::optional<llvm::APSInt> divisor =
              BO->getRHS()->getIntegerConstantExpr(C)) {
        unsigned log2 = divisor->logBase2(); // floor(log_2(divisor))
        if (log2 >= L.Width)
          L.Width = (L.NonNegative ? 0 : 1);
        else
          L.Width = std::min(L.Width - log2, MaxWidth);
        return L;
      }

      // Otherwise, just use the LHS's width.
      // FIXME: This is wrong if the LHS could be its minimal value and the RHS
      // could be -1.
      IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext,
                                Approximate);
      return IntRange(L.Width, L.NonNegative && R.NonNegative);
    }

    case BO_Rem:
      Combine = IntRange::rem;
      break;

    // The default behavior is okay for these.
    case BO_Xor:
    case BO_Or:
      break;
    }

    // Combine the two ranges, but limit the result to the type in which we
    // performed the computation.
    QualType T = GetExprType(E);
    unsigned opWidth = C.getIntWidth(T);
    IntRange L =
        GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate);
    IntRange R =
        GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate);
    IntRange C = Combine(L, R);
    C.NonNegative |= T->isUnsignedIntegerOrEnumerationType();
    C.Width = std::min(C.Width, MaxWidth);
    return C;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(E)) {
    switch (UO->getOpcode()) {
    // Boolean-valued operations are white-listed.
    case UO_LNot:
      return IntRange::forBoolType();

    // Operations with opaque sources are black-listed.
    case UO_Deref:
    case UO_AddrOf: // should be impossible
      return IntRange::forValueOfType(C, GetExprType(E));

    default:
      return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext,
                          Approximate);
    }
  }

  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext,
                        Approximate);

  if (const auto *BitField = E->getSourceBitField())
    return IntRange(BitField->getBitWidthValue(C),
                    BitField->getType()->isUnsignedIntegerOrEnumerationType());

  return IntRange::forValueOfType(C, GetExprType(E));
}
static IntRange GetExprRange(ASTContext &C, const Expr *E,
                             bool InConstantContext, bool Approximate) {
  return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext,
                      Approximate);
}
/// Checks whether the given value, which currently has the given
/// source semantics, has the same value when coerced through the
/// target semantics.
static bool IsSameFloatAfterCast(const llvm::APFloat &value,
                                 const llvm::fltSemantics &Src,
                                 const llvm::fltSemantics &Tgt) {
  llvm::APFloat truncated = value;

  bool ignored;
  truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored);
  truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored);

  return truncated.bitwiseIsEqual(value);
}

/// Checks whether the given value, which currently has the given
/// source semantics, has the same value when coerced through the
/// target semantics.
///
/// The value might be a vector of floats (or a complex number).
static bool IsSameFloatAfterCast(const APValue &value,
                                 const llvm::fltSemantics &Src,
                                 const llvm::fltSemantics &Tgt) {
  if (value.isFloat())
    return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);

  if (value.isVector()) {
    for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
      if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
        return false;
    return true;
  }

  assert(value.isComplexFloat());
  return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
          IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
}
static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC,
                                       bool IsListInit = false);
static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
  // Suppress cases where we are comparing against an enum constant.
  if (const DeclRefExpr *DR =
          dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
    if (isa<EnumConstantDecl>(DR->getDecl()))
      return true;

  // Suppress cases where the value is expanded from a macro, unless that macro
  // is how a language represents a boolean literal. This is the case in both C
  // and Objective-C.
  SourceLocation BeginLoc = E->getBeginLoc();
  if (BeginLoc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroName(
        BeginLoc, S.getSourceManager(), S.getLangOpts());
    return MacroName != "YES" && MacroName != "NO" &&
           MacroName != "true" && MacroName != "false";
  }

  return false;
}
*E
) {
13613 return E
->getType()->isIntegerType() &&
13614 (!E
->getType()->isSignedIntegerType() ||
13615 !E
->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
/// The promoted range of values of a type. In general this has the
/// following structure:
///
/// |-----------| . . . |-----------|
/// ^           ^       ^           ^
/// Min       HoleMin  HoleMax      Max
///
/// ... where there is only a hole if a signed type is promoted to unsigned
/// (in which case Min and Max are the smallest and largest representable
/// values).
struct PromotedRange {
13629 struct PromotedRange
{
13630 // Min, or HoleMax if there is a hole.
13631 llvm::APSInt PromotedMin
;
13632 // Max, or HoleMin if there is a hole.
13633 llvm::APSInt PromotedMax
;
13635 PromotedRange(IntRange R
, unsigned BitWidth
, bool Unsigned
) {
13637 PromotedMin
= PromotedMax
= llvm::APSInt(BitWidth
, Unsigned
);
13638 else if (R
.Width
>= BitWidth
&& !Unsigned
) {
13639 // Promotion made the type *narrower*. This happens when promoting
13640 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'.
13641 // Treat all values of 'signed int' as being in range for now.
13642 PromotedMin
= llvm::APSInt::getMinValue(BitWidth
, Unsigned
);
13643 PromotedMax
= llvm::APSInt::getMaxValue(BitWidth
, Unsigned
);
13645 PromotedMin
= llvm::APSInt::getMinValue(R
.Width
, R
.NonNegative
)
13646 .extOrTrunc(BitWidth
);
13647 PromotedMin
.setIsUnsigned(Unsigned
);
13649 PromotedMax
= llvm::APSInt::getMaxValue(R
.Width
, R
.NonNegative
)
13650 .extOrTrunc(BitWidth
);
13651 PromotedMax
.setIsUnsigned(Unsigned
);
13655 // Determine whether this range is contiguous (has no hole).
13656 bool isContiguous() const { return PromotedMin
<= PromotedMax
; }
13658 // Where a constant value is within the range.
13659 enum ComparisonResult
{
13666 InRangeFlag
= 0x40,
13668 Less
= LE
| LT
| NE
,
13669 Min
= LE
| InRangeFlag
,
13670 InRange
= InRangeFlag
,
13671 Max
= GE
| InRangeFlag
,
13672 Greater
= GE
| GT
| NE
,
13674 OnlyValue
= LE
| GE
| EQ
| InRangeFlag
,
13678 ComparisonResult
compare(const llvm::APSInt
&Value
) const {
13679 assert(Value
.getBitWidth() == PromotedMin
.getBitWidth() &&
13680 Value
.isUnsigned() == PromotedMin
.isUnsigned());
13681 if (!isContiguous()) {
13682 assert(Value
.isUnsigned() && "discontiguous range for signed compare");
13683 if (Value
.isMinValue()) return Min
;
13684 if (Value
.isMaxValue()) return Max
;
13685 if (Value
>= PromotedMin
) return InRange
;
13686 if (Value
<= PromotedMax
) return InRange
;
13690 switch (llvm::APSInt::compareValues(Value
, PromotedMin
)) {
13691 case -1: return Less
;
13692 case 0: return PromotedMin
== PromotedMax
? OnlyValue
: Min
;
13694 switch (llvm::APSInt::compareValues(Value
, PromotedMax
)) {
13695 case -1: return InRange
;
13696 case 0: return Max
;
13697 case 1: return Greater
;
13701 llvm_unreachable("impossible compare result");
  static std::optional<StringRef>
  constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
    if (Op == BO_Cmp) {
      ComparisonResult LTFlag = LT, GTFlag = GT;
      if (ConstantOnRHS) std::swap(LTFlag, GTFlag);

      if (R & EQ) return StringRef("'std::strong_ordering::equal'");
      if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
      if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");
      return std::nullopt;
    }

    ComparisonResult TrueFlag, FalseFlag;
    if (Op == BO_EQ) {
      TrueFlag = EQ;
      FalseFlag = NE;
    } else if (Op == BO_NE) {
      TrueFlag = NE;
      FalseFlag = EQ;
    } else {
      if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) {
        TrueFlag = LT;
        FalseFlag = GE;
      } else {
        TrueFlag = GT;
        FalseFlag = LE;
      }
      if (Op == BO_GE || Op == BO_LE)
        std::swap(TrueFlag, FalseFlag);
    }
    if (R & TrueFlag)
      return StringRef("true");
    if (R & FalseFlag)
      return StringRef("false");
    return std::nullopt;
  }
};
static bool HasEnumType(Expr *E) {
  // Strip off implicit integral promotions.
  while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() != CK_IntegralCast &&
        ICE->getCastKind() != CK_NoOp)
      break;
    E = ICE->getSubExpr();
  }

  return E->getType()->isEnumeralType();
}
static int classifyConstantValue(Expr *Constant) {
  // The values of this enumeration are used in the diagnostics
  // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
  enum ConstantValueKind {
    Miscellaneous = 0,
    LiteralTrue,
    LiteralFalse
  };
  if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant))
    return BL->getValue() ? ConstantValueKind::LiteralTrue
                          : ConstantValueKind::LiteralFalse;
  return ConstantValueKind::Miscellaneous;
}
static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
                                        Expr *Constant, Expr *Other,
                                        const llvm::APSInt &Value,
                                        bool RhsConstant) {
  if (S.inTemplateInstantiation())
    return false;

  Expr *OriginalOther = Other;

  Constant = Constant->IgnoreParenImpCasts();
  Other = Other->IgnoreParenImpCasts();

  // Suppress warnings on tautological comparisons between values of the same
  // enumeration type. There are only two ways we could warn on this:
  //  - If the constant is outside the range of representable values of
  //    the enumeration. In such a case, we should warn about the cast
  //    to enumeration type, not about the comparison.
  //  - If the constant is the maximum / minimum in-range value. For an
  //    enumeration type, such comparisons can be meaningful and useful.
  if (Constant->getType()->isEnumeralType() &&
      S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
    return false;

  IntRange OtherValueRange = GetExprRange(
      S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);

  QualType OtherT = Other->getType();
  if (const auto *AT = OtherT->getAs<AtomicType>())
    OtherT = AT->getValueType();
  IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);

  // Special case for ObjC BOOL on targets where its a typedef for a signed
  // char (namely, macOS). FIXME: IntRange::forValueOfType should do this.
  bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
                              S.NSAPIObj->isObjCBOOLType(OtherT) &&
                              OtherT->isSpecificBuiltinType(BuiltinType::SChar);

  // Whether we're treating Other as being a bool because of the form of
  // expression despite it having another type (typically 'int' in C).
  bool OtherIsBooleanDespiteType =
      !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
  if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
    OtherTypeRange = OtherValueRange = IntRange::forBoolType();

  // Check if all values in the range of possible values of this expression
  // lead to the same comparison outcome.
  PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
                                        Value.isUnsigned());
  auto Cmp = OtherPromotedValueRange.compare(Value);
  auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
  if (!Result)
    return false;

  // Also consider the range determined by the type alone. This allows us to
  // classify the warning under the proper diagnostic group.
  bool TautologicalTypeCompare = false;
  {
    PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
                                         Value.isUnsigned());
    auto TypeCmp = OtherPromotedTypeRange.compare(Value);
    if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
                                                       RhsConstant)) {
      TautologicalTypeCompare = true;
      Cmp = TypeCmp;
      Result = TypeResult;
    }
  }

  // Don't warn if the non-constant operand actually always evaluates to the
  // same value.
  if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
    return false;

  // Suppress the diagnostic for an in-range comparison if the constant comes
  // from a macro or enumerator. We don't want to diagnose
  //
  //   some_long_value <= INT_MAX
  //
  // when sizeof(int) == sizeof(long).
  bool InRange = Cmp & PromotedRange::InRangeFlag;
  if (InRange && IsEnumConstOrFromMacro(S, Constant))
    return false;

  // A comparison of an unsigned bit-field against 0 is really a type problem,
  // even though at the type level the bit-field might promote to 'signed int'.
  if (Other->refersToBitField() && InRange && Value == 0 &&
      Other->getType()->isUnsignedIntegerOrEnumerationType())
    TautologicalTypeCompare = true;

  // If this is a comparison to an enum constant, include that
  // constant in the diagnostic.
  const EnumConstantDecl *ED = nullptr;
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant))
    ED = dyn_cast<EnumConstantDecl>(DR->getDecl());

  // Should be enough for uint128 (39 decimal digits)
  SmallString<64> PrettySourceValue;
  llvm::raw_svector_ostream OS(PrettySourceValue);
  if (ED) {
    OS << '\'' << *ED << "' (" << Value << ")";
  } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>(
               Constant->IgnoreParenImpCasts())) {
    OS << (BL->getValue() ? "YES" : "NO");
  } else {
    OS << Value;
  }

  if (!TautologicalTypeCompare) {
    S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range)
        << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative
        << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
    return true;
  }

  if (IsObjCSignedCharBool) {
    S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                          S.PDiag(diag::warn_tautological_compare_objc_bool)
                              << OS.str() << *Result);
    return true;
  }

  // FIXME: We use a somewhat different formatting for the in-range cases and
  // cases involving boolean values for historical reasons. We should pick a
  // consistent way of presenting these diagnostics.
  if (!InRange || Other->isKnownToHaveBooleanValue()) {
    S.DiagRuntimeBehavior(
        E->getOperatorLoc(), E,
        S.PDiag(!InRange ? diag::warn_out_of_range_compare
                         : diag::warn_tautological_bool_compare)
            << OS.str() << classifyConstantValue(Constant) << OtherT
            << OtherIsBooleanDespiteType << *Result
            << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
  } else {
    bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy;
    unsigned Diag =
        (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
            ? (HasEnumType(OriginalOther)
                   ? diag::warn_unsigned_enum_always_true_comparison
                   : IsCharTy ? diag::warn_unsigned_char_always_true_comparison
                              : diag::warn_unsigned_always_true_comparison)
            : diag::warn_tautological_constant_compare;

    S.Diag(E->getOperatorLoc(), Diag)
        << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
  }

  return true;
}
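// Illustrative example (not from the original source): for an unsigned
// operand such as
//   unsigned U = ...;
//   if (U >= 0) { ... }
// no value of U can make the comparison false, so the "always true" family of
// diagnostics above fires; similar reasoning drives the out-of-range and
// tautological-bool cases.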
/// Analyze the operands of the given comparison.  Implements the
/// fallback case from AnalyzeComparison.
static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
}
/// Implements -Wsign-compare.
///
/// \param E the binary operator to check for warnings
static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
  // The type the comparison is being performed in.
  QualType T = E->getLHS()->getType();

  // Only analyze comparison operators where both sides have been converted to
  // the same type.
  if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType()))
    return AnalyzeImpConvsInComparison(S, E);

  // Don't analyze value-dependent comparisons directly.
  if (E->isValueDependent())
    return AnalyzeImpConvsInComparison(S, E);

  Expr *LHS = E->getLHS();
  Expr *RHS = E->getRHS();

  if (T->isIntegralType(S.Context)) {
    std::optional<llvm::APSInt> RHSValue =
        RHS->getIntegerConstantExpr(S.Context);
    std::optional<llvm::APSInt> LHSValue =
        LHS->getIntegerConstantExpr(S.Context);

    // We don't care about expressions whose result is a constant.
    if (RHSValue && LHSValue)
      return AnalyzeImpConvsInComparison(S, E);

    // We only care about expressions where just one side is literal
    if ((bool)RHSValue ^ (bool)LHSValue) {
      // Is the constant on the RHS or LHS?
      const bool RhsConstant = (bool)RHSValue;
      Expr *Const = RhsConstant ? RHS : LHS;
      Expr *Other = RhsConstant ? LHS : RHS;
      const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue;

      // Check whether an integer constant comparison results in a value
      // of 'true' or 'false'.
      if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant))
        return AnalyzeImpConvsInComparison(S, E);
    }
  }

  if (!T->hasUnsignedIntegerRepresentation()) {
    // We don't do anything special if this isn't an unsigned integral
    // comparison:  we're only interested in integral comparisons, and
    // signed comparisons only happen in cases we don't care to warn about.
    return AnalyzeImpConvsInComparison(S, E);
  }

  LHS = LHS->IgnoreParenImpCasts();
  RHS = RHS->IgnoreParenImpCasts();

  if (!S.getLangOpts().CPlusPlus) {
    // Avoid warning about comparison of integers with different signs when
    // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
    // the type of `E`.
    if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
      LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
    if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
      RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
  }

  // Check to see if one of the (unmodified) operands is of different
  // signedness.
  Expr *signedOperand, *unsignedOperand;
  if (LHS->getType()->hasSignedIntegerRepresentation()) {
    assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
           "unsigned comparison between two signed integer expressions?");
    signedOperand = LHS;
    unsignedOperand = RHS;
  } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
    signedOperand = RHS;
    unsignedOperand = LHS;
  } else {
    return AnalyzeImpConvsInComparison(S, E);
  }

  // Otherwise, calculate the effective range of the signed operand.
  IntRange signedRange = GetExprRange(
      S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true);

  // Go ahead and analyze implicit conversions in the operands.  Note
  // that we skip the implicit conversions on both sides.
  AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
  AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());

  // If the signed range is non-negative, -Wsign-compare won't fire.
  if (signedRange.NonNegative)
    return;

  // For (in)equality comparisons, if the unsigned operand is a
  // constant which cannot collide with a overflowed signed operand,
  // then reinterpreting the signed operand as unsigned will not
  // change the result of the comparison.
  if (E->isEqualityOp()) {
    unsigned comparisonWidth = S.Context.getIntWidth(T);
    IntRange unsignedRange =
        GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(),
                     /*Approximate*/ true);

    // We should never be unable to prove that the unsigned operand is
    // non-negative.
    assert(unsignedRange.NonNegative && "unsigned range includes negative?");

    if (unsignedRange.Width < comparisonWidth)
      return;
  }

  S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                        S.PDiag(diag::warn_mixed_sign_comparison)
                            << LHS->getType() << RHS->getType()
                            << LHS->getSourceRange() << RHS->getSourceRange());
}
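// Illustrative example (not from the original source): -Wsign-compare fires
// for mixed-sign comparisons where the signed side may be negative, e.g.
//   int I = ...; unsigned U = ...;
//   if (I < U) { ... }   // I is implicitly converted to unsigned
// but stays quiet when the signed operand is provably non-negative, or when an
// equality test against a narrow unsigned constant cannot be affected.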
/// Analyzes an attempt to assign the given value to a bitfield.
///
/// Returns true if there was something fishy about the attempt.
static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
                                      SourceLocation InitLoc) {
  assert(Bitfield->isBitField());
  if (Bitfield->isInvalidDecl())
    return false;

  // White-list bool bitfields.
  QualType BitfieldType = Bitfield->getType();
  if (BitfieldType->isBooleanType())
    return false;

  if (BitfieldType->isEnumeralType()) {
    EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
    // If the underlying enum type was not explicitly specified as an unsigned
    // type and the enum contain only positive values, MSVC++ will cause an
    // inconsistency by storing this as a signed type.
    if (S.getLangOpts().CPlusPlus11 &&
        !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
        BitfieldEnumDecl->getNumPositiveBits() > 0 &&
        BitfieldEnumDecl->getNumNegativeBits() == 0) {
      S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
          << BitfieldEnumDecl;
    }
  }

  // Ignore value- or type-dependent expressions.
  if (Bitfield->getBitWidth()->isValueDependent() ||
      Bitfield->getBitWidth()->isTypeDependent() ||
      Init->isValueDependent() ||
      Init->isTypeDependent())
    return false;

  Expr *OriginalInit = Init->IgnoreParenImpCasts();
  unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);

  Expr::EvalResult Result;
  if (!OriginalInit->EvaluateAsInt(Result, S.Context,
                                   Expr::SE_AllowSideEffects)) {
    // The RHS is not constant.  If the RHS has an enum type, make sure the
    // bitfield is wide enough to hold all the values of the enum without
    // truncation.
    if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
      EnumDecl *ED = EnumTy->getDecl();
      bool SignedBitfield = BitfieldType->isSignedIntegerType();

      // Enum types are implicitly signed on Windows, so check if there are any
      // negative enumerators to see if the enum was intended to be signed or
      // not.
      bool SignedEnum = ED->getNumNegativeBits() > 0;

      // Check for surprising sign changes when assigning enum values to a
      // bitfield of different signedness.  If the bitfield is signed and we
      // have exactly the right number of bits to store this unsigned enum,
      // suggest changing the enum to an unsigned type. This typically happens
      // on Windows where unfixed enums always use an underlying type of 'int'.
      unsigned DiagID = 0;
      if (SignedEnum && !SignedBitfield) {
        DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
      } else if (SignedBitfield && !SignedEnum &&
                 ED->getNumPositiveBits() == FieldWidth) {
        DiagID = diag::warn_signed_bitfield_enum_conversion;
      }

      if (DiagID) {
        S.Diag(InitLoc, DiagID) << Bitfield << ED;
        TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
        SourceRange TypeRange =
            TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
        S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
            << SignedEnum << TypeRange;
      }

      // Compute the required bitwidth. If the enum has negative values, we
      // need one more bit than the normal number of positive bits to represent
      // the sign bit.
      unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1,
                                                  ED->getNumNegativeBits())
                                       : ED->getNumPositiveBits();

      // Check the bitwidth.
      if (BitsNeeded > FieldWidth) {
        Expr *WidthExpr = Bitfield->getBitWidth();
        S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum)
            << Bitfield << ED;
        S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield)
            << BitsNeeded << ED << WidthExpr->getSourceRange();
      }
    }

    return false;
  }

  llvm::APSInt Value = Result.Val.getInt();

  unsigned OriginalWidth = Value.getBitWidth();

  // In C, the macro 'true' from stdbool.h will evaluate to '1'; To reduce
  // false positives where the user is demonstrating they intend to use the
  // bit-field as a Boolean, check to see if the value is 1 and we're assigning
  // to a one-bit bit-field to see if the value came from a macro named 'true'.
  bool OneAssignedToOneBitBitfield = FieldWidth == 1 && Value == 1;
  if (OneAssignedToOneBitBitfield && !S.LangOpts.CPlusPlus) {
    SourceLocation MaybeMacroLoc = OriginalInit->getBeginLoc();
    if (S.SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
        S.findMacroSpelling(MaybeMacroLoc, "true"))
      return false;
  }

  if (!Value.isSigned() || Value.isNegative())
    if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit))
      if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
        OriginalWidth = Value.getSignificantBits();

  if (OriginalWidth <= FieldWidth)
    return false;

  // Compute the value which the bitfield will contain.
  llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
  TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType());

  // Check whether the stored value is equal to the original value.
  TruncatedValue = TruncatedValue.extend(OriginalWidth);
  if (llvm::APSInt::isSameValue(Value, TruncatedValue))
    return false;

  std::string PrettyValue = toString(Value, 10);
  std::string PrettyTrunc = toString(TruncatedValue, 10);

  S.Diag(InitLoc, OneAssignedToOneBitBitfield
                      ? diag::warn_impcast_single_bit_bitield_precision_constant
                      : diag::warn_impcast_bitfield_precision_constant)
      << PrettyValue << PrettyTrunc << OriginalInit->getType()
      << Init->getSourceRange();

  return true;
}
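// Illustrative example (not from the original source), assuming a 3-bit
// signed bit-field:
//   struct S { int B : 3; };
//   S X; X.B = 12;   // the stored value becomes -4, so the truncation warning fires
// while assigning 1 to a one-bit bit-field through the C macro 'true' is
// deliberately left alone.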
/// Analyze the given simple or compound assignment for warning-worthy
/// operations.
static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
  // Just recurse on the LHS.
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());

  // We want to recurse on the RHS as normal unless we're assigning to
  // a bitfield.
  if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) {
    if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(),
                                  E->getOperatorLoc())) {
      // Recurse, ignoring any implicit conversions on the RHS.
      return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(),
                                        E->getOperatorLoc());
    }
  }

  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());

  // Diagnose implicitly sequentially-consistent atomic assignment.
  if (E->getLHS()->getType()->isAtomicType())
    S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
}
/// Diagnose an implicit cast;  purely a helper for CheckImplicitConversion.
static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
                            SourceLocation CContext, unsigned diag,
                            bool pruneControlFlow = false) {
  if (pruneControlFlow) {
    S.DiagRuntimeBehavior(E->getExprLoc(), E,
                          S.PDiag(diag)
                              << SourceType << T << E->getSourceRange()
                              << SourceRange(CContext));
    return;
  }
  S.Diag(E->getExprLoc(), diag)
      << SourceType << T << E->getSourceRange() << SourceRange(CContext);
}

/// Diagnose an implicit cast;  purely a helper for CheckImplicitConversion.
static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
                            SourceLocation CContext,
                            unsigned diag, bool pruneControlFlow = false) {
  DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow);
}
static bool isObjCSignedCharBool(Sema &S, QualType Ty) {
  return Ty->isSpecificBuiltinType(BuiltinType::SChar) &&
         S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty);
}
static void adornObjCBoolConversionDiagWithTernaryFixit(
    Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) {
  Expr *Ignored = SourceExpr->IgnoreImplicit();
  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored))
    Ignored = OVE->getSourceExpr();
  bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) ||
                     isa<BinaryOperator>(Ignored) ||
                     isa<CXXOperatorCallExpr>(Ignored);
  SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc());
  if (NeedsParens)
    Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(")
            << FixItHint::CreateInsertion(EndLoc, ")");
  Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO");
}
/// Diagnose an implicit cast from a floating point value to an integer value.
static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
                                    SourceLocation CContext) {
  const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool);
  const bool PruneWarnings = S.inTemplateInstantiation();

  Expr *InnerE = E->IgnoreParenImpCasts();
  // We also want to warn on, e.g., "int i = -1.234"
  if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
    if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
      InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();

  const bool IsLiteral =
      isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE);

  llvm::APFloat Value(0.0);
  bool IsConstant =
      E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects);
  if (!IsConstant) {
    if (isObjCSignedCharBool(S, T)) {
      return adornObjCBoolConversionDiagWithTernaryFixit(
          S, E,
          S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool)
              << E->getType());
    }

    return DiagnoseImpCast(S, E, T, CContext,
                           diag::warn_impcast_float_integer, PruneWarnings);
  }

  bool isExact = false;

  llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
                            T->hasUnsignedIntegerRepresentation());
  llvm::APFloat::opStatus Result = Value.convertToInteger(
      IntegerValue, llvm::APFloat::rmTowardZero, &isExact);

  // FIXME: Force the precision of the source value down so we don't print
  // digits which are usually useless (we don't really care here if we
  // truncate a digit by accident in edge cases).  Ideally, APFloat::toString
  // would automatically print the shortest representation, but it's a bit
  // tricky to implement.
  SmallString<16> PrettySourceValue;
  unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
  precision = (precision * 59 + 195) / 196;
  Value.toString(PrettySourceValue, precision);

  if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) {
    return adornObjCBoolConversionDiagWithTernaryFixit(
        S, E,
        S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool)
            << PrettySourceValue);
  }

  if (Result == llvm::APFloat::opOK && isExact) {
    if (IsLiteral) return;
    return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
                           PruneWarnings);
  }

  // Conversion of a floating-point value to a non-bool integer where the
  // integral part cannot be represented by the integer type is undefined.
  if (!IsBool && Result == llvm::APFloat::opInvalidOp)
    return DiagnoseImpCast(
        S, E, T, CContext,
        IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
                  : diag::warn_impcast_float_to_integer_out_of_range,
        PruneWarnings);

  unsigned DiagID = 0;
  if (IsLiteral) {
    // Warn on floating point literal to integer.
    DiagID = diag::warn_impcast_literal_float_to_integer;
  } else if (IntegerValue == 0) {
    if (Value.isZero()) {  // Skip -0.0 to 0 conversion.
      return DiagnoseImpCast(S, E, T, CContext,
                             diag::warn_impcast_float_integer, PruneWarnings);
    }
    // Warn on non-zero to zero conversion.
    DiagID = diag::warn_impcast_float_to_integer_zero;
  } else {
    if (IntegerValue.isUnsigned()) {
      if (!IntegerValue.isMaxValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    } else {  // IntegerValue.isSigned()
      if (!IntegerValue.isMaxSignedValue() &&
          !IntegerValue.isMinSignedValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    }
    // Warn on evaluatable floating point expression to integer conversion.
    DiagID = diag::warn_impcast_float_to_integer;
  }

  SmallString<16> PrettyTargetValue;
  if (IsBool)
    PrettyTargetValue = Value.isZero() ? "false" : "true";
  else
    IntegerValue.toString(PrettyTargetValue);

  if (PruneWarnings) {
    S.DiagRuntimeBehavior(E->getExprLoc(), E,
                          S.PDiag(DiagID)
                              << E->getType() << T.getUnqualifiedType()
                              << PrettySourceValue << PrettyTargetValue
                              << E->getSourceRange() << SourceRange(CContext));
  } else {
    S.Diag(E->getExprLoc(), DiagID)
        << E->getType() << T.getUnqualifiedType() << PrettySourceValue
        << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
  }
}
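// Illustrative examples (not from the original source):
//   int I = 0.5;    // literal float-to-integer warning; the stored value is 0
//   bool B = 2.5;   // boolean target: the pretty-printed result is "true"
//   char C = 1e10;  // value cannot be represented: out-of-range diagnostic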
/// Analyze the given compound assignment for the possible losing of
/// floating-point precision.
static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
  assert(isa<CompoundAssignOperator>(E) &&
         "Must be compound assignment operation");
  // Recurse on the LHS and RHS in here
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());

  if (E->getLHS()->getType()->isAtomicType())
    S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);

  // Now check the outermost expression
  const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
  const auto *RBT = cast<CompoundAssignOperator>(E)
                        ->getComputationResultType()
                        ->getAs<BuiltinType>();

  // The below checks assume source is floating point.
  if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;

  // If source is floating point but target is an integer.
  if (ResultBT->isInteger())
    return DiagnoseImpCast(S, E, E->getRHS()->getType(),
                           E->getLHS()->getType(), E->getExprLoc(),
                           diag::warn_impcast_float_integer);

  if (!ResultBT->isFloatingPoint())
    return;

  // If both source and target are floating points, warn about losing
  // precision.
  int Order = S.getASTContext().getFloatingTypeSemanticOrder(
      QualType(ResultBT, 0), QualType(RBT, 0));
  if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
    // warn about dropping FP rank.
    DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(),
                    E->getOperatorLoc(),
                    diag::warn_impcast_float_result_precision);
}
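// Illustrative example (not from the original source):
//   float F = 0; double D = ...;
//   F += D;   // the computation happens in double and is truncated back to
//             // float, so the FP result-precision warning above is emitted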
static std::string PrettyPrintInRange(const llvm::APSInt &Value,
                                      IntRange Range) {
  if (!Range.Width) return "0";

  llvm::APSInt ValueInRange = Value;
  ValueInRange.setIsSigned(!Range.NonNegative);
  ValueInRange = ValueInRange.trunc(Range.Width);
  return toString(ValueInRange, 10);
}
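// Illustrative note (not from the original source): a constant 300 printed
// against an 8-bit unsigned target range comes out as "44", i.e. the value
// that actually lands in the narrower type.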
static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
  if (!isa<ImplicitCastExpr>(Ex))
    return false;

  Expr *InnerE = Ex->IgnoreParenImpCasts();
  const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
  const Type *Source =
      S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
  if (Target->isDependentType())
    return false;

  const BuiltinType *FloatCandidateBT =
      dyn_cast<BuiltinType>(ToBool ? Source : Target);
  const Type *BoolCandidateType = ToBool ? Target : Source;

  return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
          FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
}
static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
                                             SourceLocation CC) {
  unsigned NumArgs = TheCall->getNumArgs();
  for (unsigned i = 0; i < NumArgs; ++i) {
    Expr *CurrA = TheCall->getArg(i);
    if (!IsImplicitBoolFloatConversion(S, CurrA, true))
      continue;

    bool IsSwapped = ((i > 0) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
    IsSwapped |= ((i < (NumArgs - 1)) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
    if (IsSwapped) {
      // Warn on this floating-point to bool conversion.
      DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
                      CurrA->getType(), CC,
                      diag::warn_impcast_floating_point_to_bool);
    }
  }
}
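// Illustrative example (not from the original source): for a hypothetical call
//   void f(float, bool);
//   f(IsEnabled, Threshold);   // arguments accidentally swapped
// a float narrowed to bool sits next to a bool widened to float, which is the
// pattern the loop above diagnoses.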
static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
                                   SourceLocation CC) {
  if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
                        E->getExprLoc()))
    return;

  // Don't warn on functions which have return type nullptr_t.
  if (isa<CallExpr>(E))
    return;

  // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
  const Expr *NewE = E->IgnoreParenImpCasts();
  bool IsGNUNullExpr = isa<GNUNullExpr>(NewE);
  bool HasNullPtrType = NewE->getType()->isNullPtrType();
  if (!IsGNUNullExpr && !HasNullPtrType)
    return;

  // Return if target type is a safe conversion.
  if (T->isAnyPointerType() || T->isBlockPointerType() ||
      T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
    return;

  SourceLocation Loc = E->getSourceRange().getBegin();

  // Venture through the macro stacks to get to the source of macro arguments.
  // The new location is a better location than the complete location that was
  // passed in.
  Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
  CC = S.SourceMgr.getTopMacroCallerLoc(CC);

  // __null is usually wrapped in a macro.  Go up a macro if that is the case.
  if (IsGNUNullExpr && Loc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
        Loc, S.SourceMgr, S.getLangOpts());
    if (MacroName == "NULL")
      Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
  }

  // Only warn if the null and context location are in the same macro
  // expansion.
  if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
    return;

  S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
      << HasNullPtrType << T << SourceRange(CC)
      << FixItHint::CreateReplacement(Loc,
                                      S.getFixItZeroLiteralForType(T, Loc));
}
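// Illustrative example (not from the original source):
//   int Fd = NULL;   // NULL used as an integer; the diagnostic above offers a
//                    // fix-it that replaces NULL with 0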
static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
                                  ObjCArrayLiteral *ArrayLiteral);

static void
checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
                           ObjCDictionaryLiteral *DictionaryLiteral);
/// Check a single element within a collection literal against the
/// target element type.
static void checkObjCCollectionLiteralElement(Sema &S,
                                              QualType TargetElementType,
                                              Expr *Element,
                                              unsigned ElementKind) {
  // Skip a bitcast to 'id' or qualified 'id'.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
    if (ICE->getCastKind() == CK_BitCast &&
        ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
      Element = ICE->getSubExpr();
  }

  QualType ElementType = Element->getType();
  ExprResult ElementResult(Element);
  if (ElementType->getAs<ObjCObjectPointerType>() &&
      S.CheckSingleAssignmentConstraints(TargetElementType,
                                         ElementResult,
                                         false, false)
          != Sema::Compatible) {
    S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
        << ElementType << ElementKind << TargetElementType
        << Element->getSourceRange();
  }

  if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
    checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
  else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
    checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
}
/// Check an Objective-C array literal being converted to the given
/// target type.
static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
                                  ObjCArrayLiteral *ArrayLiteral) {
  if (!S.NSArrayDecl)
    return;

  const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
  if (!TargetObjCPtr)
    return;

  if (TargetObjCPtr->isUnspecialized() ||
      TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
        != S.NSArrayDecl->getCanonicalDecl())
    return;

  auto TypeArgs = TargetObjCPtr->getTypeArgs();
  if (TypeArgs.size() != 1)
    return;

  QualType TargetElementType = TypeArgs[0];
  for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) {
    checkObjCCollectionLiteralElement(S, TargetElementType,
                                      ArrayLiteral->getElement(I),
                                      0);
  }
}
/// Check an Objective-C dictionary literal being converted to the given
/// target type.
static void
checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
                           ObjCDictionaryLiteral *DictionaryLiteral) {
  if (!S.NSDictionaryDecl)
    return;

  const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
  if (!TargetObjCPtr)
    return;

  if (TargetObjCPtr->isUnspecialized() ||
      TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
        != S.NSDictionaryDecl->getCanonicalDecl())
    return;

  auto TypeArgs = TargetObjCPtr->getTypeArgs();
  if (TypeArgs.size() != 2)
    return;

  QualType TargetKeyType = TypeArgs[0];
  QualType TargetObjectType = TypeArgs[1];
  for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
    auto Element = DictionaryLiteral->getKeyValueElement(I);
    checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
    checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
  }
}
// Helper function to filter out cases for constant width constant conversion.
// Don't warn on char array initialization or for non-decimal values.
static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
                                          SourceLocation CC) {
  // If initializing from a constant, and the constant starts with '0',
  // then it is a binary, octal, or hexadecimal.  Allow these constants
  // to fill all the bits, even if there is a sign change.
  if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
    const char FirstLiteralCharacter =
        S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
    if (FirstLiteralCharacter == '0')
      return false;
  }

  // If the CC location points to a '{', and the type is char, then assume
  // it is an array initialization.
  if (CC.isValid() && T->isCharType()) {
    const char FirstContextCharacter =
        S.getSourceManager().getCharacterData(CC)[0];
    if (FirstContextCharacter == '{')
      return false;
  }

  return true;
}
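// Illustrative note (not from the original source): "signed char C = 0xFF;"
// is filtered out because the literal starts with '0' (hex/octal/binary),
// while the equivalent decimal "signed char C = 255;" remains eligible for the
// same-width constant-conversion warning.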
static const IntegerLiteral *getIntegerLiteral(Expr *E) {
  const auto *IL = dyn_cast<IntegerLiteral>(E);
  if (!IL) {
    if (auto *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Minus)
        return dyn_cast<IntegerLiteral>(UO->getSubExpr());
    }
  }

  return IL;
}
static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
  E = E->IgnoreParenImpCasts();
  SourceLocation ExprLoc = E->getExprLoc();

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    BinaryOperator::Opcode Opc = BO->getOpcode();
    Expr::EvalResult Result;
    // Do not diagnose unsigned shifts.
    if (Opc == BO_Shl) {
      const auto *LHS = getIntegerLiteral(BO->getLHS());
      const auto *RHS = getIntegerLiteral(BO->getRHS());
      if (LHS && LHS->getValue() == 0)
        S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
      else if (!E->isValueDependent() && LHS && RHS &&
               RHS->getValue().isNonNegative() &&
               E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
        S.Diag(ExprLoc, diag::warn_left_shift_always)
            << (Result.Val.getInt() != 0);
      else if (E->getType()->isSignedIntegerType())
        S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
    }
  }

  if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
    const auto *LHS = getIntegerLiteral(CO->getTrueExpr());
    const auto *RHS = getIntegerLiteral(CO->getFalseExpr());
    if (!LHS || !RHS)
      return;
    if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
        (RHS->getValue() == 0 || RHS->getValue() == 1))
      // Do not diagnose common idioms.
      return;
    if (LHS->getValue() != 0 && RHS->getValue() != 0)
      S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
  }
}
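// Illustrative examples (not from the original source):
//   if (X << 4) { ... }      // signed left shift in a boolean context is flagged
//   bool B = Cond ? 2 : 4;   // both arms non-zero: the condition is always true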
static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
                                    SourceLocation CC,
                                    bool *ICContext = nullptr,
                                    bool IsListInit = false) {
  if (E->isTypeDependent() || E->isValueDependent()) return;

  const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
  const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
  if (Source == Target) return;
  if (Target->isDependentType()) return;

  // If the conversion context location is invalid don't complain. We also
  // don't want to emit a warning if the issue occurs from the expansion of
  // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
  // delay this check as long as possible. Once we detect we are in that
  // scenario, we just return.
  if (CC.isInvalid())
    return;

  if (Source->isAtomicType())
    S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);

  // Diagnose implicit casts to bool.
  if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
    if (isa<StringLiteral>(E))
      // Warn on string literal to bool.  Checks for string literals in logical
      // and expressions, for instance, assert(0 && "error here"), are
      // prevented by a check in AnalyzeImplicitConversions().
      return DiagnoseImpCast(S, E, T, CC,
                             diag::warn_impcast_string_literal_to_bool);
    if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
        isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
      // This covers the literal expressions that evaluate to Objective-C
      // objects.
      return DiagnoseImpCast(S, E, T, CC,
                             diag::warn_impcast_objective_c_literal_to_bool);
    }
    if (Source->isPointerType() || Source->canDecayToPointerType()) {
      // Warn on pointer to bool conversion that is always true.
      S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
                                     SourceRange(CC));
    }
  }

  // If we're converting a constant to an ObjC BOOL on a platform where BOOL
  // is a typedef for signed char (macOS), then that constant value has to be 1
  // or 0.
  if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) {
    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, S.getASTContext(),
                         Expr::SE_AllowSideEffects)) {
      if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) {
        adornObjCBoolConversionDiagWithTernaryFixit(
            S, E,
            S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool)
                << toString(Result.Val.getInt(), 10));
      }
      return;
    }
  }
  // Check implicit casts from Objective-C collection literals to specialized
  // collection types, e.g., NSArray<NSString *> *.
  if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E))
    checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral);
  else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E))
    checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral);

  // Strip vector types.
  if (isa<VectorType>(Source)) {
    if (Target->isVLSTBuiltinType() &&
        (S.Context.areCompatibleSveTypes(QualType(Target, 0),
                                         QualType(Source, 0)) ||
         S.Context.areLaxCompatibleSveTypes(QualType(Target, 0),
                                            QualType(Source, 0))))
      return;

    if (Target->isRVVVLSBuiltinType() &&
        (S.Context.areCompatibleRVVTypes(QualType(Target, 0),
                                         QualType(Source, 0)) ||
         S.Context.areLaxCompatibleRVVTypes(QualType(Target, 0),
                                            QualType(Source, 0))))
      return;

    if (!isa<VectorType>(Target)) {
      if (S.SourceMgr.isInSystemMacro(CC))
        return;
      return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
    }

    // If the vector cast is cast between two vectors of the same size, it is
    // a bitcast, not a conversion.
    if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
      return;

    Source = cast<VectorType>(Source)->getElementType().getTypePtr();
    Target = cast<VectorType>(Target)->getElementType().getTypePtr();
  }
  if (auto VecTy = dyn_cast<VectorType>(Target))
    Target = VecTy->getElementType().getTypePtr();

  // Strip complex types.
  if (isa<ComplexType>(Source)) {
    if (!isa<ComplexType>(Target)) {
      if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType())
        return;

      return DiagnoseImpCast(S, E, T, CC,
                             S.getLangOpts().CPlusPlus
                                 ? diag::err_impcast_complex_scalar
                                 : diag::warn_impcast_complex_scalar);
    }

    Source = cast<ComplexType>(Source)->getElementType().getTypePtr();
    Target = cast<ComplexType>(Target)->getElementType().getTypePtr();
  }

  const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
  const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);

  // Strip SVE vector types
  if (SourceBT && SourceBT->isVLSTBuiltinType()) {
    // Need the original target type for vector type checks
    const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr();
    // Handle conversion from scalable to fixed when msve-vector-bits is
    // specified
    if (S.Context.areCompatibleSveTypes(QualType(OriginalTarget, 0),
                                        QualType(Source, 0)) ||
        S.Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0),
                                           QualType(Source, 0)))
      return;

    // If the vector cast is cast between two vectors of the same size, it is
    // a bitcast, not a conversion.
    if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
      return;

    Source = SourceBT->getSveEltType(S.Context).getTypePtr();
  }

  if (TargetBT && TargetBT->isVLSTBuiltinType())
    Target = TargetBT->getSveEltType(S.Context).getTypePtr();
  // If the source is floating point...
  if (SourceBT && SourceBT->isFloatingPoint()) {
    // ...and the target is floating point...
    if (TargetBT && TargetBT->isFloatingPoint()) {
      // ...then warn if we're dropping FP rank.

      int Order = S.getASTContext().getFloatingTypeSemanticOrder(
          QualType(SourceBT, 0), QualType(TargetBT, 0));
      if (Order > 0) {
        // Don't warn about float constants that are precisely
        // representable in the target type.
        Expr::EvalResult result;
        if (E->EvaluateAsRValue(result, S.Context)) {
          // Value might be a float, a float vector, or a float complex.
          if (IsSameFloatAfterCast(result.Val,
                   S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
                   S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
            return;
        }

        if (S.SourceMgr.isInSystemMacro(CC))
          return;

        DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
      }
      // ... or possibly if we're increasing rank, too
      else if (Order < 0) {
        if (S.SourceMgr.isInSystemMacro(CC))
          return;

        DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
      }
      return;
    }

    // If the target is integral, always warn.
    if (TargetBT && TargetBT->isInteger()) {
      if (S.SourceMgr.isInSystemMacro(CC))
        return;

      DiagnoseFloatingImpCast(S, E, T, CC);
    }

    // Detect the case where a call result is converted from floating-point to
    // to bool, and the final argument to the call is converted from bool, to
    // discover this typo:
    //
    //    bool b = fabs(x < 1.0);  // should be "bool b = fabs(x) < 1.0;"
    //
    // FIXME: This is an incredibly special case; is there some more general
    // way to detect this class of misplaced-parentheses bug?
    if (Target->isBooleanType() && isa<CallExpr>(E)) {
      // Check last argument of function call to see if it is an
      // implicit cast from a type matching the type the result
      // is being cast to.
      CallExpr *CEx = cast<CallExpr>(E);
      if (unsigned NumArgs = CEx->getNumArgs()) {
        Expr *LastA = CEx->getArg(NumArgs - 1);
        Expr *InnerE = LastA->IgnoreParenImpCasts();
        if (isa<ImplicitCastExpr>(LastA) &&
            InnerE->getType()->isBooleanType()) {
          // Warn on this floating-point to bool conversion
          DiagnoseImpCast(S, E, T, CC,
                          diag::warn_impcast_floating_point_to_bool);
        }
      }
    }
    return;
  }

  // Valid casts involving fixed point types should be accounted for here.
  if (Source->isFixedPointType()) {
    if (Target->isUnsaturatedFixedPointType()) {
      Expr::EvalResult Result;
      if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects,
                                  S.isConstantEvaluated())) {
        llvm::APFixedPoint Value = Result.Val.getFixedPoint();
        llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T);
        llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T);
        if (Value > MaxVal || Value < MinVal) {
          S.DiagRuntimeBehavior(E->getExprLoc(), E,
                                S.PDiag(diag::warn_impcast_fixed_point_range)
                                    << Value.toString() << T
                                    << E->getSourceRange()
                                    << clang::SourceRange(CC));
          return;
        }
      }
    } else if (Target->isIntegerType()) {
      Expr::EvalResult Result;
      if (!S.isConstantEvaluated() &&
          E->EvaluateAsFixedPoint(Result, S.Context,
                                  Expr::SE_AllowSideEffects)) {
        llvm::APFixedPoint FXResult = Result.Val.getFixedPoint();

        bool Overflowed;
        llvm::APSInt IntResult = FXResult.convertToInt(
            S.Context.getIntWidth(T),
            Target->isSignedIntegerOrEnumerationType(), &Overflowed);

        if (Overflowed) {
          S.DiagRuntimeBehavior(E->getExprLoc(), E,
                                S.PDiag(diag::warn_impcast_fixed_point_range)
                                    << FXResult.toString() << T
                                    << E->getSourceRange()
                                    << clang::SourceRange(CC));
          return;
        }
      }
    }
  } else if (Target->isUnsaturatedFixedPointType()) {
    if (Source->isIntegerType()) {
      Expr::EvalResult Result;
      if (!S.isConstantEvaluated() &&
          E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) {
        llvm::APSInt Value = Result.Val.getInt();

        bool Overflowed;
        llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue(
            Value, S.Context.getFixedPointSemantics(T), &Overflowed);

        if (Overflowed) {
          S.DiagRuntimeBehavior(E->getExprLoc(), E,
                                S.PDiag(diag::warn_impcast_fixed_point_range)
                                    << toString(Value, /*Radix=*/10) << T
                                    << E->getSourceRange()
                                    << clang::SourceRange(CC));
          return;
        }
      }
    }
  }
  // If we are casting an integer type to a floating point type without
  // initialization-list syntax, we might lose accuracy if the floating
  // point type has a narrower significand than the integer type.
  if (SourceBT && TargetBT && SourceBT->isIntegerType() &&
      TargetBT->isFloatingType() && !IsListInit) {
    // Determine the number of precision bits in the source integer type.
    IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(),
                                        /*Approximate*/ true);
    unsigned int SourcePrecision = SourceRange.Width;

    // Determine the number of precision bits in the
    // target floating point type.
    unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision(
        S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)));

    if (SourcePrecision > 0 && TargetPrecision > 0 &&
        SourcePrecision > TargetPrecision) {

      if (std::optional<llvm::APSInt> SourceInt =
              E->getIntegerConstantExpr(S.Context)) {
        // If the source integer is a constant, convert it to the target
        // floating point type. Issue a warning if the value changes
        // during the whole conversion.
        llvm::APFloat TargetFloatValue(
            S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)));
        llvm::APFloat::opStatus ConversionStatus =
            TargetFloatValue.convertFromAPInt(
                *SourceInt, SourceBT->isSignedInteger(),
                llvm::APFloat::rmNearestTiesToEven);

        if (ConversionStatus != llvm::APFloat::opOK) {
          SmallString<32> PrettySourceValue;
          SourceInt->toString(PrettySourceValue, 10);
          SmallString<32> PrettyTargetValue;
          TargetFloatValue.toString(PrettyTargetValue, TargetPrecision);

          S.DiagRuntimeBehavior(
              E->getExprLoc(), E,
              S.PDiag(diag::warn_impcast_integer_float_precision_constant)
                  << PrettySourceValue << PrettyTargetValue << E->getType() << T
                  << E->getSourceRange() << clang::SourceRange(CC));
        }
      } else {
        // Otherwise, the implicit conversion may lose precision.
        DiagnoseImpCast(S, E, T, CC,
                        diag::warn_impcast_integer_float_precision);
      }
    }
    return;
  }

  DiagnoseNullConversion(S, E, T, CC);

  S.DiscardMisalignedMemberAddress(Target, E);

  if (Target->isBooleanType())
    DiagnoseIntInBoolContext(S, E);

  if (!Source->isIntegerType() || !Target->isIntegerType())
    return;

  // TODO: remove this early return once the false positives for constant->bool
  // in templates, macros, etc, are reduced or removed.
  if (Target->isSpecificBuiltinType(BuiltinType::Bool))
    return;

  if (isObjCSignedCharBool(S, T) && !Source->isCharType() &&
      !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) {
    return adornObjCBoolConversionDiagWithTernaryFixit(
        S, E,
        S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool)
            << E->getType());
  }

  IntRange SourceTypeRange =
      IntRange::forTargetOfCanonicalType(S.Context, Source);
  IntRange LikelySourceRange =
      GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true);
  IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);

  if (LikelySourceRange.Width > TargetRange.Width) {
    // If the source is a constant, use a default-on diagnostic.
    // TODO: this should happen for bitfield stores, too.
    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects,
                         S.isConstantEvaluated())) {
      llvm::APSInt Value(32);
      Value = Result.Val.getInt();

      if (S.SourceMgr.isInSystemMacro(CC))
        return;

      std::string PrettySourceValue = toString(Value, 10);
      std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);

      S.DiagRuntimeBehavior(
          E->getExprLoc(), E,
          S.PDiag(diag::warn_impcast_integer_precision_constant)
              << PrettySourceValue << PrettyTargetValue << E->getType() << T
              << E->getSourceRange() << SourceRange(CC));
      return;
    }

    // People want to build with -Wshorten-64-to-32 and not -Wconversion.
    if (S.SourceMgr.isInSystemMacro(CC))
      return;

    if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
      return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
                             /* pruneControlFlow */ true);
    return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
  }

  if (TargetRange.Width > SourceTypeRange.Width) {
    if (auto *UO = dyn_cast<UnaryOperator>(E))
      if (UO->getOpcode() == UO_Minus)
        if (Source->isUnsignedIntegerType()) {
          if (Target->isUnsignedIntegerType())
            return DiagnoseImpCast(S, E, T, CC,
                                   diag::warn_impcast_high_order_zero_bits);
          if (Target->isSignedIntegerType())
            return DiagnoseImpCast(S, E, T, CC,
                                   diag::warn_impcast_nonnegative_result);
        }
  }

  if (TargetRange.Width == LikelySourceRange.Width &&
      !TargetRange.NonNegative && LikelySourceRange.NonNegative &&
      Source->isSignedIntegerType()) {
    // Warn when doing a signed to signed conversion, warn if the positive
    // source value is exactly the width of the target type, which will
    // cause a negative value to be stored.

    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
        !S.SourceMgr.isInSystemMacro(CC)) {
      llvm::APSInt Value = Result.Val.getInt();
      if (isSameWidthConstantConversion(S, E, T, CC)) {
        std::string PrettySourceValue = toString(Value, 10);
        std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);

        S.DiagRuntimeBehavior(
            E->getExprLoc(), E,
            S.PDiag(diag::warn_impcast_integer_precision_constant)
                << PrettySourceValue << PrettyTargetValue << E->getType() << T
                << E->getSourceRange() << SourceRange(CC));
        return;
      }
    }

    // Fall through for non-constants to give a sign conversion warning.
  }

  if ((!isa<EnumType>(Target) || !isa<EnumType>(Source)) &&
      ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
       (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
        LikelySourceRange.Width == TargetRange.Width))) {
    if (S.SourceMgr.isInSystemMacro(CC))
      return;

    if (SourceBT && SourceBT->isInteger() && TargetBT &&
        TargetBT->isInteger() &&
        Source->isSignedIntegerType() == Target->isSignedIntegerType()) {
      return;
    }

    unsigned DiagID = diag::warn_impcast_integer_sign;

    // Traditionally, gcc has warned about this under -Wsign-compare.
    // We also want to warn about it in -Wconversion.
    // So if -Wconversion is off, use a completely identical diagnostic
    // in the sign-compare group.
    // The conditional-checking code will
    // watch for both of these, meaning that we don't need to
    // duplicate effort.
    if (ICContext) {
      DiagID = diag::warn_impcast_integer_sign_conditional;
      *ICContext = true;
    }

    return DiagnoseImpCast(S, E, T, CC, DiagID);
  }

  // Diagnose conversions between different enumeration types.
  // In C, we pretend that the type of an EnumConstantDecl is its enumeration
  // type, to give us better diagnostics.
  QualType SourceType = E->getType();
  if (!S.getLangOpts().CPlusPlus) {
    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
      if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
        EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
        SourceType = S.Context.getTypeDeclType(Enum);
        Source = S.Context.getCanonicalType(SourceType).getTypePtr();
      }
  }

  if (const EnumType *SourceEnum = Source->getAs<EnumType>())
    if (const EnumType *TargetEnum = Target->getAs<EnumType>())
      if (SourceEnum->getDecl()->hasNameForLinkage() &&
          TargetEnum->getDecl()->hasNameForLinkage() &&
          SourceEnum != TargetEnum) {
        if (S.SourceMgr.isInSystemMacro(CC))
          return;

        return DiagnoseImpCast(S, E, SourceType, T, CC,
                               diag::warn_impcast_different_enum_types);
      }
}
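// Illustrative examples (not from the original source):
//   char C = 300;              // constant does not fit: default-on warning
//                              // that also prints the value it becomes in 'char'
//   long L = ...; int I = L;   // -Wshorten-64-to-32 / -Wconversion precision loss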
static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
                                     SourceLocation CC, QualType T);
static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
                                    SourceLocation CC, bool &ICContext) {
  E = E->IgnoreParenImpCasts();
  // Diagnose incomplete type for second or third operand in C.
  if (!S.getLangOpts().CPlusPlus && E->getType()->isRecordType())
    S.RequireCompleteExprType(E, diag::err_incomplete_type);

  if (auto *CO = dyn_cast<AbstractConditionalOperator>(E))
    return CheckConditionalOperator(S, CO, CC, T);

  AnalyzeImplicitConversions(S, E, CC);
  if (E->getType() != T)
    return CheckImplicitConversion(S, E, T, CC, &ICContext);
}
static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
                                     SourceLocation CC, QualType T) {
  AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());

  Expr *TrueExpr = E->getTrueExpr();
  if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E))
    TrueExpr = BCO->getCommon();

  bool Suspicious = false;
  CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious);
  CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);

  if (T->isBooleanType())
    DiagnoseIntInBoolContext(S, E);

  // If -Wconversion would have warned about either of the candidates
  // for a signedness conversion to the context type...
  if (!Suspicious) return;

  // ...but it's currently ignored...
  if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
    return;

  // ...then check whether it would have warned about either of the
  // candidates for a signedness conversion to the condition type.
  if (E->getType() == T) return;

  Suspicious = false;
  CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(),
                          E->getType(), CC, &Suspicious);
  if (!Suspicious)
    CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
                            E->getType(), CC, &Suspicious);
}
/// Check conversion of given expression to boolean.
/// Input argument E is a logical expression.
static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
  if (S.getLangOpts().Bool)
    return;
  if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
    return;
  CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
}
struct AnalyzeImplicitConversionsWorkItem {
  Expr *E;
  SourceLocation CC;
  bool IsListInit;
};
/// Data recursive variant of AnalyzeImplicitConversions. Subexpressions
/// that should be visited are added to WorkList.
static void AnalyzeImplicitConversions(
    Sema &S, AnalyzeImplicitConversionsWorkItem Item,
    llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) {
  Expr *OrigE = Item.E;
  SourceLocation CC = Item.CC;

  QualType T = OrigE->getType();
  Expr *E = OrigE->IgnoreParenImpCasts();

  // Propagate whether we are in a C++ list initialization expression.
  // If so, we do not issue warnings for implicit int-float conversion
  // precision loss, because C++11 narrowing already handles it.
  bool IsListInit = Item.IsListInit ||
                    (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus);

  if (E->isTypeDependent() || E->isValueDependent())
    return;

  Expr *SourceExpr = E;
  // Examine, but don't traverse into the source expression of an
  // OpaqueValueExpr, since it may have multiple parents and we don't want to
  // emit duplicate diagnostics. It's fine to examine the form or attempt to
  // evaluate it in the context of checking the specific conversion to T
  // though.
  if (auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    if (auto *Src = OVE->getSourceExpr())
      SourceExpr = Src;

  if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr))
    if (UO->getOpcode() == UO_Not &&
        UO->getSubExpr()->isKnownToHaveBooleanValue())
      S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool)
          << OrigE->getSourceRange() << T->isBooleanType()
          << FixItHint::CreateReplacement(UO->getBeginLoc(), "!");

  if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr))
    if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) &&
        BO->getLHS()->isKnownToHaveBooleanValue() &&
        BO->getRHS()->isKnownToHaveBooleanValue() &&
        BO->getLHS()->HasSideEffects(S.Context) &&
        BO->getRHS()->HasSideEffects(S.Context)) {
      S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical)
          << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange()
          << FixItHint::CreateReplacement(
                 BO->getOperatorLoc(),
                 (BO->getOpcode() == BO_And ? "&&" : "||"));
      S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int);
    }

  // For conditional operators, we analyze the arguments as if they
  // were being fed directly into the output.
  if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) {
    CheckConditionalOperator(S, CO, CC, T);
    return;
  }

  // Check implicit argument conversions for function calls.
  if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr))
    CheckImplicitArgumentConversions(S, Call, CC);

  // Go ahead and check any implicit conversions we might have skipped.
  // The non-canonical typecheck is just an optimization;
  // CheckImplicitConversion will filter out dead implicit conversions.
  if (SourceExpr->getType() != T)
    CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit);

  // Now continue drilling into this expression.

  if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
    // The bound subexpressions in a PseudoObjectExpr are not reachable
    // as transitive children.
    // FIXME: Use a more uniform representation for this.
    for (auto *SE : POE->semantics())
      if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
        WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit});
  }

  // Skip past explicit casts.
  if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) {
    E = CE->getSubExpr()->IgnoreParenImpCasts();
    if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
      S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
    WorkList.push_back({E, CC, IsListInit});
    return;
  }

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
    // Do a somewhat different check with comparison operators.
    if (BO->isComparisonOp())
      return AnalyzeComparison(S, BO);

    // And with simple assignments.
    if (BO->getOpcode() == BO_Assign)
      return AnalyzeAssignment(S, BO);
    // And with compound assignments.
    if (BO->isAssignmentOp())
      return AnalyzeCompoundAssignment(S, BO);
  }

  // These break the otherwise-useful invariant below.  Fortunately,
  // we don't really need to recurse into them, because any internal
  // expressions should have been analyzed already when they were
  // built into statements.
  if (isa<StmtExpr>(E)) return;

  // Don't descend into unevaluated contexts.
  if (isa<UnaryExprOrTypeTraitExpr>(E)) return;

  // Now just recurse over the expression's children.
  CC = E->getExprLoc();
  BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
  bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
  for (Stmt *SubStmt : E->children()) {
    Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
    if (!ChildExpr)
      continue;

    if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E))
      if (ChildExpr == CSE->getOperand())
        // Do not recurse over a CoroutineSuspendExpr's operand.
        // The operand is also a subexpression of getCommonExpr(), and
        // recursing into it directly would produce duplicate diagnostics.
        continue;

    if (IsLogicalAndOperator &&
        isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
      // Ignore checking string literals that are in logical and operators.
      // This is a common pattern for asserts.
      continue;
    WorkList.push_back({ChildExpr, CC, IsListInit});
  }

  if (BO && BO->isLogicalOp()) {
    Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());

    SubExpr = BO->getRHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
  }

  if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
    if (U->getOpcode() == UO_LNot) {
      ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
    } else if (U->getOpcode() != UO_AddrOf
) {
15373 if (U
->getSubExpr()->getType()->isAtomicType())
15374 S
.Diag(U
->getSubExpr()->getBeginLoc(),
15375 diag::warn_atomic_implicit_seq_cst
);
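
// For illustration (hypothetical user code, not taken from the test suite):
// with a boolean 'b' and bool-returning f()/g() that have side effects,
//   if (~b) {}        // warns about bitwise negation of a boolean and
//                     // offers a fix-it replacing '~' with '!'
//   if (f() & g()) {} // warns about using bitwise '&' with boolean operands,
//                     // suggests '&&', and notes casting an operand to int
//                     // as a way to silence the warning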

/// AnalyzeImplicitConversions - Find and report any interesting
/// implicit conversions in the given expression. There are a couple
/// of competing diagnostics here, -Wconversion and -Wsign-compare.
static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
                                       bool IsListInit /*= false*/) {
  llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
  WorkList.push_back({OrigE, CC, IsListInit});
  while (!WorkList.empty())
    AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
  // Taking into account implicit conversions,
  // allow any integer.
  if (!E->getType()->isIntegerType()) {
    S.Diag(E->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_invalid_local_size_type);
    return true;
  }
  // Potentially emit standard warnings for implicit conversions if enabled
  // using -Wconversion.
  CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
  return false;
}

// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
// Returns true when emitting a warning about taking the address of a reference.
static bool CheckForReference(Sema &SemaRef, const Expr *E,
                              const PartialDiagnostic &PD) {
  E = E->IgnoreParenImpCasts();

  const FunctionDecl *FD = nullptr;

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    if (!DRE->getDecl()->getType()->isReferenceType())
      return false;
  } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    if (!M->getMemberDecl()->getType()->isReferenceType())
      return false;
  } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
    if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
      return false;
    FD = Call->getDirectCallee();
  } else {
    return false;
  }

  SemaRef.Diag(E->getExprLoc(), PD);

  // If possible, point to location of function.
  if (FD)
    SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;

  return true;
}

// Returns true if the SourceLocation is expanded from any macro body.
// Returns false if the SourceLocation is invalid, is not from a macro
// expansion, or is expanded from a top-level macro argument.
static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
  if (Loc.isInvalid())
    return false;

  while (Loc.isMacroID()) {
    if (SM.isMacroBodyExpansion(Loc))
      return true;
    Loc = SM.getImmediateMacroCallerLoc(Loc);
  }

  return false;
}
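
// For illustration (hypothetical macro): given
//   #define IS_SET(x) ((x) != nullptr)
// a location spelled inside '((x) != nullptr)' is a macro body expansion and
// yields true, while the location of the argument 'p' in IS_SET(p) comes from
// a top-level macro argument and yields false.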

/// Diagnose pointers that are always non-null.
/// \param E the expression containing the pointer
/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise E is
/// compared to a null pointer
/// \param IsEqual True when the comparison is equal to a null pointer
/// \param Range Extra SourceRange to highlight in the diagnostic
void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
                                        Expr::NullPointerConstantKind NullKind,
                                        bool IsEqual, SourceRange Range) {
  // Don't warn inside macros.
  if (E->getExprLoc().isMacroID()) {
    const SourceManager &SM = getSourceManager();
    if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
        IsInAnyMacroBody(SM, Range.getBegin()))
      return;
  }
  E = E->IgnoreImpCasts();

  const bool IsCompare = NullKind != Expr::NPCK_NotNull;

  if (isa<CXXThisExpr>(E)) {
    unsigned DiagID = IsCompare ? diag::warn_this_null_compare
                                : diag::warn_this_bool_conversion;
    Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
    return;
  }

  bool IsAddressOf = false;

  if (auto *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() != UO_AddrOf)
      return;
    IsAddressOf = true;
    E = UO->getSubExpr();
  }

  if (IsAddressOf) {
    unsigned DiagID = IsCompare
                          ? diag::warn_address_of_reference_null_compare
                          : diag::warn_address_of_reference_bool_conversion;
    PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
                                         << IsEqual;
    if (CheckForReference(*this, E, PD)) {
      return;
    }
  }

  auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
    bool IsParam = isa<NonNullAttr>(NonnullAttr);
    std::string Str;
    llvm::raw_string_ostream S(Str);
    E->printPretty(S, nullptr, getPrintingPolicy());
    unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
                                : diag::warn_cast_nonnull_to_bool;
    Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
                                  << E->getSourceRange() << Range << IsEqual;
    Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
  };

  // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
  if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
    if (auto *Callee = Call->getDirectCallee()) {
      if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }
    }
  }

  // Expect to find a single Decl. Skip anything more complicated.
  ValueDecl *D = nullptr;
  if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
    D = R->getDecl();
  } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    D = M->getMemberDecl();
  }

  // Weak Decls can be null.
  if (!D || D->isWeak())
    return;

  // Check for parameter decl with nonnull attribute
  if (const auto *PV = dyn_cast<ParmVarDecl>(D)) {
    if (getCurFunction() &&
        !getCurFunction()->ModifiedNonNullParams.count(PV)) {
      if (const Attr *A = PV->getAttr<NonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }

      if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
        // Skip function template not specialized yet.
        if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
          return;
        auto ParamIter = llvm::find(FD->parameters(), PV);
        assert(ParamIter != FD->param_end());
        unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);

        for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
          if (!NonNull->args_size()) {
            ComplainAboutNonnullParamOrCall(NonNull);
            return;
          }

          for (const ParamIdx &ArgNo : NonNull->args()) {
            if (ArgNo.getASTIndex() == ParamNo) {
              ComplainAboutNonnullParamOrCall(NonNull);
              return;
            }
          }
        }
      }
    }
  }

  QualType T = D->getType();
  const bool IsArray = T->isArrayType();
  const bool IsFunction = T->isFunctionType();

  // Address of function is used to silence the function warning.
  if (IsAddressOf && IsFunction) {
    return;
  }

  // Found nothing.
  if (!IsAddressOf && !IsFunction && !IsArray)
    return;

  // Pretty print the expression for the diagnostic.
  std::string Str;
  llvm::raw_string_ostream S(Str);
  E->printPretty(S, nullptr, getPrintingPolicy());

  unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
                              : diag::warn_impcast_pointer_to_bool;
  enum {
    AddressOf,
    FunctionPointer,
    ArrayPointer
  } DiagType;
  if (IsAddressOf)
    DiagType = AddressOf;
  else if (IsFunction)
    DiagType = FunctionPointer;
  else if (IsArray)
    DiagType = ArrayPointer;
  else
    llvm_unreachable("Could not determine diagnostic.");
  Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
                                << Range << IsEqual;

  if (!IsFunction)
    return;

  // Suggest '&' to silence the function warning.
  Diag(E->getExprLoc(), diag::note_function_warning_silence)
      << FixItHint::CreateInsertion(E->getBeginLoc(), "&");

  // Check to see if '()' fixit should be emitted.
  QualType ReturnType;
  UnresolvedSet<4> NonTemplateOverloads;
  tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
  if (ReturnType.isNull())
    return;

  if (IsCompare) {
    // There are two cases here. If there is a null constant, only suggest for
    // a pointer return type. If the null is 0, then suggest if the return
    // type is a pointer or an integer type.
    if (!ReturnType->isPointerType()) {
      if (NullKind == Expr::NPCK_ZeroExpression ||
          NullKind == Expr::NPCK_ZeroLiteral) {
        if (!ReturnType->isIntegerType())
          return;
      } else
        return;
    }
  } else { // !IsCompare
    // For function to bool, only suggest if the function pointer has bool
    // return type.
    if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
      return;
  }
  Diag(E->getExprLoc(), diag::note_function_to_function_call)
      << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
}
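
// For illustration (hypothetical user code): given
//   void f(int &r, int *p __attribute__((nonnull)));
// inside f's body, both '&r != nullptr' and 'if (p)' are diagnosed as always
// true/non-null, the latter with a note pointing at the nonnull attribute.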

/// Diagnoses "dangerous" implicit conversions within the given
/// expression (which is a full expression). Implements -Wconversion
/// and -Wsign-compare.
///
/// \param CC the "context" location of the implicit conversion, i.e.
///   the location of the syntactic entity requiring the implicit
///   conversion
void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
  // Don't diagnose in unevaluated contexts.
  if (isUnevaluatedContext())
    return;

  // Don't diagnose for value- or type-dependent expressions.
  if (E->isTypeDependent() || E->isValueDependent())
    return;

  // Check for array bounds violations in cases where the check isn't triggered
  // elsewhere for other Expr types (like BinaryOperators), e.g. when an
  // ArraySubscriptExpr is on the RHS of a variable initialization.
  CheckArrayAccess(E);

  // This is not the right CC for (e.g.) a variable initialization.
  AnalyzeImplicitConversions(*this, E, CC);
}

/// CheckBoolLikeConversion - Check conversion of the given expression to
/// boolean. Input argument E is a logical expression.
void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
  ::CheckBoolLikeConversion(*this, E, CC);
}

/// Diagnose when an expression is an integer constant expression and its
/// evaluation results in integer overflow.
void Sema::CheckForIntOverflow(const Expr *E) {
  // Use a work list to deal with nested struct initializers.
  SmallVector<const Expr *, 2> Exprs(1, E);

  do {
    const Expr *OriginalE = Exprs.pop_back_val();
    const Expr *E = OriginalE->IgnoreParenCasts();

    if (isa<BinaryOperator, UnaryOperator>(E)) {
      E->EvaluateForOverflow(Context);
      continue;
    }

    if (const auto *InitList = dyn_cast<InitListExpr>(OriginalE))
      Exprs.append(InitList->inits().begin(), InitList->inits().end());
    else if (isa<ObjCBoxedExpr>(OriginalE))
      E->EvaluateForOverflow(Context);
    else if (const auto *Call = dyn_cast<CallExpr>(E))
      Exprs.append(Call->arg_begin(), Call->arg_end());
    else if (const auto *Message = dyn_cast<ObjCMessageExpr>(E))
      Exprs.append(Message->arg_begin(), Message->arg_end());
    else if (const auto *Construct = dyn_cast<CXXConstructExpr>(E))
      Exprs.append(Construct->arg_begin(), Construct->arg_end());
    else if (const auto *Temporary = dyn_cast<CXXBindTemporaryExpr>(E))
      Exprs.push_back(Temporary->getSubExpr());
    else if (const auto *Array = dyn_cast<ArraySubscriptExpr>(E))
      Exprs.push_back(Array->getIdx());
    else if (const auto *Compound = dyn_cast<CompoundLiteralExpr>(E))
      Exprs.push_back(Compound->getInitializer());
    else if (const auto *New = dyn_cast<CXXNewExpr>(E);
             New && New->isArray()) {
      if (auto ArraySize = New->getArraySize())
        Exprs.push_back(*ArraySize);
    }
  } while (!Exprs.empty());
}
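
// For illustration (hypothetical user code): an initializer such as
//   int x[] = { INT_MAX + 1 };
// reaches EvaluateForOverflow through the InitListExpr work-list entry and is
// diagnosed as overflowing in a constant expression.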

namespace {

/// Visitor for expressions which looks for unsequenced operations on the
/// same object.
class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
  using Base = ConstEvaluatedExprVisitor<SequenceChecker>;

  /// A tree of sequenced regions within an expression. Two regions are
  /// unsequenced if one is an ancestor or a descendent of the other. When we
  /// finish processing an expression with sequencing, such as a comma
  /// expression, we fold its tree nodes into its parent, since they are
  /// unsequenced with respect to nodes we will visit later.
  class SequenceTree {
    struct Value {
      explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
      unsigned Parent : 31;
      unsigned Merged : 1;
    };
    SmallVector<Value, 8> Values;

  public:
    /// A region within an expression which may be sequenced with respect
    /// to some other region.
    class Seq {
      friend class SequenceTree;

      unsigned Index;

      explicit Seq(unsigned N) : Index(N) {}

    public:
      Seq() : Index(0) {}
    };

    SequenceTree() { Values.push_back(Value(0)); }
    Seq root() const { return Seq(0); }

    /// Create a new sequence of operations, which is an unsequenced
    /// subset of \p Parent. This sequence of operations is sequenced with
    /// respect to other children of \p Parent.
    Seq allocate(Seq Parent) {
      Values.push_back(Value(Parent.Index));
      return Seq(Values.size() - 1);
    }

    /// Merge a sequence of operations into its parent.
    void merge(Seq S) {
      Values[S.Index].Merged = true;
    }

    /// Determine whether two operations are unsequenced. This operation
    /// is asymmetric: \p Cur should be the more recent sequence, and \p Old
    /// should have been merged into its parent as appropriate.
    bool isUnsequenced(Seq Cur, Seq Old) {
      unsigned C = representative(Cur.Index);
      unsigned Target = representative(Old.Index);
      while (C >= Target) {
        if (C == Target)
          return true;
        C = Values[C].Parent;
      }
      return false;
    }

  private:
    /// Pick a representative for a sequence.
    unsigned representative(unsigned K) {
      if (Values[K].Merged)
        // Perform path compression as we go.
        return Values[K].Parent = representative(Values[K].Parent);
      return K;
    }
  };
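
  // Informal reading of SequenceTree (a restatement, not additional policy):
  // allocate() creates a child region that is sequenced with respect to its
  // siblings but unsequenced with respect to its ancestors; merge() folds a
  // finished child back into its parent, so usages recorded in it are treated
  // as belonging to the parent for anything visited later.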

  /// An object for which we can track unsequenced uses.
  using Object = const NamedDecl *;

  /// Different flavors of object usage which we track. We only track the
  /// least-sequenced usage of each kind.
  enum UsageKind {
    /// A read of an object. Multiple unsequenced reads are OK.
    UK_Use,

    /// A modification of an object which is sequenced before the value
    /// computation of the expression, such as ++n in C++.
    UK_ModAsValue,

    /// A modification of an object which is not sequenced before the value
    /// computation of the expression, such as n++.
    UK_ModAsSideEffect,

    UK_Count = UK_ModAsSideEffect + 1
  };

  /// Bundle together a sequencing region and the expression corresponding
  /// to a specific usage. One Usage is stored for each usage kind in UsageInfo.
  struct Usage {
    const Expr *UsageExpr;
    SequenceTree::Seq Seq;

    Usage() : UsageExpr(nullptr) {}
  };

  struct UsageInfo {
    Usage Uses[UK_Count];

    /// Have we issued a diagnostic for this object already?
    bool Diagnosed;

    UsageInfo() : Diagnosed(false) {}
  };

  using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;

  Sema &SemaRef;

  /// Sequenced regions within the expression.
  SequenceTree Tree;

  /// Declaration modifications and references which we have seen.
  UsageInfoMap UsageMap;

  /// The region we are currently within.
  SequenceTree::Seq Region;

  /// Filled in with declarations which were modified as a side-effect
  /// (that is, post-increment operations).
  SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr;

  /// Expressions to check later. We defer checking these to reduce
  /// stack usage.
  SmallVectorImpl<const Expr *> &WorkList;

  /// RAII object wrapping the visitation of a sequenced subexpression of an
  /// expression. At the end of this process, the side-effects of the evaluation
  /// become sequenced with respect to the value computation of the result, so
  /// we downgrade any UK_ModAsSideEffect within the evaluation to
  /// UK_ModAsValue.
  struct SequencedSubexpression {
    SequencedSubexpression(SequenceChecker &Self)
        : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
      Self.ModAsSideEffect = &ModAsSideEffect;
    }

    ~SequencedSubexpression() {
      for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) {
        // Add a new usage with usage kind UK_ModAsValue, and then restore
        // the previous usage with UK_ModAsSideEffect (thus clearing it if
        // the previous one was empty).
        UsageInfo &UI = Self.UsageMap[M.first];
        auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect];
        Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue);
        SideEffectUsage = M.second;
      }
      Self.ModAsSideEffect = OldModAsSideEffect;
    }

    SequenceChecker &Self;
    SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
    SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect;
  };

  /// RAII object wrapping the visitation of a subexpression which we might
  /// choose to evaluate as a constant. If any subexpression is evaluated and
  /// found to be non-constant, this allows us to suppress the evaluation of
  /// the outer expression.
  class EvaluationTracker {
  public:
    EvaluationTracker(SequenceChecker &Self)
        : Self(Self), Prev(Self.EvalTracker) {
      Self.EvalTracker = this;
    }

    ~EvaluationTracker() {
      Self.EvalTracker = Prev;
      if (Prev)
        Prev->EvalOK &= EvalOK;
    }

    bool evaluate(const Expr *E, bool &Result) {
      if (!EvalOK || E->isValueDependent())
        return false;
      EvalOK = E->EvaluateAsBooleanCondition(
          Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated());
      return EvalOK;
    }

  private:
    SequenceChecker &Self;
    EvaluationTracker *Prev;
    bool EvalOK = true;
  } *EvalTracker = nullptr;

  /// Find the object which is produced by the specified expression,
  /// if any.
  Object getObject(const Expr *E, bool Mod) const {
    E = E->IgnoreParenCasts();
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
        return getObject(UO->getSubExpr(), Mod);
    } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
      if (BO->getOpcode() == BO_Comma)
        return getObject(BO->getRHS(), Mod);
      if (Mod && BO->isAssignmentOp())
        return getObject(BO->getLHS(), Mod);
    } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
      // FIXME: Check for more interesting cases, like "x.n = ++x.n".
      if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
        return ME->getMemberDecl();
    } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
      // FIXME: If this is a reference, map through to its value.
      return DRE->getDecl();
    return nullptr;
  }

  /// Note that an object \p O was modified or used by an expression
  /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
  /// the object \p O as obtained via the \p UsageMap.
  void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
    // Get the old usage for the given object and usage kind.
    Usage &U = UI.Uses[UK];
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
      // If we have a modification as side effect and are in a sequenced
      // subexpression, save the old Usage so that we can restore it later
      // in SequencedSubexpression::~SequencedSubexpression.
      if (UK == UK_ModAsSideEffect && ModAsSideEffect)
        ModAsSideEffect->push_back(std::make_pair(O, U));
      // Then record the new usage with the current sequencing region.
      U.UsageExpr = UsageExpr;
      U.Seq = Region;
    }
  }

  /// Check whether a modification or use of an object \p O in an expression
  /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
  /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
  /// \p IsModMod is true when we are checking for a mod-mod unsequenced
  /// usage and false when we are checking for a mod-use unsequenced usage.
  void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
                  UsageKind OtherKind, bool IsModMod) {
    if (UI.Diagnosed)
      return;

    const Usage &U = UI.Uses[OtherKind];
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
      return;

    const Expr *Mod = U.UsageExpr;
    const Expr *ModOrUse = UsageExpr;
    if (OtherKind == UK_Use)
      std::swap(Mod, ModOrUse);

    SemaRef.DiagRuntimeBehavior(
        Mod->getExprLoc(), {Mod, ModOrUse},
        SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
                               : diag::warn_unsequenced_mod_use)
            << O << SourceRange(ModOrUse->getExprLoc()));
    UI.Diagnosed = true;
  }

  // A note on note{Pre, Post}{Use, Mod}:
  //
  // (It helps to follow the algorithm with an expression such as
  //  "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
  //  operations before C++17 and both are well-defined in C++17).
  //
  // When visiting a node which uses/modifies an object we first call notePreUse
  // or notePreMod before visiting its sub-expression(s). At this point the
  // children of the current node have not yet been visited and so the eventual
  // uses/modifications resulting from the children of the current node have not
  // been recorded yet.
  //
  // We then visit the children of the current node. After that notePostUse or
  // notePostMod is called. These will 1) detect an unsequenced modification
  // as side effect (as in "k++ + k") and 2) add a new usage with the
  // appropriate usage kind.
  //
  // We also have to be careful that some operations sequence modifications as
  // side effects as well (for example: || or ,). To account for this we wrap
  // the visitation of such a sub-expression (for example: the LHS of || or ,)
  // with SequencedSubexpression. SequencedSubexpression is an RAII object
  // which records usages which are modifications as side effect, and then
  // downgrades them (or more accurately restores the previous usage which was
  // a modification as side effect) when exiting the scope of the sequenced
  // subexpression.
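  //
  // A short worked example (informal): in "k++ + k", visiting 'k++' calls
  // notePreMod(k), visits 'k', then notePostMod(k, UK_ModAsSideEffect).
  // Visiting the second 'k' (an lvalue-to-rvalue load) calls notePostUse(k),
  // which finds the unsequenced UK_ModAsSideEffect usage in the same region
  // and reports it via -Wunsequenced.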

  void notePreUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    // Uses conflict with other modifications.
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
  }

  void notePostUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/false);
    addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use);
  }

  void notePreMod(Object O, const Expr *ModExpr) {
    UsageInfo &UI = UsageMap[O];
    // Modifications conflict with other modifications and with uses.
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
  }

  void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/true);
    addUsage(O, UI, ModExpr, /*UsageKind=*/UK);
  }

public:
  SequenceChecker(Sema &S, const Expr *E,
                  SmallVectorImpl<const Expr *> &WorkList)
      : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
    Visit(E);
    // Silence a -Wunused-private-field since WorkList is now unused.
    // TODO: Evaluate if it can be used, and if not remove it.
    (void)this->WorkList;
  }

  void VisitStmt(const Stmt *S) {
    // Skip all statements which aren't expressions for now.
  }

  void VisitExpr(const Expr *E) {
    // By default, just recurse to evaluated subexpressions.
    Base::VisitStmt(E);
  }

  void VisitCoroutineSuspendExpr(const CoroutineSuspendExpr *CSE) {
    for (auto *Sub : CSE->children()) {
      const Expr *ChildExpr = dyn_cast_or_null<Expr>(Sub);
      if (!ChildExpr)
        continue;

      if (ChildExpr == CSE->getOperand())
        // Do not recurse over a CoroutineSuspendExpr's operand.
        // The operand is also a subexpression of getCommonExpr(), and
        // recursing into it directly could confuse object management
        // for the sake of sequence tracking.
        continue;

      Visit(Sub);
    }
  }

  void VisitCastExpr(const CastExpr *E) {
    Object O = Object();
    if (E->getCastKind() == CK_LValueToRValue)
      O = getObject(E->getSubExpr(), false);

    if (O)
      notePreUse(O, E);
    VisitExpr(E);
    if (O)
      notePostUse(O, E);
  }

  void VisitSequencedExpressions(const Expr *SequencedBefore,
                                 const Expr *SequencedAfter) {
    SequenceTree::Seq BeforeRegion = Tree.allocate(Region);
    SequenceTree::Seq AfterRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    {
      SequencedSubexpression SeqBefore(*this);
      Region = BeforeRegion;
      Visit(SequencedBefore);
    }

    Region = AfterRegion;
    Visit(SequencedAfter);

    Region = OldRegion;

    Tree.merge(BeforeRegion);
    Tree.merge(AfterRegion);
  }

  void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) {
    // C++17 [expr.sub]p1:
    //   The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
    //   expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS());
    else {
      Visit(ASE->getLHS());
      Visit(ASE->getRHS());
    }
  }

  void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMem(const BinaryOperator *BO) {
    // C++17 [expr.mptr.oper]p4:
    //   Abbreviating pm-expression.*cast-expression as E1.*E2, [...]
    //   the expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShlShr(const BinaryOperator *BO) {
    // C++17 [expr.shift]p4:
    //   The expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinComma(const BinaryOperator *BO) {
    // C++11 [expr.comma]p1:
    //   Every value computation and side effect associated with the left
    //   expression is sequenced before every value computation and side
    //   effect associated with the right expression.
    VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
  }

  void VisitBinAssign(const BinaryOperator *BO) {
    SequenceTree::Seq RHSRegion;
    SequenceTree::Seq LHSRegion;
    if (SemaRef.getLangOpts().CPlusPlus17) {
      RHSRegion = Tree.allocate(Region);
      LHSRegion = Tree.allocate(Region);
    } else {
      RHSRegion = Region;
      LHSRegion = Region;
    }
    SequenceTree::Seq OldRegion = Region;

    // C++11 [expr.ass]p1:
    //   [...] the assignment is sequenced after the value computation
    //   of the right and left operands, [...]
    //
    // so check it before inspecting the operands and update the
    // map afterwards.
    Object O = getObject(BO->getLHS(), /*Mod=*/true);
    if (O)
      notePreMod(O, BO);

    if (SemaRef.getLangOpts().CPlusPlus17) {
      // C++17 [expr.ass]p1:
      //   [...] The right operand is sequenced before the left operand. [...]
      {
        SequencedSubexpression SeqBefore(*this);
        Region = RHSRegion;
        Visit(BO->getRHS());
      }

      Region = LHSRegion;
      Visit(BO->getLHS());

      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

    } else {
      // C++11 does not specify any sequencing between the LHS and RHS.
      Region = LHSRegion;
      Visit(BO->getLHS());

      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    // C++11 [expr.ass]p1:
    //   the assignment is sequenced [...] before the value computation of the
    //   assignment expression.
    // C11 6.5.16/3 has no such rule.
    Region = OldRegion;
    if (O)
      notePostMod(O, BO,
                  SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                  : UK_ModAsSideEffect);
    if (SemaRef.getLangOpts().CPlusPlus17) {
      Tree.merge(RHSRegion);
      Tree.merge(LHSRegion);
    }
  }

  void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) {
    VisitBinAssign(CAO);
  }
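
  // For illustration (hypothetical user code): "i = i++ + 1;" is diagnosed as
  // having unsequenced modifications before C++17, where the LHS and RHS share
  // one region; in C++17 mode the RHS is visited first as a sequenced
  // subexpression, so no warning is emitted.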

  void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    // C++11 [expr.pre.incr]p1:
    //   the expression ++x is equivalent to x+=1
    notePostMod(O, UO,
                SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                : UK_ModAsSideEffect);
  }

  void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    notePostMod(O, UO, UK_ModAsSideEffect);
  }

  void VisitBinLOr(const BinaryOperator *BO) {
    // C++11 [expr.log.or]p2:
    //   If the second expression is evaluated, every value computation and
    //   side effect associated with the first expression is sequenced before
    //   every value computation and side effect associated with the
    //   second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.or]p1:
    //   [...] the second operand is not evaluated if the first operand
    //   evaluates to true.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }
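
  // For illustration: when the LHS folds to a known 'true', as in
  // "1 || (n++ + n)", ShouldVisitRHS is false, the RHS is provably not
  // evaluated, and no -Wunsequenced diagnostic is issued for it.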

  void VisitBinLAnd(const BinaryOperator *BO) {
    // C++11 [expr.log.and]p2:
    //   If the second expression is evaluated, every value computation and
    //   side effect associated with the first expression is sequenced before
    //   every value computation and side effect associated with the
    //   second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.and]p1:
    //   [...] the second operand is not evaluated if the first operand is false.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
    // C++11 [expr.cond]p1:
    //   [...] Every value computation and side effect associated with the first
    //   expression is sequenced before every value computation and side effect
    //   associated with the second or third expression.
    SequenceTree::Seq ConditionRegion = Tree.allocate(Region);

    // No sequencing is specified between the true and false expression.
    // However since exactly one of both is going to be evaluated we can
    // consider them to be sequenced. This is needed to avoid warning on
    // something like "x ? y+= 1 : y += 2;" in the case where we will visit
    // both the true and false expressions because we can't evaluate x.
    // This will still allow us to detect an expression like (pre C++17)
    // "(x ? y += 1 : y += 2) = y".
    //
    // We don't wrap the visitation of the true and false expression with
    // SequencedSubexpression because we don't want to downgrade modifications
    // as side effect in the true and false expressions after the visitation
    // is done. (for example in the expression "(x ? y++ : y++) + y" we should
    // not warn between the two "y++", but we should warn between the "y++"
    // and the "y").
    SequenceTree::Seq TrueRegion = Tree.allocate(Region);
    SequenceTree::Seq FalseRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = ConditionRegion;
      Visit(CO->getCond());
    }

    // C++11 [expr.cond]p1:
    //   [...] The first expression is contextually converted to bool (Clause 4).
    //   It is evaluated and if it is true, the result of the conditional
    //   expression is the value of the second expression, otherwise that of the
    //   third expression. Only one of the second and third expressions is
    //   evaluated. [...]
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
    bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
    bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitTrueExpr) {
      Region = TrueRegion;
      Visit(CO->getTrueExpr());
    }
    if (ShouldVisitFalseExpr) {
      Region = FalseRegion;
      Visit(CO->getFalseExpr());
    }

    Region = OldRegion;
    Tree.merge(ConditionRegion);
    Tree.merge(TrueRegion);
    Tree.merge(FalseRegion);
  }

  void VisitCallExpr(const CallExpr *CE) {
    // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.

    if (CE->isUnevaluatedBuiltinCall(Context))
      return;

    // C++11 [intro.execution]p15:
    //   When calling a function [...], every value computation and side effect
    //   associated with any argument expression, or with the postfix expression
    //   designating the called function, is sequenced before execution of every
    //   expression or statement in the body of the function [and thus before
    //   the value computation of its result].
    SequencedSubexpression Sequenced(*this);
    SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] {
      // C++17 [expr.call]p5
      //   The postfix-expression is sequenced before each expression in the
      //   expression-list and any default argument. [...]
      SequenceTree::Seq CalleeRegion;
      SequenceTree::Seq OtherRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        CalleeRegion = Tree.allocate(Region);
        OtherRegion = Tree.allocate(Region);
      } else {
        CalleeRegion = Region;
        OtherRegion = Region;
      }
      SequenceTree::Seq OldRegion = Region;

      // Visit the callee expression first.
      Region = CalleeRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        SequencedSubexpression Sequenced(*this);
        Visit(CE->getCallee());
      } else {
        Visit(CE->getCallee());
      }

      // Then visit the argument expressions.
      Region = OtherRegion;
      for (const Expr *Argument : CE->arguments())
        Visit(Argument);

      Region = OldRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        Tree.merge(CalleeRegion);
        Tree.merge(OtherRegion);
      }
    });
  }

  void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) {
    // C++17 [over.match.oper]p2:
    //   [...] the operator notation is first transformed to the equivalent
    //   function-call notation as summarized in Table 12 (where @ denotes one
    //   of the operators covered in the specified subclause). However, the
    //   operands are sequenced in the order prescribed for the built-in
    //   operator (Clause 8).
    //
    // From the above only overloaded binary operators and overloaded call
    // operators have sequencing rules in C++17 that we need to handle
    // separately.
    if (!SemaRef.getLangOpts().CPlusPlus17 ||
        (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call))
      return VisitCallExpr(CXXOCE);

    enum {
      NoSequencing,
      LHSBeforeRHS,
      RHSBeforeLHS,
      LHSBeforeRest
    } SequencingKind;
    switch (CXXOCE->getOperator()) {
    case OO_Equal:
    case OO_PlusEqual:
    case OO_MinusEqual:
    case OO_StarEqual:
    case OO_SlashEqual:
    case OO_PercentEqual:
    case OO_CaretEqual:
    case OO_AmpEqual:
    case OO_PipeEqual:
    case OO_LessLessEqual:
    case OO_GreaterGreaterEqual:
      SequencingKind = RHSBeforeLHS;
      break;

    case OO_LessLess:
    case OO_GreaterGreater:
    case OO_AmpAmp:
    case OO_PipePipe:
    case OO_Comma:
    case OO_ArrowStar:
    case OO_Subscript:
      SequencingKind = LHSBeforeRHS;
      break;

    case OO_Call:
      SequencingKind = LHSBeforeRest;
      break;

    default:
      SequencingKind = NoSequencing;
      break;
    }

    if (SequencingKind == NoSequencing)
      return VisitCallExpr(CXXOCE);

    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] {
      assert(SemaRef.getLangOpts().CPlusPlus17 &&
             "Should only get there with C++17 and above!");
      assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) &&
             "Should only get there with an overloaded binary operator"
             " or an overloaded call operator!");

      if (SequencingKind == LHSBeforeRest) {
        assert(CXXOCE->getOperator() == OO_Call &&
               "We should only have an overloaded call operator here!");

        // This is very similar to VisitCallExpr, except that we only have the
        // C++17 case. The postfix-expression is the first argument of the
        // CXXOperatorCallExpr. The expressions in the expression-list, if any,
        // are in the following arguments.
        //
        // Note that we intentionally do not visit the callee expression since
        // it is just a decayed reference to a function.
        SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region);
        SequenceTree::Seq ArgsRegion = Tree.allocate(Region);
        SequenceTree::Seq OldRegion = Region;

        assert(CXXOCE->getNumArgs() >= 1 &&
               "An overloaded call operator must have at least one argument"
               " for the postfix-expression!");
        const Expr *PostfixExpr = CXXOCE->getArgs()[0];
        llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1,
                                          CXXOCE->getNumArgs() - 1);

        // Visit the postfix-expression first.
        {
          Region = PostfixExprRegion;
          SequencedSubexpression Sequenced(*this);
          Visit(PostfixExpr);
        }

        // Then visit the argument expressions.
        Region = ArgsRegion;
        for (const Expr *Arg : Args)
          Visit(Arg);

        Region = OldRegion;
        Tree.merge(PostfixExprRegion);
        Tree.merge(ArgsRegion);
      } else {
        assert(CXXOCE->getNumArgs() == 2 &&
               "Should only have two arguments here!");
        assert((SequencingKind == LHSBeforeRHS ||
                SequencingKind == RHSBeforeLHS) &&
               "Unexpected sequencing kind!");

        // We do not visit the callee expression since it is just a decayed
        // reference to a function.
        const Expr *E1 = CXXOCE->getArg(0);
        const Expr *E2 = CXXOCE->getArg(1);
        if (SequencingKind == RHSBeforeLHS)
          std::swap(E1, E2);

        return VisitSequencedExpressions(E1, E2);
      }
    });
  }

  void VisitCXXConstructExpr(const CXXConstructExpr *CCE) {
    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    if (!CCE->isListInitialization())
      return VisitExpr(CCE);

    // In C++11, list initializations are sequenced.
    SmallVector<SequenceTree::Seq, 32> Elts;
    SequenceTree::Seq Parent = Region;
    for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
                                              E = CCE->arg_end();
         I != E; ++I) {
      Region = Tree.allocate(Parent);
      Elts.push_back(Region);
      Visit(*I);
    }

    // Forget that the initializers are sequenced.
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }

  void VisitInitListExpr(const InitListExpr *ILE) {
    if (!SemaRef.getLangOpts().CPlusPlus11)
      return VisitExpr(ILE);

    // In C++11, list initializations are sequenced.
    SmallVector<SequenceTree::Seq, 32> Elts;
    SequenceTree::Seq Parent = Region;
    for (unsigned I = 0; I < ILE->getNumInits(); ++I) {
      const Expr *E = ILE->getInit(I);
      if (!E)
        continue;
      Region = Tree.allocate(Parent);
      Elts.push_back(Region);
      Visit(E);
    }

    // Forget that the initializers are sequenced.
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }
};

} // namespace

void Sema::CheckUnsequencedOperations(const Expr *E) {
  SmallVector<const Expr *, 8> WorkList;
  WorkList.push_back(E);
  while (!WorkList.empty()) {
    const Expr *Item = WorkList.pop_back_val();
    SequenceChecker(*this, Item, WorkList);
  }
}

void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
                              bool IsConstexpr) {
  llvm::SaveAndRestore ConstantContext(isConstantEvaluatedOverride,
                                       IsConstexpr || isa<ConstantExpr>(E));
  CheckImplicitConversions(E, CheckLoc);
  if (!E->isInstantiationDependent())
    CheckUnsequencedOperations(E);
  if (!IsConstexpr && !E->isValueDependent())
    CheckForIntOverflow(E);
  DiagnoseMisalignedMembers();
}

void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
                                       FieldDecl *BitField,
                                       Expr *Init) {
  (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc);
}

static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
                                         SourceLocation Loc) {
  if (!PType->isVariablyModifiedType())
    return;
  if (const auto *PointerTy = dyn_cast<PointerType>(PType)) {
    diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc);
    return;
  }
  if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) {
    diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc);
    return;
  }
  if (const auto *ParenTy = dyn_cast<ParenType>(PType)) {
    diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc);
    return;
  }

  const ArrayType *AT = S.Context.getAsArrayType(PType);
  if (!AT)
    return;

  if (AT->getSizeModifier() != ArrayType::Star) {
    diagnoseArrayStarInParamType(S, AT->getElementType(), Loc);
    return;
  }

  S.Diag(Loc, diag::err_array_star_in_function_definition);
}
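
// For illustration (hypothetical user code): "void f(int a[*]);" is valid as a
// declaration, but using the [*] form in a definition, as in
// "void f(int a[*]) {}", reaches err_array_star_in_function_definition.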

/// CheckParmsForFunctionDef - Check that the parameters of the given
/// function are appropriate for the definition of a function. This
/// takes care of any checks that cannot be performed on the
/// declaration itself, e.g., that the types of each of the function
/// parameters are complete.
bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                                    bool CheckParameterNames) {
  bool HasInvalidParm = false;
  for (ParmVarDecl *Param : Parameters) {
    assert(Param && "null in a parameter list");
    // C99 6.7.5.3p4: the parameters in a parameter type list in a
    // function declarator that is part of a function definition of
    // that function shall not have incomplete type.
    //
    // C++23 [dcl.fct.def.general]/p2
    // The type of a parameter [...] for a function definition
    // shall not be a (possibly cv-qualified) class type that is incomplete
    // or abstract within the function body unless the function is deleted.
    if (!Param->isInvalidDecl() &&
        (RequireCompleteType(Param->getLocation(), Param->getType(),
                             diag::err_typecheck_decl_incomplete_type) ||
         RequireNonAbstractType(Param->getBeginLoc(), Param->getOriginalType(),
                                diag::err_abstract_type_in_decl,
                                AbstractParamType))) {
      Param->setInvalidDecl();
      HasInvalidParm = true;
    }

    // C99 6.9.1p5: If the declarator includes a parameter type list, the
    // declaration of each parameter shall include an identifier.
    if (CheckParameterNames && Param->getIdentifier() == nullptr &&
        !Param->isImplicit() && !getLangOpts().CPlusPlus) {
      // Diagnose this as an extension in C17 and earlier.
      if (!getLangOpts().C2x)
        Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x);
    }

    // If the function declarator is not part of a definition of that
    // function, parameters may have incomplete type and may use the [*]
    // notation in their sequences of declarator specifiers to specify
    // variable length array types.
    QualType PType = Param->getOriginalType();
    // FIXME: This diagnostic should point to the '[*]' if source-location
    // information is added for it.
    diagnoseArrayStarInParamType(*this, PType, Param->getLocation());

    // If the parameter is a C++ class type and it has to be destructed in the
    // callee function, declare the destructor so that it can be called by the
    // callee function. Do not perform any direct access check on the dtor here.
    if (!Param->isInvalidDecl()) {
      if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
        if (!ClassDecl->isInvalidDecl() &&
            !ClassDecl->hasIrrelevantDestructor() &&
            !ClassDecl->isDependentContext() &&
            ClassDecl->isParamDestroyedInCallee()) {
          CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
          MarkFunctionReferenced(Param->getLocation(), Destructor);
          DiagnoseUseOfDecl(Destructor, Param->getLocation());
        }
      }
    }

    // Parameters with the pass_object_size attribute only need to be marked
    // constant at function definitions. Because we lack information about
    // whether we're on a declaration or definition when we're instantiating the
    // attribute, we need to check for constness here.
    if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
      if (!Param->getType().isConstQualified())
        Diag(Param->getLocation(), diag::err_attribute_pointers_only)
            << Attr->getSpelling() << 1;

    // Check for parameter names shadowing fields from the class.
    if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
      // The owning context for the parameter should be the function, but we
      // want to see if this function's declaration context is a record.
      DeclContext *DC = Param->getDeclContext();
      if (DC && DC->isFunctionOrMethod()) {
        if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
          CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(),
                                     RD, /*DeclIsField*/ false);
      }
    }

    if (!Param->isInvalidDecl() &&
        Param->getOriginalType()->isWebAssemblyTableType()) {
      Param->setInvalidDecl();
      HasInvalidParm = true;
      Diag(Param->getLocation(), diag::err_wasm_table_as_function_parameter);
    }
  }

  return HasInvalidParm;
}
<std::pair
<
16736 CharUnits
, CharUnits
>> static getBaseAlignmentAndOffsetFromPtr(const Expr
16741 /// Compute the alignment and offset of the base class object given the
16742 /// derived-to-base cast expression and the alignment and offset of the derived
16744 static std::pair
<CharUnits
, CharUnits
>
16745 getDerivedToBaseAlignmentAndOffset(const CastExpr
*CE
, QualType DerivedType
,
16746 CharUnits BaseAlignment
, CharUnits Offset
,
16748 for (auto PathI
= CE
->path_begin(), PathE
= CE
->path_end(); PathI
!= PathE
;
16750 const CXXBaseSpecifier
*Base
= *PathI
;
16751 const CXXRecordDecl
*BaseDecl
= Base
->getType()->getAsCXXRecordDecl();
16752 if (Base
->isVirtual()) {
16753 // The complete object may have a lower alignment than the non-virtual
16754 // alignment of the base, in which case the base may be misaligned. Choose
16755 // the smaller of the non-virtual alignment and BaseAlignment, which is a
16756 // conservative lower bound of the complete object alignment.
16757 CharUnits NonVirtualAlignment
=
16758 Ctx
.getASTRecordLayout(BaseDecl
).getNonVirtualAlignment();
16759 BaseAlignment
= std::min(BaseAlignment
, NonVirtualAlignment
);
16760 Offset
= CharUnits::Zero();
16762 const ASTRecordLayout
&RL
=
16763 Ctx
.getASTRecordLayout(DerivedType
->getAsCXXRecordDecl());
16764 Offset
+= RL
.getBaseClassOffset(BaseDecl
);
16766 DerivedType
= Base
->getType();
16769 return std::make_pair(BaseAlignment
, Offset
);

/// Compute the alignment and offset of a binary additive operator.
static std::optional<std::pair<CharUnits, CharUnits>>
getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE,
                                     bool IsSub, ASTContext &Ctx) {
  QualType PointeeType = PtrE->getType()->getPointeeType();

  if (!PointeeType->isConstantSizeType())
    return std::nullopt;

  auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx);
  if (!P)
    return std::nullopt;

  CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType);
  if (std::optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) {
    CharUnits Offset = EltSize * IdxRes->getExtValue();
    if (IsSub)
      Offset = -Offset;
    return std::make_pair(P->first, P->second + Offset);
  }

  // If the integer expression isn't a constant expression, compute the lower
  // bound of the alignment using the alignment and offset of the pointer
  // expression and the element size.
  return std::make_pair(
      P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize),
      CharUnits::Zero());
}
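
// A small worked example (informal): if the pointer operand is known to have
// alignment 8 at offset 4 and the element size is 4, then for a non-constant
// index the computed alignment drops to 4 (the largest power of two dividing
// both the alignment and the offset, and then the element size) and the
// offset is reset to zero -- a conservative lower bound for the adjusted
// pointer.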

/// This helper function takes an lvalue expression and returns the alignment of
/// a VarDecl and a constant offset from the VarDecl.
static std::optional<std::pair<CharUnits, CharUnits>>
getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) {
  E = E->IgnoreParens();
  switch (E->getStmtClass()) {
  default:
    break;
  case Stmt::CStyleCastExprClass:
  case Stmt::CXXStaticCastExprClass:
  case Stmt::ImplicitCastExprClass: {
    auto *CE = cast<CastExpr>(E);
    const Expr *From = CE->getSubExpr();
    switch (CE->getCastKind()) {
    default:
      break;
    case CK_NoOp:
      return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx);
      if (!P)
        break;
      return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first,
                                                P->second, Ctx);
    }
    }
    break;
  }
  case Stmt::ArraySubscriptExprClass: {
    auto *ASE = cast<ArraySubscriptExpr>(E);
    return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(),
                                                /*IsSub=*/false, Ctx);
  }
  case Stmt::DeclRefExprClass: {
    if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) {
      // FIXME: If VD is captured by copy or is an escaping __block variable,
      // use the alignment of VD's type.
      if (!VD->getType()->isReferenceType()) {
        // Dependent alignment cannot be resolved -> bail out.
        if (VD->hasDependentAlignment())
          break;
        return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero());
      }
      if (VD->hasInit())
        return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx);
    }
    break;
  }
  case Stmt::MemberExprClass: {
    auto *ME = cast<MemberExpr>(E);
    auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
    if (!FD || FD->getType()->isReferenceType() ||
        FD->getParent()->isInvalidDecl())
      break;
    std::optional<std::pair<CharUnits, CharUnits>> P;
    if (ME->isArrow())
      P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx);
    else
      P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx);
    if (!P)
      break;
    const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent());
    uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex());
    return std::make_pair(P->first,
                          P->second + CharUnits::fromQuantity(Offset));
  }
  case Stmt::UnaryOperatorClass: {
    auto *UO = cast<UnaryOperator>(E);
    switch (UO->getOpcode()) {
    default:
      break;
    case UO_Deref:
      return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx);
    }
    break;
  }
  case Stmt::BinaryOperatorClass: {
    auto *BO = cast<BinaryOperator>(E);
    auto Opcode = BO->getOpcode();
    switch (Opcode) {
    default:
      break;
    case BO_Comma:
      return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx);
    }
    break;
  }
  }
  return std::nullopt;
}

/// This helper function takes a pointer expression and returns the alignment of
/// a VarDecl and a constant offset from the VarDecl.
static std::optional<std::pair<CharUnits, CharUnits>>
getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) {
  E = E->IgnoreParens();
  switch (E->getStmtClass()) {
  default:
    break;
  case Stmt::CStyleCastExprClass:
  case Stmt::CXXStaticCastExprClass:
  case Stmt::ImplicitCastExprClass: {
    auto *CE = cast<CastExpr>(E);
    const Expr *From = CE->getSubExpr();
    switch (CE->getCastKind()) {
    default:
      break;
    case CK_NoOp:
      return getBaseAlignmentAndOffsetFromPtr(From, Ctx);
    case CK_ArrayToPointerDecay:
      return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx);
      if (!P)
        break;
      return getDerivedToBaseAlignmentAndOffset(
          CE, From->getType()->getPointeeType(), P->first, P->second, Ctx);
    }
    }
    break;
  }
  case Stmt::CXXThisExprClass: {
    auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl();
    CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment();
    return std::make_pair(Alignment, CharUnits::Zero());
  }
  case Stmt::UnaryOperatorClass: {
    auto *UO = cast<UnaryOperator>(E);
    if (UO->getOpcode() == UO_AddrOf)
      return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx);
    break;
  }
  case Stmt::BinaryOperatorClass: {
    auto *BO = cast<BinaryOperator>(E);
    auto Opcode = BO->getOpcode();
    switch (Opcode) {
    default:
      break;
    case BO_Add:
    case BO_Sub: {
      const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS();
      if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType())
        std::swap(LHS, RHS);
      return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub,
                                                  Ctx);
    }
    case BO_Comma:
      return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx);
    }
    break;
  }
  }
  return std::nullopt;
}
static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) {
  // See if we can compute the alignment of a VarDecl and an offset from it.
  std::optional<std::pair<CharUnits, CharUnits>> P =
      getBaseAlignmentAndOffsetFromPtr(E, S.Context);
  if (P)
    return P->first.alignmentAtOffset(P->second);

  // If that failed, return the type's alignment.
  return S.Context.getTypeAlignInChars(E->getType()->getPointeeType());
}
/// CheckCastAlign - Implements -Wcast-align, which warns when a
/// pointer cast increases the alignment requirements.
void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
  // This is actually a lot of work to potentially be doing on every
  // cast; don't do it if we're ignoring -Wcast-align (as is the default).
  if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
    return;

  // Ignore dependent types.
  if (T->isDependentType() || Op->getType()->isDependentType())
    return;

  // Require that the destination be a pointer type.
  const PointerType *DestPtr = T->getAs<PointerType>();
  if (!DestPtr) return;

  // If the destination has alignment 1, we're done.
  QualType DestPointee = DestPtr->getPointeeType();
  if (DestPointee->isIncompleteType()) return;
  CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee);
  if (DestAlign.isOne()) return;

  // Require that the source be a pointer type.
  const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
  if (!SrcPtr) return;
  QualType SrcPointee = SrcPtr->getPointeeType();

  // Explicitly allow casts from cv void*.  We already implicitly
  // allowed casts to cv void*, since they have alignment 1.
  // Also allow casts involving incomplete types, which implicitly
  // includes 'void'.
  if (SrcPointee->isIncompleteType()) return;

  CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this);

  if (SrcAlign >= DestAlign) return;

  Diag(TRange.getBegin(), diag::warn_cast_align)
      << Op->getType() << T
      << static_cast<unsigned>(SrcAlign.getQuantity())
      << static_cast<unsigned>(DestAlign.getQuantity())
      << TRange << Op->getSourceRange();
}
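// For illustration only (hypothetical user code, not part of this file): on a
// target where 'int' needs 4-byte alignment,
//
//   char buf[16];
//   int *ip = (int *)buf;   // presumed source alignment 1 < required 4
//
// getPresumedAlignmentOfPointer() yields SrcAlign == 1 here, so the
// diag::warn_cast_align diagnostic above fires.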
void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                            const ArraySubscriptExpr *ASE,
                            bool AllowOnePastEnd, bool IndexNegated) {
  // Already diagnosed by the constant evaluator.
  if (isConstantEvaluated())
    return;

  IndexExpr = IndexExpr->IgnoreParenImpCasts();
  if (IndexExpr->isValueDependent())
    return;

  const Type *EffectiveType =
      BaseExpr->getType()->getPointeeOrArrayElementType();
  BaseExpr = BaseExpr->IgnoreParenCasts();
  const ConstantArrayType *ArrayTy =
      Context.getAsConstantArrayType(BaseExpr->getType());

  LangOptions::StrictFlexArraysLevelKind
    StrictFlexArraysLevel = getLangOpts().getStrictFlexArraysLevel();

  const Type *BaseType =
      ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr();
  bool IsUnboundedArray =
      BaseType == nullptr || BaseExpr->isFlexibleArrayMemberLike(
                                 Context, StrictFlexArraysLevel,
                                 /*IgnoreTemplateOrMacroSubstitution=*/true);
  if (EffectiveType->isDependentType() ||
      (!IsUnboundedArray && BaseType->isDependentType()))
    return;

  Expr::EvalResult Result;
  if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects))
    return;

  llvm::APSInt index = Result.Val.getInt();
  if (IndexNegated) {
    index.setIsUnsigned(false);
    index = -index;
  }

  if (IsUnboundedArray) {
    if (EffectiveType->isFunctionType())
      return;
    if (index.isUnsigned() || !index.isNegative()) {
      const auto &ASTC = getASTContext();
      unsigned AddrBits = ASTC.getTargetInfo().getPointerWidth(
          EffectiveType->getCanonicalTypeInternal().getAddressSpace());
      if (index.getBitWidth() < AddrBits)
        index = index.zext(AddrBits);
      std::optional<CharUnits> ElemCharUnits =
          ASTC.getTypeSizeInCharsIfKnown(EffectiveType);
      // PR50741 - If EffectiveType has unknown size (e.g., if it's a void
      // pointer) bounds-checking isn't meaningful.
      if (!ElemCharUnits)
        return;
      llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity());
      // If index has more active bits than address space, we already know
      // we have a bounds violation to warn about.  Otherwise, compute
      // address of (index + 1)th element, and warn about bounds violation
      // only if that address exceeds address space.
      if (index.getActiveBits() <= AddrBits) {
        bool Overflow;
        llvm::APInt Product(index);
        Product += 1;
        Product = Product.umul_ov(ElemBytes, Overflow);
        if (!Overflow && Product.getActiveBits() <= AddrBits)
          return;
      }

      // Need to compute max possible elements in address space, since that
      // is included in diag message.
      llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits);
      MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth()));
      MaxElems += 1;
      ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth());
      MaxElems = MaxElems.udiv(ElemBytes);

      unsigned DiagID =
          ASE ? diag::warn_array_index_exceeds_max_addressable_bounds
              : diag::warn_ptr_arith_exceeds_max_addressable_bounds;

      // Diag message shows element size in bits and in "bytes" (platform-
      // dependent CharUnits).
      DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                          PDiag(DiagID)
                              << toString(index, 10, true) << AddrBits
                              << (unsigned)ASTC.toBits(*ElemCharUnits)
                              << toString(ElemBytes, 10, false)
                              << toString(MaxElems, 10, false)
                              << (unsigned)MaxElems.getLimitedValue(~0U)
                              << IndexExpr->getSourceRange());

      const NamedDecl *ND = nullptr;
      // Try harder to find a NamedDecl to point at in the note.
      while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
        BaseExpr = ASE->getBase()->IgnoreParenCasts();
      if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
        ND = DRE->getDecl();
      if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
        ND = ME->getMemberDecl();

      if (ND)
        DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
                            PDiag(diag::note_array_declared_here) << ND);
    }
    return;
  }

  if (index.isUnsigned() || !index.isNegative()) {
    // It is possible that the type of the base expression after
    // IgnoreParenCasts is incomplete, even though the type of the base
    // expression before IgnoreParenCasts is complete (see PR39746 for an
    // example). In this case we have no information about whether the array
    // access exceeds the array bounds. However we can still diagnose an array
    // access which precedes the array bounds.
    if (BaseType->isIncompleteType())
      return;

    llvm::APInt size = ArrayTy->getSize();

    if (BaseType != EffectiveType) {
      // Make sure we're comparing apples to apples when comparing index to
      // size.
      uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
      uint64_t array_typesize = Context.getTypeSize(BaseType);

      // Handle ptrarith_typesize being zero, such as when casting to void*.
      // Use the size in bits (what "getTypeSize()" returns) rather than bytes.
      if (!ptrarith_typesize)
        ptrarith_typesize = Context.getCharWidth();

      if (ptrarith_typesize != array_typesize) {
        // There's a cast to a different size type involved.
        uint64_t ratio = array_typesize / ptrarith_typesize;

        // TODO: Be smarter about handling cases where array_typesize is not a
        // multiple of ptrarith_typesize.
        if (ptrarith_typesize * ratio == array_typesize)
          size *= llvm::APInt(size.getBitWidth(), ratio);
      }
    }

    if (size.getBitWidth() > index.getBitWidth())
      index = index.zext(size.getBitWidth());
    else if (size.getBitWidth() < index.getBitWidth())
      size = size.zext(index.getBitWidth());

    // For array subscripting the index must be less than size, but for pointer
    // arithmetic also allow the index (offset) to be equal to size since
    // computing the next address after the end of the array is legal and
    // commonly done e.g. in C++ iterators and range-based for loops.
    if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
      return;

    // Suppress the warning if the subscript expression (as identified by the
    // ']' location) and the index expression are both from macro expansions
    // within a system header.
    if (ASE) {
      SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
          ASE->getRBracketLoc());
      if (SourceMgr.isInSystemHeader(RBracketLoc)) {
        SourceLocation IndexLoc =
            SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc());
        if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc))
          return;
      }
    }

    unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds
                          : diag::warn_ptr_arith_exceeds_bounds;
    unsigned CastMsg = (!ASE || BaseType == EffectiveType) ? 0 : 1;
    QualType CastMsgTy = ASE ? ASE->getLHS()->getType() : QualType();

    DiagRuntimeBehavior(
        BaseExpr->getBeginLoc(), BaseExpr,
        PDiag(DiagID) << toString(index, 10, true) << ArrayTy->desugar()
                      << CastMsg << CastMsgTy << IndexExpr->getSourceRange());
  } else {
    unsigned DiagID = diag::warn_array_index_precedes_bounds;
    if (!ASE) {
      DiagID = diag::warn_ptr_arith_precedes_bounds;
      if (index.isNegative()) index = -index;
    }

    DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                        PDiag(DiagID) << toString(index, 10, true)
                                      << IndexExpr->getSourceRange());
  }

  const NamedDecl *ND = nullptr;
  // Try harder to find a NamedDecl to point at in the note.
  while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
    BaseExpr = ASE->getBase()->IgnoreParenCasts();
  if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
    ND = DRE->getDecl();
  if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
    ND = ME->getMemberDecl();

  if (ND)
    DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
                        PDiag(diag::note_array_declared_here) << ND);
}
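// For illustration only (hypothetical user code): given
//
//   int arr[4];
//   int x = arr[4];     // subscript must be < size: exceeds-bounds warning
//   int *p = arr + 5;   // pointer arithmetic may equal size (4) but not 5
//
// the index is folded with EvaluateAsInt and compared against the
// ConstantArrayType size exactly as done above.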
void Sema::CheckArrayAccess(const Expr *expr) {
  int AllowOnePastEnd = 0;
  while (expr) {
    expr = expr->IgnoreParenImpCasts();
    switch (expr->getStmtClass()) {
      case Stmt::ArraySubscriptExprClass: {
        const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
        CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
                         AllowOnePastEnd > 0);
        expr = ASE->getBase();
        break;
      }
      case Stmt::MemberExprClass: {
        expr = cast<MemberExpr>(expr)->getBase();
        break;
      }
      case Stmt::OMPArraySectionExprClass: {
        const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
        if (ASE->getLowerBound())
          CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
                           /*ASE=*/nullptr, AllowOnePastEnd > 0);
        return;
      }
      case Stmt::UnaryOperatorClass: {
        // Only unwrap the * and & unary operators
        const UnaryOperator *UO = cast<UnaryOperator>(expr);
        expr = UO->getSubExpr();
        switch (UO->getOpcode()) {
          case UO_AddrOf:
            AllowOnePastEnd++;
            break;
          case UO_Deref:
            AllowOnePastEnd--;
            break;
          default:
            return;
        }
        break;
      }
      case Stmt::ConditionalOperatorClass: {
        const ConditionalOperator *cond = cast<ConditionalOperator>(expr);
        if (const Expr *lhs = cond->getLHS())
          CheckArrayAccess(lhs);
        if (const Expr *rhs = cond->getRHS())
          CheckArrayAccess(rhs);
        return;
      }
      case Stmt::CXXOperatorCallExprClass: {
        const auto *OCE = cast<CXXOperatorCallExpr>(expr);
        for (const auto *Arg : OCE->arguments())
          CheckArrayAccess(Arg);
        return;
      }
      default:
        return;
    }
  }
}
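// Note on the AllowOnePastEnd counter above (illustrative, hypothetical code):
//
//   int arr[4];
//   int *end = &arr[4];   // allowed: '&' raises AllowOnePastEnd
//   int bad  = arr[4];    // still diagnosed by CheckArrayAccess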
//===--- CHECK: Objective-C retain cycles ----------------------------------//

namespace {

struct RetainCycleOwner {
  VarDecl *Variable = nullptr;
  SourceRange Range;
  SourceLocation Loc;
  bool Indirect = false;

  RetainCycleOwner() = default;

  void setLocsFrom(Expr *e) {
    Loc = e->getExprLoc();
    Range = e->getSourceRange();
  }
};

} // namespace

/// Consider whether capturing the given variable can possibly lead to
/// a retain cycle.
static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
  // In ARC, it's captured strongly iff the variable has __strong
  // lifetime.  In MRR, it's captured strongly if the variable is
  // __block and has an appropriate type.
  if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
    return false;

  owner.Variable = var;
  if (ref)
    owner.setLocsFrom(ref);
  return true;
}
static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
  while (true) {
    e = e->IgnoreParens();
    if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
      switch (cast->getCastKind()) {
      case CK_BitCast:
      case CK_LValueBitCast:
      case CK_LValueToRValue:
      case CK_ARCReclaimReturnedObject:
        e = cast->getSubExpr();
        continue;

      default:
        return false;
      }
    }

    if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
      ObjCIvarDecl *ivar = ref->getDecl();
      if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
        return false;

      // Try to find a retain cycle in the base.
      if (!findRetainCycleOwner(S, ref->getBase(), owner))
        return false;

      if (ref->isFreeIvar()) owner.setLocsFrom(ref);
      owner.Indirect = true;
      return true;
    }

    if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
      VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
      if (!var) return false;
      return considerVariable(var, ref, owner);
    }

    if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
      if (member->isArrow()) return false;

      // Don't count this as an indirect ownership.
      e = member->getBase();
      continue;
    }

    if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
      // Only pay attention to pseudo-objects on property references.
      ObjCPropertyRefExpr *pre
        = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
                                              ->IgnoreParens());
      if (!pre) return false;
      if (pre->isImplicitProperty()) return false;
      ObjCPropertyDecl *property = pre->getExplicitProperty();
      if (!property->isRetaining() &&
          !(property->getPropertyIvarDecl() &&
            property->getPropertyIvarDecl()->getType()
              .getObjCLifetime() == Qualifiers::OCL_Strong))
        return false;

      owner.Indirect = true;
      if (pre->isSuperReceiver()) {
        owner.Variable = S.getCurMethodDecl()->getSelfDecl();
        if (!owner.Variable)
          return false;
        owner.Loc = pre->getLocation();
        owner.Range = pre->getSourceRange();
        return true;
      }
      e = const_cast<Expr *>(cast<OpaqueValueExpr>(pre->getBase())
                                 ->getSourceExpr());
      continue;
    }

    // Array ivars?

    return false;
  }
}
namespace {

struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
  VarDecl *Variable;
  Expr *Capturer = nullptr;
  bool VarWillBeReased = false;

  FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
      : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
        Variable(variable) {}

  void VisitDeclRefExpr(DeclRefExpr *ref) {
    if (ref->getDecl() == Variable && !Capturer)
      Capturer = ref;
  }

  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
    if (Capturer) return;
    Visit(ref->getBase());
    if (Capturer && ref->isFreeIvar())
      Capturer = ref;
  }

  void VisitBlockExpr(BlockExpr *block) {
    // Look inside nested blocks
    if (block->getBlockDecl()->capturesVariable(Variable))
      Visit(block->getBlockDecl()->getBody());
  }

  void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
    if (Capturer) return;
    if (OVE->getSourceExpr())
      Visit(OVE->getSourceExpr());
  }

  void VisitBinaryOperator(BinaryOperator *BinOp) {
    if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
      return;
    Expr *LHS = BinOp->getLHS();
    if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) {
      if (DRE->getDecl() != Variable)
        return;
      if (Expr *RHS = BinOp->getRHS()) {
        RHS = RHS->IgnoreParenCasts();
        std::optional<llvm::APSInt> Value;
        VarWillBeReased =
            (RHS && (Value = RHS->getIntegerConstantExpr(Context)) &&
             *Value == 0);
      }
    }
  }
};

} // namespace
/// Check whether the given argument is a block which captures a
/// variable.
static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
  assert(owner.Variable && owner.Loc.isValid());

  e = e->IgnoreParenCasts();

  // Look through [^{...} copy] and Block_copy(^{...}).
  if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
    Selector Cmd = ME->getSelector();
    if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
      e = ME->getInstanceReceiver();
      if (!e)
        return nullptr;
      e = e->IgnoreParenCasts();
    }
  } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
    if (CE->getNumArgs() == 1) {
      FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
      if (Fn) {
        const IdentifierInfo *FnI = Fn->getIdentifier();
        if (FnI && FnI->isStr("_Block_copy")) {
          e = CE->getArg(0)->IgnoreParenCasts();
        }
      }
    }
  }

  BlockExpr *block = dyn_cast<BlockExpr>(e);
  if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
    return nullptr;

  FindCaptureVisitor visitor(S.Context, owner.Variable);
  visitor.Visit(block->getBlockDecl()->getBody());
  return visitor.VarWillBeReased ? nullptr : visitor.Capturer;
}
static void diagnoseRetainCycle(Sema &S, Expr *capturer,
                                RetainCycleOwner &owner) {
  assert(owner.Variable && owner.Loc.isValid());

  S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
      << owner.Variable << capturer->getSourceRange();
  S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
      << owner.Indirect << owner.Range;
}
/// Check for a keyword selector that starts with the word 'add' or
/// 'set'.
static bool isSetterLikeSelector(Selector sel) {
  if (sel.isUnarySelector()) return false;

  StringRef str = sel.getNameForSlot(0);
  while (!str.empty() && str.front() == '_') str = str.substr(1);
  if (str.startswith("set"))
    str = str.substr(3);
  else if (str.startswith("add")) {
    // Specially allow 'addOperationWithBlock:'.
    if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
      return false;
    str = str.substr(3);
  } else
    return false;

  if (str.empty()) return true;
  return !isLowercase(str.front());
}
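// Illustrative selectors (assumed examples, not an exhaustive list):
// "setCompletionBlock:" and "addObject:" are treated as setter-like, while
// "addOperationWithBlock:" is exempted above and a unary selector such as
// "copy" never matches because it has no keyword arguments.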
static std::optional<int>
GetNSMutableArrayArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
  bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
      Message->getReceiverInterface(), NSAPI::ClassId_NSMutableArray);
  if (!IsMutableArray) {
    return std::nullopt;
  }

  Selector Sel = Message->getSelector();

  std::optional<NSAPI::NSArrayMethodKind> MKOpt =
      S.NSAPIObj->getNSArrayMethodKind(Sel);
  if (!MKOpt) {
    return std::nullopt;
  }

  NSAPI::NSArrayMethodKind MK = *MKOpt;

  switch (MK) {
    case NSAPI::NSMutableArr_addObject:
    case NSAPI::NSMutableArr_insertObjectAtIndex:
    case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
      return 0;
    case NSAPI::NSMutableArr_replaceObjectAtIndex:
      return 1;

    default:
      return std::nullopt;
  }

  return std::nullopt;
}

static std::optional<int>
GetNSMutableDictionaryArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
  bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
      Message->getReceiverInterface(), NSAPI::ClassId_NSMutableDictionary);
  if (!IsMutableDictionary) {
    return std::nullopt;
  }

  Selector Sel = Message->getSelector();

  std::optional<NSAPI::NSDictionaryMethodKind> MKOpt =
      S.NSAPIObj->getNSDictionaryMethodKind(Sel);
  if (!MKOpt) {
    return std::nullopt;
  }

  NSAPI::NSDictionaryMethodKind MK = *MKOpt;

  switch (MK) {
    case NSAPI::NSMutableDict_setObjectForKey:
    case NSAPI::NSMutableDict_setValueForKey:
    case NSAPI::NSMutableDict_setObjectForKeyedSubscript:
      return 0;

    default:
      return std::nullopt;
  }

  return std::nullopt;
}

static std::optional<int> GetNSSetArgumentIndex(Sema &S,
                                                ObjCMessageExpr *Message) {
  bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
      Message->getReceiverInterface(), NSAPI::ClassId_NSMutableSet);

  bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
      Message->getReceiverInterface(), NSAPI::ClassId_NSMutableOrderedSet);
  if (!IsMutableSet && !IsMutableOrderedSet) {
    return std::nullopt;
  }

  Selector Sel = Message->getSelector();

  std::optional<NSAPI::NSSetMethodKind> MKOpt =
      S.NSAPIObj->getNSSetMethodKind(Sel);
  if (!MKOpt) {
    return std::nullopt;
  }

  NSAPI::NSSetMethodKind MK = *MKOpt;

  switch (MK) {
    case NSAPI::NSMutableSet_addObject:
    case NSAPI::NSOrderedSet_setObjectAtIndex:
    case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript:
    case NSAPI::NSOrderedSet_insertObjectAtIndex:
      return 0;
    case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject:
      return 1;

    default:
      return std::nullopt;
  }

  return std::nullopt;
}
void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
  if (!Message->isInstanceMessage()) {
    return;
  }

  std::optional<int> ArgOpt;

  if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
    return;
  }

  int ArgIndex = *ArgOpt;

  Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
  if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
    Arg = OE->getSourceExpr()->IgnoreImpCasts();
  }

  if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
    if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
      if (ArgRE->isObjCSelfExpr()) {
        Diag(Message->getSourceRange().getBegin(),
             diag::warn_objc_circular_container)
            << ArgRE->getDecl() << StringRef("'super'");
      }
    }
  } else {
    Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();

    if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
      Receiver = OE->getSourceExpr()->IgnoreImpCasts();
    }

    if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
      if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
        if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
          ValueDecl *Decl = ReceiverRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          if (!ArgRE->isObjCSelfExpr()) {
            Diag(Decl->getLocation(),
                 diag::note_objc_circular_container_declared_here)
                << Decl;
          }
        }
      }
    } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
      if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
        if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
          ObjCIvarDecl *Decl = IvarRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          Diag(Decl->getLocation(),
               diag::note_objc_circular_container_declared_here)
              << Decl;
        }
      }
    }
  }
}
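// For illustration only (hypothetical user code):
//
//   NSMutableArray *array = [NSMutableArray array];
//   [array addObject:array];   // receiver and argument are the same decl
//
// Both sides resolve to the same DeclRefExpr, so the circular-container
// warning above is emitted.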
/// Check a message send to see if it's likely to cause a retain cycle.
void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
  // Only check instance methods whose selector looks like a setter.
  if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
    return;

  // Try to find a variable that the receiver is strongly owned by.
  RetainCycleOwner owner;
  if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
    if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
      return;
  } else {
    assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
    owner.Variable = getCurMethodDecl()->getSelfDecl();
    owner.Loc = msg->getSuperLoc();
    owner.Range = msg->getSuperLoc();
  }

  // Check whether the receiver is captured by any of the arguments.
  const ObjCMethodDecl *MD = msg->getMethodDecl();
  for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
    if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) {
      // noescape blocks should not be retained by the method.
      if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
        continue;
      return diagnoseRetainCycle(*this, capturer, owner);
    }
  }
}
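// For illustration only (hypothetical ARC user code):
//
//   [self->_operation setCompletionBlock:^{ [self finish]; }];
//
// The receiver is strongly owned (indirectly) by 'self' and the block
// argument captures 'self', so diagnoseRetainCycle() is reached unless the
// parameter is marked noescape.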
/// Check a property assign to see if it's likely to cause a retain cycle.
void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
  RetainCycleOwner owner;
  if (!findRetainCycleOwner(*this, receiver, owner))
    return;

  if (Expr *capturer = findCapturingExpr(*this, argument, owner))
    diagnoseRetainCycle(*this, capturer, owner);
}

void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) {
  RetainCycleOwner Owner;
  if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner))
    return;

  // Because we don't have an expression for the variable, we have to set the
  // location explicitly here.
  Owner.Loc = Var->getLocation();
  Owner.Range = Var->getSourceRange();

  if (Expr *Capturer = findCapturingExpr(*this, Init, Owner))
    diagnoseRetainCycle(*this, Capturer, Owner);
}
static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc,
                                     Expr *RHS, bool isProperty) {
  // Check if RHS is an Objective-C object literal, which also can get
  // immediately zapped in a weak reference.  Note that we explicitly
  // allow ObjCStringLiterals, since those are designed to never really die.
  RHS = RHS->IgnoreParenImpCasts();

  // This enum needs to match with the 'select' in
  // warn_objc_arc_literal_assign (off-by-1).
  Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS);
  if (Kind == Sema::LK_String || Kind == Sema::LK_None)
    return false;

  S.Diag(Loc, diag::warn_arc_literal_assign)
      << (unsigned)Kind
      << (isProperty ? 0 : 1)
      << RHS->getSourceRange();

  return true;
}

static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc,
                                    Qualifiers::ObjCLifetime LT,
                                    Expr *RHS, bool isProperty) {
  // Strip off any implicit cast added to get to the one ARC-specific.
  while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
    if (cast->getCastKind() == CK_ARCConsumeObject) {
      S.Diag(Loc, diag::warn_arc_retained_assign)
          << (LT == Qualifiers::OCL_ExplicitNone)
          << (isProperty ? 0 : 1)
          << RHS->getSourceRange();
      return true;
    }
    RHS = cast->getSubExpr();
  }

  if (LT == Qualifiers::OCL_Weak &&
      checkUnsafeAssignLiteral(S, Loc, RHS, isProperty))
    return true;

  return false;
}

bool Sema::checkUnsafeAssigns(SourceLocation Loc,
                              QualType LHS, Expr *RHS) {
  Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime();

  if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone)
    return false;

  if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false))
    return true;

  return false;
}

void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
                                  Expr *LHS, Expr *RHS) {
  QualType LHSType;
  // PropertyRef on LHS type need be directly obtained from
  // its declaration as it has a PseudoType.
  ObjCPropertyRefExpr *PRE
    = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens());
  if (PRE && !PRE->isImplicitProperty()) {
    const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
    if (PD)
      LHSType = PD->getType();
  }

  if (LHSType.isNull())
    LHSType = LHS->getType();

  Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();

  if (LT == Qualifiers::OCL_Weak) {
    if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
      getCurFunction()->markSafeWeakUse(LHS);
  }

  if (checkUnsafeAssigns(Loc, LHSType, RHS))
    return;

  // FIXME. Check for other life times.
  if (LT != Qualifiers::OCL_None)
    return;

  if (PRE) {
    if (PRE->isImplicitProperty())
      return;
    const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
    if (!PD)
      return;

    unsigned Attributes = PD->getPropertyAttributes();
    if (Attributes & ObjCPropertyAttribute::kind_assign) {
      // when 'assign' attribute was not explicitly specified
      // by user, ignore it and rely on property type itself
      // for lifetime info.
      unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
      if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) &&
          LHSType->isObjCRetainableType())
        return;

      while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
        if (cast->getCastKind() == CK_ARCConsumeObject) {
          Diag(Loc, diag::warn_arc_retained_property_assign)
              << RHS->getSourceRange();
          return;
        }
        RHS = cast->getSubExpr();
      }
    } else if (Attributes & ObjCPropertyAttribute::kind_weak) {
      if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true))
        return;
    }
  }
}
//===--- CHECK: Empty statement body (-Wempty-body) ---------------------===//

static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr,
                                        SourceLocation StmtLoc,
                                        const NullStmt *Body) {
  // Do not warn if the body is a macro that expands to nothing, e.g:
  //
  // #define CALL(x)
  // if (condition)
  //   CALL(0);
  if (Body->hasLeadingEmptyMacro())
    return false;

  // Get line numbers of statement and body.
  bool StmtLineInvalid;
  unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc,
                                                      &StmtLineInvalid);
  if (StmtLineInvalid)
    return false;

  bool BodyLineInvalid;
  unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(),
                                                      &BodyLineInvalid);
  if (BodyLineInvalid)
    return false;

  // Warn if null statement and body are on the same line.
  if (StmtLine != BodyLine)
    return false;

  return true;
}

void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
                                 const Stmt *Body,
                                 unsigned DiagID) {
  // Since this is a syntactic check, don't emit diagnostic for template
  // instantiations, this just adds noise.
  if (CurrentInstantiationScope)
    return;

  // The body should be a null statement.
  const NullStmt *NBody = dyn_cast<NullStmt>(Body);
  if (!NBody)
    return;

  // Do the usual checks.
  if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
    return;

  Diag(NBody->getSemiLoc(), DiagID);
  Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
}
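// For illustration only (hypothetical user code):
//
//   if (x);        // caller-supplied DiagID fires here
//     do_stuff();  // note suggests putting the body on a separate line
//
// The null statement and the 'if' share a line, so
// ShouldDiagnoseEmptyStmtBody() returns true.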
void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
                                 const Stmt *PossibleBody) {
  assert(!CurrentInstantiationScope); // Ensured by caller

  SourceLocation StmtLoc;
  const Stmt *Body;
  unsigned DiagID;
  if (const ForStmt *FS = dyn_cast<ForStmt>(S)) {
    StmtLoc = FS->getRParenLoc();
    Body = FS->getBody();
    DiagID = diag::warn_empty_for_body;
  } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
    StmtLoc = WS->getRParenLoc();
    Body = WS->getBody();
    DiagID = diag::warn_empty_while_body;
  } else
    return; // Neither `for' nor `while'.

  // The body should be a null statement.
  const NullStmt *NBody = dyn_cast<NullStmt>(Body);
  if (!NBody)
    return;

  // Skip expensive checks if diagnostic is disabled.
  if (Diags.isIgnored(DiagID, NBody->getSemiLoc()))
    return;

  // Do the usual checks.
  if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
    return;

  // `for(...);' and `while(...);' are popular idioms, so in order to keep
  // noise level low, emit diagnostics only if for/while is followed by a
  // CompoundStmt, e.g.:
  //    for (int i = 0; i < n; i++);
  //    {
  //      a(i);
  //    }
  // or if for/while is followed by a statement with more indentation
  // than for/while itself:
  //    for (int i = 0; i < n; i++);
  //      a(i);
  bool ProbableTypo = isa<CompoundStmt>(PossibleBody);
  if (!ProbableTypo) {
    bool BodyColInvalid;
    unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
        PossibleBody->getBeginLoc(), &BodyColInvalid);
    if (BodyColInvalid)
      return;

    bool StmtColInvalid;
    unsigned StmtCol =
        SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid);
    if (StmtColInvalid)
      return;

    if (BodyCol > StmtCol)
      ProbableTypo = true;
  }

  if (ProbableTypo) {
    Diag(NBody->getSemiLoc(), DiagID);
    Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
  }
}
//===--- CHECK: Warn on self move with std::move. -------------------------===//

/// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                            SourceLocation OpLoc) {
  if (Diags.isIgnored(diag::warn_self_move, OpLoc))
    return;

  if (inTemplateInstantiation())
    return;

  // Strip parens and casts away.
  LHSExpr = LHSExpr->IgnoreParenImpCasts();
  RHSExpr = RHSExpr->IgnoreParenImpCasts();

  // Check for a call expression
  const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
  if (!CE || CE->getNumArgs() != 1)
    return;

  // Check for a call to std::move
  if (!CE->isCallToStdMove())
    return;

  // Get argument from std::move
  RHSExpr = CE->getArg(0);

  const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
  const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);

  // Two DeclRefExpr's, check that the decls are the same.
  if (LHSDeclRef && RHSDeclRef) {
    if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
      return;
    if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
        RHSDeclRef->getDecl()->getCanonicalDecl())
      return;

    auto D = Diag(OpLoc, diag::warn_self_move)
             << LHSExpr->getType() << LHSExpr->getSourceRange()
             << RHSExpr->getSourceRange();
    if (const FieldDecl *F =
            getSelfAssignmentClassMemberCandidate(RHSDeclRef->getDecl()))
      D << 1 << F
        << FixItHint::CreateInsertion(LHSDeclRef->getBeginLoc(), "this->");
    else
      D << 0;
    return;
  }

  // Member variables require a different approach to check for self moves.
  // MemberExpr's are the same if every nested MemberExpr refers to the same
  // Decl and that the base Expr's are DeclRefExpr's with the same Decl or
  // the base Expr's are CXXThisExpr's.
  const Expr *LHSBase = LHSExpr;
  const Expr *RHSBase = RHSExpr;
  const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
  const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
  if (!LHSME || !RHSME)
    return;

  while (LHSME && RHSME) {
    if (LHSME->getMemberDecl()->getCanonicalDecl() !=
        RHSME->getMemberDecl()->getCanonicalDecl())
      return;

    LHSBase = LHSME->getBase();
    RHSBase = RHSME->getBase();
    LHSME = dyn_cast<MemberExpr>(LHSBase);
    RHSME = dyn_cast<MemberExpr>(RHSBase);
  }

  LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
  RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
  if (LHSDeclRef && RHSDeclRef) {
    if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
      return;
    if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
        RHSDeclRef->getDecl()->getCanonicalDecl())
      return;

    Diag(OpLoc, diag::warn_self_move)
        << LHSExpr->getType() << 0 << LHSExpr->getSourceRange()
        << RHSExpr->getSourceRange();
    return;
  }

  if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
    Diag(OpLoc, diag::warn_self_move)
        << LHSExpr->getType() << 0 << LHSExpr->getSourceRange()
        << RHSExpr->getSourceRange();
}
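// For illustration only (hypothetical user code):
//
//   v = std::move(v);              // DeclRefExpr case above
//   s.field = std::move(s.field);  // MemberExpr case: same decls, same base
//
// Both forms reach diag::warn_self_move.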
//===--- Layout compatibility ----------------------------------------------//

static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);

/// Check if two enumeration types are layout-compatible.
static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
  // C++11 [dcl.enum] p8:
  // Two enumeration types are layout-compatible if they have the same
  // underlying type.
  return ED1->isComplete() && ED2->isComplete() &&
         C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
}

/// Check if two fields are layout-compatible.
static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
                               FieldDecl *Field2) {
  if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
    return false;

  if (Field1->isBitField() != Field2->isBitField())
    return false;

  if (Field1->isBitField()) {
    // Make sure that the bit-fields are the same length.
    unsigned Bits1 = Field1->getBitWidthValue(C);
    unsigned Bits2 = Field2->getBitWidthValue(C);

    if (Bits1 != Bits2)
      return false;
  }

  return true;
}

/// Check if two standard-layout structs are layout-compatible.
/// (C++11 [class.mem] p17)
static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
                                     RecordDecl *RD2) {
  // If both records are C++ classes, check that base classes match.
  if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) {
    // If one of records is a CXXRecordDecl we are in C++ mode,
    // thus the other one is a CXXRecordDecl, too.
    const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2);
    // Check number of base classes.
    if (D1CXX->getNumBases() != D2CXX->getNumBases())
      return false;

    // Check the base classes.
    for (CXXRecordDecl::base_class_const_iterator
               Base1 = D1CXX->bases_begin(),
           BaseEnd1 = D1CXX->bases_end(),
              Base2 = D2CXX->bases_begin();
         Base1 != BaseEnd1;
         ++Base1, ++Base2) {
      if (!isLayoutCompatible(C, Base1->getType(), Base2->getType()))
        return false;
    }
  } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) {
    // If only RD2 is a C++ class, it should have zero base classes.
    if (D2CXX->getNumBases() > 0)
      return false;
  }

  // Check the fields.
  RecordDecl::field_iterator Field2 = RD2->field_begin(),
                             Field2End = RD2->field_end(),
                             Field1 = RD1->field_begin(),
                             Field1End = RD1->field_end();
  for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
    if (!isLayoutCompatible(C, *Field1, *Field2))
      return false;
  }
  if (Field1 != Field1End || Field2 != Field2End)
    return false;

  return true;
}

/// Check if two standard-layout unions are layout-compatible.
/// (C++11 [class.mem] p18)
static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
                                    RecordDecl *RD2) {
  llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
  for (auto *Field2 : RD2->fields())
    UnmatchedFields.insert(Field2);

  for (auto *Field1 : RD1->fields()) {
    llvm::SmallPtrSet<FieldDecl *, 8>::iterator
        I = UnmatchedFields.begin(),
        E = UnmatchedFields.end();

    for ( ; I != E; ++I) {
      if (isLayoutCompatible(C, Field1, *I)) {
        bool Result = UnmatchedFields.erase(*I);
        (void)Result;
        assert(Result);
        break;
      }
    }
    if (I == E)
      return false;
  }

  return UnmatchedFields.empty();
}

static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
                               RecordDecl *RD2) {
  if (RD1->isUnion() != RD2->isUnion())
    return false;

  if (RD1->isUnion())
    return isLayoutCompatibleUnion(C, RD1, RD2);

  return isLayoutCompatibleStruct(C, RD1, RD2);
}

/// Check if two types are layout-compatible in C++11 sense.
static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
  if (T1.isNull() || T2.isNull())
    return false;

  // C++11 [basic.types] p11:
  // If two types T1 and T2 are the same type, then T1 and T2 are
  // layout-compatible types.
  if (C.hasSameType(T1, T2))
    return true;

  T1 = T1.getCanonicalType().getUnqualifiedType();
  T2 = T2.getCanonicalType().getUnqualifiedType();

  const Type::TypeClass TC1 = T1->getTypeClass();
  const Type::TypeClass TC2 = T2->getTypeClass();

  if (TC1 != TC2)
    return false;

  if (TC1 == Type::Enum) {
    return isLayoutCompatible(C,
                              cast<EnumType>(T1)->getDecl(),
                              cast<EnumType>(T2)->getDecl());
  } else if (TC1 == Type::Record) {
    if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
      return false;

    return isLayoutCompatible(C,
                              cast<RecordType>(T1)->getDecl(),
                              cast<RecordType>(T2)->getDecl());
  }

  return false;
}
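// Illustrative example (hypothetical types): two standard-layout structs such
// as
//
//   struct A { int x; char c; };
//   struct B { int y; char d; };
//
// are layout-compatible because their fields pair up with layout-compatible
// types; giving the two a bit-field of different widths would make
// isLayoutCompatible() return false.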
//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//

/// Given a type tag expression find the type tag itself.
///
/// \param TypeExpr Type tag expression, as it appears in user's code.
///
/// \param VD Declaration of an identifier that appears in a type tag.
///
/// \param MagicValue Type tag magic value.
///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// constant context.
static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
                            const ValueDecl **VD, uint64_t *MagicValue,
                            bool isConstantEvaluated) {
  while (true) {
    if (!TypeExpr)
      return false;

    TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();

    switch (TypeExpr->getStmtClass()) {
    case Stmt::UnaryOperatorClass: {
      const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
      if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
        TypeExpr = UO->getSubExpr();
        continue;
      }
      return false;
    }

    case Stmt::DeclRefExprClass: {
      const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
      *VD = DRE->getDecl();
      return true;
    }

    case Stmt::IntegerLiteralClass: {
      const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
      llvm::APInt MagicValueAPInt = IL->getValue();
      if (MagicValueAPInt.getActiveBits() <= 64) {
        *MagicValue = MagicValueAPInt.getZExtValue();
        return true;
      }
      return false;
    }

    case Stmt::BinaryConditionalOperatorClass:
    case Stmt::ConditionalOperatorClass: {
      const AbstractConditionalOperator *ACO =
          cast<AbstractConditionalOperator>(TypeExpr);
      bool Result;
      if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
                                                     isConstantEvaluated)) {
        if (Result)
          TypeExpr = ACO->getTrueExpr();
        else
          TypeExpr = ACO->getFalseExpr();
        continue;
      }
      return false;
    }

    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
      if (BO->getOpcode() == BO_Comma) {
        TypeExpr = BO->getRHS();
        continue;
      }
      return false;
    }

    default:
      return false;
    }
  }
}

/// Retrieve the C type corresponding to type tag TypeExpr.
///
/// \param TypeExpr Expression that specifies a type tag.
///
/// \param MagicValues Registered magic values.
///
/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
///        kind.
///
/// \param TypeInfo Information about the corresponding C type.
///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// constant context.
///
/// \returns true if the corresponding C type was found.
static bool GetMatchingCType(
    const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
    const ASTContext &Ctx,
    const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
        *MagicValues,
    bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
    bool isConstantEvaluated) {
  FoundWrongKind = false;

  // Variable declaration that has type_tag_for_datatype attribute.
  const ValueDecl *VD = nullptr;

  uint64_t MagicValue;

  if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
    return false;

  if (VD) {
    if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
      if (I->getArgumentKind() != ArgumentKind) {
        FoundWrongKind = true;
        return false;
      }
      TypeInfo.Type = I->getMatchingCType();
      TypeInfo.LayoutCompatible = I->getLayoutCompatible();
      TypeInfo.MustBeNull = I->getMustBeNull();
      return true;
    }
    return false;
  }

  if (!MagicValues)
    return false;

  llvm::DenseMap<Sema::TypeTagMagicValue,
                 Sema::TypeTagData>::const_iterator I =
      MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
  if (I == MagicValues->end())
    return false;

  TypeInfo = I->second;
  return true;
}

void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                      uint64_t MagicValue, QualType Type,
                                      bool LayoutCompatible,
                                      bool MustBeNull) {
  if (!TypeTagForDatatypeMagicValues)
    TypeTagForDatatypeMagicValues.reset(
        new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);

  TypeTagMagicValue Magic(ArgumentKind, MagicValue);
  (*TypeTagForDatatypeMagicValues)[Magic] =
      TypeTagData(Type, LayoutCompatible, MustBeNull);
}

static bool IsSameCharType(QualType T1, QualType T2) {
  const BuiltinType *BT1 = T1->getAs<BuiltinType>();
  if (!BT1)
    return false;

  const BuiltinType *BT2 = T2->getAs<BuiltinType>();
  if (!BT2)
    return false;

  BuiltinType::Kind T1Kind = BT1->getKind();
  BuiltinType::Kind T2Kind = BT2->getKind();

  return (T1Kind == BuiltinType::SChar  && T2Kind == BuiltinType::Char_S) ||
         (T1Kind == BuiltinType::UChar  && T2Kind == BuiltinType::Char_U) ||
         (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
         (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
}
void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                    const ArrayRef<const Expr *> ExprArgs,
                                    SourceLocation CallSiteLoc) {
  const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
  bool IsPointerAttr = Attr->getIsPointer();

  // Retrieve the argument representing the 'type_tag'.
  unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
  if (TypeTagIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 0 << Attr->getTypeTagIdx().getSourceIndex();
    return;
  }
  const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
  bool FoundWrongKind;
  TypeTagData TypeInfo;
  if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
                        TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
                        TypeInfo, isConstantEvaluated())) {
    if (FoundWrongKind)
      Diag(TypeTagExpr->getExprLoc(),
           diag::warn_type_tag_for_datatype_wrong_kind)
          << TypeTagExpr->getSourceRange();
    return;
  }

  // Retrieve the argument representing the 'arg_idx'.
  unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
  if (ArgumentIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 1 << Attr->getArgumentIdx().getSourceIndex();
    return;
  }
  const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
  if (IsPointerAttr) {
    // Skip implicit cast of pointer to `void *' (as a function argument).
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
      if (ICE->getType()->isVoidPointerType() &&
          ICE->getCastKind() == CK_BitCast)
        ArgumentExpr = ICE->getSubExpr();
  }
  QualType ArgumentType = ArgumentExpr->getType();

  // Passing a `void*' pointer shouldn't trigger a warning.
  if (IsPointerAttr && ArgumentType->isVoidPointerType())
    return;

  if (TypeInfo.MustBeNull) {
    // Type tag with matching void type requires a null pointer.
    if (!ArgumentExpr->isNullPointerConstant(Context,
                                             Expr::NPC_ValueDependentIsNotNull)) {
      Diag(ArgumentExpr->getExprLoc(),
           diag::warn_type_safety_null_pointer_required)
          << ArgumentKind->getName()
          << ArgumentExpr->getSourceRange()
          << TypeTagExpr->getSourceRange();
    }
    return;
  }

  QualType RequiredType = TypeInfo.Type;
  if (IsPointerAttr)
    RequiredType = Context.getPointerType(RequiredType);

  bool mismatch = false;
  if (!TypeInfo.LayoutCompatible) {
    mismatch = !Context.hasSameType(ArgumentType, RequiredType);

    // C++11 [basic.fundamental] p1:
    // Plain char, signed char, and unsigned char are three distinct types.
    //
    // But we treat plain `char' as equivalent to `signed char' or `unsigned
    // char' depending on the current char signedness mode.
    if (mismatch)
      if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
                                           RequiredType->getPointeeType())) ||
          (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
        mismatch = false;
  } else if (IsPointerAttr)
    mismatch = !isLayoutCompatible(Context,
                                   ArgumentType->getPointeeType(),
                                   RequiredType->getPointeeType());
  else
    mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);

  if (mismatch)
    Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
        << ArgumentType << ArgumentKind
        << TypeInfo.LayoutCompatible << RequiredType
        << ArgumentExpr->getSourceRange()
        << TypeTagExpr->getSourceRange();
}
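// A sketch of the attribute usage this check supports, loosely modeled on the
// attribute documentation (all names below are hypothetical):
//
//   extern struct tag_int_t tag_int
//       __attribute__((type_tag_for_datatype(my_api, int)));
//   void my_send(void *buf, void *tag)
//       __attribute__((pointer_with_type_tag(my_api, 1, 2)));
//
//   my_send(&some_int, &tag_int);    // OK
//   my_send(&some_float, &tag_int);  // type-safety mismatch warning above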
void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                         CharUnits Alignment) {
  MisalignedMembers.emplace_back(E, RD, MD, Alignment);
}

void Sema::DiagnoseMisalignedMembers() {
  for (MisalignedMember &m : MisalignedMembers) {
    const NamedDecl *ND = m.RD;
    if (ND->getName().empty()) {
      if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
        ND = TD;
    }
    Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
        << m.MD << ND << m.E->getSourceRange();
  }
  MisalignedMembers.clear();
}

void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
  E = E->IgnoreParens();
  if (!T->isPointerType() && !T->isIntegerType() && !T->isDependentType())
    return;
  if (isa<UnaryOperator>(E) &&
      cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
    auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
    if (isa<MemberExpr>(Op)) {
      auto *MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
      if (MA != MisalignedMembers.end() &&
          (T->isDependentType() || T->isIntegerType() ||
           (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
                                   Context.getTypeAlignInChars(
                                       T->getPointeeType()) <= MA->Alignment))))
        MisalignedMembers.erase(MA);
    }
  }
}
void Sema::RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action) {
  const auto *ME = dyn_cast<MemberExpr>(E);
  if (!ME)
    return;

  // No need to check expressions with an __unaligned-qualified type.
  if (E->getType().getQualifiers().hasUnaligned())
    return;

  // For a chain of MemberExpr like "a.b.c.d" this list
  // will keep FieldDecl's like [d, c, b].
  SmallVector<FieldDecl *, 4> ReverseMemberChain;
  const MemberExpr *TopME = nullptr;
  bool AnyIsPacked = false;
  do {
    QualType BaseType = ME->getBase()->getType();
    if (BaseType->isDependentType())
      return;
    if (ME->isArrow())
      BaseType = BaseType->getPointeeType();
    RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
    if (RD->isInvalidDecl())
      return;

    ValueDecl *MD = ME->getMemberDecl();
    auto *FD = dyn_cast<FieldDecl>(MD);
    // We do not care about non-data members.
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize offset of the whole access.
  CharUnits Offset;
  for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain))
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD));

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // It may fulfill the offset it but the effective alignment may still be
      // lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit of this.
    // Intuitively, watching the chain of member expressions from right to
    // left, we start with the required alignment (as required by the field
    // type) but some packed attribute in that chain has reduced the alignment.
    // It may happen that another packed structure increases it again. But if
    // we are here such increase has not been enough. So pointing the first
    // FieldDecl that either is packed or else its RecordDecl is,
    // seems reasonable.
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}

void Sema::CheckAddressOfPackedMember(Expr *rhs) {
  using namespace std::placeholders;

  RefersToMemberWithReducedAlignment(
      rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
                     _2, _3, _4));
}
*TheCall
) {
18644 if (checkArgCount(*this, TheCall
, 1))
18647 ExprResult A
= UsualUnaryConversions(TheCall
->getArg(0));
18651 TheCall
->setArg(0, A
.get());
18652 QualType TyA
= A
.get()->getType();
18654 if (checkMathBuiltinElementType(*this, A
.get()->getBeginLoc(), TyA
))
18657 TheCall
->setType(TyA
);
18661 bool Sema::SemaBuiltinElementwiseMath(CallExpr
*TheCall
) {
18662 if (checkArgCount(*this, TheCall
, 2))
18665 ExprResult A
= TheCall
->getArg(0);
18666 ExprResult B
= TheCall
->getArg(1);
18667 // Do standard promotions between the two arguments, returning their common
18670 UsualArithmeticConversions(A
, B
, TheCall
->getExprLoc(), ACK_Comparison
);
18671 if (A
.isInvalid() || B
.isInvalid())
18674 QualType TyA
= A
.get()->getType();
18675 QualType TyB
= B
.get()->getType();
18677 if (Res
.isNull() || TyA
.getCanonicalType() != TyB
.getCanonicalType())
18678 return Diag(A
.get()->getBeginLoc(),
18679 diag::err_typecheck_call_different_arg_types
)
18682 if (checkMathBuiltinElementType(*this, A
.get()->getBeginLoc(), TyA
))
18685 TheCall
->setArg(0, A
.get());
18686 TheCall
->setArg(1, B
.get());
18687 TheCall
->setType(Res
);
18691 bool Sema::SemaBuiltinElementwiseTernaryMath(CallExpr
*TheCall
) {
18692 if (checkArgCount(*this, TheCall
, 3))
18696 for (int I
= 0; I
< 3; ++I
) {
18697 ExprResult Converted
= UsualUnaryConversions(TheCall
->getArg(I
));
18698 if (Converted
.isInvalid())
18700 Args
[I
] = Converted
.get();
18703 int ArgOrdinal
= 1;
18704 for (Expr
*Arg
: Args
) {
18705 if (checkFPMathBuiltinElementType(*this, Arg
->getBeginLoc(), Arg
->getType(),
18710 for (int I
= 1; I
< 3; ++I
) {
18711 if (Args
[0]->getType().getCanonicalType() !=
18712 Args
[I
]->getType().getCanonicalType()) {
18713 return Diag(Args
[0]->getBeginLoc(),
18714 diag::err_typecheck_call_different_arg_types
)
18715 << Args
[0]->getType() << Args
[I
]->getType();
18718 TheCall
->setArg(I
, Args
[I
]);
18721 TheCall
->setType(Args
[0]->getType());
18725 bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr
*TheCall
) {
18726 if (checkArgCount(*this, TheCall
, 1))
18729 ExprResult A
= UsualUnaryConversions(TheCall
->getArg(0));
18733 TheCall
->setArg(0, A
.get());
18737 bool Sema::SemaBuiltinNonDeterministicValue(CallExpr
*TheCall
) {
18738 if (checkArgCount(*this, TheCall
, 1))
18741 ExprResult Arg
= TheCall
->getArg(0);
18742 QualType TyArg
= Arg
.get()->getType();
18744 if (!TyArg
->isBuiltinType() && !TyArg
->isVectorType())
18745 return Diag(TheCall
->getArg(0)->getBeginLoc(), diag::err_builtin_invalid_arg_type
)
18746 << 1 << /*vector, integer or floating point ty*/ 0 << TyArg
;
18748 TheCall
->setType(TyArg
);
18752 ExprResult
Sema::SemaBuiltinMatrixTranspose(CallExpr
*TheCall
,
18753 ExprResult CallResult
) {
18754 if (checkArgCount(*this, TheCall
, 1))
18755 return ExprError();
18757 ExprResult MatrixArg
= DefaultLvalueConversion(TheCall
->getArg(0));
18758 if (MatrixArg
.isInvalid())
18760 Expr
*Matrix
= MatrixArg
.get();
18762 auto *MType
= Matrix
->getType()->getAs
<ConstantMatrixType
>();
18764 Diag(Matrix
->getBeginLoc(), diag::err_builtin_invalid_arg_type
)
18765 << 1 << /* matrix ty*/ 1 << Matrix
->getType();
18766 return ExprError();
18769 // Create returned matrix type by swapping rows and columns of the argument
18771 QualType ResultType
= Context
.getConstantMatrixType(
18772 MType
->getElementType(), MType
->getNumColumns(), MType
->getNumRows());
18774 // Change the return type to the type of the returned matrix.
18775 TheCall
->setType(ResultType
);
18777 // Update call argument to use the possibly converted matrix argument.
18778 TheCall
->setArg(0, Matrix
);
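// For illustration only (hypothetical user code built with -fenable-matrix):
//
//   typedef float m4x2_t __attribute__((matrix_type(4, 2)));
//   typedef float m2x4_t __attribute__((matrix_type(2, 4)));
//   m2x4_t t(m4x2_t m) { return __builtin_matrix_transpose(m); }
//
// The result type swaps rows and columns, matching the
// getConstantMatrixType() call above.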

// Get and verify the matrix dimensions.
static std::optional<unsigned>
getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
  SourceLocation ErrorPos;
  std::optional<llvm::APSInt> Value =
      Expr->getIntegerConstantExpr(S.Context, &ErrorPos);
  if (!Value) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
        << Name;
    return {};
  }
  uint64_t Dim = Value->getZExtValue();
  if (!ConstantMatrixType::isDimensionValid(Dim)) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
        << Name << ConstantMatrixType::getMaxElementsPerDimension();
    return {};
  }
  return Dim;
}

ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
                                                  ExprResult CallResult) {
  if (!getLangOpts().MatrixTypes) {
    Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
    return ExprError();
  }

  if (checkArgCount(*this, TheCall, 4))
    return ExprError();

  unsigned PtrArgIdx = 0;
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *RowsExpr = TheCall->getArg(1);
  Expr *ColumnsExpr = TheCall->getArg(2);
  Expr *StrideExpr = TheCall->getArg(3);

  bool ArgError = false;

  // Check pointer argument.
  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(0, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return CallResult;
    }
  }

  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  QualType ElementTy;
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    ElementTy = PtrTy->getPointeeType().getUnqualifiedType();

    if (!ConstantMatrixType::isValidElementType(ElementTy)) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << PtrArgIdx + 1 << /* pointer to element ty*/ 2
          << PtrExpr->getType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the expression to size_t.
  auto ApplyArgumentConversions = [this](Expr *E) {
    ExprResult Conv = DefaultLvalueConversion(E);
    if (Conv.isInvalid())
      return Conv;

    return tryConvertExprToType(Conv.get(), Context.getSizeType());
  };

  // Apply conversion to row and column expressions.
  ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
  if (!RowsConv.isInvalid()) {
    RowsExpr = RowsConv.get();
    TheCall->setArg(1, RowsExpr);
  } else
    RowsExpr = nullptr;

  ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
  if (!ColumnsConv.isInvalid()) {
    ColumnsExpr = ColumnsConv.get();
    TheCall->setArg(2, ColumnsExpr);
  } else
    ColumnsExpr = nullptr;

  // If any part of the result matrix type is still pending, just use
  // Context.DependentTy, until all parts are resolved.
  if ((RowsExpr && RowsExpr->isTypeDependent()) ||
      (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
    TheCall->setType(Context.DependentTy);
    return CallResult;
  }

  // Check row and column dimensions.
  std::optional<unsigned> MaybeRows;
  if (RowsExpr)
    MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);

  std::optional<unsigned> MaybeColumns;
  if (ColumnsExpr)
    MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);

  // Check stride argument.
  ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
  if (StrideConv.isInvalid())
    return ExprError();
  StrideExpr = StrideConv.get();
  TheCall->setArg(3, StrideExpr);

  if (MaybeRows) {
    if (std::optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < *MaybeRows) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError || !MaybeRows || !MaybeColumns)
    return ExprError();

  TheCall->setType(
      Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
  return CallResult;
}
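
// Example (illustrative): the row and column arguments must be integer
// constants forming a valid matrix dimension, and a constant stride must be
// at least the number of rows.
//
//   typedef double m4x4_t __attribute__((matrix_type(4, 4)));
//   m4x4_t load4x4(double *p, unsigned long stride) {
//     return __builtin_matrix_column_major_load(p, 4, 4, stride);
//   }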

ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
                                                   ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 3))
    return ExprError();

  unsigned PtrArgIdx = 1;
  Expr *MatrixExpr = TheCall->getArg(0);
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *StrideExpr = TheCall->getArg(2);

  bool ArgError = false;

  {
    ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
    if (MatrixConv.isInvalid())
      return MatrixConv;
    MatrixExpr = MatrixConv.get();
    TheCall->setArg(0, MatrixExpr);
  }
  if (MatrixExpr->isTypeDependent()) {
    TheCall->setType(Context.DependentTy);
    return CallResult;
  }

  auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
  if (!MatrixTy) {
    Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /*matrix ty */ 1 << MatrixExpr->getType();
    ArgError = true;
  }

  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(1, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return CallResult;
    }
  }

  // Check pointer argument.
  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    QualType ElementTy = PtrTy->getPointeeType();
    if (ElementTy.isConstQualified()) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
      ArgError = true;
    }
    ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
    if (MatrixTy &&
        !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
      Diag(PtrExpr->getBeginLoc(),
           diag::err_builtin_matrix_pointer_arg_mismatch)
          << ElementTy << MatrixTy->getElementType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the stride expression to
  // size_t.
  {
    ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
    if (StrideConv.isInvalid())
      return StrideConv;

    StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
    if (StrideConv.isInvalid())
      return StrideConv;
    StrideExpr = StrideConv.get();
    TheCall->setArg(2, StrideExpr);
  }

  // Check stride argument.
  if (MatrixTy) {
    if (std::optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < MatrixTy->getNumRows()) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError)
    return ExprError();

  return CallResult;
}
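
// Example (illustrative): the pointee type must be non-const and match the
// matrix element type, and a constant stride must be at least the number of
// rows.
//
//   typedef double m4x4_t __attribute__((matrix_type(4, 4)));
//   void store4x4(m4x4_t m, double *p, unsigned long stride) {
//     __builtin_matrix_column_major_store(m, p, stride);
//   }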

/// Checks that the argument at the given index is a WebAssembly table and, if
/// it is, sets ElTy to the element type.
static bool CheckWasmBuiltinArgIsTable(Sema &S, CallExpr *E, unsigned ArgIndex,
                                       QualType &ElTy) {
  Expr *ArgExpr = E->getArg(ArgIndex);
  const auto *ATy = dyn_cast<ArrayType>(ArgExpr->getType());
  if (!ATy || !ATy->getElementType().isWebAssemblyReferenceType()) {
    return S.Diag(ArgExpr->getBeginLoc(),
                  diag::err_wasm_builtin_arg_must_be_table_type)
           << ArgIndex + 1 << ArgExpr->getSourceRange();
  }
  ElTy = ATy->getElementType();
  return false;
}

/// Checks that the argument at the given index is an integer.
static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E,
                                         unsigned ArgIndex) {
  Expr *ArgExpr = E->getArg(ArgIndex);
  if (!ArgExpr->getType()->isIntegerType()) {
    return S.Diag(ArgExpr->getBeginLoc(),
                  diag::err_wasm_builtin_arg_must_be_integer_type)
           << ArgIndex + 1 << ArgExpr->getSourceRange();
  }
  return false;
}

/// Check that the first argument is a WebAssembly table, and the second is an
/// integer index into the table.
bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  QualType ElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
    return true;

  if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
    return true;

  // If all is well, we set the type of TheCall to be the type of the
  // element of the table.
  // i.e. a table.get on an externref table has type externref,
  // or whatever the type of the table element is.
  TheCall->setType(ElTy);

  return false;
}
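
// Example (illustrative; assumes the zero-length __externref_t array spelling
// used for WebAssembly tables):
//
//   static __externref_t table[0];
//   __externref_t get(int i) { return __builtin_wasm_table_get(table, i); }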

/// Check that the first argument is a WebAssembly table, the second is an
/// integer index into the table, and the third is the reference value to set
/// into the table.
bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 3))
    return true;

  QualType ElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
    return true;

  if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
    return true;

  if (!Context.hasSameType(ElTy, TheCall->getArg(2)->getType()))
    return true;

  return false;
}
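
// Example (illustrative): the stored value must have the table's element type.
//
//   static __externref_t table[0];
//   void set(int i, __externref_t r) { __builtin_wasm_table_set(table, i, r); }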

/// Check that the argument is a WebAssembly table.
bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  QualType ElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
    return true;

  return false;
}
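
// Example (illustrative; the builtin's exact integer return type comes from
// its declaration):
//
//   static __externref_t table[0];
//   unsigned long count(void) { return __builtin_wasm_table_size(table); }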

/// Check that the first argument is a WebAssembly table, the second is the
/// value to use for new elements (of a type matching the table type), the
/// third value is an integer.
bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 3))
    return true;

  QualType ElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
    return true;

  Expr *NewElemArg = TheCall->getArg(1);
  if (!Context.hasSameType(ElTy, NewElemArg->getType())) {
    return Diag(NewElemArg->getBeginLoc(),
                diag::err_wasm_builtin_arg_must_match_table_element_type)
           << 2 << 1 << NewElemArg->getSourceRange();
  }

  if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 2))
    return true;

  return false;
}
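
// Example (illustrative): the second argument supplies the value for the new
// slots and the third is the number of slots to add.
//
//   static __externref_t table[0];
//   void grow(__externref_t fill, int n) {
//     __builtin_wasm_table_grow(table, fill, n);
//   }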

/// Check that the first argument is a WebAssembly table, the second is an
/// integer, the third is the value to use to fill the table (of a type
/// matching the table type), and the fourth is an integer.
bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 4))
    return true;

  QualType ElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
    return true;

  if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
    return true;

  Expr *NewElemArg = TheCall->getArg(2);
  if (!Context.hasSameType(ElTy, NewElemArg->getType())) {
    return Diag(NewElemArg->getBeginLoc(),
                diag::err_wasm_builtin_arg_must_match_table_element_type)
           << 3 << 1 << NewElemArg->getSourceRange();
  }

  if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 3))
    return true;

  return false;
}
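
// Example (illustrative): fill a run of slots with one reference value.
//
//   static __externref_t table[0];
//   void fill(int start, __externref_t val, int count) {
//     __builtin_wasm_table_fill(table, start, val, count);
//   }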

/// Check that the first argument is a WebAssembly table, the second is also a
/// WebAssembly table (of the same element type), and the third to fifth
/// arguments are integers.
bool Sema::BuiltinWasmTableCopy(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 5))
    return true;

  QualType XElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, XElTy))
    return true;

  QualType YElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 1, YElTy))
    return true;

  Expr *TableYArg = TheCall->getArg(1);
  if (!Context.hasSameType(XElTy, YElTy)) {
    return Diag(TableYArg->getBeginLoc(),
                diag::err_wasm_builtin_arg_must_match_table_element_type)
           << 2 << 1 << TableYArg->getSourceRange();
  }

  for (int I = 2; I <= 4; I++) {
    if (CheckWasmBuiltinArgIsInteger(*this, TheCall, I))
      return true;
  }

  return false;
}
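
// Example (illustrative; reading the three integers as destination index,
// source index, and count follows the wasm table.copy instruction and is an
// assumption here):
//
//   static __externref_t dst[0];
//   static __externref_t src[0];
//   void copy(int d, int s, int n) {
//     __builtin_wasm_table_copy(dst, src, d, s, n);
//   }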

/// \brief Enforce the bounds of a TCB
/// CheckTCBEnforcement - Enforces that every function in a named TCB only
/// directly calls other functions in the same TCB, as marked by the
/// enforce_tcb and enforce_tcb_leaf attributes.
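///
/// Example (illustrative; the diagnostic wording is paraphrased): a function
/// in TCB "foo" that directly calls a function outside that TCB is diagnosed
/// at the call site.
///
/// \code
///   void helper(void);                                    // not in any TCB
///   __attribute__((enforce_tcb("foo"))) void entry(void) {
///     helper(); // warning: violation of TCB "foo"
///   }
/// \endcode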
void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc,
                               const NamedDecl *Callee) {
  // This warning does not make sense in code that has no runtime behavior.
  if (isUnevaluatedContext())
    return;

  const NamedDecl *Caller = getCurFunctionOrMethodDecl();

  if (!Caller || !Caller->hasAttr<EnforceTCBAttr>())
    return;

  // Search through the enforce_tcb and enforce_tcb_leaf attributes to find
  // all TCBs the callee is a part of.
  llvm::StringSet<> CalleeTCBs;
  for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>())
    CalleeTCBs.insert(A->getTCBName());
  for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>())
    CalleeTCBs.insert(A->getTCBName());

  // Go through the TCBs the caller is a part of and emit warnings if Caller
  // is in a TCB that the Callee is not.
  for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) {
    StringRef CallerTCB = A->getTCBName();
    if (CalleeTCBs.count(CallerTCB) == 0) {
      this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation)
          << Callee << CallerTCB;
    }
  }
}