// Pasted from gitweb view of:
//   llvm-project.git / clang / lib / Sema / SemaChecking.cpp
//   blob 1842a783dc29aaa9b413cce30928b189271ab7af
1 //===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements extra semantic analysis beyond what is enforced
10 // by the C type system.
12 //===----------------------------------------------------------------------===//
14 #include "clang/AST/APValue.h"
15 #include "clang/AST/ASTContext.h"
16 #include "clang/AST/Attr.h"
17 #include "clang/AST/AttrIterator.h"
18 #include "clang/AST/CharUnits.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/DeclBase.h"
21 #include "clang/AST/DeclCXX.h"
22 #include "clang/AST/DeclObjC.h"
23 #include "clang/AST/DeclarationName.h"
24 #include "clang/AST/EvaluatedExprVisitor.h"
25 #include "clang/AST/Expr.h"
26 #include "clang/AST/ExprCXX.h"
27 #include "clang/AST/ExprObjC.h"
28 #include "clang/AST/ExprOpenMP.h"
29 #include "clang/AST/FormatString.h"
30 #include "clang/AST/NSAPI.h"
31 #include "clang/AST/NonTrivialTypeVisitor.h"
32 #include "clang/AST/OperationKinds.h"
33 #include "clang/AST/RecordLayout.h"
34 #include "clang/AST/Stmt.h"
35 #include "clang/AST/TemplateBase.h"
36 #include "clang/AST/Type.h"
37 #include "clang/AST/TypeLoc.h"
38 #include "clang/AST/UnresolvedSet.h"
39 #include "clang/Basic/AddressSpaces.h"
40 #include "clang/Basic/CharInfo.h"
41 #include "clang/Basic/Diagnostic.h"
42 #include "clang/Basic/IdentifierTable.h"
43 #include "clang/Basic/LLVM.h"
44 #include "clang/Basic/LangOptions.h"
45 #include "clang/Basic/OpenCLOptions.h"
46 #include "clang/Basic/OperatorKinds.h"
47 #include "clang/Basic/PartialDiagnostic.h"
48 #include "clang/Basic/SourceLocation.h"
49 #include "clang/Basic/SourceManager.h"
50 #include "clang/Basic/Specifiers.h"
51 #include "clang/Basic/SyncScope.h"
52 #include "clang/Basic/TargetBuiltins.h"
53 #include "clang/Basic/TargetCXXABI.h"
54 #include "clang/Basic/TargetInfo.h"
55 #include "clang/Basic/TypeTraits.h"
56 #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
57 #include "clang/Sema/Initialization.h"
58 #include "clang/Sema/Lookup.h"
59 #include "clang/Sema/Ownership.h"
60 #include "clang/Sema/Scope.h"
61 #include "clang/Sema/ScopeInfo.h"
62 #include "clang/Sema/Sema.h"
63 #include "clang/Sema/SemaInternal.h"
64 #include "llvm/ADT/APFloat.h"
65 #include "llvm/ADT/APInt.h"
66 #include "llvm/ADT/APSInt.h"
67 #include "llvm/ADT/ArrayRef.h"
68 #include "llvm/ADT/DenseMap.h"
69 #include "llvm/ADT/FoldingSet.h"
70 #include "llvm/ADT/STLExtras.h"
71 #include "llvm/ADT/SmallBitVector.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallString.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/StringExtras.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/StringSet.h"
78 #include "llvm/ADT/StringSwitch.h"
79 #include "llvm/Support/AtomicOrdering.h"
80 #include "llvm/Support/Casting.h"
81 #include "llvm/Support/Compiler.h"
82 #include "llvm/Support/ConvertUTF.h"
83 #include "llvm/Support/ErrorHandling.h"
84 #include "llvm/Support/Format.h"
85 #include "llvm/Support/Locale.h"
86 #include "llvm/Support/MathExtras.h"
87 #include "llvm/Support/SaveAndRestore.h"
88 #include "llvm/Support/raw_ostream.h"
89 #include "llvm/TargetParser/RISCVTargetParser.h"
90 #include "llvm/TargetParser/Triple.h"
91 #include <algorithm>
92 #include <bitset>
93 #include <cassert>
94 #include <cctype>
95 #include <cstddef>
96 #include <cstdint>
97 #include <functional>
98 #include <limits>
99 #include <optional>
100 #include <string>
101 #include <tuple>
102 #include <utility>
104 using namespace clang;
105 using namespace sema;
107 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
108 unsigned ByteNo) const {
109 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
110 Context.getTargetInfo());
113 static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
114 Sema::FormatArgumentPassingKind B) {
115 return (A << 8) | B;
118 /// Checks that a call expression's argument count is at least the desired
119 /// number. This is useful when doing custom type-checking on a variadic
120 /// function. Returns true on error.
121 static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
122 unsigned MinArgCount) {
123 unsigned ArgCount = Call->getNumArgs();
124 if (ArgCount >= MinArgCount)
125 return false;
127 return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
128 << 0 /*function call*/ << MinArgCount << ArgCount
129 << /*is non object*/ 0 << Call->getSourceRange();
132 /// Checks that a call expression's argument count is at most the desired
133 /// number. This is useful when doing custom type-checking on a variadic
134 /// function. Returns true on error.
135 static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) {
136 unsigned ArgCount = Call->getNumArgs();
137 if (ArgCount <= MaxArgCount)
138 return false;
139 return S.Diag(Call->getEndLoc(),
140 diag::err_typecheck_call_too_many_args_at_most)
141 << 0 /*function call*/ << MaxArgCount << ArgCount
142 << /*is non object*/ 0 << Call->getSourceRange();
145 /// Checks that a call expression's argument count is in the desired range. This
146 /// is useful when doing custom type-checking on a variadic function. Returns
147 /// true on error.
148 static bool checkArgCountRange(Sema &S, CallExpr *Call, unsigned MinArgCount,
149 unsigned MaxArgCount) {
150 return checkArgCountAtLeast(S, Call, MinArgCount) ||
151 checkArgCountAtMost(S, Call, MaxArgCount);
154 /// Checks that a call expression's argument count is the desired number.
155 /// This is useful when doing custom type-checking. Returns true on error.
156 static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
157 unsigned ArgCount = Call->getNumArgs();
158 if (ArgCount == DesiredArgCount)
159 return false;
161 if (checkArgCountAtLeast(S, Call, DesiredArgCount))
162 return true;
163 assert(ArgCount > DesiredArgCount && "should have diagnosed this");
165 // Highlight all the excess arguments.
166 SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(),
167 Call->getArg(ArgCount - 1)->getEndLoc());
169 return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
170 << 0 /*function call*/ << DesiredArgCount << ArgCount
171 << /*is non object*/ 0 << Call->getArg(1)->getSourceRange();
174 static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) {
175 if (Value->isTypeDependent())
176 return false;
178 InitializedEntity Entity =
179 InitializedEntity::InitializeParameter(S.Context, Ty, false);
180 ExprResult Result =
181 S.PerformCopyInitialization(Entity, SourceLocation(), Value);
182 if (Result.isInvalid())
183 return true;
184 Value = Result.get();
185 return false;
/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
/// Returns true on error (diagnostic already emitted).
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
  // IgnoreParenCasts lets e.g. a parenthesized or array-decayed literal pass.
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isOrdinary()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  // The builtin yields its first argument's value, so it has that type.
  TheCall->setType(Ty);
  return false;
}
/// Check MSVC's __annotation builtin: requires at least one argument and
/// every argument must be a wide string literal. Returns true on error.
static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs() << /*is non object*/ 0
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}
238 /// Check that the argument to __builtin_addressof is a glvalue, and set the
239 /// result type to the corresponding pointer type.
240 static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
241 if (checkArgCount(S, TheCall, 1))
242 return true;
244 ExprResult Arg(TheCall->getArg(0));
245 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
246 if (ResultType.isNull())
247 return true;
249 TheCall->setArg(0, Arg.get());
250 TheCall->setType(ResultType);
251 return false;
/// Check that the argument to __builtin_function_start is a function.
/// Returns true on error (diagnostic already emitted).
static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  // Decay the function designator to a function pointer before inspecting it.
  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
  if (Arg.isInvalid())
    return true;

  TheCall->setArg(0, Arg.get());
  // The operand must be a constant reference to a function declaration.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
      Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));

  if (!FD) {
    S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
        << TheCall->getSourceRange();
    return true;
  }

  // Also verify the function's address may legally be taken (e.g. it is not
  // an unavailable/ineligible overload); Complain emits the diagnostic.
  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
                                              TheCall->getBeginLoc());
}
277 /// Check the number of arguments and set the result type to
278 /// the argument type.
279 static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
280 if (checkArgCount(S, TheCall, 1))
281 return true;
283 TheCall->setType(TheCall->getArg(0)->getType());
284 return false;
/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
/// Returns true on error; on success also converts both arguments and sets
/// the call's result type.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  // "Integer" here deliberately excludes enums and bool.
  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  // Largest representable power of two in the source type: 1 << (width - 1).
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      // Aligning to 1 is a no-op; warn but keep going.
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  // Convert both operands as if passed to parameters of their (decayed) types.
  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}
/// Type-check __builtin_{add,sub,mul}_overflow (and the C23 ckd_* macros that
/// expand to them): two integer operands plus a pointer to a non-const
/// integer result. Returns true on error.
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  std::pair<unsigned, const char *> Builtins[] = {
      { Builtin::BI__builtin_add_overflow, "ckd_add" },
      { Builtin::BI__builtin_sub_overflow, "ckd_sub" },
      { Builtin::BI__builtin_mul_overflow, "ckd_mul" },
  };

  // The call counts as a ckd_* operation only if it was spelled via the
  // matching macro name; that selects the stricter C23 operand rules below.
  bool CkdOperation = llvm::any_of(Builtins, [&](const std::pair<unsigned,
                                                 const char *> &P) {
    return BuiltinID == P.first && TheCall->getExprLoc().isMacroID() &&
           Lexer::getImmediateMacroName(TheCall->getExprLoc(),
               S.getSourceManager(), S.getLangOpts()) == P.second;
  });

  auto ValidCkdIntType = [](QualType QT) {
    // A valid checked integer type is an integer type other than a plain char,
    // bool, a bit-precise type, or an enumeration type.
    if (const auto *BT = QT.getCanonicalType()->getAs<BuiltinType>())
      return (BT->getKind() >= BuiltinType::Short &&
              BT->getKind() <= BuiltinType::Int128) || (
              BT->getKind() >= BuiltinType::UShort &&
              BT->getKind() <= BuiltinType::UInt128) ||
             BT->getKind() == BuiltinType::UChar ||
             BT->getKind() == BuiltinType::SChar;
    return false;
  };

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    bool IsValid = CkdOperation ? ValidCkdIntType(Ty) : Ty->isIntegerType();
    if (!IsValid) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << CkdOperation << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        (!ValidCkdIntType(PtrTy->getPointeeType()) && CkdOperation) ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << CkdOperation << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed bit-precise integer args larger than 128 bits to mul
  // function until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_bit_int_max_size)
               << 128;
    }
  }

  return false;
}
namespace {

/// Synthesizes the sequence of print-function calls that implement
/// __builtin_dump_struct(ptr, printfLikeFn, extraArgs...).
///
/// Each helper builds the AST for one piece of output and appends the
/// resulting expressions to `Actions`; buildWrapper() finally bundles them
/// into a single PseudoObjectExpr that replaces the builtin call.
struct BuiltinDumpStructGenerator {
  Sema &S;
  CallExpr *TheCall;
  // All synthesized expressions are attributed to the builtin call site.
  SourceLocation Loc = TheCall->getBeginLoc();
  // The print calls (and opaque values) accumulated in evaluation order.
  SmallVector<Expr *, 32> Actions;
  // Detects whether any error diagnostic fired during synthesis.
  DiagnosticErrorTrap ErrorTracker;
  PrintingPolicy Policy;

  BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
      : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
        Policy(S.Context.getPrintingPolicy()) {
    // Don't print '(anonymous at <file>:<line>)' in synthesized type names.
    Policy.AnonymousTagLocations = false;
  }

  /// Wrap Inner in an OpaqueValueExpr so it can be referenced more than once
  /// without re-evaluation; the OVE is also recorded in Actions.
  Expr *makeOpaqueValueExpr(Expr *Inner) {
    auto *OVE = new (S.Context)
        OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
                        Inner->getObjectKind(), Inner);
    Actions.push_back(OVE);
    return OVE;
  }

  /// Build a (cached) string literal expression for Str.
  Expr *getStringLiteral(llvm::StringRef Str) {
    Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Str);
    // Wrap the literal in parentheses to attach a source location.
    return new (S.Context) ParenExpr(Loc, Loc, Lit);
  }

  /// Build a call to the user-supplied print function (argument 1 of the
  /// builtin) passing the builtin's trailing arguments, then Format, then
  /// Exprs. Returns true if any error occurred.
  bool callPrintFunction(llvm::StringRef Format,
                         llvm::ArrayRef<Expr *> Exprs = {}) {
    SmallVector<Expr *, 8> Args;
    assert(TheCall->getNumArgs() >= 2);
    Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
    Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
    Args.push_back(getStringLiteral(Format));
    Args.insert(Args.end(), Exprs.begin(), Exprs.end());

    // Register a note to explain why we're performing the call.
    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall;
    Ctx.PointOfInstantiation = Loc;
    Ctx.CallArgs = Args.data();
    Ctx.NumCallArgs = Args.size();
    S.pushCodeSynthesisContext(Ctx);

    ExprResult RealCall =
        S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1),
                        TheCall->getBeginLoc(), Args, TheCall->getRParenLoc());

    S.popCodeSynthesisContext();
    if (!RealCall.isInvalid())
      Actions.push_back(RealCall.get());
    // Bail out if we've hit any errors, even if we managed to build the
    // call. We don't want to produce more than one error.
    return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
  }

  /// Build a string of Depth * Policy.Indentation spaces, or null at depth 0.
  Expr *getIndentString(unsigned Depth) {
    if (!Depth)
      return nullptr;

    llvm::SmallString<32> Indent;
    Indent.resize(Depth * Policy.Indentation, ' ');
    return getStringLiteral(Indent);
  }

  /// Build a string literal spelling type T under the local printing policy.
  Expr *getTypeString(QualType T) {
    return getStringLiteral(T.getAsString(Policy));
  }

  /// Append a printf conversion specifier suitable for a value of type T to
  /// Str. Returns false if no specifier could be determined.
  bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
    llvm::raw_svector_ostream OS(Str);

    // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
    // than trying to print a single character.
    if (auto *BT = T->getAs<BuiltinType>()) {
      switch (BT->getKind()) {
      case BuiltinType::Bool:
        OS << "%d";
        return true;
      case BuiltinType::Char_U:
      case BuiltinType::UChar:
        OS << "%hhu";
        return true;
      case BuiltinType::Char_S:
      case BuiltinType::SChar:
        OS << "%hhd";
        return true;
      default:
        break;
      }
    }

    analyze_printf::PrintfSpecifier Specifier;
    if (Specifier.fixType(T, S.getLangOpts(), S.Context, /*IsObjCLiteral=*/false)) {
      // We were able to guess how to format this.
      if (Specifier.getConversionSpecifier().getKind() ==
          analyze_printf::PrintfConversionSpecifier::sArg) {
        // Wrap double-quotes around a '%s' specifier and limit its maximum
        // length. Ideally we'd also somehow escape special characters in the
        // contents but printf doesn't support that.
        // FIXME: '%s' formatting is not safe in general.
        OS << '"';
        Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
        Specifier.toString(OS);
        OS << '"';
        // FIXME: It would be nice to include a '...' if the string doesn't fit
        // in the length limit.
      } else {
        Specifier.toString(OS);
      }
      return true;
    }

    if (T->isPointerType()) {
      // Format all pointers with '%p'.
      OS << "%p";
      return true;
    }

    return false;
  }

  /// Print "<indent><type name>" then dump the record's value. Used for the
  /// top-level record and for base-class subobjects (which have no field
  /// name of their own).
  bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
    Expr *IndentLit = getIndentString(Depth);
    Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
    if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
                  : callPrintFunction("%s", {TypeLit}))
      return true;

    return dumpRecordValue(RD, E, IndentLit, Depth);
  }

  // Dump a record value. E should be a pointer or lvalue referring to an RD.
  bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
                       unsigned Depth) {
    // FIXME: Decide what to do if RD is a union. At least we should probably
    // turn off printing `const char*` members with `%s`, because that is very
    // likely to crash if that's not the active member. Whatever we decide, we
    // should document it.

    // Build an OpaqueValueExpr so we can refer to E more than once without
    // triggering re-evaluation.
    Expr *RecordArg = makeOpaqueValueExpr(E);
    bool RecordArgIsPtr = RecordArg->getType()->isPointerType();

    if (callPrintFunction(" {\n"))
      return true;

    // Dump each base class, regardless of whether they're aggregates.
    if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &Base : CXXRD->bases()) {
        QualType BaseType =
            RecordArgIsPtr ? S.Context.getPointerType(Base.getType())
                           : S.Context.getLValueReferenceType(Base.getType());
        // Cast the record pointer/lvalue to the base type to form the
        // subobject expression.
        ExprResult BasePtr = S.BuildCStyleCastExpr(
            Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc,
            RecordArg);
        if (BasePtr.isInvalid() ||
            dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(),
                              Depth + 1))
          return true;
      }
    }

    Expr *FieldIndentArg = getIndentString(Depth + 1);

    // Dump each field.
    for (auto *D : RD->decls()) {
      auto *IFD = dyn_cast<IndirectFieldDecl>(D);
      auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
      if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion())
        continue;

      // Line starts as "<indent><type> <name> "; pieces appended below.
      llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
      llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
                                           getTypeString(FD->getType()),
                                           getStringLiteral(FD->getName())};

      if (FD->isBitField()) {
        Format += ": %zu ";
        QualType SizeT = S.Context.getSizeType();
        llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
                             FD->getBitWidthValue(S.Context));
        Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
      }

      Format += "=";

      ExprResult Field =
          IFD ? S.BuildAnonymousStructUnionMemberReference(
                    CXXScopeSpec(), Loc, IFD,
                    DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
              : S.BuildFieldReferenceExpr(
                    RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
                    DeclAccessPair::make(FD, AS_public),
                    DeclarationNameInfo(FD->getDeclName(), Loc));
      if (Field.isInvalid())
        return true;

      auto *InnerRD = FD->getType()->getAsRecordDecl();
      auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
      if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
        // Recursively print the values of members of aggregate record type.
        if (callPrintFunction(Format, Args) ||
            dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
          return true;
      } else {
        Format += " ";
        if (appendFormatSpecifier(FD->getType(), Format)) {
          // We know how to print this field.
          Args.push_back(Field.get());
        } else {
          // We don't know how to print this field. Print out its address
          // with a format specifier that a smart tool will be able to
          // recognize and treat specially.
          Format += "*%p";
          ExprResult FieldAddr =
              S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
          if (FieldAddr.isInvalid())
            return true;
          Args.push_back(FieldAddr.get());
        }
        Format += "\n";
        if (callPrintFunction(Format, Args))
          return true;
      }
    }

    return RecordIndent ? callPrintFunction("%s}\n", RecordIndent)
                        : callPrintFunction("}\n");
  }

  /// Bundle all accumulated actions into a PseudoObjectExpr keyed on the
  /// original builtin call, and propagate its type/value kind to TheCall.
  Expr *buildWrapper() {
    auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
                                             PseudoObjectExpr::NoResult);
    TheCall->setType(Wrapper->getType());
    TheCall->setValueKind(Wrapper->getValueKind());
    return Wrapper;
  }
};

} // namespace
/// Type-check __builtin_dump_struct(ptr, printFn, extraArgs...) and lower it
/// to the sequence of print calls built by BuiltinDumpStructGenerator.
/// Returns the replacement expression, or ExprError() on failure.
static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
  if (checkArgCountAtLeast(S, TheCall, 2))
    return ExprError();

  ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
  if (PtrArgResult.isInvalid())
    return ExprError();
  TheCall->setArg(0, PtrArgResult.get());

  // First argument should be a pointer to a struct.
  QualType PtrArgType = PtrArgResult.get()->getType();
  if (!PtrArgType->isPointerType() ||
      !PtrArgType->getPointeeType()->isRecordType()) {
    S.Diag(PtrArgResult.get()->getBeginLoc(),
           diag::err_expected_struct_pointer_argument)
        << 1 << TheCall->getDirectCallee() << PtrArgType;
    return ExprError();
  }
  const RecordDecl *RD = PtrArgType->getPointeeType()->getAsRecordDecl();

  // Second argument is a callable, but we can't fully validate it until we try
  // calling it.
  QualType FnArgType = TheCall->getArg(1)->getType();
  if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
      !FnArgType->isBlockPointerType() &&
      !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
    auto *BT = FnArgType->getAs<BuiltinType>();
    switch (BT ? BT->getKind() : BuiltinType::Void) {
    case BuiltinType::Dependent:
    case BuiltinType::Overload:
    case BuiltinType::BoundMember:
    case BuiltinType::PseudoObject:
    case BuiltinType::UnknownAny:
    case BuiltinType::BuiltinFn:
      // This might be a callable.
      break;

    default:
      S.Diag(TheCall->getArg(1)->getBeginLoc(),
             diag::err_expected_callable_argument)
          << 2 << TheCall->getDirectCallee() << FnArgType;
      return ExprError();
    }
  }

  BuiltinDumpStructGenerator Generator(S, TheCall);

  // Wrap parentheses around the given pointer. This is not necessary for
  // correct code generation, but it means that when we pretty-print the call
  // arguments in our diagnostics we will produce '(&s)->n' instead of the
  // incorrect '&s->n'.
  Expr *PtrArg = PtrArgResult.get();
  PtrArg = new (S.Context)
      ParenExpr(PtrArg->getBeginLoc(),
                S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
  if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
    return ExprError();

  return Generator.buildWrapper();
}
/// Type-check __builtin_call_with_static_chain(call, chain): the first
/// argument must be a plain (non-block, non-builtin, non-pseudo-destructor)
/// call, the second a pointer. On success the builtin call is rewritten in
/// place to take the call's type/value kind. Returns true on error.
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  // Exact class check: subclasses (CXXMemberCallExpr etc.) are rejected too.
  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  // Give the builtin the function-pointer type '(ReturnTy)(ReturnTy, Chain)'
  // so codegen can emit it as a direct call carrying the static chain.
  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  // The builtin call is transparent: it adopts the wrapped call's result.
  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}
821 namespace {
/// Walks a scanf format string and, for each %s/%c/scanlist specifier with a
/// constant field width, compares the bytes scanf may write (field width plus
/// NUL for %s and scanlists) against the destination buffer size supplied by
/// the ComputeSizeFunction callback, invoking Diagnose on overflow.
class ScanfDiagnosticFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Accepts the argument index (relative to the first destination index) of the
  // argument whose size we want.
  using ComputeSizeFunction =
      llvm::function_ref<std::optional<llvm::APSInt>(unsigned)>;

  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size).
  using DiagnoseFunction =
      llvm::function_ref<void(unsigned, unsigned, unsigned)>;

  ComputeSizeFunction ComputeSizeArgument;
  DiagnoseFunction Diagnose;

public:
  ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
                               DiagnoseFunction Diagnose)
      : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}

  // Returning true always continues scanning subsequent specifiers.
  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *StartSpecifier,
                            unsigned specifierLen) override {
    if (!FS.consumesDataArgument())
      return true;

    // %s and scanlists append a terminating NUL; %c does not.
    unsigned NulByte = 0;
    switch ((FS.getConversionSpecifier().getKind())) {
    default:
      return true;
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::ScanListArg:
      NulByte = 1;
      break;
    case analyze_format_string::ConversionSpecifier::cArg:
      break;
    }

    // Only a literal constant field width gives a knowable write size.
    analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
    if (FW.getHowSpecified() !=
        analyze_format_string::OptionalAmount::HowSpecified::Constant)
      return true;

    unsigned SourceSize = FW.getConstantAmount() + NulByte;

    std::optional<llvm::APSInt> DestSizeAPS =
        ComputeSizeArgument(FS.getArgIndex());
    if (!DestSizeAPS)
      return true;

    unsigned DestSize = DestSizeAPS->getZExtValue();

    if (DestSize < SourceSize)
      Diagnose(FS.getArgIndex(), DestSize, SourceSize);

    return true;
  }
};
/// Walks a printf-style format string and accumulates a lower bound on the
/// number of bytes the formatted output will occupy (including the trailing
/// null byte). Used by the fortify checks to compare against the destination
/// buffer size. Also records whether the format string uses the Linux
/// kernel's `%p` extensions, which changes which diagnostic is emitted.
class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Running byte estimate; starts at strlen(format)+1 and is adjusted as each
  // specifier is replaced by its minimum expansion.
  size_t Size;
  /// Whether the format string contains Linux kernel's format specifier
  /// extension.
  bool IsKernelCompatible = true;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen,
                             const TargetInfo &) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %g removes trailing zeros, and does not print decimal point if there are
    // no digits that follow it. Thus %g can print a single digit.
    // FIXME: If it is alternative form:
    // For g and G conversions, trailing zeros are not removed from the result.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:
      Size += 1;
      break;

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      // Linux kernel has its own extension for `%p` specifier.
      // Kernel Document:
      // https://docs.kernel.org/core-api/printk-formats.html#pointer-types
      IsKernelCompatible = false;
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    // A '+' or ' ' flag adds one sign/space character.
    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      // For o conversion, it increases the precision, if and only if necessary,
      // to force the first digit of the result to be a zero
      // (if the value and precision are both 0, a single 0 is printed)
      case analyze_format_string::ConversionSpecifier::oArg:
      // For b conversion, a nonzero result has 0b prefixed to it.
      case analyze_format_string::ConversionSpecifier::bArg:
      // For x (or X) conversion, a nonzero result has 0x (or 0X) prefixed to
      // it.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        // Note: even when the prefix is added, if
        // (prefix_width <= FieldWidth - formatted_length) holds,
        // the prefix does not increase the format
        // size. e.g.(("%#3x", 0xf) is "0xf")

        // If the result is zero, o, b, x, X adds nothing.
        break;
      // For a, A, e, E, f, F, g, and G conversions,
      // the result of converting a floating-point number always contains a
      // decimal-point
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      // For other conversions, the behavior is undefined.
      default:
        break;
      }
    }
    // The specifier text itself was counted into the initial strlen-based
    // estimate; subtract it now that it has been replaced by its expansion.
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  /// Minimum number of bytes (including the null terminator) the call writes.
  size_t getSizeLowerBound() const { return Size; }
  bool isKernelCompatible() const { return IsKernelCompatible; }

private:
  /// Returns the constant field width of \p FS, or 0 if it is absent or not a
  /// compile-time constant.
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  /// Returns the effective precision of \p FS: the constant precision if one
  /// is written, else the specifier's default precision (e.g. 6 for %f).
  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};
1085 } // namespace
1087 static bool ProcessFormatStringLiteral(const Expr *FormatExpr,
1088 StringRef &FormatStrRef, size_t &StrLen,
1089 ASTContext &Context) {
1090 if (const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
1091 Format && (Format->isOrdinary() || Format->isUTF8())) {
1092 FormatStrRef = Format->getString();
1093 const ConstantArrayType *T =
1094 Context.getAsConstantArrayType(Format->getType());
1095 assert(T && "String literal not of constant array type!");
1096 size_t TypeSize = T->getSize().getZExtValue();
1097 // In case there's a null byte somewhere.
1098 StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
1099 return true;
1101 return false;
/// Emits a -Wfortify-source style warning when a call to a fortifiable
/// memory/string builtin (memcpy, strcpy, sprintf, snprintf, the scanf
/// family, their __builtin_ and _chk variants, or a function marked with
/// diagnose_as_builtin) can be proven at compile time to overflow (or, for
/// snprintf, truncate) its destination.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // Nothing to prove for dependent calls, and constant evaluation already
  // diagnoses out-of-bounds accesses itself.
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluatedContext())
    return;

  bool UseDABAttr = false;
  const FunctionDecl *UseDecl = FD;

  // diagnose_as_builtin lets a user function be checked as if it were the
  // named builtin, with a remapping of argument indices.
  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
  if (DABAttr) {
    UseDecl = DABAttr->getFunction();
    assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
    UseDABAttr = true;
  }

  unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);

  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  // All sizes are normalized to the target's size_t width before comparing.
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  auto TranslateIndex = [&](unsigned Index) -> std::optional<unsigned> {
    // If we refer to a diagnose_as_builtin attribute, we need to change the
    // argument index to refer to the arguments of the called function. Unless
    // the index is out of bounds, which presumably means it's a variadic
    // function.
    if (!UseDABAttr)
      return Index;
    unsigned DABIndices = DABAttr->argIndices_size();
    unsigned NewIndex = Index < DABIndices
                            ? DABAttr->argIndices_begin()[Index]
                            : Index - DABIndices + FD->getNumParams();
    if (NewIndex >= TheCall->getNumArgs())
      return std::nullopt;
    return NewIndex;
  };

  // Size taken from an explicit integer argument (the _chk builtins carry the
  // object size as a trailing constant argument).
  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(NewIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return std::nullopt;
    llvm::APSInt Integer = Result.Val.getInt();
    Integer.setIsUnsigned(true);
    return Integer;
  };

  // Size inferred from the pointed-to object (__builtin_object_size-style).
  auto ComputeSizeArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    // This check can fail for variadic functions.
    if (Index < FD->getNumParams()) {
      if (const auto *POS =
              FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
        BOSType = POS->getType();
    }

    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;

    if (NewIndex >= TheCall->getNumArgs())
      return std::nullopt;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return std::nullopt;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  // Size of a string argument, when its length is a compile-time constant.
  auto ComputeStrLenArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return std::nullopt;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  std::optional<llvm::APSInt> SourceSize;
  std::optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  auto GetFunctionName = [&]() {
    StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
    // Skim off the details of whichever builtin was called to produce a better
    // diagnostic, as it's unlikely that the user wrote the __builtin
    // explicitly.
    if (IsChkVariant) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
      FunctionName = FunctionName.drop_back(std::strlen("_chk"));
    } else if (FunctionName.startswith("__builtin_")) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
    }
    return FunctionName;
  };

  // Each case fills SourceSize/DestinationSize/DiagID; the shared comparison
  // and diagnostic emission happen after the switch.
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIscanf:
  case Builtin::BIfscanf:
  case Builtin::BIsscanf: {
    unsigned FormatIndex = 1;
    unsigned DataIndex = 2;
    if (BuiltinID == Builtin::BIscanf) {
      FormatIndex = 0;
      DataIndex = 1;
    }

    const auto *FormatExpr =
        TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    StringRef FormatStrRef;
    size_t StrLen;
    if (!ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context))
      return;

    auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
                        unsigned SourceSize) {
      DiagID = diag::warn_fortify_scanf_overflow;
      unsigned Index = ArgIndex + DataIndex;
      StringRef FunctionName = GetFunctionName();
      DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
                          PDiag(DiagID) << FunctionName << (Index + 1)
                                        << DestSize << SourceSize);
    };

    auto ShiftedComputeSizeArgument = [&](unsigned Index) {
      return ComputeSizeArgument(Index + DataIndex);
    };
    ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
    const char *FormatBytes = FormatStrRef.data();
    analyze_format_string::ParseScanfString(H, FormatBytes,
                                            FormatBytes + StrLen, getLangOpts(),
                                            Context.getTargetInfo());

    // Unlike the other cases, in this one we have already issued the diagnostic
    // here, so no need to continue (because unlike the other cases, here the
    // diagnostic refers to the argument number).
    return;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    StringRef FormatStrRef;
    size_t StrLen;
    if (ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context)) {
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = H.isKernelCompatible()
                     ? diag::warn_format_overflow
                     : diag::warn_format_overflow_non_kprintf;
        SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                         .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          DestinationSize = ComputeExplicitObjectSizeArgument(2);
          IsChkVariant = true;
        } else {
          DestinationSize = ComputeSizeArgument(0);
        }
        break;
      }
    }
    // Non-literal or unparsable format: nothing can be proven.
    return;
  }

  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    // _chk variants: copy length is the second-to-last argument, object size
    // the last one.
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
    DestinationSize =
        ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    IsChkVariant = true;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(3);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    // Warn about truncation when the format string needs more bytes than the
    // specified bound (a separate diagnostic issued right here).
    const auto *FormatExpr = TheCall->getArg(2)->IgnoreParenImpCasts();
    StringRef FormatStrRef;
    size_t StrLen;
    if (SourceSize &&
        ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context)) {
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), /*isFreeBSDKPrintf=*/false)) {
        llvm::APSInt FormatSize =
            llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                .extOrTrunc(SizeTypeWidth);
        if (FormatSize > *SourceSize && *SourceSize != 0) {
          unsigned TruncationDiagID =
              H.isKernelCompatible() ? diag::warn_format_truncation
                                     : diag::warn_format_truncation_non_kprintf;
          SmallString<16> SpecifiedSizeStr;
          SmallString<16> FormatSizeStr;
          SourceSize->toString(SpecifiedSizeStr, /*Radix=*/10);
          FormatSize.toString(FormatSizeStr, /*Radix=*/10);
          DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                              PDiag(TruncationDiagID)
                                  << GetFunctionName() << SpecifiedSizeStr
                                  << FormatSizeStr);
        }
      }
    }
    DestinationSize = ComputeSizeArgument(0);
  }
  }

  // No warning unless both sizes are known and the source exceeds the
  // destination.
  if (!SourceSize || !DestinationSize ||
      llvm::APSInt::compareValues(*SourceSize, *DestinationSize) <= 0)
    return;

  StringRef FunctionName = GetFunctionName();

  SmallString<16> DestinationStr;
  SmallString<16> SourceStr;
  DestinationSize->toString(DestinationStr, /*Radix=*/10);
  SourceSize->toString(SourceStr, /*Radix=*/10);
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << DestinationStr << SourceStr);
}
1424 static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
1425 Scope::ScopeFlags NeededScopeFlags,
1426 unsigned DiagID) {
1427 // Scopes aren't available during instantiation. Fortunately, builtin
1428 // functions cannot be template args so they cannot be formed through template
1429 // instantiation. Therefore checking once during the parse is sufficient.
1430 if (SemaRef.inTemplateInstantiation())
1431 return false;
1433 Scope *S = SemaRef.getCurScope();
1434 while (S && !S->isSEHExceptScope())
1435 S = S->getParent();
1436 if (!S || !(S->getFlags() & NeededScopeFlags)) {
1437 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
1438 SemaRef.Diag(TheCall->getExprLoc(), DiagID)
1439 << DRE->getDecl()->getIdentifier();
1440 return true;
1443 return false;
1446 static inline bool isBlockPointer(Expr *Arg) {
1447 return Arg->getType()->isBlockPointerType();
1450 /// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
1451 /// void*, which is a requirement of device side enqueue.
1452 static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
1453 const BlockPointerType *BPT =
1454 cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
1455 ArrayRef<QualType> Params =
1456 BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
1457 unsigned ArgCounter = 0;
1458 bool IllegalParams = false;
1459 // Iterate through the block parameters until either one is found that is not
1460 // a local void*, or the block is valid.
1461 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
1462 I != E; ++I, ++ArgCounter) {
1463 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
1464 (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
1465 LangAS::opencl_local) {
1466 // Get the location of the error. If a block literal has been passed
1467 // (BlockExpr) then we can point straight to the offending argument,
1468 // else we just point to the variable reference.
1469 SourceLocation ErrorLoc;
1470 if (isa<BlockExpr>(BlockArg)) {
1471 BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
1472 ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
1473 } else if (isa<DeclRefExpr>(BlockArg)) {
1474 ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
1476 S.Diag(ErrorLoc,
1477 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
1478 IllegalParams = true;
1482 return IllegalParams;
1485 static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
1486 // OpenCL device can support extension but not the feature as extension
1487 // requires subgroup independent forward progress, but subgroup independent
1488 // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature.
1489 if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) &&
1490 !S.getOpenCLOptions().isSupported("__opencl_c_subgroups",
1491 S.getLangOpts())) {
1492 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
1493 << 1 << Call->getDirectCallee()
1494 << "cl_khr_subgroups or __opencl_c_subgroups";
1495 return true;
1497 return false;
1500 static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
1501 if (checkArgCount(S, TheCall, 2))
1502 return true;
1504 if (checkOpenCLSubgroupExt(S, TheCall))
1505 return true;
1507 // First argument is an ndrange_t type.
1508 Expr *NDRangeArg = TheCall->getArg(0);
1509 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
1510 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1511 << TheCall->getDirectCallee() << "'ndrange_t'";
1512 return true;
1515 Expr *BlockArg = TheCall->getArg(1);
1516 if (!isBlockPointer(BlockArg)) {
1517 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1518 << TheCall->getDirectCallee() << "block";
1519 return true;
1521 return checkOpenCLBlockArgs(S, BlockArg);
1524 /// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
1525 /// get_kernel_work_group_size
1526 /// and get_kernel_preferred_work_group_size_multiple builtin functions.
1527 static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
1528 if (checkArgCount(S, TheCall, 1))
1529 return true;
1531 Expr *BlockArg = TheCall->getArg(0);
1532 if (!isBlockPointer(BlockArg)) {
1533 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1534 << TheCall->getDirectCallee() << "block";
1535 return true;
1537 return checkOpenCLBlockArgs(S, BlockArg);
1540 /// Diagnose integer type and any valid implicit conversion to it.
1541 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
1542 const QualType &IntType);
1544 static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
1545 unsigned Start, unsigned End) {
1546 bool IllegalParams = false;
1547 for (unsigned I = Start; I <= End; ++I)
1548 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
1549 S.Context.getSizeType());
1550 return IllegalParams;
1553 /// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
1554 /// 'local void*' parameter of passed block.
1555 static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
1556 Expr *BlockArg,
1557 unsigned NumNonVarArgs) {
1558 const BlockPointerType *BPT =
1559 cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
1560 unsigned NumBlockParams =
1561 BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
1562 unsigned TotalNumArgs = TheCall->getNumArgs();
1564 // For each argument passed to the block, a corresponding uint needs to
1565 // be passed to describe the size of the local memory.
1566 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
1567 S.Diag(TheCall->getBeginLoc(),
1568 diag::err_opencl_enqueue_kernel_local_size_args);
1569 return true;
1572 // Check that the sizes of the local memory are specified by integers.
1573 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
1574 TotalNumArgs - 1);
/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// Returns true (after diagnosing) on any error.
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  // Every overload takes at least (queue, flags, ndrange, block).
  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs << /*is non object*/ 0;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific case has been detected, give generic error
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}
1723 /// Returns OpenCL access qual.
1724 static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
1725 return D->getAttr<OpenCLAccessAttr>();
/// Validates the pipe argument (argument 0) of a pipe builtin call: it must
/// be of pipe type, and its access qualifier must match the direction of the
/// builtin (read_* vs. write_*). Returns true (after diagnosing) on error.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  // NOTE(review): this assumes Arg0 is a DeclRefExpr — confirm callers
  // guarantee that before relying on the unchecked cast.
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    // Read-side builtins: accept a missing qualifier (defaults to read_only)
    // or an explicit read_only.
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    // Write-side builtins: require an explicit write_only qualifier.
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}
1778 /// Returns true if pipe element type is different from the pointer.
1779 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
1780 const Expr *Arg0 = Call->getArg(0);
1781 const Expr *ArgIdx = Call->getArg(Idx);
1782 const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
1783 const QualType EltTy = PipeTy->getElementType();
1784 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
1785 // The Idx argument should be a pointer and the type of the pointer and
1786 // the type of pipe element should also be the same.
1787 if (!ArgTy ||
1788 !S.Context.hasSameType(
1789 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
1790 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1791 << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
1792 << ArgIdx->getType() << ArgIdx->getSourceRange();
1793 return true;
1795 return false;
1798 // Performs semantic analysis for the read/write_pipe call.
1799 // \param S Reference to the semantic analyzer.
1800 // \param Call A pointer to the builtin call.
1801 // \return True if a semantic error has been found, false otherwise.
1802 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
1803 // OpenCL v2.0 s6.13.16.2 - The built-in read/write
1804 // functions have two forms.
1805 switch (Call->getNumArgs()) {
1806 case 2:
1807 if (checkOpenCLPipeArg(S, Call))
1808 return true;
1809 // The call with 2 arguments should be
1810 // read/write_pipe(pipe T, T*).
1811 // Check packet type T.
1812 if (checkOpenCLPipePacketType(S, Call, 1))
1813 return true;
1814 break;
1816 case 4: {
1817 if (checkOpenCLPipeArg(S, Call))
1818 return true;
1819 // The call with 4 arguments should be
1820 // read/write_pipe(pipe T, reserve_id_t, uint, T*).
1821 // Check reserve_id_t.
1822 if (!Call->getArg(1)->getType()->isReserveIDT()) {
1823 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1824 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1825 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1826 return true;
1829 // Check the index.
1830 const Expr *Arg2 = Call->getArg(2);
1831 if (!Arg2->getType()->isIntegerType() &&
1832 !Arg2->getType()->isUnsignedIntegerType()) {
1833 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1834 << Call->getDirectCallee() << S.Context.UnsignedIntTy
1835 << Arg2->getType() << Arg2->getSourceRange();
1836 return true;
1839 // Check packet type T.
1840 if (checkOpenCLPipePacketType(S, Call, 3))
1841 return true;
1842 } break;
1843 default:
1844 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
1845 << Call->getDirectCallee() << Call->getSourceRange();
1846 return true;
1849 return false;
1852 // Performs a semantic analysis on the {work_group_/sub_group_
1853 // /_}reserve_{read/write}_pipe
1854 // \param S Reference to the semantic analyzer.
1855 // \param Call The call to the builtin function to be analyzed.
1856 // \return True if a semantic error was found, false otherwise.
1857 static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
1858 if (checkArgCount(S, Call, 2))
1859 return true;
1861 if (checkOpenCLPipeArg(S, Call))
1862 return true;
1864 // Check the reserve size.
1865 if (!Call->getArg(1)->getType()->isIntegerType() &&
1866 !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
1867 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1868 << Call->getDirectCallee() << S.Context.UnsignedIntTy
1869 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1870 return true;
1873 // Since return type of reserve_read/write_pipe built-in function is
1874 // reserve_id_t, which is not defined in the builtin def file , we used int
1875 // as return type and need to override the return type of these functions.
1876 Call->setType(S.Context.OCLReserveIDTy);
1878 return false;
1881 // Performs a semantic analysis on {work_group_/sub_group_
1882 // /_}commit_{read/write}_pipe
1883 // \param S Reference to the semantic analyzer.
1884 // \param Call The call to the builtin function to be analyzed.
1885 // \return True if a semantic error was found, false otherwise.
1886 static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
1887 if (checkArgCount(S, Call, 2))
1888 return true;
1890 if (checkOpenCLPipeArg(S, Call))
1891 return true;
1893 // Check reserve_id_t.
1894 if (!Call->getArg(1)->getType()->isReserveIDT()) {
1895 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1896 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1897 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1898 return true;
1901 return false;
1904 // Performs a semantic analysis on the call to built-in Pipe
1905 // Query Functions.
1906 // \param S Reference to the semantic analyzer.
1907 // \param Call The call to the builtin function to be analyzed.
1908 // \return True if a semantic error was found, false otherwise.
1909 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
1910 if (checkArgCount(S, Call, 1))
1911 return true;
1913 if (!Call->getArg(0)->getType()->isPipeType()) {
1914 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1915 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
1916 return true;
1919 return false;
1922 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
1923 // Performs semantic analysis for the to_global/local/private call.
1924 // \param S Reference to the semantic analyzer.
1925 // \param BuiltinID ID of the builtin function.
1926 // \param Call A pointer to the builtin call.
1927 // \return True if a semantic error has been found, false otherwise.
1928 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
1929 CallExpr *Call) {
1930 if (checkArgCount(S, Call, 1))
1931 return true;
1933 auto RT = Call->getArg(0)->getType();
1934 if (!RT->isPointerType() || RT->getPointeeType()
1935 .getAddressSpace() == LangAS::opencl_constant) {
1936 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
1937 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
1938 return true;
1941 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
1942 S.Diag(Call->getArg(0)->getBeginLoc(),
1943 diag::warn_opencl_generic_address_space_arg)
1944 << Call->getDirectCallee()->getNameInfo().getAsString()
1945 << Call->getArg(0)->getSourceRange();
1948 RT = RT->getPointeeType();
1949 auto Qual = RT.getQualifiers();
1950 switch (BuiltinID) {
1951 case Builtin::BIto_global:
1952 Qual.setAddressSpace(LangAS::opencl_global);
1953 break;
1954 case Builtin::BIto_local:
1955 Qual.setAddressSpace(LangAS::opencl_local);
1956 break;
1957 case Builtin::BIto_private:
1958 Qual.setAddressSpace(LangAS::opencl_private);
1959 break;
1960 default:
1961 llvm_unreachable("Invalid builtin function");
1963 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
1964 RT.getUnqualifiedType(), Qual)));
1966 return false;
1969 static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
1970 if (checkArgCount(S, TheCall, 1))
1971 return ExprError();
1973 // Compute __builtin_launder's parameter type from the argument.
1974 // The parameter type is:
1975 // * The type of the argument if it's not an array or function type,
1976 // Otherwise,
1977 // * The decayed argument type.
1978 QualType ParamTy = [&]() {
1979 QualType ArgTy = TheCall->getArg(0)->getType();
1980 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
1981 return S.Context.getPointerType(Ty->getElementType());
1982 if (ArgTy->isFunctionType()) {
1983 return S.Context.getPointerType(ArgTy);
1985 return ArgTy;
1986 }();
1988 TheCall->setType(ParamTy);
1990 auto DiagSelect = [&]() -> std::optional<unsigned> {
1991 if (!ParamTy->isPointerType())
1992 return 0;
1993 if (ParamTy->isFunctionPointerType())
1994 return 1;
1995 if (ParamTy->isVoidPointerType())
1996 return 2;
1997 return std::optional<unsigned>{};
1998 }();
1999 if (DiagSelect) {
2000 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
2001 << *DiagSelect << TheCall->getSourceRange();
2002 return ExprError();
2005 // We either have an incomplete class type, or we have a class template
2006 // whose instantiation has not been forced. Example:
2008 // template <class T> struct Foo { T value; };
2009 // Foo<int> *p = nullptr;
2010 // auto *d = __builtin_launder(p);
2011 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
2012 diag::err_incomplete_type))
2013 return ExprError();
2015 assert(ParamTy->getPointeeType()->isObjectType() &&
2016 "Unhandled non-object pointer case");
2018 InitializedEntity Entity =
2019 InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
2020 ExprResult Arg =
2021 S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
2022 if (Arg.isInvalid())
2023 return ExprError();
2024 TheCall->setArg(0, Arg.get());
2026 return TheCall;
2029 // Emit an error and return true if the current object format type is in the
2030 // list of unsupported types.
2031 static bool CheckBuiltinTargetNotInUnsupported(
2032 Sema &S, unsigned BuiltinID, CallExpr *TheCall,
2033 ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) {
2034 llvm::Triple::ObjectFormatType CurObjFormat =
2035 S.getASTContext().getTargetInfo().getTriple().getObjectFormat();
2036 if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) {
2037 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
2038 << TheCall->getSourceRange();
2039 return true;
2041 return false;
2044 // Emit an error and return true if the current architecture is not in the list
2045 // of supported architectures.
2046 static bool
2047 CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
2048 ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
2049 llvm::Triple::ArchType CurArch =
2050 S.getASTContext().getTargetInfo().getTriple().getArch();
2051 if (llvm::is_contained(SupportedArchs, CurArch))
2052 return false;
2053 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
2054 << TheCall->getSourceRange();
2055 return true;
2058 static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
2059 SourceLocation CallSiteLoc);
2061 bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
2062 CallExpr *TheCall) {
2063 switch (TI.getTriple().getArch()) {
2064 default:
2065 // Some builtins don't require additional checking, so just consider these
2066 // acceptable.
2067 return false;
2068 case llvm::Triple::arm:
2069 case llvm::Triple::armeb:
2070 case llvm::Triple::thumb:
2071 case llvm::Triple::thumbeb:
2072 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
2073 case llvm::Triple::aarch64:
2074 case llvm::Triple::aarch64_32:
2075 case llvm::Triple::aarch64_be:
2076 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
2077 case llvm::Triple::bpfeb:
2078 case llvm::Triple::bpfel:
2079 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
2080 case llvm::Triple::hexagon:
2081 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
2082 case llvm::Triple::mips:
2083 case llvm::Triple::mipsel:
2084 case llvm::Triple::mips64:
2085 case llvm::Triple::mips64el:
2086 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
2087 case llvm::Triple::systemz:
2088 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
2089 case llvm::Triple::x86:
2090 case llvm::Triple::x86_64:
2091 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
2092 case llvm::Triple::ppc:
2093 case llvm::Triple::ppcle:
2094 case llvm::Triple::ppc64:
2095 case llvm::Triple::ppc64le:
2096 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
2097 case llvm::Triple::amdgcn:
2098 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
2099 case llvm::Triple::riscv32:
2100 case llvm::Triple::riscv64:
2101 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
2102 case llvm::Triple::loongarch32:
2103 case llvm::Triple::loongarch64:
2104 return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall);
2105 case llvm::Triple::wasm32:
2106 case llvm::Triple::wasm64:
2107 return CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall);
2108 case llvm::Triple::nvptx:
2109 case llvm::Triple::nvptx64:
2110 return CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall);
2114 // Check if \p Ty is a valid type for the elementwise math builtins. If it is
2115 // not a valid type, emit an error message and return true. Otherwise return
2116 // false.
2117 static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
2118 QualType Ty) {
2119 if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) {
2120 return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
2121 << 1 << /* vector, integer or float ty*/ 0 << Ty;
2124 return false;
2127 static bool checkFPMathBuiltinElementType(Sema &S, SourceLocation Loc,
2128 QualType ArgTy, int ArgIndex) {
2129 QualType EltTy = ArgTy;
2130 if (auto *VecTy = EltTy->getAs<VectorType>())
2131 EltTy = VecTy->getElementType();
2133 if (!EltTy->isRealFloatingType()) {
2134 return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
2135 << ArgIndex << /* vector or float ty*/ 5 << ArgTy;
2138 return false;
2141 ExprResult
2142 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
2143 CallExpr *TheCall) {
2144 ExprResult TheCallResult(TheCall);
2146 // Find out if any arguments are required to be integer constant expressions.
2147 unsigned ICEArguments = 0;
2148 ASTContext::GetBuiltinTypeError Error;
2149 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
2150 if (Error != ASTContext::GE_None)
2151 ICEArguments = 0; // Don't diagnose previously diagnosed errors.
2153 // If any arguments are required to be ICE's, check and diagnose.
2154 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
2155 // Skip arguments not required to be ICE's.
2156 if ((ICEArguments & (1 << ArgNo)) == 0) continue;
2158 llvm::APSInt Result;
2159 // If we don't have enough arguments, continue so we can issue better
2160 // diagnostic in checkArgCount(...)
2161 if (ArgNo < TheCall->getNumArgs() &&
2162 SemaBuiltinConstantArg(TheCall, ArgNo, Result))
2163 return true;
2164 ICEArguments &= ~(1 << ArgNo);
2167 switch (BuiltinID) {
2168 case Builtin::BI__builtin___CFStringMakeConstantString:
2169 // CFStringMakeConstantString is currently not implemented for GOFF (i.e.,
2170 // on z/OS) and for XCOFF (i.e., on AIX). Emit unsupported
2171 if (CheckBuiltinTargetNotInUnsupported(
2172 *this, BuiltinID, TheCall,
2173 {llvm::Triple::GOFF, llvm::Triple::XCOFF}))
2174 return ExprError();
2175 assert(TheCall->getNumArgs() == 1 &&
2176 "Wrong # arguments to builtin CFStringMakeConstantString");
2177 if (CheckObjCString(TheCall->getArg(0)))
2178 return ExprError();
2179 break;
2180 case Builtin::BI__builtin_ms_va_start:
2181 case Builtin::BI__builtin_stdarg_start:
2182 case Builtin::BI__builtin_va_start:
2183 if (SemaBuiltinVAStart(BuiltinID, TheCall))
2184 return ExprError();
2185 break;
2186 case Builtin::BI__va_start: {
2187 switch (Context.getTargetInfo().getTriple().getArch()) {
2188 case llvm::Triple::aarch64:
2189 case llvm::Triple::arm:
2190 case llvm::Triple::thumb:
2191 if (SemaBuiltinVAStartARMMicrosoft(TheCall))
2192 return ExprError();
2193 break;
2194 default:
2195 if (SemaBuiltinVAStart(BuiltinID, TheCall))
2196 return ExprError();
2197 break;
2199 break;
2202 // The acquire, release, and no fence variants are ARM and AArch64 only.
2203 case Builtin::BI_interlockedbittestandset_acq:
2204 case Builtin::BI_interlockedbittestandset_rel:
2205 case Builtin::BI_interlockedbittestandset_nf:
2206 case Builtin::BI_interlockedbittestandreset_acq:
2207 case Builtin::BI_interlockedbittestandreset_rel:
2208 case Builtin::BI_interlockedbittestandreset_nf:
2209 if (CheckBuiltinTargetInSupported(
2210 *this, BuiltinID, TheCall,
2211 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
2212 return ExprError();
2213 break;
2215 // The 64-bit bittest variants are x64, ARM, and AArch64 only.
2216 case Builtin::BI_bittest64:
2217 case Builtin::BI_bittestandcomplement64:
2218 case Builtin::BI_bittestandreset64:
2219 case Builtin::BI_bittestandset64:
2220 case Builtin::BI_interlockedbittestandreset64:
2221 case Builtin::BI_interlockedbittestandset64:
2222 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall,
2223 {llvm::Triple::x86_64, llvm::Triple::arm,
2224 llvm::Triple::thumb,
2225 llvm::Triple::aarch64}))
2226 return ExprError();
2227 break;
2229 case Builtin::BI__builtin_set_flt_rounds:
2230 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall,
2231 {llvm::Triple::x86, llvm::Triple::x86_64,
2232 llvm::Triple::arm, llvm::Triple::thumb,
2233 llvm::Triple::aarch64}))
2234 return ExprError();
2235 break;
2237 case Builtin::BI__builtin_isgreater:
2238 case Builtin::BI__builtin_isgreaterequal:
2239 case Builtin::BI__builtin_isless:
2240 case Builtin::BI__builtin_islessequal:
2241 case Builtin::BI__builtin_islessgreater:
2242 case Builtin::BI__builtin_isunordered:
2243 if (SemaBuiltinUnorderedCompare(TheCall))
2244 return ExprError();
2245 break;
2246 case Builtin::BI__builtin_fpclassify:
2247 if (SemaBuiltinFPClassification(TheCall, 6))
2248 return ExprError();
2249 break;
2250 case Builtin::BI__builtin_isfpclass:
2251 if (SemaBuiltinFPClassification(TheCall, 2))
2252 return ExprError();
2253 break;
2254 case Builtin::BI__builtin_isfinite:
2255 case Builtin::BI__builtin_isinf:
2256 case Builtin::BI__builtin_isinf_sign:
2257 case Builtin::BI__builtin_isnan:
2258 case Builtin::BI__builtin_issignaling:
2259 case Builtin::BI__builtin_isnormal:
2260 case Builtin::BI__builtin_issubnormal:
2261 case Builtin::BI__builtin_iszero:
2262 case Builtin::BI__builtin_signbit:
2263 case Builtin::BI__builtin_signbitf:
2264 case Builtin::BI__builtin_signbitl:
2265 if (SemaBuiltinFPClassification(TheCall, 1))
2266 return ExprError();
2267 break;
2268 case Builtin::BI__builtin_shufflevector:
2269 return SemaBuiltinShuffleVector(TheCall);
2270 // TheCall will be freed by the smart pointer here, but that's fine, since
2271 // SemaBuiltinShuffleVector guts it, but then doesn't release it.
2272 case Builtin::BI__builtin_prefetch:
2273 if (SemaBuiltinPrefetch(TheCall))
2274 return ExprError();
2275 break;
2276 case Builtin::BI__builtin_alloca_with_align:
2277 case Builtin::BI__builtin_alloca_with_align_uninitialized:
2278 if (SemaBuiltinAllocaWithAlign(TheCall))
2279 return ExprError();
2280 [[fallthrough]];
2281 case Builtin::BI__builtin_alloca:
2282 case Builtin::BI__builtin_alloca_uninitialized:
2283 Diag(TheCall->getBeginLoc(), diag::warn_alloca)
2284 << TheCall->getDirectCallee();
2285 break;
2286 case Builtin::BI__arithmetic_fence:
2287 if (SemaBuiltinArithmeticFence(TheCall))
2288 return ExprError();
2289 break;
2290 case Builtin::BI__assume:
2291 case Builtin::BI__builtin_assume:
2292 if (SemaBuiltinAssume(TheCall))
2293 return ExprError();
2294 break;
2295 case Builtin::BI__builtin_assume_aligned:
2296 if (SemaBuiltinAssumeAligned(TheCall))
2297 return ExprError();
2298 break;
2299 case Builtin::BI__builtin_dynamic_object_size:
2300 case Builtin::BI__builtin_object_size:
2301 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
2302 return ExprError();
2303 break;
2304 case Builtin::BI__builtin_longjmp:
2305 if (SemaBuiltinLongjmp(TheCall))
2306 return ExprError();
2307 break;
2308 case Builtin::BI__builtin_setjmp:
2309 if (SemaBuiltinSetjmp(TheCall))
2310 return ExprError();
2311 break;
2312 case Builtin::BI__builtin_classify_type:
2313 if (checkArgCount(*this, TheCall, 1)) return true;
2314 TheCall->setType(Context.IntTy);
2315 break;
2316 case Builtin::BI__builtin_complex:
2317 if (SemaBuiltinComplex(TheCall))
2318 return ExprError();
2319 break;
2320 case Builtin::BI__builtin_constant_p: {
2321 if (checkArgCount(*this, TheCall, 1)) return true;
2322 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
2323 if (Arg.isInvalid()) return true;
2324 TheCall->setArg(0, Arg.get());
2325 TheCall->setType(Context.IntTy);
2326 break;
2328 case Builtin::BI__builtin_launder:
2329 return SemaBuiltinLaunder(*this, TheCall);
2330 case Builtin::BI__sync_fetch_and_add:
2331 case Builtin::BI__sync_fetch_and_add_1:
2332 case Builtin::BI__sync_fetch_and_add_2:
2333 case Builtin::BI__sync_fetch_and_add_4:
2334 case Builtin::BI__sync_fetch_and_add_8:
2335 case Builtin::BI__sync_fetch_and_add_16:
2336 case Builtin::BI__sync_fetch_and_sub:
2337 case Builtin::BI__sync_fetch_and_sub_1:
2338 case Builtin::BI__sync_fetch_and_sub_2:
2339 case Builtin::BI__sync_fetch_and_sub_4:
2340 case Builtin::BI__sync_fetch_and_sub_8:
2341 case Builtin::BI__sync_fetch_and_sub_16:
2342 case Builtin::BI__sync_fetch_and_or:
2343 case Builtin::BI__sync_fetch_and_or_1:
2344 case Builtin::BI__sync_fetch_and_or_2:
2345 case Builtin::BI__sync_fetch_and_or_4:
2346 case Builtin::BI__sync_fetch_and_or_8:
2347 case Builtin::BI__sync_fetch_and_or_16:
2348 case Builtin::BI__sync_fetch_and_and:
2349 case Builtin::BI__sync_fetch_and_and_1:
2350 case Builtin::BI__sync_fetch_and_and_2:
2351 case Builtin::BI__sync_fetch_and_and_4:
2352 case Builtin::BI__sync_fetch_and_and_8:
2353 case Builtin::BI__sync_fetch_and_and_16:
2354 case Builtin::BI__sync_fetch_and_xor:
2355 case Builtin::BI__sync_fetch_and_xor_1:
2356 case Builtin::BI__sync_fetch_and_xor_2:
2357 case Builtin::BI__sync_fetch_and_xor_4:
2358 case Builtin::BI__sync_fetch_and_xor_8:
2359 case Builtin::BI__sync_fetch_and_xor_16:
2360 case Builtin::BI__sync_fetch_and_nand:
2361 case Builtin::BI__sync_fetch_and_nand_1:
2362 case Builtin::BI__sync_fetch_and_nand_2:
2363 case Builtin::BI__sync_fetch_and_nand_4:
2364 case Builtin::BI__sync_fetch_and_nand_8:
2365 case Builtin::BI__sync_fetch_and_nand_16:
2366 case Builtin::BI__sync_add_and_fetch:
2367 case Builtin::BI__sync_add_and_fetch_1:
2368 case Builtin::BI__sync_add_and_fetch_2:
2369 case Builtin::BI__sync_add_and_fetch_4:
2370 case Builtin::BI__sync_add_and_fetch_8:
2371 case Builtin::BI__sync_add_and_fetch_16:
2372 case Builtin::BI__sync_sub_and_fetch:
2373 case Builtin::BI__sync_sub_and_fetch_1:
2374 case Builtin::BI__sync_sub_and_fetch_2:
2375 case Builtin::BI__sync_sub_and_fetch_4:
2376 case Builtin::BI__sync_sub_and_fetch_8:
2377 case Builtin::BI__sync_sub_and_fetch_16:
2378 case Builtin::BI__sync_and_and_fetch:
2379 case Builtin::BI__sync_and_and_fetch_1:
2380 case Builtin::BI__sync_and_and_fetch_2:
2381 case Builtin::BI__sync_and_and_fetch_4:
2382 case Builtin::BI__sync_and_and_fetch_8:
2383 case Builtin::BI__sync_and_and_fetch_16:
2384 case Builtin::BI__sync_or_and_fetch:
2385 case Builtin::BI__sync_or_and_fetch_1:
2386 case Builtin::BI__sync_or_and_fetch_2:
2387 case Builtin::BI__sync_or_and_fetch_4:
2388 case Builtin::BI__sync_or_and_fetch_8:
2389 case Builtin::BI__sync_or_and_fetch_16:
2390 case Builtin::BI__sync_xor_and_fetch:
2391 case Builtin::BI__sync_xor_and_fetch_1:
2392 case Builtin::BI__sync_xor_and_fetch_2:
2393 case Builtin::BI__sync_xor_and_fetch_4:
2394 case Builtin::BI__sync_xor_and_fetch_8:
2395 case Builtin::BI__sync_xor_and_fetch_16:
2396 case Builtin::BI__sync_nand_and_fetch:
2397 case Builtin::BI__sync_nand_and_fetch_1:
2398 case Builtin::BI__sync_nand_and_fetch_2:
2399 case Builtin::BI__sync_nand_and_fetch_4:
2400 case Builtin::BI__sync_nand_and_fetch_8:
2401 case Builtin::BI__sync_nand_and_fetch_16:
2402 case Builtin::BI__sync_val_compare_and_swap:
2403 case Builtin::BI__sync_val_compare_and_swap_1:
2404 case Builtin::BI__sync_val_compare_and_swap_2:
2405 case Builtin::BI__sync_val_compare_and_swap_4:
2406 case Builtin::BI__sync_val_compare_and_swap_8:
2407 case Builtin::BI__sync_val_compare_and_swap_16:
2408 case Builtin::BI__sync_bool_compare_and_swap:
2409 case Builtin::BI__sync_bool_compare_and_swap_1:
2410 case Builtin::BI__sync_bool_compare_and_swap_2:
2411 case Builtin::BI__sync_bool_compare_and_swap_4:
2412 case Builtin::BI__sync_bool_compare_and_swap_8:
2413 case Builtin::BI__sync_bool_compare_and_swap_16:
2414 case Builtin::BI__sync_lock_test_and_set:
2415 case Builtin::BI__sync_lock_test_and_set_1:
2416 case Builtin::BI__sync_lock_test_and_set_2:
2417 case Builtin::BI__sync_lock_test_and_set_4:
2418 case Builtin::BI__sync_lock_test_and_set_8:
2419 case Builtin::BI__sync_lock_test_and_set_16:
2420 case Builtin::BI__sync_lock_release:
2421 case Builtin::BI__sync_lock_release_1:
2422 case Builtin::BI__sync_lock_release_2:
2423 case Builtin::BI__sync_lock_release_4:
2424 case Builtin::BI__sync_lock_release_8:
2425 case Builtin::BI__sync_lock_release_16:
2426 case Builtin::BI__sync_swap:
2427 case Builtin::BI__sync_swap_1:
2428 case Builtin::BI__sync_swap_2:
2429 case Builtin::BI__sync_swap_4:
2430 case Builtin::BI__sync_swap_8:
2431 case Builtin::BI__sync_swap_16:
2432 return SemaBuiltinAtomicOverloaded(TheCallResult);
2433 case Builtin::BI__sync_synchronize:
2434 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
2435 << TheCall->getCallee()->getSourceRange();
2436 break;
2437 case Builtin::BI__builtin_nontemporal_load:
2438 case Builtin::BI__builtin_nontemporal_store:
2439 return SemaBuiltinNontemporalOverloaded(TheCallResult);
2440 case Builtin::BI__builtin_memcpy_inline: {
2441 clang::Expr *SizeOp = TheCall->getArg(2);
2442 // We warn about copying to or from `nullptr` pointers when `size` is
2443 // greater than 0. When `size` is value dependent we cannot evaluate its
2444 // value so we bail out.
2445 if (SizeOp->isValueDependent())
2446 break;
2447 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) {
2448 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
2449 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
2451 break;
2453 case Builtin::BI__builtin_memset_inline: {
2454 clang::Expr *SizeOp = TheCall->getArg(2);
2455 // We warn about filling to `nullptr` pointers when `size` is greater than
2456 // 0. When `size` is value dependent we cannot evaluate its value so we bail
2457 // out.
2458 if (SizeOp->isValueDependent())
2459 break;
2460 if (!SizeOp->EvaluateKnownConstInt(Context).isZero())
2461 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
2462 break;
2464 #define BUILTIN(ID, TYPE, ATTRS)
2465 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
2466 case Builtin::BI##ID: \
2467 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
2468 #include "clang/Basic/Builtins.def"
2469 case Builtin::BI__annotation:
2470 if (SemaBuiltinMSVCAnnotation(*this, TheCall))
2471 return ExprError();
2472 break;
2473 case Builtin::BI__builtin_annotation:
2474 if (SemaBuiltinAnnotation(*this, TheCall))
2475 return ExprError();
2476 break;
2477 case Builtin::BI__builtin_addressof:
2478 if (SemaBuiltinAddressof(*this, TheCall))
2479 return ExprError();
2480 break;
2481 case Builtin::BI__builtin_function_start:
2482 if (SemaBuiltinFunctionStart(*this, TheCall))
2483 return ExprError();
2484 break;
2485 case Builtin::BI__builtin_is_aligned:
2486 case Builtin::BI__builtin_align_up:
2487 case Builtin::BI__builtin_align_down:
2488 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID))
2489 return ExprError();
2490 break;
2491 case Builtin::BI__builtin_add_overflow:
2492 case Builtin::BI__builtin_sub_overflow:
2493 case Builtin::BI__builtin_mul_overflow:
2494 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID))
2495 return ExprError();
2496 break;
2497 case Builtin::BI__builtin_operator_new:
2498 case Builtin::BI__builtin_operator_delete: {
2499 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
2500 ExprResult Res =
2501 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
2502 if (Res.isInvalid())
2503 CorrectDelayedTyposInExpr(TheCallResult.get());
2504 return Res;
2506 case Builtin::BI__builtin_dump_struct:
2507 return SemaBuiltinDumpStruct(*this, TheCall);
2508 case Builtin::BI__builtin_expect_with_probability: {
2509 // We first want to ensure we are called with 3 arguments
2510 if (checkArgCount(*this, TheCall, 3))
2511 return ExprError();
2512 // then check probability is constant float in range [0.0, 1.0]
2513 const Expr *ProbArg = TheCall->getArg(2);
2514 SmallVector<PartialDiagnosticAt, 8> Notes;
2515 Expr::EvalResult Eval;
2516 Eval.Diag = &Notes;
2517 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) ||
2518 !Eval.Val.isFloat()) {
2519 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
2520 << ProbArg->getSourceRange();
2521 for (const PartialDiagnosticAt &PDiag : Notes)
2522 Diag(PDiag.first, PDiag.second);
2523 return ExprError();
2525 llvm::APFloat Probability = Eval.Val.getFloat();
2526 bool LoseInfo = false;
2527 Probability.convert(llvm::APFloat::IEEEdouble(),
2528 llvm::RoundingMode::Dynamic, &LoseInfo);
2529 if (!(Probability >= llvm::APFloat(0.0) &&
2530 Probability <= llvm::APFloat(1.0))) {
2531 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
2532 << ProbArg->getSourceRange();
2533 return ExprError();
2535 break;
2537 case Builtin::BI__builtin_preserve_access_index:
2538 if (SemaBuiltinPreserveAI(*this, TheCall))
2539 return ExprError();
2540 break;
2541 case Builtin::BI__builtin_call_with_static_chain:
2542 if (SemaBuiltinCallWithStaticChain(*this, TheCall))
2543 return ExprError();
2544 break;
2545 case Builtin::BI__exception_code:
2546 case Builtin::BI_exception_code:
2547 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
2548 diag::err_seh___except_block))
2549 return ExprError();
2550 break;
2551 case Builtin::BI__exception_info:
2552 case Builtin::BI_exception_info:
2553 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
2554 diag::err_seh___except_filter))
2555 return ExprError();
2556 break;
2557 case Builtin::BI__GetExceptionInfo:
2558 if (checkArgCount(*this, TheCall, 1))
2559 return ExprError();
2561 if (CheckCXXThrowOperand(
2562 TheCall->getBeginLoc(),
2563 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
2564 TheCall))
2565 return ExprError();
2567 TheCall->setType(Context.VoidPtrTy);
2568 break;
2569 case Builtin::BIaddressof:
2570 case Builtin::BI__addressof:
2571 case Builtin::BIforward:
2572 case Builtin::BIforward_like:
2573 case Builtin::BImove:
2574 case Builtin::BImove_if_noexcept:
2575 case Builtin::BIas_const: {
2576 // These are all expected to be of the form
2577 // T &/&&/* f(U &/&&)
2578 // where T and U only differ in qualification.
2579 if (checkArgCount(*this, TheCall, 1))
2580 return ExprError();
2581 QualType Param = FDecl->getParamDecl(0)->getType();
2582 QualType Result = FDecl->getReturnType();
2583 bool ReturnsPointer = BuiltinID == Builtin::BIaddressof ||
2584 BuiltinID == Builtin::BI__addressof;
2585 if (!(Param->isReferenceType() &&
2586 (ReturnsPointer ? Result->isAnyPointerType()
2587 : Result->isReferenceType()) &&
2588 Context.hasSameUnqualifiedType(Param->getPointeeType(),
2589 Result->getPointeeType()))) {
2590 Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported)
2591 << FDecl;
2592 return ExprError();
2594 break;
2596 // OpenCL v2.0, s6.13.16 - Pipe functions
2597 case Builtin::BIread_pipe:
2598 case Builtin::BIwrite_pipe:
2599 // Since those two functions are declared with var args, we need a semantic
2600 // check for the argument.
2601 if (SemaBuiltinRWPipe(*this, TheCall))
2602 return ExprError();
2603 break;
2604 case Builtin::BIreserve_read_pipe:
2605 case Builtin::BIreserve_write_pipe:
2606 case Builtin::BIwork_group_reserve_read_pipe:
2607 case Builtin::BIwork_group_reserve_write_pipe:
2608 if (SemaBuiltinReserveRWPipe(*this, TheCall))
2609 return ExprError();
2610 break;
2611 case Builtin::BIsub_group_reserve_read_pipe:
2612 case Builtin::BIsub_group_reserve_write_pipe:
2613 if (checkOpenCLSubgroupExt(*this, TheCall) ||
2614 SemaBuiltinReserveRWPipe(*this, TheCall))
2615 return ExprError();
2616 break;
2617 case Builtin::BIcommit_read_pipe:
2618 case Builtin::BIcommit_write_pipe:
2619 case Builtin::BIwork_group_commit_read_pipe:
2620 case Builtin::BIwork_group_commit_write_pipe:
2621 if (SemaBuiltinCommitRWPipe(*this, TheCall))
2622 return ExprError();
2623 break;
2624 case Builtin::BIsub_group_commit_read_pipe:
2625 case Builtin::BIsub_group_commit_write_pipe:
2626 if (checkOpenCLSubgroupExt(*this, TheCall) ||
2627 SemaBuiltinCommitRWPipe(*this, TheCall))
2628 return ExprError();
2629 break;
2630 case Builtin::BIget_pipe_num_packets:
2631 case Builtin::BIget_pipe_max_packets:
2632 if (SemaBuiltinPipePackets(*this, TheCall))
2633 return ExprError();
2634 break;
2635 case Builtin::BIto_global:
2636 case Builtin::BIto_local:
2637 case Builtin::BIto_private:
2638 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
2639 return ExprError();
2640 break;
2641 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
2642 case Builtin::BIenqueue_kernel:
2643 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
2644 return ExprError();
2645 break;
2646 case Builtin::BIget_kernel_work_group_size:
2647 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
2648 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
2649 return ExprError();
2650 break;
2651 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
2652 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
2653 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
2654 return ExprError();
2655 break;
2656 case Builtin::BI__builtin_os_log_format:
2657 Cleanup.setExprNeedsCleanups(true);
2658 [[fallthrough]];
2659 case Builtin::BI__builtin_os_log_format_buffer_size:
2660 if (SemaBuiltinOSLogFormat(TheCall))
2661 return ExprError();
2662 break;
2663 case Builtin::BI__builtin_frame_address:
2664 case Builtin::BI__builtin_return_address: {
2665 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
2666 return ExprError();
2668 // -Wframe-address warning if non-zero passed to builtin
2669 // return/frame address.
2670 Expr::EvalResult Result;
2671 if (!TheCall->getArg(0)->isValueDependent() &&
2672 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
2673 Result.Val.getInt() != 0)
2674 Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
2675 << ((BuiltinID == Builtin::BI__builtin_return_address)
2676 ? "__builtin_return_address"
2677 : "__builtin_frame_address")
2678 << TheCall->getSourceRange();
2679 break;
2682 case Builtin::BI__builtin_nondeterministic_value: {
2683 if (SemaBuiltinNonDeterministicValue(TheCall))
2684 return ExprError();
2685 break;
2688 // __builtin_elementwise_abs restricts the element type to signed integers or
2689 // floating point types only.
2690 case Builtin::BI__builtin_elementwise_abs: {
2691 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
2692 return ExprError();
2694 QualType ArgTy = TheCall->getArg(0)->getType();
2695 QualType EltTy = ArgTy;
2697 if (auto *VecTy = EltTy->getAs<VectorType>())
2698 EltTy = VecTy->getElementType();
2699 if (EltTy->isUnsignedIntegerType()) {
2700 Diag(TheCall->getArg(0)->getBeginLoc(),
2701 diag::err_builtin_invalid_arg_type)
2702 << 1 << /* signed integer or float ty*/ 3 << ArgTy;
2703 return ExprError();
2705 break;
2708 // These builtins restrict the element type to floating point
2709 // types only.
2710 case Builtin::BI__builtin_elementwise_ceil:
2711 case Builtin::BI__builtin_elementwise_cos:
2712 case Builtin::BI__builtin_elementwise_exp:
2713 case Builtin::BI__builtin_elementwise_exp2:
2714 case Builtin::BI__builtin_elementwise_floor:
2715 case Builtin::BI__builtin_elementwise_log:
2716 case Builtin::BI__builtin_elementwise_log2:
2717 case Builtin::BI__builtin_elementwise_log10:
2718 case Builtin::BI__builtin_elementwise_roundeven:
2719 case Builtin::BI__builtin_elementwise_round:
2720 case Builtin::BI__builtin_elementwise_rint:
2721 case Builtin::BI__builtin_elementwise_nearbyint:
2722 case Builtin::BI__builtin_elementwise_sin:
2723 case Builtin::BI__builtin_elementwise_sqrt:
2724 case Builtin::BI__builtin_elementwise_trunc:
2725 case Builtin::BI__builtin_elementwise_canonicalize: {
2726 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
2727 return ExprError();
2729 QualType ArgTy = TheCall->getArg(0)->getType();
2730 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
2731 ArgTy, 1))
2732 return ExprError();
2733 break;
2735 case Builtin::BI__builtin_elementwise_fma: {
2736 if (SemaBuiltinElementwiseTernaryMath(TheCall))
2737 return ExprError();
2738 break;
2741 // These builtins restrict the element type to floating point
2742 // types only, and take in two arguments.
2743 case Builtin::BI__builtin_elementwise_pow: {
2744 if (SemaBuiltinElementwiseMath(TheCall))
2745 return ExprError();
2747 QualType ArgTy = TheCall->getArg(0)->getType();
2748 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
2749 ArgTy, 1) ||
2750 checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(),
2751 ArgTy, 2))
2752 return ExprError();
2753 break;
2756 // These builtins restrict the element type to integer
2757 // types only.
2758 case Builtin::BI__builtin_elementwise_add_sat:
2759 case Builtin::BI__builtin_elementwise_sub_sat: {
2760 if (SemaBuiltinElementwiseMath(TheCall))
2761 return ExprError();
2763 const Expr *Arg = TheCall->getArg(0);
2764 QualType ArgTy = Arg->getType();
2765 QualType EltTy = ArgTy;
2767 if (auto *VecTy = EltTy->getAs<VectorType>())
2768 EltTy = VecTy->getElementType();
2770 if (!EltTy->isIntegerType()) {
2771 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2772 << 1 << /* integer ty */ 6 << ArgTy;
2773 return ExprError();
2775 break;
2778 case Builtin::BI__builtin_elementwise_min:
2779 case Builtin::BI__builtin_elementwise_max:
2780 if (SemaBuiltinElementwiseMath(TheCall))
2781 return ExprError();
2782 break;
2784 case Builtin::BI__builtin_elementwise_bitreverse: {
2785 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
2786 return ExprError();
2788 const Expr *Arg = TheCall->getArg(0);
2789 QualType ArgTy = Arg->getType();
2790 QualType EltTy = ArgTy;
2792 if (auto *VecTy = EltTy->getAs<VectorType>())
2793 EltTy = VecTy->getElementType();
2795 if (!EltTy->isIntegerType()) {
2796 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2797 << 1 << /* integer ty */ 6 << ArgTy;
2798 return ExprError();
2800 break;
2803 case Builtin::BI__builtin_elementwise_copysign: {
2804 if (checkArgCount(*this, TheCall, 2))
2805 return ExprError();
2807 ExprResult Magnitude = UsualUnaryConversions(TheCall->getArg(0));
2808 ExprResult Sign = UsualUnaryConversions(TheCall->getArg(1));
2809 if (Magnitude.isInvalid() || Sign.isInvalid())
2810 return ExprError();
2812 QualType MagnitudeTy = Magnitude.get()->getType();
2813 QualType SignTy = Sign.get()->getType();
2814 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
2815 MagnitudeTy, 1) ||
2816 checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(),
2817 SignTy, 2)) {
2818 return ExprError();
2821 if (MagnitudeTy.getCanonicalType() != SignTy.getCanonicalType()) {
2822 return Diag(Sign.get()->getBeginLoc(),
2823 diag::err_typecheck_call_different_arg_types)
2824 << MagnitudeTy << SignTy;
2827 TheCall->setArg(0, Magnitude.get());
2828 TheCall->setArg(1, Sign.get());
2829 TheCall->setType(Magnitude.get()->getType());
2830 break;
2832 case Builtin::BI__builtin_reduce_max:
2833 case Builtin::BI__builtin_reduce_min: {
2834 if (PrepareBuiltinReduceMathOneArgCall(TheCall))
2835 return ExprError();
2837 const Expr *Arg = TheCall->getArg(0);
2838 const auto *TyA = Arg->getType()->getAs<VectorType>();
2839 if (!TyA) {
2840 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2841 << 1 << /* vector ty*/ 4 << Arg->getType();
2842 return ExprError();
2845 TheCall->setType(TyA->getElementType());
2846 break;
2849 // These builtins support vectors of integers only.
2850 // TODO: ADD/MUL should support floating-point types.
2851 case Builtin::BI__builtin_reduce_add:
2852 case Builtin::BI__builtin_reduce_mul:
2853 case Builtin::BI__builtin_reduce_xor:
2854 case Builtin::BI__builtin_reduce_or:
2855 case Builtin::BI__builtin_reduce_and: {
2856 if (PrepareBuiltinReduceMathOneArgCall(TheCall))
2857 return ExprError();
2859 const Expr *Arg = TheCall->getArg(0);
2860 const auto *TyA = Arg->getType()->getAs<VectorType>();
2861 if (!TyA || !TyA->getElementType()->isIntegerType()) {
2862 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2863 << 1 << /* vector of integers */ 6 << Arg->getType();
2864 return ExprError();
2866 TheCall->setType(TyA->getElementType());
2867 break;
2870 case Builtin::BI__builtin_matrix_transpose:
2871 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);
2873 case Builtin::BI__builtin_matrix_column_major_load:
2874 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);
2876 case Builtin::BI__builtin_matrix_column_major_store:
2877 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
2879 case Builtin::BI__builtin_get_device_side_mangled_name: {
2880 auto Check = [](CallExpr *TheCall) {
2881 if (TheCall->getNumArgs() != 1)
2882 return false;
2883 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
2884 if (!DRE)
2885 return false;
2886 auto *D = DRE->getDecl();
2887 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
2888 return false;
2889 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
2890 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
2892 if (!Check(TheCall)) {
2893 Diag(TheCall->getBeginLoc(),
2894 diag::err_hip_invalid_args_builtin_mangled_name);
2895 return ExprError();
2900 // Since the target specific builtins for each arch overlap, only check those
2901 // of the arch we are compiling for.
2902 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
2903 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
2904 assert(Context.getAuxTargetInfo() &&
2905 "Aux Target Builtin, but not an aux target?");
2907 if (CheckTSBuiltinFunctionCall(
2908 *Context.getAuxTargetInfo(),
2909 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
2910 return ExprError();
2911 } else {
2912 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
2913 TheCall))
2914 return ExprError();
2918 return TheCallResult;
2921 // Get the valid immediate range for the specified NEON type code.
2922 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
2923 NeonTypeFlags Type(t);
2924 int IsQuad = ForceQuad ? true : Type.isQuad();
2925 switch (Type.getEltType()) {
2926 case NeonTypeFlags::Int8:
2927 case NeonTypeFlags::Poly8:
2928 return shift ? 7 : (8 << IsQuad) - 1;
2929 case NeonTypeFlags::Int16:
2930 case NeonTypeFlags::Poly16:
2931 return shift ? 15 : (4 << IsQuad) - 1;
2932 case NeonTypeFlags::Int32:
2933 return shift ? 31 : (2 << IsQuad) - 1;
2934 case NeonTypeFlags::Int64:
2935 case NeonTypeFlags::Poly64:
2936 return shift ? 63 : (1 << IsQuad) - 1;
2937 case NeonTypeFlags::Poly128:
2938 return shift ? 127 : (1 << IsQuad) - 1;
2939 case NeonTypeFlags::Float16:
2940 assert(!shift && "cannot shift float types!");
2941 return (4 << IsQuad) - 1;
2942 case NeonTypeFlags::Float32:
2943 assert(!shift && "cannot shift float types!");
2944 return (2 << IsQuad) - 1;
2945 case NeonTypeFlags::Float64:
2946 assert(!shift && "cannot shift float types!");
2947 return (1 << IsQuad) - 1;
2948 case NeonTypeFlags::BFloat16:
2949 assert(!shift && "cannot shift float types!");
2950 return (4 << IsQuad) - 1;
2952 llvm_unreachable("Invalid NeonTypeFlag!");
2955 /// getNeonEltType - Return the QualType corresponding to the elements of
2956 /// the vector type specified by the NeonTypeFlags. This is used to check
2957 /// the pointer arguments for Neon load/store intrinsics.
2958 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
2959 bool IsPolyUnsigned, bool IsInt64Long) {
2960 switch (Flags.getEltType()) {
2961 case NeonTypeFlags::Int8:
2962 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
2963 case NeonTypeFlags::Int16:
2964 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
2965 case NeonTypeFlags::Int32:
2966 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
2967 case NeonTypeFlags::Int64:
2968 if (IsInt64Long)
2969 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
2970 else
2971 return Flags.isUnsigned() ? Context.UnsignedLongLongTy
2972 : Context.LongLongTy;
2973 case NeonTypeFlags::Poly8:
2974 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
2975 case NeonTypeFlags::Poly16:
2976 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
2977 case NeonTypeFlags::Poly64:
2978 if (IsInt64Long)
2979 return Context.UnsignedLongTy;
2980 else
2981 return Context.UnsignedLongLongTy;
2982 case NeonTypeFlags::Poly128:
2983 break;
2984 case NeonTypeFlags::Float16:
2985 return Context.HalfTy;
2986 case NeonTypeFlags::Float32:
2987 return Context.FloatTy;
2988 case NeonTypeFlags::Float64:
2989 return Context.DoubleTy;
2990 case NeonTypeFlags::BFloat16:
2991 return Context.BFloat16Ty;
2993 llvm_unreachable("Invalid NeonTypeFlag!");
2996 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
2997 // Range check SVE intrinsics that take immediate values.
2998 SmallVector<std::tuple<int,int,int>, 3> ImmChecks;
3000 switch (BuiltinID) {
3001 default:
3002 return false;
3003 #define GET_SVE_IMMEDIATE_CHECK
3004 #include "clang/Basic/arm_sve_sema_rangechecks.inc"
3005 #undef GET_SVE_IMMEDIATE_CHECK
3006 #define GET_SME_IMMEDIATE_CHECK
3007 #include "clang/Basic/arm_sme_sema_rangechecks.inc"
3008 #undef GET_SME_IMMEDIATE_CHECK
3011 // Perform all the immediate checks for this builtin call.
3012 bool HasError = false;
3013 for (auto &I : ImmChecks) {
3014 int ArgNum, CheckTy, ElementSizeInBits;
3015 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
3017 typedef bool(*OptionSetCheckFnTy)(int64_t Value);
3019 // Function that checks whether the operand (ArgNum) is an immediate
3020 // that is one of the predefined values.
3021 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
3022 int ErrDiag) -> bool {
3023 // We can't check the value of a dependent argument.
3024 Expr *Arg = TheCall->getArg(ArgNum);
3025 if (Arg->isTypeDependent() || Arg->isValueDependent())
3026 return false;
3028 // Check constant-ness first.
3029 llvm::APSInt Imm;
3030 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
3031 return true;
3033 if (!CheckImm(Imm.getSExtValue()))
3034 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
3035 return false;
3038 switch ((SVETypeFlags::ImmCheckType)CheckTy) {
3039 case SVETypeFlags::ImmCheck0_31:
3040 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
3041 HasError = true;
3042 break;
3043 case SVETypeFlags::ImmCheck0_13:
3044 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
3045 HasError = true;
3046 break;
3047 case SVETypeFlags::ImmCheck1_16:
3048 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
3049 HasError = true;
3050 break;
3051 case SVETypeFlags::ImmCheck0_7:
3052 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
3053 HasError = true;
3054 break;
3055 case SVETypeFlags::ImmCheckExtract:
3056 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
3057 (2048 / ElementSizeInBits) - 1))
3058 HasError = true;
3059 break;
3060 case SVETypeFlags::ImmCheckShiftRight:
3061 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
3062 HasError = true;
3063 break;
3064 case SVETypeFlags::ImmCheckShiftRightNarrow:
3065 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
3066 ElementSizeInBits / 2))
3067 HasError = true;
3068 break;
3069 case SVETypeFlags::ImmCheckShiftLeft:
3070 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
3071 ElementSizeInBits - 1))
3072 HasError = true;
3073 break;
3074 case SVETypeFlags::ImmCheckLaneIndex:
3075 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
3076 (128 / (1 * ElementSizeInBits)) - 1))
3077 HasError = true;
3078 break;
3079 case SVETypeFlags::ImmCheckLaneIndexCompRotate:
3080 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
3081 (128 / (2 * ElementSizeInBits)) - 1))
3082 HasError = true;
3083 break;
3084 case SVETypeFlags::ImmCheckLaneIndexDot:
3085 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
3086 (128 / (4 * ElementSizeInBits)) - 1))
3087 HasError = true;
3088 break;
3089 case SVETypeFlags::ImmCheckComplexRot90_270:
3090 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
3091 diag::err_rotation_argument_to_cadd))
3092 HasError = true;
3093 break;
3094 case SVETypeFlags::ImmCheckComplexRotAll90:
3095 if (CheckImmediateInSet(
3096 [](int64_t V) {
3097 return V == 0 || V == 90 || V == 180 || V == 270;
3099 diag::err_rotation_argument_to_cmla))
3100 HasError = true;
3101 break;
3102 case SVETypeFlags::ImmCheck0_1:
3103 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
3104 HasError = true;
3105 break;
3106 case SVETypeFlags::ImmCheck0_2:
3107 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
3108 HasError = true;
3109 break;
3110 case SVETypeFlags::ImmCheck0_3:
3111 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
3112 HasError = true;
3113 break;
3114 case SVETypeFlags::ImmCheck0_0:
3115 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 0))
3116 HasError = true;
3117 break;
3118 case SVETypeFlags::ImmCheck0_15:
3119 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 15))
3120 HasError = true;
3121 break;
3122 case SVETypeFlags::ImmCheck0_255:
3123 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 255))
3124 HasError = true;
3125 break;
3126 case SVETypeFlags::ImmCheck2_4_Mul2:
3127 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 2, 4) ||
3128 SemaBuiltinConstantArgMultiple(TheCall, ArgNum, 2))
3129 HasError = true;
3130 break;
3134 return HasError;
3137 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
3138 unsigned BuiltinID, CallExpr *TheCall) {
3139 llvm::APSInt Result;
3140 uint64_t mask = 0;
3141 unsigned TV = 0;
3142 int PtrArgNum = -1;
3143 bool HasConstPtr = false;
3144 switch (BuiltinID) {
3145 #define GET_NEON_OVERLOAD_CHECK
3146 #include "clang/Basic/arm_neon.inc"
3147 #include "clang/Basic/arm_fp16.inc"
3148 #undef GET_NEON_OVERLOAD_CHECK
3151 // For NEON intrinsics which are overloaded on vector element type, validate
3152 // the immediate which specifies which variant to emit.
3153 unsigned ImmArg = TheCall->getNumArgs()-1;
3154 if (mask) {
3155 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
3156 return true;
3158 TV = Result.getLimitedValue(64);
3159 if ((TV > 63) || (mask & (1ULL << TV)) == 0)
3160 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
3161 << TheCall->getArg(ImmArg)->getSourceRange();
3164 if (PtrArgNum >= 0) {
3165 // Check that pointer arguments have the specified type.
3166 Expr *Arg = TheCall->getArg(PtrArgNum);
3167 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
3168 Arg = ICE->getSubExpr();
3169 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
3170 QualType RHSTy = RHS.get()->getType();
3172 llvm::Triple::ArchType Arch = TI.getTriple().getArch();
3173 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
3174 Arch == llvm::Triple::aarch64_32 ||
3175 Arch == llvm::Triple::aarch64_be;
3176 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
3177 QualType EltTy =
3178 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
3179 if (HasConstPtr)
3180 EltTy = EltTy.withConst();
3181 QualType LHSTy = Context.getPointerType(EltTy);
3182 AssignConvertType ConvTy;
3183 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
3184 if (RHS.isInvalid())
3185 return true;
3186 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
3187 RHS.get(), AA_Assigning))
3188 return true;
3191 // For NEON intrinsics which take an immediate value as part of the
3192 // instruction, range check them here.
3193 unsigned i = 0, l = 0, u = 0;
3194 switch (BuiltinID) {
3195 default:
3196 return false;
3197 #define GET_NEON_IMMEDIATE_CHECK
3198 #include "clang/Basic/arm_neon.inc"
3199 #include "clang/Basic/arm_fp16.inc"
3200 #undef GET_NEON_IMMEDIATE_CHECK
3203 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
3206 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
3207 switch (BuiltinID) {
3208 default:
3209 return false;
3210 #include "clang/Basic/arm_mve_builtin_sema.inc"
3214 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
3215 CallExpr *TheCall) {
3216 bool Err = false;
3217 switch (BuiltinID) {
3218 default:
3219 return false;
3220 #include "clang/Basic/arm_cde_builtin_sema.inc"
3223 if (Err)
3224 return true;
3226 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
3229 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
3230 const Expr *CoprocArg, bool WantCDE) {
3231 if (isConstantEvaluatedContext())
3232 return false;
3234 // We can't check the value of a dependent argument.
3235 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
3236 return false;
3238 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
3239 int64_t CoprocNo = CoprocNoAP.getExtValue();
3240 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
3242 uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
3243 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
3245 if (IsCDECoproc != WantCDE)
3246 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
3247 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
3249 return false;
3252 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
3253 unsigned MaxWidth) {
3254 assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
3255 BuiltinID == ARM::BI__builtin_arm_ldaex ||
3256 BuiltinID == ARM::BI__builtin_arm_strex ||
3257 BuiltinID == ARM::BI__builtin_arm_stlex ||
3258 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
3259 BuiltinID == AArch64::BI__builtin_arm_ldaex ||
3260 BuiltinID == AArch64::BI__builtin_arm_strex ||
3261 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
3262 "unexpected ARM builtin");
3263 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
3264 BuiltinID == ARM::BI__builtin_arm_ldaex ||
3265 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
3266 BuiltinID == AArch64::BI__builtin_arm_ldaex;
3268 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
3270 // Ensure that we have the proper number of arguments.
3271 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
3272 return true;
3274 // Inspect the pointer argument of the atomic builtin. This should always be
3275 // a pointer type, whose element is an integral scalar or pointer type.
3276 // Because it is a pointer type, we don't have to worry about any implicit
3277 // casts here.
3278 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
3279 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
3280 if (PointerArgRes.isInvalid())
3281 return true;
3282 PointerArg = PointerArgRes.get();
3284 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
3285 if (!pointerType) {
3286 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
3287 << PointerArg->getType() << PointerArg->getSourceRange();
3288 return true;
3291 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
3292 // task is to insert the appropriate casts into the AST. First work out just
3293 // what the appropriate type is.
3294 QualType ValType = pointerType->getPointeeType();
3295 QualType AddrType = ValType.getUnqualifiedType().withVolatile();
3296 if (IsLdrex)
3297 AddrType.addConst();
3299 // Issue a warning if the cast is dodgy.
3300 CastKind CastNeeded = CK_NoOp;
3301 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
3302 CastNeeded = CK_BitCast;
3303 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
3304 << PointerArg->getType() << Context.getPointerType(AddrType)
3305 << AA_Passing << PointerArg->getSourceRange();
3308 // Finally, do the cast and replace the argument with the corrected version.
3309 AddrType = Context.getPointerType(AddrType);
3310 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
3311 if (PointerArgRes.isInvalid())
3312 return true;
3313 PointerArg = PointerArgRes.get();
3315 TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
3317 // In general, we allow ints, floats and pointers to be loaded and stored.
3318 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
3319 !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
3320 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
3321 << PointerArg->getType() << PointerArg->getSourceRange();
3322 return true;
3325 // But ARM doesn't have instructions to deal with 128-bit versions.
3326 if (Context.getTypeSize(ValType) > MaxWidth) {
3327 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
3328 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
3329 << PointerArg->getType() << PointerArg->getSourceRange();
3330 return true;
3333 switch (ValType.getObjCLifetime()) {
3334 case Qualifiers::OCL_None:
3335 case Qualifiers::OCL_ExplicitNone:
3336 // okay
3337 break;
3339 case Qualifiers::OCL_Weak:
3340 case Qualifiers::OCL_Strong:
3341 case Qualifiers::OCL_Autoreleasing:
3342 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
3343 << ValType << PointerArg->getSourceRange();
3344 return true;
3347 if (IsLdrex) {
3348 TheCall->setType(ValType);
3349 return false;
3352 // Initialize the argument to be stored.
3353 ExprResult ValArg = TheCall->getArg(0);
3354 InitializedEntity Entity = InitializedEntity::InitializeParameter(
3355 Context, ValType, /*consume*/ false);
3356 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
3357 if (ValArg.isInvalid())
3358 return true;
3359 TheCall->setArg(0, ValArg.get());
3361 // __builtin_arm_strex always returns an int. It's marked as such in the .def,
3362 // but the custom checker bypasses all default analysis.
3363 TheCall->setType(Context.IntTy);
3364 return false;
3367 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
3368 CallExpr *TheCall) {
3369 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
3370 BuiltinID == ARM::BI__builtin_arm_ldaex ||
3371 BuiltinID == ARM::BI__builtin_arm_strex ||
3372 BuiltinID == ARM::BI__builtin_arm_stlex) {
3373 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
3376 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
3377 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
3378 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
3381 if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
3382 BuiltinID == ARM::BI__builtin_arm_wsr64)
3383 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
3385 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
3386 BuiltinID == ARM::BI__builtin_arm_rsrp ||
3387 BuiltinID == ARM::BI__builtin_arm_wsr ||
3388 BuiltinID == ARM::BI__builtin_arm_wsrp)
3389 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
3391 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
3392 return true;
3393 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
3394 return true;
3395 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
3396 return true;
3398 // For intrinsics which take an immediate value as part of the instruction,
3399 // range check them here.
3400 // FIXME: VFP Intrinsics should error if VFP not present.
3401 switch (BuiltinID) {
3402 default: return false;
3403 case ARM::BI__builtin_arm_ssat:
3404 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
3405 case ARM::BI__builtin_arm_usat:
3406 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
3407 case ARM::BI__builtin_arm_ssat16:
3408 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
3409 case ARM::BI__builtin_arm_usat16:
3410 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
3411 case ARM::BI__builtin_arm_vcvtr_f:
3412 case ARM::BI__builtin_arm_vcvtr_d:
3413 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3414 case ARM::BI__builtin_arm_dmb:
3415 case ARM::BI__builtin_arm_dsb:
3416 case ARM::BI__builtin_arm_isb:
3417 case ARM::BI__builtin_arm_dbg:
3418 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
3419 case ARM::BI__builtin_arm_cdp:
3420 case ARM::BI__builtin_arm_cdp2:
3421 case ARM::BI__builtin_arm_mcr:
3422 case ARM::BI__builtin_arm_mcr2:
3423 case ARM::BI__builtin_arm_mrc:
3424 case ARM::BI__builtin_arm_mrc2:
3425 case ARM::BI__builtin_arm_mcrr:
3426 case ARM::BI__builtin_arm_mcrr2:
3427 case ARM::BI__builtin_arm_mrrc:
3428 case ARM::BI__builtin_arm_mrrc2:
3429 case ARM::BI__builtin_arm_ldc:
3430 case ARM::BI__builtin_arm_ldcl:
3431 case ARM::BI__builtin_arm_ldc2:
3432 case ARM::BI__builtin_arm_ldc2l:
3433 case ARM::BI__builtin_arm_stc:
3434 case ARM::BI__builtin_arm_stcl:
3435 case ARM::BI__builtin_arm_stc2:
3436 case ARM::BI__builtin_arm_stc2l:
3437 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
3438 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
3439 /*WantCDE*/ false);
3443 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
3444 unsigned BuiltinID,
3445 CallExpr *TheCall) {
3446 if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
3447 BuiltinID == AArch64::BI__builtin_arm_ldaex ||
3448 BuiltinID == AArch64::BI__builtin_arm_strex ||
3449 BuiltinID == AArch64::BI__builtin_arm_stlex) {
3450 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
3453 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
3454 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
3455 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3) ||
3456 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
3457 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
3460 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
3461 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
3462 BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
3463 BuiltinID == AArch64::BI__builtin_arm_wsr128)
3464 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
3466 // Memory Tagging Extensions (MTE) Intrinsics
3467 if (BuiltinID == AArch64::BI__builtin_arm_irg ||
3468 BuiltinID == AArch64::BI__builtin_arm_addg ||
3469 BuiltinID == AArch64::BI__builtin_arm_gmi ||
3470 BuiltinID == AArch64::BI__builtin_arm_ldg ||
3471 BuiltinID == AArch64::BI__builtin_arm_stg ||
3472 BuiltinID == AArch64::BI__builtin_arm_subp) {
3473 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
3476 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
3477 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
3478 BuiltinID == AArch64::BI__builtin_arm_wsr ||
3479 BuiltinID == AArch64::BI__builtin_arm_wsrp)
3480 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
3482 // Only check the valid encoding range. Any constant in this range would be
3483 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
3484 // an exception for incorrect registers. This matches MSVC behavior.
3485 if (BuiltinID == AArch64::BI_ReadStatusReg ||
3486 BuiltinID == AArch64::BI_WriteStatusReg)
3487 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
3489 if (BuiltinID == AArch64::BI__getReg)
3490 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
3492 if (BuiltinID == AArch64::BI__break)
3493 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff);
3495 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
3496 return true;
3498 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
3499 return true;
3501 // For intrinsics which take an immediate value as part of the instruction,
3502 // range check them here.
3503 unsigned i = 0, l = 0, u = 0;
3504 switch (BuiltinID) {
3505 default: return false;
3506 case AArch64::BI__builtin_arm_dmb:
3507 case AArch64::BI__builtin_arm_dsb:
3508 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
3509 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
3512 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
3515 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
3516 if (Arg->getType()->getAsPlaceholderType())
3517 return false;
3519 // The first argument needs to be a record field access.
3520 // If it is an array element access, we delay decision
3521 // to BPF backend to check whether the access is a
3522 // field access or not.
3523 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
3524 isa<MemberExpr>(Arg->IgnoreParens()) ||
3525 isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
3528 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
3529 QualType ArgType = Arg->getType();
3530 if (ArgType->getAsPlaceholderType())
3531 return false;
3533 // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type
3534 // format:
3535 // 1. __builtin_preserve_type_info(*(<type> *)0, flag);
3536 // 2. <type> var;
3537 // __builtin_preserve_type_info(var, flag);
3538 if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
3539 !isa<UnaryOperator>(Arg->IgnoreParens()))
3540 return false;
3542 // Typedef type.
3543 if (ArgType->getAs<TypedefType>())
3544 return true;
3546 // Record type or Enum type.
3547 const Type *Ty = ArgType->getUnqualifiedDesugaredType();
3548 if (const auto *RT = Ty->getAs<RecordType>()) {
3549 if (!RT->getDecl()->getDeclName().isEmpty())
3550 return true;
3551 } else if (const auto *ET = Ty->getAs<EnumType>()) {
3552 if (!ET->getDecl()->getDeclName().isEmpty())
3553 return true;
3556 return false;
3559 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
3560 QualType ArgType = Arg->getType();
3561 if (ArgType->getAsPlaceholderType())
3562 return false;
3564 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
3565 // format:
3566 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
3567 // flag);
3568 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
3569 if (!UO)
3570 return false;
3572 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
3573 if (!CE)
3574 return false;
3575 if (CE->getCastKind() != CK_IntegralToPointer &&
3576 CE->getCastKind() != CK_NullToPointer)
3577 return false;
3579 // The integer must be from an EnumConstantDecl.
3580 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
3581 if (!DR)
3582 return false;
3584 const EnumConstantDecl *Enumerator =
3585 dyn_cast<EnumConstantDecl>(DR->getDecl());
3586 if (!Enumerator)
3587 return false;
3589 // The type must be EnumType.
3590 const Type *Ty = ArgType->getUnqualifiedDesugaredType();
3591 const auto *ET = Ty->getAs<EnumType>();
3592 if (!ET)
3593 return false;
3595 // The enum value must be supported.
3596 return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
3599 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
3600 CallExpr *TheCall) {
3601 assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
3602 BuiltinID == BPF::BI__builtin_btf_type_id ||
3603 BuiltinID == BPF::BI__builtin_preserve_type_info ||
3604 BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
3605 "unexpected BPF builtin");
3607 if (checkArgCount(*this, TheCall, 2))
3608 return true;
3610 // The second argument needs to be a constant int
3611 Expr *Arg = TheCall->getArg(1);
3612 std::optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
3613 diag::kind kind;
3614 if (!Value) {
3615 if (BuiltinID == BPF::BI__builtin_preserve_field_info)
3616 kind = diag::err_preserve_field_info_not_const;
3617 else if (BuiltinID == BPF::BI__builtin_btf_type_id)
3618 kind = diag::err_btf_type_id_not_const;
3619 else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
3620 kind = diag::err_preserve_type_info_not_const;
3621 else
3622 kind = diag::err_preserve_enum_value_not_const;
3623 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
3624 return true;
3627 // The first argument
3628 Arg = TheCall->getArg(0);
3629 bool InvalidArg = false;
3630 bool ReturnUnsignedInt = true;
3631 if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
3632 if (!isValidBPFPreserveFieldInfoArg(Arg)) {
3633 InvalidArg = true;
3634 kind = diag::err_preserve_field_info_not_field;
3636 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
3637 if (!isValidBPFPreserveTypeInfoArg(Arg)) {
3638 InvalidArg = true;
3639 kind = diag::err_preserve_type_info_invalid;
3641 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
3642 if (!isValidBPFPreserveEnumValueArg(Arg)) {
3643 InvalidArg = true;
3644 kind = diag::err_preserve_enum_value_invalid;
3646 ReturnUnsignedInt = false;
3647 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
3648 ReturnUnsignedInt = false;
3651 if (InvalidArg) {
3652 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
3653 return true;
3656 if (ReturnUnsignedInt)
3657 TheCall->setType(Context.UnsignedIntTy);
3658 else
3659 TheCall->setType(Context.UnsignedLongTy);
3660 return false;
/// Check the immediate-operand constraints of a Hexagon builtin call.
///
/// Looks up \p BuiltinID in a sorted constraint table and, for each
/// constrained operand, verifies the argument is an integer constant in the
/// encodable range (and, when a scale is specified, a multiple of the
/// required alignment). Returns true if any constraint is violated
/// (diagnostics are emitted by the Sema helpers), false otherwise —
/// including when the builtin has no table entry at all.
bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // One constrained call operand: its index (OpNum), the signedness of the
  // immediate, the number of bits the encoding provides (BitWidth), and a
  // log2 scale factor (Align) the value must be a multiple of.
  struct ArgInfo {
    uint8_t OpNum;
    bool IsSigned;
    uint8_t BitWidth;
    uint8_t Align;
  };
  // A builtin together with up to two immediate-operand constraints; unused
  // slots are zero-initialized (BitWidth == 0) and skipped below.
  struct BuiltinInfo {
    unsigned BuiltinID;
    ArgInfo Infos[2];
  };

  static BuiltinInfo Infos[] = {
    { Hexagon::BI__builtin_circ_ldd,                  {{ 3, true,  4, 3 }} },
    { Hexagon::BI__builtin_circ_ldw,                  {{ 3, true,  4, 2 }} },
    { Hexagon::BI__builtin_circ_ldh,                  {{ 3, true,  4, 1 }} },
    { Hexagon::BI__builtin_circ_lduh,                 {{ 3, true,  4, 1 }} },
    { Hexagon::BI__builtin_circ_ldb,                  {{ 3, true,  4, 0 }} },
    { Hexagon::BI__builtin_circ_ldub,                 {{ 3, true,  4, 0 }} },
    { Hexagon::BI__builtin_circ_std,                  {{ 3, true,  4, 3 }} },
    { Hexagon::BI__builtin_circ_stw,                  {{ 3, true,  4, 2 }} },
    { Hexagon::BI__builtin_circ_sth,                  {{ 3, true,  4, 1 }} },
    { Hexagon::BI__builtin_circ_sthhi,                {{ 3, true,  4, 1 }} },
    { Hexagon::BI__builtin_circ_stb,                  {{ 3, true,  4, 0 }} },

    { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci,    {{ 1, true,  4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci,     {{ 1, true,  4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci,    {{ 1, true,  4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci,     {{ 1, true,  4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci,     {{ 1, true,  4, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci,     {{ 1, true,  4, 3 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci,    {{ 1, true,  4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci,    {{ 1, true,  4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci,    {{ 1, true,  4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci,    {{ 1, true,  4, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci,    {{ 1, true,  4, 3 }} },

    { Hexagon::BI__builtin_HEXAGON_A2_combineii,      {{ 1, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrih,          {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfril,          {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrpi,          {{ 0, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_bitspliti,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi,        {{ 1, false, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti,        {{ 1, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cround_ri,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri,       {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat,   {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi,       {{ 1, false, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti,       {{ 1, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui,      {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi,       {{ 1, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti,       {{ 1, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui,      {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi,       {{ 1, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti,       {{ 1, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui,      {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_bitsclri,       {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_muxii,          {{ 2, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri,      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfclass,        {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n,        {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p,        {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfclass,        {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n,        {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p,        {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi,     {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2,  {{ 1, false, 6, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri,    {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc,    {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and,    {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p,        {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac,    {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or,     {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r,        {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or,     {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat,    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh,       {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw,       {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc,    {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and,    {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p,        {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac,    {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or,     {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
                                                      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd,    {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r,        {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or,     {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
                                                      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd,    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh,       {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw,       {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i,       {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractu,       {{ 1, false, 5, 0 },
                                                       { 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractup,      {{ 1, false, 6, 0 },
                                                       { 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insert,         {{ 2, false, 5, 0 },
                                                       { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insertp,        {{ 2, false, 6, 0 },
                                                       { 3, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc,    {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and,    {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p,        {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac,    {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or,     {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r,        {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or,     {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh,       {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw,       {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_setbit_i,       {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
                                                      {{ 2, false, 4, 0 },
                                                       { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
                                                      {{ 2, false, 4, 0 },
                                                       { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
                                                      {{ 2, false, 4, 0 },
                                                       { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
                                                      {{ 2, false, 4, 0 },
                                                       { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i,    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i,       {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_valignib,       {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_vspliceib,      {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbaddi,        {{ 1, true , 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi,       {{ 1, true,  6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extract,        {{ 1, false, 5, 0 },
                                                       { 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extractp,       {{ 1, false, 6, 0 },
                                                       { 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_lsli,           {{ 0, true,  6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri,     {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri,     {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc,  {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate,      {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
                                                      {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat,     {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
                                                      {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p,        {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc,    {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and,    {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac,    {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or,     {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r,        {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac,    {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or,     {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi,       {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B,  {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi,      {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi,      {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc,  {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
                                                      {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi,       {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B,  {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc,   {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
                                                      {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi,       {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B,  {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc,   {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
                                                      {{ 3, false, 1, 0 }} },

    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10,    {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_128B,
                                                      {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx,
                                                      {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B,
                                                      {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10,    {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_128B,
                                                      {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx,
                                                      {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B,
                                                      {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi,       {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B,  {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {{ 3, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B,
                                                      {{ 3, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi,       {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B,  {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {{ 3, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B,
                                                      {{ 3, false, 3, 0 }} },
  };

  // Use a dynamically initialized static to sort the table exactly once on
  // first run.
  static const bool SortOnce =
      (llvm::sort(Infos,
                  [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
                    return LHS.BuiltinID < RHS.BuiltinID;
                  }),
       true);
  (void)SortOnce;

  // Binary-search the sorted table for this builtin's constraint entry.
  const BuiltinInfo *F = llvm::partition_point(
      Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
  if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
    return false;

  bool Error = false;

  for (const ArgInfo &A : F->Infos) {
    // Ignore empty ArgInfo elements.
    if (A.BitWidth == 0)
      continue;

    // Range encodable in BitWidth bits: signed immediates are two's
    // complement, unsigned immediates start at zero.
    int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
    int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
    if (!A.Align) {
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
    } else {
      // The immediate is scaled by 2^Align in the encoding: widen the
      // accepted range accordingly and require the value to be a multiple
      // of the scale.
      unsigned M = 1 << A.Align;
      Min *= M;
      Max *= M;
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
      Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
    }
  }
  return Error;
}
/// Semantic checking entry point for Hexagon builtins.
///
/// All Hexagon-specific checking is immediate-operand validation, so this
/// simply delegates to the constraint-table lookup in
/// CheckHexagonBuiltinArgument. Returns true on error.
bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}
3928 bool Sema::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
3929 unsigned BuiltinID,
3930 CallExpr *TheCall) {
3931 switch (BuiltinID) {
3932 default:
3933 break;
3934 // Basic intrinsics.
3935 case LoongArch::BI__builtin_loongarch_cacop_d:
3936 case LoongArch::BI__builtin_loongarch_cacop_w: {
3937 SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(5));
3938 SemaBuiltinConstantArgRange(TheCall, 2, llvm::minIntN(12),
3939 llvm::maxIntN(12));
3940 break;
3942 case LoongArch::BI__builtin_loongarch_break:
3943 case LoongArch::BI__builtin_loongarch_dbar:
3944 case LoongArch::BI__builtin_loongarch_ibar:
3945 case LoongArch::BI__builtin_loongarch_syscall:
3946 // Check if immediate is in [0, 32767].
3947 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 32767);
3948 case LoongArch::BI__builtin_loongarch_csrrd_w:
3949 case LoongArch::BI__builtin_loongarch_csrrd_d:
3950 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 16383);
3951 case LoongArch::BI__builtin_loongarch_csrwr_w:
3952 case LoongArch::BI__builtin_loongarch_csrwr_d:
3953 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 16383);
3954 case LoongArch::BI__builtin_loongarch_csrxchg_w:
3955 case LoongArch::BI__builtin_loongarch_csrxchg_d:
3956 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 16383);
3957 case LoongArch::BI__builtin_loongarch_lddir_d:
3958 case LoongArch::BI__builtin_loongarch_ldpte_d:
3959 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
3960 case LoongArch::BI__builtin_loongarch_movfcsr2gr:
3961 case LoongArch::BI__builtin_loongarch_movgr2fcsr:
3962 return SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(2));
3964 // LSX intrinsics.
3965 case LoongArch::BI__builtin_lsx_vbitclri_b:
3966 case LoongArch::BI__builtin_lsx_vbitrevi_b:
3967 case LoongArch::BI__builtin_lsx_vbitseti_b:
3968 case LoongArch::BI__builtin_lsx_vsat_b:
3969 case LoongArch::BI__builtin_lsx_vsat_bu:
3970 case LoongArch::BI__builtin_lsx_vslli_b:
3971 case LoongArch::BI__builtin_lsx_vsrai_b:
3972 case LoongArch::BI__builtin_lsx_vsrari_b:
3973 case LoongArch::BI__builtin_lsx_vsrli_b:
3974 case LoongArch::BI__builtin_lsx_vsllwil_h_b:
3975 case LoongArch::BI__builtin_lsx_vsllwil_hu_bu:
3976 case LoongArch::BI__builtin_lsx_vrotri_b:
3977 case LoongArch::BI__builtin_lsx_vsrlri_b:
3978 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
3979 case LoongArch::BI__builtin_lsx_vbitclri_h:
3980 case LoongArch::BI__builtin_lsx_vbitrevi_h:
3981 case LoongArch::BI__builtin_lsx_vbitseti_h:
3982 case LoongArch::BI__builtin_lsx_vsat_h:
3983 case LoongArch::BI__builtin_lsx_vsat_hu:
3984 case LoongArch::BI__builtin_lsx_vslli_h:
3985 case LoongArch::BI__builtin_lsx_vsrai_h:
3986 case LoongArch::BI__builtin_lsx_vsrari_h:
3987 case LoongArch::BI__builtin_lsx_vsrli_h:
3988 case LoongArch::BI__builtin_lsx_vsllwil_w_h:
3989 case LoongArch::BI__builtin_lsx_vsllwil_wu_hu:
3990 case LoongArch::BI__builtin_lsx_vrotri_h:
3991 case LoongArch::BI__builtin_lsx_vsrlri_h:
3992 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
3993 case LoongArch::BI__builtin_lsx_vssrarni_b_h:
3994 case LoongArch::BI__builtin_lsx_vssrarni_bu_h:
3995 case LoongArch::BI__builtin_lsx_vssrani_b_h:
3996 case LoongArch::BI__builtin_lsx_vssrani_bu_h:
3997 case LoongArch::BI__builtin_lsx_vsrarni_b_h:
3998 case LoongArch::BI__builtin_lsx_vsrlni_b_h:
3999 case LoongArch::BI__builtin_lsx_vsrlrni_b_h:
4000 case LoongArch::BI__builtin_lsx_vssrlni_b_h:
4001 case LoongArch::BI__builtin_lsx_vssrlni_bu_h:
4002 case LoongArch::BI__builtin_lsx_vssrlrni_b_h:
4003 case LoongArch::BI__builtin_lsx_vssrlrni_bu_h:
4004 case LoongArch::BI__builtin_lsx_vsrani_b_h:
4005 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
4006 case LoongArch::BI__builtin_lsx_vslei_bu:
4007 case LoongArch::BI__builtin_lsx_vslei_hu:
4008 case LoongArch::BI__builtin_lsx_vslei_wu:
4009 case LoongArch::BI__builtin_lsx_vslei_du:
4010 case LoongArch::BI__builtin_lsx_vslti_bu:
4011 case LoongArch::BI__builtin_lsx_vslti_hu:
4012 case LoongArch::BI__builtin_lsx_vslti_wu:
4013 case LoongArch::BI__builtin_lsx_vslti_du:
4014 case LoongArch::BI__builtin_lsx_vmaxi_bu:
4015 case LoongArch::BI__builtin_lsx_vmaxi_hu:
4016 case LoongArch::BI__builtin_lsx_vmaxi_wu:
4017 case LoongArch::BI__builtin_lsx_vmaxi_du:
4018 case LoongArch::BI__builtin_lsx_vmini_bu:
4019 case LoongArch::BI__builtin_lsx_vmini_hu:
4020 case LoongArch::BI__builtin_lsx_vmini_wu:
4021 case LoongArch::BI__builtin_lsx_vmini_du:
4022 case LoongArch::BI__builtin_lsx_vaddi_bu:
4023 case LoongArch::BI__builtin_lsx_vaddi_hu:
4024 case LoongArch::BI__builtin_lsx_vaddi_wu:
4025 case LoongArch::BI__builtin_lsx_vaddi_du:
4026 case LoongArch::BI__builtin_lsx_vbitclri_w:
4027 case LoongArch::BI__builtin_lsx_vbitrevi_w:
4028 case LoongArch::BI__builtin_lsx_vbitseti_w:
4029 case LoongArch::BI__builtin_lsx_vsat_w:
4030 case LoongArch::BI__builtin_lsx_vsat_wu:
4031 case LoongArch::BI__builtin_lsx_vslli_w:
4032 case LoongArch::BI__builtin_lsx_vsrai_w:
4033 case LoongArch::BI__builtin_lsx_vsrari_w:
4034 case LoongArch::BI__builtin_lsx_vsrli_w:
4035 case LoongArch::BI__builtin_lsx_vsllwil_d_w:
4036 case LoongArch::BI__builtin_lsx_vsllwil_du_wu:
4037 case LoongArch::BI__builtin_lsx_vsrlri_w:
4038 case LoongArch::BI__builtin_lsx_vrotri_w:
4039 case LoongArch::BI__builtin_lsx_vsubi_bu:
4040 case LoongArch::BI__builtin_lsx_vsubi_hu:
4041 case LoongArch::BI__builtin_lsx_vbsrl_v:
4042 case LoongArch::BI__builtin_lsx_vbsll_v:
4043 case LoongArch::BI__builtin_lsx_vsubi_wu:
4044 case LoongArch::BI__builtin_lsx_vsubi_du:
4045 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
4046 case LoongArch::BI__builtin_lsx_vssrarni_h_w:
4047 case LoongArch::BI__builtin_lsx_vssrarni_hu_w:
4048 case LoongArch::BI__builtin_lsx_vssrani_h_w:
4049 case LoongArch::BI__builtin_lsx_vssrani_hu_w:
4050 case LoongArch::BI__builtin_lsx_vsrarni_h_w:
4051 case LoongArch::BI__builtin_lsx_vsrani_h_w:
4052 case LoongArch::BI__builtin_lsx_vfrstpi_b:
4053 case LoongArch::BI__builtin_lsx_vfrstpi_h:
4054 case LoongArch::BI__builtin_lsx_vsrlni_h_w:
4055 case LoongArch::BI__builtin_lsx_vsrlrni_h_w:
4056 case LoongArch::BI__builtin_lsx_vssrlni_h_w:
4057 case LoongArch::BI__builtin_lsx_vssrlni_hu_w:
4058 case LoongArch::BI__builtin_lsx_vssrlrni_h_w:
4059 case LoongArch::BI__builtin_lsx_vssrlrni_hu_w:
4060 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
4061 case LoongArch::BI__builtin_lsx_vbitclri_d:
4062 case LoongArch::BI__builtin_lsx_vbitrevi_d:
4063 case LoongArch::BI__builtin_lsx_vbitseti_d:
4064 case LoongArch::BI__builtin_lsx_vsat_d:
4065 case LoongArch::BI__builtin_lsx_vsat_du:
4066 case LoongArch::BI__builtin_lsx_vslli_d:
4067 case LoongArch::BI__builtin_lsx_vsrai_d:
4068 case LoongArch::BI__builtin_lsx_vsrli_d:
4069 case LoongArch::BI__builtin_lsx_vsrari_d:
4070 case LoongArch::BI__builtin_lsx_vrotri_d:
4071 case LoongArch::BI__builtin_lsx_vsrlri_d:
4072 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 63);
4073 case LoongArch::BI__builtin_lsx_vssrarni_w_d:
4074 case LoongArch::BI__builtin_lsx_vssrarni_wu_d:
4075 case LoongArch::BI__builtin_lsx_vssrani_w_d:
4076 case LoongArch::BI__builtin_lsx_vssrani_wu_d:
4077 case LoongArch::BI__builtin_lsx_vsrarni_w_d:
4078 case LoongArch::BI__builtin_lsx_vsrlni_w_d:
4079 case LoongArch::BI__builtin_lsx_vsrlrni_w_d:
4080 case LoongArch::BI__builtin_lsx_vssrlni_w_d:
4081 case LoongArch::BI__builtin_lsx_vssrlni_wu_d:
4082 case LoongArch::BI__builtin_lsx_vssrlrni_w_d:
4083 case LoongArch::BI__builtin_lsx_vssrlrni_wu_d:
4084 case LoongArch::BI__builtin_lsx_vsrani_w_d:
4085 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 63);
4086 case LoongArch::BI__builtin_lsx_vssrarni_d_q:
4087 case LoongArch::BI__builtin_lsx_vssrarni_du_q:
4088 case LoongArch::BI__builtin_lsx_vssrani_d_q:
4089 case LoongArch::BI__builtin_lsx_vssrani_du_q:
4090 case LoongArch::BI__builtin_lsx_vsrarni_d_q:
4091 case LoongArch::BI__builtin_lsx_vssrlni_d_q:
4092 case LoongArch::BI__builtin_lsx_vssrlni_du_q:
4093 case LoongArch::BI__builtin_lsx_vssrlrni_d_q:
4094 case LoongArch::BI__builtin_lsx_vssrlrni_du_q:
4095 case LoongArch::BI__builtin_lsx_vsrani_d_q:
4096 case LoongArch::BI__builtin_lsx_vsrlrni_d_q:
4097 case LoongArch::BI__builtin_lsx_vsrlni_d_q:
4098 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 127);
4099 case LoongArch::BI__builtin_lsx_vseqi_b:
4100 case LoongArch::BI__builtin_lsx_vseqi_h:
4101 case LoongArch::BI__builtin_lsx_vseqi_w:
4102 case LoongArch::BI__builtin_lsx_vseqi_d:
4103 case LoongArch::BI__builtin_lsx_vslti_b:
4104 case LoongArch::BI__builtin_lsx_vslti_h:
4105 case LoongArch::BI__builtin_lsx_vslti_w:
4106 case LoongArch::BI__builtin_lsx_vslti_d:
4107 case LoongArch::BI__builtin_lsx_vslei_b:
4108 case LoongArch::BI__builtin_lsx_vslei_h:
4109 case LoongArch::BI__builtin_lsx_vslei_w:
4110 case LoongArch::BI__builtin_lsx_vslei_d:
4111 case LoongArch::BI__builtin_lsx_vmaxi_b:
4112 case LoongArch::BI__builtin_lsx_vmaxi_h:
4113 case LoongArch::BI__builtin_lsx_vmaxi_w:
4114 case LoongArch::BI__builtin_lsx_vmaxi_d:
4115 case LoongArch::BI__builtin_lsx_vmini_b:
4116 case LoongArch::BI__builtin_lsx_vmini_h:
4117 case LoongArch::BI__builtin_lsx_vmini_w:
4118 case LoongArch::BI__builtin_lsx_vmini_d:
4119 return SemaBuiltinConstantArgRange(TheCall, 1, -16, 15);
4120 case LoongArch::BI__builtin_lsx_vandi_b:
4121 case LoongArch::BI__builtin_lsx_vnori_b:
4122 case LoongArch::BI__builtin_lsx_vori_b:
4123 case LoongArch::BI__builtin_lsx_vshuf4i_b:
4124 case LoongArch::BI__builtin_lsx_vshuf4i_h:
4125 case LoongArch::BI__builtin_lsx_vshuf4i_w:
4126 case LoongArch::BI__builtin_lsx_vxori_b:
4127 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 255);
4128 case LoongArch::BI__builtin_lsx_vbitseli_b:
4129 case LoongArch::BI__builtin_lsx_vshuf4i_d:
4130 case LoongArch::BI__builtin_lsx_vextrins_b:
4131 case LoongArch::BI__builtin_lsx_vextrins_h:
4132 case LoongArch::BI__builtin_lsx_vextrins_w:
4133 case LoongArch::BI__builtin_lsx_vextrins_d:
4134 case LoongArch::BI__builtin_lsx_vpermi_w:
4135 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 255);
4136 case LoongArch::BI__builtin_lsx_vpickve2gr_b:
4137 case LoongArch::BI__builtin_lsx_vpickve2gr_bu:
4138 case LoongArch::BI__builtin_lsx_vreplvei_b:
4139 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
4140 case LoongArch::BI__builtin_lsx_vinsgr2vr_b:
4141 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
4142 case LoongArch::BI__builtin_lsx_vpickve2gr_h:
4143 case LoongArch::BI__builtin_lsx_vpickve2gr_hu:
4144 case LoongArch::BI__builtin_lsx_vreplvei_h:
4145 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
4146 case LoongArch::BI__builtin_lsx_vinsgr2vr_h:
4147 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
4148 case LoongArch::BI__builtin_lsx_vpickve2gr_w:
4149 case LoongArch::BI__builtin_lsx_vpickve2gr_wu:
4150 case LoongArch::BI__builtin_lsx_vreplvei_w:
4151 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
4152 case LoongArch::BI__builtin_lsx_vinsgr2vr_w:
4153 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
4154 case LoongArch::BI__builtin_lsx_vpickve2gr_d:
4155 case LoongArch::BI__builtin_lsx_vpickve2gr_du:
4156 case LoongArch::BI__builtin_lsx_vreplvei_d:
4157 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
4158 case LoongArch::BI__builtin_lsx_vinsgr2vr_d:
4159 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
4160 case LoongArch::BI__builtin_lsx_vstelm_b:
4161 return SemaBuiltinConstantArgRange(TheCall, 2, -128, 127) ||
4162 SemaBuiltinConstantArgRange(TheCall, 3, 0, 15);
4163 case LoongArch::BI__builtin_lsx_vstelm_h:
4164 return SemaBuiltinConstantArgRange(TheCall, 2, -256, 254) ||
4165 SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
4166 case LoongArch::BI__builtin_lsx_vstelm_w:
4167 return SemaBuiltinConstantArgRange(TheCall, 2, -512, 508) ||
4168 SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
4169 case LoongArch::BI__builtin_lsx_vstelm_d:
4170 return SemaBuiltinConstantArgRange(TheCall, 2, -1024, 1016) ||
4171 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1);
4172 case LoongArch::BI__builtin_lsx_vldrepl_b:
4173 case LoongArch::BI__builtin_lsx_vld:
4174 return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2047);
4175 case LoongArch::BI__builtin_lsx_vldrepl_h:
4176 return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2046);
4177 case LoongArch::BI__builtin_lsx_vldrepl_w:
4178 return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2044);
4179 case LoongArch::BI__builtin_lsx_vldrepl_d:
4180 return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2040);
4181 case LoongArch::BI__builtin_lsx_vst:
4182 return SemaBuiltinConstantArgRange(TheCall, 2, -2048, 2047);
4183 case LoongArch::BI__builtin_lsx_vldi:
4184 return SemaBuiltinConstantArgRange(TheCall, 0, -4096, 4095);
4185 case LoongArch::BI__builtin_lsx_vrepli_b:
4186 case LoongArch::BI__builtin_lsx_vrepli_h:
4187 case LoongArch::BI__builtin_lsx_vrepli_w:
4188 case LoongArch::BI__builtin_lsx_vrepli_d:
4189 return SemaBuiltinConstantArgRange(TheCall, 0, -512, 511);
4191 // LASX intrinsics.
4192 case LoongArch::BI__builtin_lasx_xvbitclri_b:
4193 case LoongArch::BI__builtin_lasx_xvbitrevi_b:
4194 case LoongArch::BI__builtin_lasx_xvbitseti_b:
4195 case LoongArch::BI__builtin_lasx_xvsat_b:
4196 case LoongArch::BI__builtin_lasx_xvsat_bu:
4197 case LoongArch::BI__builtin_lasx_xvslli_b:
4198 case LoongArch::BI__builtin_lasx_xvsrai_b:
4199 case LoongArch::BI__builtin_lasx_xvsrari_b:
4200 case LoongArch::BI__builtin_lasx_xvsrli_b:
4201 case LoongArch::BI__builtin_lasx_xvsllwil_h_b:
4202 case LoongArch::BI__builtin_lasx_xvsllwil_hu_bu:
4203 case LoongArch::BI__builtin_lasx_xvrotri_b:
4204 case LoongArch::BI__builtin_lasx_xvsrlri_b:
4205 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
4206 case LoongArch::BI__builtin_lasx_xvbitclri_h:
4207 case LoongArch::BI__builtin_lasx_xvbitrevi_h:
4208 case LoongArch::BI__builtin_lasx_xvbitseti_h:
4209 case LoongArch::BI__builtin_lasx_xvsat_h:
4210 case LoongArch::BI__builtin_lasx_xvsat_hu:
4211 case LoongArch::BI__builtin_lasx_xvslli_h:
4212 case LoongArch::BI__builtin_lasx_xvsrai_h:
4213 case LoongArch::BI__builtin_lasx_xvsrari_h:
4214 case LoongArch::BI__builtin_lasx_xvsrli_h:
4215 case LoongArch::BI__builtin_lasx_xvsllwil_w_h:
4216 case LoongArch::BI__builtin_lasx_xvsllwil_wu_hu:
4217 case LoongArch::BI__builtin_lasx_xvrotri_h:
4218 case LoongArch::BI__builtin_lasx_xvsrlri_h:
4219 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
4220 case LoongArch::BI__builtin_lasx_xvssrarni_b_h:
4221 case LoongArch::BI__builtin_lasx_xvssrarni_bu_h:
4222 case LoongArch::BI__builtin_lasx_xvssrani_b_h:
4223 case LoongArch::BI__builtin_lasx_xvssrani_bu_h:
4224 case LoongArch::BI__builtin_lasx_xvsrarni_b_h:
4225 case LoongArch::BI__builtin_lasx_xvsrlni_b_h:
4226 case LoongArch::BI__builtin_lasx_xvsrlrni_b_h:
4227 case LoongArch::BI__builtin_lasx_xvssrlni_b_h:
4228 case LoongArch::BI__builtin_lasx_xvssrlni_bu_h:
4229 case LoongArch::BI__builtin_lasx_xvssrlrni_b_h:
4230 case LoongArch::BI__builtin_lasx_xvssrlrni_bu_h:
4231 case LoongArch::BI__builtin_lasx_xvsrani_b_h:
4232 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
4233 case LoongArch::BI__builtin_lasx_xvslei_bu:
4234 case LoongArch::BI__builtin_lasx_xvslei_hu:
4235 case LoongArch::BI__builtin_lasx_xvslei_wu:
4236 case LoongArch::BI__builtin_lasx_xvslei_du:
4237 case LoongArch::BI__builtin_lasx_xvslti_bu:
4238 case LoongArch::BI__builtin_lasx_xvslti_hu:
4239 case LoongArch::BI__builtin_lasx_xvslti_wu:
4240 case LoongArch::BI__builtin_lasx_xvslti_du:
4241 case LoongArch::BI__builtin_lasx_xvmaxi_bu:
4242 case LoongArch::BI__builtin_lasx_xvmaxi_hu:
4243 case LoongArch::BI__builtin_lasx_xvmaxi_wu:
4244 case LoongArch::BI__builtin_lasx_xvmaxi_du:
4245 case LoongArch::BI__builtin_lasx_xvmini_bu:
4246 case LoongArch::BI__builtin_lasx_xvmini_hu:
4247 case LoongArch::BI__builtin_lasx_xvmini_wu:
4248 case LoongArch::BI__builtin_lasx_xvmini_du:
4249 case LoongArch::BI__builtin_lasx_xvaddi_bu:
4250 case LoongArch::BI__builtin_lasx_xvaddi_hu:
4251 case LoongArch::BI__builtin_lasx_xvaddi_wu:
4252 case LoongArch::BI__builtin_lasx_xvaddi_du:
4253 case LoongArch::BI__builtin_lasx_xvbitclri_w:
4254 case LoongArch::BI__builtin_lasx_xvbitrevi_w:
4255 case LoongArch::BI__builtin_lasx_xvbitseti_w:
4256 case LoongArch::BI__builtin_lasx_xvsat_w:
4257 case LoongArch::BI__builtin_lasx_xvsat_wu:
4258 case LoongArch::BI__builtin_lasx_xvslli_w:
4259 case LoongArch::BI__builtin_lasx_xvsrai_w:
4260 case LoongArch::BI__builtin_lasx_xvsrari_w:
4261 case LoongArch::BI__builtin_lasx_xvsrli_w:
4262 case LoongArch::BI__builtin_lasx_xvsllwil_d_w:
4263 case LoongArch::BI__builtin_lasx_xvsllwil_du_wu:
4264 case LoongArch::BI__builtin_lasx_xvsrlri_w:
4265 case LoongArch::BI__builtin_lasx_xvrotri_w:
4266 case LoongArch::BI__builtin_lasx_xvsubi_bu:
4267 case LoongArch::BI__builtin_lasx_xvsubi_hu:
4268 case LoongArch::BI__builtin_lasx_xvsubi_wu:
4269 case LoongArch::BI__builtin_lasx_xvsubi_du:
4270 case LoongArch::BI__builtin_lasx_xvbsrl_v:
4271 case LoongArch::BI__builtin_lasx_xvbsll_v:
4272 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
4273 case LoongArch::BI__builtin_lasx_xvssrarni_h_w:
4274 case LoongArch::BI__builtin_lasx_xvssrarni_hu_w:
4275 case LoongArch::BI__builtin_lasx_xvssrani_h_w:
4276 case LoongArch::BI__builtin_lasx_xvssrani_hu_w:
4277 case LoongArch::BI__builtin_lasx_xvsrarni_h_w:
4278 case LoongArch::BI__builtin_lasx_xvsrani_h_w:
4279 case LoongArch::BI__builtin_lasx_xvfrstpi_b:
4280 case LoongArch::BI__builtin_lasx_xvfrstpi_h:
4281 case LoongArch::BI__builtin_lasx_xvsrlni_h_w:
4282 case LoongArch::BI__builtin_lasx_xvsrlrni_h_w:
4283 case LoongArch::BI__builtin_lasx_xvssrlni_h_w:
4284 case LoongArch::BI__builtin_lasx_xvssrlni_hu_w:
4285 case LoongArch::BI__builtin_lasx_xvssrlrni_h_w:
4286 case LoongArch::BI__builtin_lasx_xvssrlrni_hu_w:
4287 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
4288 case LoongArch::BI__builtin_lasx_xvbitclri_d:
4289 case LoongArch::BI__builtin_lasx_xvbitrevi_d:
4290 case LoongArch::BI__builtin_lasx_xvbitseti_d:
4291 case LoongArch::BI__builtin_lasx_xvsat_d:
4292 case LoongArch::BI__builtin_lasx_xvsat_du:
4293 case LoongArch::BI__builtin_lasx_xvslli_d:
4294 case LoongArch::BI__builtin_lasx_xvsrai_d:
4295 case LoongArch::BI__builtin_lasx_xvsrli_d:
4296 case LoongArch::BI__builtin_lasx_xvsrari_d:
4297 case LoongArch::BI__builtin_lasx_xvrotri_d:
4298 case LoongArch::BI__builtin_lasx_xvsrlri_d:
4299 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 63);
4300 case LoongArch::BI__builtin_lasx_xvssrarni_w_d:
4301 case LoongArch::BI__builtin_lasx_xvssrarni_wu_d:
4302 case LoongArch::BI__builtin_lasx_xvssrani_w_d:
4303 case LoongArch::BI__builtin_lasx_xvssrani_wu_d:
4304 case LoongArch::BI__builtin_lasx_xvsrarni_w_d:
4305 case LoongArch::BI__builtin_lasx_xvsrlni_w_d:
4306 case LoongArch::BI__builtin_lasx_xvsrlrni_w_d:
4307 case LoongArch::BI__builtin_lasx_xvssrlni_w_d:
4308 case LoongArch::BI__builtin_lasx_xvssrlni_wu_d:
4309 case LoongArch::BI__builtin_lasx_xvssrlrni_w_d:
4310 case LoongArch::BI__builtin_lasx_xvssrlrni_wu_d:
4311 case LoongArch::BI__builtin_lasx_xvsrani_w_d:
4312 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 63);
4313 case LoongArch::BI__builtin_lasx_xvssrarni_d_q:
4314 case LoongArch::BI__builtin_lasx_xvssrarni_du_q:
4315 case LoongArch::BI__builtin_lasx_xvssrani_d_q:
4316 case LoongArch::BI__builtin_lasx_xvssrani_du_q:
4317 case LoongArch::BI__builtin_lasx_xvsrarni_d_q:
4318 case LoongArch::BI__builtin_lasx_xvssrlni_d_q:
4319 case LoongArch::BI__builtin_lasx_xvssrlni_du_q:
4320 case LoongArch::BI__builtin_lasx_xvssrlrni_d_q:
4321 case LoongArch::BI__builtin_lasx_xvssrlrni_du_q:
4322 case LoongArch::BI__builtin_lasx_xvsrani_d_q:
4323 case LoongArch::BI__builtin_lasx_xvsrlni_d_q:
4324 case LoongArch::BI__builtin_lasx_xvsrlrni_d_q:
4325 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 127);
4326 case LoongArch::BI__builtin_lasx_xvseqi_b:
4327 case LoongArch::BI__builtin_lasx_xvseqi_h:
4328 case LoongArch::BI__builtin_lasx_xvseqi_w:
4329 case LoongArch::BI__builtin_lasx_xvseqi_d:
4330 case LoongArch::BI__builtin_lasx_xvslti_b:
4331 case LoongArch::BI__builtin_lasx_xvslti_h:
4332 case LoongArch::BI__builtin_lasx_xvslti_w:
4333 case LoongArch::BI__builtin_lasx_xvslti_d:
4334 case LoongArch::BI__builtin_lasx_xvslei_b:
4335 case LoongArch::BI__builtin_lasx_xvslei_h:
4336 case LoongArch::BI__builtin_lasx_xvslei_w:
4337 case LoongArch::BI__builtin_lasx_xvslei_d:
4338 case LoongArch::BI__builtin_lasx_xvmaxi_b:
4339 case LoongArch::BI__builtin_lasx_xvmaxi_h:
4340 case LoongArch::BI__builtin_lasx_xvmaxi_w:
4341 case LoongArch::BI__builtin_lasx_xvmaxi_d:
4342 case LoongArch::BI__builtin_lasx_xvmini_b:
4343 case LoongArch::BI__builtin_lasx_xvmini_h:
4344 case LoongArch::BI__builtin_lasx_xvmini_w:
4345 case LoongArch::BI__builtin_lasx_xvmini_d:
4346 return SemaBuiltinConstantArgRange(TheCall, 1, -16, 15);
4347 case LoongArch::BI__builtin_lasx_xvandi_b:
4348 case LoongArch::BI__builtin_lasx_xvnori_b:
4349 case LoongArch::BI__builtin_lasx_xvori_b:
4350 case LoongArch::BI__builtin_lasx_xvshuf4i_b:
4351 case LoongArch::BI__builtin_lasx_xvshuf4i_h:
4352 case LoongArch::BI__builtin_lasx_xvshuf4i_w:
4353 case LoongArch::BI__builtin_lasx_xvxori_b:
4354 case LoongArch::BI__builtin_lasx_xvpermi_d:
4355 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 255);
4356 case LoongArch::BI__builtin_lasx_xvbitseli_b:
4357 case LoongArch::BI__builtin_lasx_xvshuf4i_d:
4358 case LoongArch::BI__builtin_lasx_xvextrins_b:
4359 case LoongArch::BI__builtin_lasx_xvextrins_h:
4360 case LoongArch::BI__builtin_lasx_xvextrins_w:
4361 case LoongArch::BI__builtin_lasx_xvextrins_d:
4362 case LoongArch::BI__builtin_lasx_xvpermi_q:
4363 case LoongArch::BI__builtin_lasx_xvpermi_w:
4364 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 255);
4365 case LoongArch::BI__builtin_lasx_xvrepl128vei_b:
4366 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
4367 case LoongArch::BI__builtin_lasx_xvrepl128vei_h:
4368 case LoongArch::BI__builtin_lasx_xvpickve2gr_w:
4369 case LoongArch::BI__builtin_lasx_xvpickve2gr_wu:
4370 case LoongArch::BI__builtin_lasx_xvpickve_w_f:
4371 case LoongArch::BI__builtin_lasx_xvpickve_w:
4372 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
4373 case LoongArch::BI__builtin_lasx_xvinsgr2vr_w:
4374 case LoongArch::BI__builtin_lasx_xvinsve0_w:
4375 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
4376 case LoongArch::BI__builtin_lasx_xvrepl128vei_w:
4377 case LoongArch::BI__builtin_lasx_xvpickve2gr_d:
4378 case LoongArch::BI__builtin_lasx_xvpickve2gr_du:
4379 case LoongArch::BI__builtin_lasx_xvpickve_d_f:
4380 case LoongArch::BI__builtin_lasx_xvpickve_d:
4381 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
4382 case LoongArch::BI__builtin_lasx_xvinsve0_d:
4383 case LoongArch::BI__builtin_lasx_xvinsgr2vr_d:
4384 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
4385 case LoongArch::BI__builtin_lasx_xvstelm_b:
4386 return SemaBuiltinConstantArgRange(TheCall, 2, -128, 127) ||
4387 SemaBuiltinConstantArgRange(TheCall, 3, 0, 31);
4388 case LoongArch::BI__builtin_lasx_xvstelm_h:
4389 return SemaBuiltinConstantArgRange(TheCall, 2, -256, 254) ||
4390 SemaBuiltinConstantArgRange(TheCall, 3, 0, 15);
4391 case LoongArch::BI__builtin_lasx_xvstelm_w:
4392 return SemaBuiltinConstantArgRange(TheCall, 2, -512, 508) ||
4393 SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
4394 case LoongArch::BI__builtin_lasx_xvstelm_d:
4395 return SemaBuiltinConstantArgRange(TheCall, 2, -1024, 1016) ||
4396 SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
4397 case LoongArch::BI__builtin_lasx_xvrepl128vei_d:
4398 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
4399 case LoongArch::BI__builtin_lasx_xvldrepl_b:
4400 case LoongArch::BI__builtin_lasx_xvld:
4401 return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2047);
4402 case LoongArch::BI__builtin_lasx_xvldrepl_h:
4403 return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2046);
4404 case LoongArch::BI__builtin_lasx_xvldrepl_w:
4405 return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2044);
4406 case LoongArch::BI__builtin_lasx_xvldrepl_d:
4407 return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2040);
4408 case LoongArch::BI__builtin_lasx_xvst:
4409 return SemaBuiltinConstantArgRange(TheCall, 2, -2048, 2047);
4410 case LoongArch::BI__builtin_lasx_xvldi:
4411 return SemaBuiltinConstantArgRange(TheCall, 0, -4096, 4095);
4412 case LoongArch::BI__builtin_lasx_xvrepli_b:
4413 case LoongArch::BI__builtin_lasx_xvrepli_h:
4414 case LoongArch::BI__builtin_lasx_xvrepli_w:
4415 case LoongArch::BI__builtin_lasx_xvrepli_d:
4416 return SemaBuiltinConstantArgRange(TheCall, 0, -512, 511);
4418 return false;
4421 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
4422 unsigned BuiltinID, CallExpr *TheCall) {
4423 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
4424 CheckMipsBuiltinArgument(BuiltinID, TheCall);
4427 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
4428 CallExpr *TheCall) {
4430 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
4431 BuiltinID <= Mips::BI__builtin_mips_lwx) {
4432 if (!TI.hasFeature("dsp"))
4433 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
4436 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
4437 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
4438 if (!TI.hasFeature("dspr2"))
4439 return Diag(TheCall->getBeginLoc(),
4440 diag::err_mips_builtin_requires_dspr2);
4443 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
4444 BuiltinID <= Mips::BI__builtin_msa_xori_b) {
4445 if (!TI.hasFeature("msa"))
4446 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
4449 return false;
// CheckMipsBuiltinArgument - Checks the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
// by the underlying instruction i.e., df/m, df/n and then by size.
//
// FIXME: The size tests here should instead be tablegen'd along with the
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics, we should
// be too.
bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // i = index of the constant argument to check,
  // [l, u] = its inclusive valid range,
  // m = when non-zero, the value must additionally be a multiple of m
  //     (memory builtins whose byte offset is scaled by the element size).
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually have an unsigned 5 bit field,
  // not a df/n field.
  case Mips::BI__builtin_msa_cfcmsa:
  case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_clei_u_b:
  case Mips::BI__builtin_msa_clei_u_h:
  case Mips::BI__builtin_msa_clei_u_w:
  case Mips::BI__builtin_msa_clei_u_d:
  case Mips::BI__builtin_msa_clti_u_b:
  case Mips::BI__builtin_msa_clti_u_h:
  case Mips::BI__builtin_msa_clti_u_w:
  case Mips::BI__builtin_msa_clti_u_d:
  case Mips::BI__builtin_msa_maxi_u_b:
  case Mips::BI__builtin_msa_maxi_u_h:
  case Mips::BI__builtin_msa_maxi_u_w:
  case Mips::BI__builtin_msa_maxi_u_d:
  case Mips::BI__builtin_msa_mini_u_b:
  case Mips::BI__builtin_msa_mini_u_h:
  case Mips::BI__builtin_msa_mini_u_w:
  case Mips::BI__builtin_msa_mini_u_d:
  case Mips::BI__builtin_msa_addvi_b:
  case Mips::BI__builtin_msa_addvi_h:
  case Mips::BI__builtin_msa_addvi_w:
  case Mips::BI__builtin_msa_addvi_d:
  case Mips::BI__builtin_msa_bclri_w:
  case Mips::BI__builtin_msa_bnegi_w:
  case Mips::BI__builtin_msa_bseti_w:
  case Mips::BI__builtin_msa_sat_s_w:
  case Mips::BI__builtin_msa_sat_u_w:
  case Mips::BI__builtin_msa_slli_w:
  case Mips::BI__builtin_msa_srai_w:
  case Mips::BI__builtin_msa_srari_w:
  case Mips::BI__builtin_msa_srli_w:
  case Mips::BI__builtin_msa_srlri_w:
  case Mips::BI__builtin_msa_subvi_b:
  case Mips::BI__builtin_msa_subvi_h:
  case Mips::BI__builtin_msa_subvi_w:
  case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_binsli_w:
  case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
  // These intrinsics take an unsigned 6 bit immediate.
  case Mips::BI__builtin_msa_bclri_d:
  case Mips::BI__builtin_msa_bnegi_d:
  case Mips::BI__builtin_msa_bseti_d:
  case Mips::BI__builtin_msa_sat_s_d:
  case Mips::BI__builtin_msa_sat_u_d:
  case Mips::BI__builtin_msa_slli_d:
  case Mips::BI__builtin_msa_srai_d:
  case Mips::BI__builtin_msa_srari_d:
  case Mips::BI__builtin_msa_srli_d:
  case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_msa_binsli_d:
  case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
  // These intrinsics take a signed 5 bit immediate.
  case Mips::BI__builtin_msa_ceqi_b:
  case Mips::BI__builtin_msa_ceqi_h:
  case Mips::BI__builtin_msa_ceqi_w:
  case Mips::BI__builtin_msa_ceqi_d:
  case Mips::BI__builtin_msa_clti_s_b:
  case Mips::BI__builtin_msa_clti_s_h:
  case Mips::BI__builtin_msa_clti_s_w:
  case Mips::BI__builtin_msa_clti_s_d:
  case Mips::BI__builtin_msa_clei_s_b:
  case Mips::BI__builtin_msa_clei_s_h:
  case Mips::BI__builtin_msa_clei_s_w:
  case Mips::BI__builtin_msa_clei_s_d:
  case Mips::BI__builtin_msa_maxi_s_b:
  case Mips::BI__builtin_msa_maxi_s_h:
  case Mips::BI__builtin_msa_maxi_s_w:
  case Mips::BI__builtin_msa_maxi_s_d:
  case Mips::BI__builtin_msa_mini_s_b:
  case Mips::BI__builtin_msa_mini_s_h:
  case Mips::BI__builtin_msa_mini_s_w:
  case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
  // These intrinsics take an unsigned 8 bit immediate.
  case Mips::BI__builtin_msa_andi_b:
  case Mips::BI__builtin_msa_nori_b:
  case Mips::BI__builtin_msa_ori_b:
  case Mips::BI__builtin_msa_shf_b:
  case Mips::BI__builtin_msa_shf_h:
  case Mips::BI__builtin_msa_shf_w:
  case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
  case Mips::BI__builtin_msa_bseli_b:
  case Mips::BI__builtin_msa_bmnzi_b:
  case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
  // df/n format
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_copy_s_b:
  case Mips::BI__builtin_msa_copy_u_b:
  case Mips::BI__builtin_msa_insve_b:
  case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_copy_s_h:
  case Mips::BI__builtin_msa_copy_u_h:
  case Mips::BI__builtin_msa_insve_h:
  case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 2 bit immediate.
  case Mips::BI__builtin_msa_copy_s_w:
  case Mips::BI__builtin_msa_copy_u_w:
  case Mips::BI__builtin_msa_insve_w:
  case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
  case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
  // These intrinsics take an unsigned 1 bit immediate.
  case Mips::BI__builtin_msa_copy_s_d:
  case Mips::BI__builtin_msa_copy_u_d:
  case Mips::BI__builtin_msa_insve_d:
  case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
  case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
  // Memory offsets and immediate loads.
  // These intrinsics take a signed 10 bit immediate.
  // NOTE(review): ldi_b's range [-128, 255] spans both the signed and
  // unsigned 8-bit interpretations rather than the full signed 10-bit range
  // the comment above suggests — verify against GCC's behavior.
  case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
  case Mips::BI__builtin_msa_ldi_h:
  case Mips::BI__builtin_msa_ldi_w:
  case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
  // Loads/stores: the offset range scales with the element size (m), and the
  // offset must be a multiple of that size.
  case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
  }

  // Plain range check; memory builtins (m != 0) also require the offset to
  // be a multiple of the access size.
  if (!m)
    return SemaBuiltinConstantArgRange(TheCall, i, l, u);

  return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
         SemaBuiltinConstantArgMultiple(TheCall, i, m);
}
/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
/// advancing the pointer over the consumed characters. The decoded type is
/// returned. If the decoded type represents a constant integer with a
/// constraint on its value then Mask is set to that value. The type descriptors
/// used in Str are specific to PPC MMA builtins and are documented in the file
/// defining the PPC builtins.
static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
                                        unsigned &Mask) {
  bool RequireICE = false;
  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  switch (*Str++) {
  case 'V':
    // 'V': a 16-byte AltiVec vector of unsigned char.
    return Context.getVectorType(Context.UnsignedCharTy, 16,
                                 VectorKind::AltiVecVector);
  case 'i': {
    // 'i<N>': an int argument constrained to the constant value N; the
    // constraint is reported back through Mask.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing constant parameter constraint");
    Str = End;
    Mask = size;
    return Context.IntTy;
  }
  case 'W': {
    // 'W<N>': a PPC MMA vector type of N bits, selected from PPCTypes.def.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing PowerPC MMA type size");
    Str = End;
    QualType Type;
    switch (size) {
#define PPC_VECTOR_TYPE(typeName, Id, size) \
  case size: Type = Context.Id##Ty; break;
#include "clang/Basic/PPCTypes.def"
    default: llvm_unreachable("Invalid PowerPC MMA vector type");
    }
    // Consume trailing '*' (pointer) and 'C' (const) modifiers; any other
    // character ends the descriptor, so back up one position.
    bool CheckVectorArgs = false;
    while (!CheckVectorArgs) {
      switch (*Str++) {
      case '*':
        Type = Context.getPointerType(Type);
        break;
      case 'C':
        Type = Type.withConst();
        break;
      default:
        CheckVectorArgs = true;
        --Str;
        break;
      }
    }
    return Type;
  }
  default:
    // Anything else uses the generic builtin type-string decoder; rewind so
    // it sees the character we consumed.
    return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
  }
}
/// Returns true if \p BuiltinID names a PPC builtin that is only available
/// when targeting 64-bit PowerPC; the caller diagnoses any use of these on a
/// 32-bit target.
static bool isPPC_64Builtin(unsigned BuiltinID) {
  // These builtins only work on PPC 64bit targets.
  switch (BuiltinID) {
  case PPC::BI__builtin_divde:
  case PPC::BI__builtin_divdeu:
  case PPC::BI__builtin_bpermd:
  case PPC::BI__builtin_pdepd:
  case PPC::BI__builtin_pextd:
  case PPC::BI__builtin_ppc_ldarx:
  case PPC::BI__builtin_ppc_stdcx:
  case PPC::BI__builtin_ppc_tdw:
  case PPC::BI__builtin_ppc_trapd:
  case PPC::BI__builtin_ppc_cmpeqb:
  case PPC::BI__builtin_ppc_setb:
  case PPC::BI__builtin_ppc_mulhd:
  case PPC::BI__builtin_ppc_mulhdu:
  case PPC::BI__builtin_ppc_maddhd:
  case PPC::BI__builtin_ppc_maddhdu:
  case PPC::BI__builtin_ppc_maddld:
  case PPC::BI__builtin_ppc_load8r:
  case PPC::BI__builtin_ppc_store8r:
  case PPC::BI__builtin_ppc_insert_exp:
  case PPC::BI__builtin_ppc_extract_sig:
  case PPC::BI__builtin_ppc_addex:
  case PPC::BI__builtin_darn:
  case PPC::BI__builtin_darn_raw:
  case PPC::BI__builtin_ppc_compare_and_swaplp:
  case PPC::BI__builtin_ppc_fetch_and_addlp:
  case PPC::BI__builtin_ppc_fetch_and_andlp:
  case PPC::BI__builtin_ppc_fetch_and_orlp:
  case PPC::BI__builtin_ppc_fetch_and_swaplp:
    return true;
  }
  return false;
}
4730 /// Returns true if the argument consists of one contiguous run of 1s with any
4731 /// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
4732 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
4733 /// since all 1s are not contiguous.
4734 bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
4735 llvm::APSInt Result;
4736 // We can't check the value of a dependent argument.
4737 Expr *Arg = TheCall->getArg(ArgNum);
4738 if (Arg->isTypeDependent() || Arg->isValueDependent())
4739 return false;
4741 // Check constant-ness first.
4742 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4743 return true;
4745 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
4746 if (Result.isShiftedMask() || (~Result).isShiftedMask())
4747 return false;
4749 return Diag(TheCall->getBeginLoc(),
4750 diag::err_argument_not_contiguous_bit_field)
4751 << ArgNum << Arg->getSourceRange();
4754 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
4755 CallExpr *TheCall) {
4756 unsigned i = 0, l = 0, u = 0;
4757 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
4758 llvm::APSInt Result;
4760 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
4761 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
4762 << TheCall->getSourceRange();
4764 switch (BuiltinID) {
4765 default: return false;
4766 case PPC::BI__builtin_altivec_crypto_vshasigmaw:
4767 case PPC::BI__builtin_altivec_crypto_vshasigmad:
4768 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
4769 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
4770 case PPC::BI__builtin_altivec_dss:
4771 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
4772 case PPC::BI__builtin_tbegin:
4773 case PPC::BI__builtin_tend:
4774 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
4775 case PPC::BI__builtin_tsr:
4776 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7);
4777 case PPC::BI__builtin_tabortwc:
4778 case PPC::BI__builtin_tabortdc:
4779 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
4780 case PPC::BI__builtin_tabortwci:
4781 case PPC::BI__builtin_tabortdci:
4782 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
4783 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
4784 // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
4785 // __builtin_(un)pack_longdouble are available only if long double uses IBM
4786 // extended double representation.
4787 case PPC::BI__builtin_unpack_longdouble:
4788 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1))
4789 return true;
4790 [[fallthrough]];
4791 case PPC::BI__builtin_pack_longdouble:
4792 if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble())
4793 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi)
4794 << "ibmlongdouble";
4795 return false;
4796 case PPC::BI__builtin_altivec_dst:
4797 case PPC::BI__builtin_altivec_dstt:
4798 case PPC::BI__builtin_altivec_dstst:
4799 case PPC::BI__builtin_altivec_dststt:
4800 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
4801 case PPC::BI__builtin_vsx_xxpermdi:
4802 case PPC::BI__builtin_vsx_xxsldwi:
4803 return SemaBuiltinVSX(TheCall);
4804 case PPC::BI__builtin_unpack_vector_int128:
4805 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
4806 case PPC::BI__builtin_altivec_vgnb:
4807 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
4808 case PPC::BI__builtin_vsx_xxeval:
4809 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
4810 case PPC::BI__builtin_altivec_vsldbi:
4811 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
4812 case PPC::BI__builtin_altivec_vsrdbi:
4813 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
4814 case PPC::BI__builtin_vsx_xxpermx:
4815 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
4816 case PPC::BI__builtin_ppc_tw:
4817 case PPC::BI__builtin_ppc_tdw:
4818 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
4819 case PPC::BI__builtin_ppc_cmprb:
4820 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
4821 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
4822 // be a constant that represents a contiguous bit field.
4823 case PPC::BI__builtin_ppc_rlwnm:
4824 return SemaValueIsRunOfOnes(TheCall, 2);
4825 case PPC::BI__builtin_ppc_rlwimi:
4826 case PPC::BI__builtin_ppc_rldimi:
4827 return SemaBuiltinConstantArg(TheCall, 2, Result) ||
4828 SemaValueIsRunOfOnes(TheCall, 3);
4829 case PPC::BI__builtin_ppc_addex: {
4830 if (SemaBuiltinConstantArgRange(TheCall, 2, 0, 3))
4831 return true;
4832 // Output warning for reserved values 1 to 3.
4833 int ArgValue =
4834 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue();
4835 if (ArgValue != 0)
4836 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
4837 << ArgValue;
4838 return false;
4840 case PPC::BI__builtin_ppc_mtfsb0:
4841 case PPC::BI__builtin_ppc_mtfsb1:
4842 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
4843 case PPC::BI__builtin_ppc_mtfsf:
4844 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255);
4845 case PPC::BI__builtin_ppc_mtfsfi:
4846 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
4847 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
4848 case PPC::BI__builtin_ppc_alignx:
4849 return SemaBuiltinConstantArgPower2(TheCall, 0);
4850 case PPC::BI__builtin_ppc_rdlam:
4851 return SemaValueIsRunOfOnes(TheCall, 2);
4852 case PPC::BI__builtin_vsx_ldrmb:
4853 case PPC::BI__builtin_vsx_strmb:
4854 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
4855 case PPC::BI__builtin_altivec_vcntmbb:
4856 case PPC::BI__builtin_altivec_vcntmbh:
4857 case PPC::BI__builtin_altivec_vcntmbw:
4858 case PPC::BI__builtin_altivec_vcntmbd:
4859 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
4860 case PPC::BI__builtin_vsx_xxgenpcvbm:
4861 case PPC::BI__builtin_vsx_xxgenpcvhm:
4862 case PPC::BI__builtin_vsx_xxgenpcvwm:
4863 case PPC::BI__builtin_vsx_xxgenpcvdm:
4864 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
4865 case PPC::BI__builtin_ppc_test_data_class: {
4866 // Check if the first argument of the __builtin_ppc_test_data_class call is
4867 // valid. The argument must be 'float' or 'double' or '__float128'.
4868 QualType ArgType = TheCall->getArg(0)->getType();
4869 if (ArgType != QualType(Context.FloatTy) &&
4870 ArgType != QualType(Context.DoubleTy) &&
4871 ArgType != QualType(Context.Float128Ty))
4872 return Diag(TheCall->getBeginLoc(),
4873 diag::err_ppc_invalid_test_data_class_type);
4874 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 127);
4876 case PPC::BI__builtin_ppc_maxfe:
4877 case PPC::BI__builtin_ppc_minfe:
4878 case PPC::BI__builtin_ppc_maxfl:
4879 case PPC::BI__builtin_ppc_minfl:
4880 case PPC::BI__builtin_ppc_maxfs:
4881 case PPC::BI__builtin_ppc_minfs: {
4882 if (Context.getTargetInfo().getTriple().isOSAIX() &&
4883 (BuiltinID == PPC::BI__builtin_ppc_maxfe ||
4884 BuiltinID == PPC::BI__builtin_ppc_minfe))
4885 return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type)
4886 << "builtin" << true << 128 << QualType(Context.LongDoubleTy)
4887 << false << Context.getTargetInfo().getTriple().str();
4888 // Argument type should be exact.
4889 QualType ArgType = QualType(Context.LongDoubleTy);
4890 if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
4891 BuiltinID == PPC::BI__builtin_ppc_minfl)
4892 ArgType = QualType(Context.DoubleTy);
4893 else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
4894 BuiltinID == PPC::BI__builtin_ppc_minfs)
4895 ArgType = QualType(Context.FloatTy);
4896 for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
4897 if (TheCall->getArg(I)->getType() != ArgType)
4898 return Diag(TheCall->getBeginLoc(),
4899 diag::err_typecheck_convert_incompatible)
4900 << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
4901 return false;
4903 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
4904 case PPC::BI__builtin_##Name: \
4905 return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types);
4906 #include "clang/Basic/BuiltinsPPC.def"
4908 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
4911 // Check if the given type is a non-pointer PPC MMA type. This function is used
4912 // in Sema to prevent invalid uses of restricted PPC MMA types.
4913 bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
4914 if (Type->isPointerType() || Type->isArrayType())
4915 return false;
4917 QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
4918 #define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
4919 if (false
4920 #include "clang/Basic/PPCTypes.def"
4922 Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
4923 return true;
4925 return false;
4928 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
4929 CallExpr *TheCall) {
4930 // position of memory order and scope arguments in the builtin
4931 unsigned OrderIndex, ScopeIndex;
4932 switch (BuiltinID) {
4933 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
4934 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
4935 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
4936 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
4937 OrderIndex = 2;
4938 ScopeIndex = 3;
4939 break;
4940 case AMDGPU::BI__builtin_amdgcn_fence:
4941 OrderIndex = 0;
4942 ScopeIndex = 1;
4943 break;
4944 default:
4945 return false;
4948 ExprResult Arg = TheCall->getArg(OrderIndex);
4949 auto ArgExpr = Arg.get();
4950 Expr::EvalResult ArgResult;
4952 if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
4953 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
4954 << ArgExpr->getType();
4955 auto Ord = ArgResult.Val.getInt().getZExtValue();
4957 // Check validity of memory ordering as per C11 / C++11's memody model.
4958 // Only fence needs check. Atomic dec/inc allow all memory orders.
4959 if (!llvm::isValidAtomicOrderingCABI(Ord))
4960 return Diag(ArgExpr->getBeginLoc(),
4961 diag::warn_atomic_op_has_invalid_memory_order)
4962 << ArgExpr->getSourceRange();
4963 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
4964 case llvm::AtomicOrderingCABI::relaxed:
4965 case llvm::AtomicOrderingCABI::consume:
4966 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
4967 return Diag(ArgExpr->getBeginLoc(),
4968 diag::warn_atomic_op_has_invalid_memory_order)
4969 << ArgExpr->getSourceRange();
4970 break;
4971 case llvm::AtomicOrderingCABI::acquire:
4972 case llvm::AtomicOrderingCABI::release:
4973 case llvm::AtomicOrderingCABI::acq_rel:
4974 case llvm::AtomicOrderingCABI::seq_cst:
4975 break;
4978 Arg = TheCall->getArg(ScopeIndex);
4979 ArgExpr = Arg.get();
4980 Expr::EvalResult ArgResult1;
4981 // Check that sync scope is a constant literal
4982 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
4983 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
4984 << ArgExpr->getType();
4986 return false;
4989 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
4990 llvm::APSInt Result;
4992 // We can't check the value of a dependent argument.
4993 Expr *Arg = TheCall->getArg(ArgNum);
4994 if (Arg->isTypeDependent() || Arg->isValueDependent())
4995 return false;
4997 // Check constant-ness first.
4998 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4999 return true;
5001 int64_t Val = Result.getSExtValue();
5002 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
5003 return false;
5005 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
5006 << Arg->getSourceRange();
5009 static bool CheckInvalidVLENandLMUL(const TargetInfo &TI, CallExpr *TheCall,
5010 Sema &S, QualType Type, int EGW) {
5011 assert((EGW == 128 || EGW == 256) && "EGW can only be 128 or 256 bits");
5013 // LMUL * VLEN >= EGW
5014 unsigned ElemSize = Type->isRVVType(32, false) ? 32 : 64;
5015 unsigned MinElemCount = Type->isRVVType(1) ? 1
5016 : Type->isRVVType(2) ? 2
5017 : Type->isRVVType(4) ? 4
5018 : Type->isRVVType(8) ? 8
5019 : 16;
5021 unsigned EGS = EGW / ElemSize;
5022 // If EGS is less than or equal to the minimum number of elements, then the
5023 // type is valid.
5024 if (EGS <= MinElemCount)
5025 return false;
5027 // Otherwise, we need vscale to be at least EGS / MinElemCont.
5028 assert(EGS % MinElemCount == 0);
5029 unsigned VScaleFactor = EGS / MinElemCount;
5030 // Vscale is VLEN/RVVBitsPerBlock.
5031 unsigned MinRequiredVLEN = VScaleFactor * llvm::RISCV::RVVBitsPerBlock;
5032 std::string RequiredExt = "zvl" + std::to_string(MinRequiredVLEN) + "b";
5033 if (!TI.hasFeature(RequiredExt))
5034 return S.Diag(TheCall->getBeginLoc(),
5035 diag::err_riscv_type_requires_extension) << Type << RequiredExt;
5037 return false;
5040 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
5041 unsigned BuiltinID,
5042 CallExpr *TheCall) {
5043 // CodeGenFunction can also detect this, but this gives a better error
5044 // message.
5045 bool FeatureMissing = false;
5046 SmallVector<StringRef> ReqFeatures;
5047 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
5048 Features.split(ReqFeatures, ',', -1, false);
5050 // Check if each required feature is included
5051 for (StringRef F : ReqFeatures) {
5052 SmallVector<StringRef> ReqOpFeatures;
5053 F.split(ReqOpFeatures, '|');
5055 if (llvm::none_of(ReqOpFeatures,
5056 [&TI](StringRef OF) { return TI.hasFeature(OF); })) {
5057 std::string FeatureStrs;
5058 bool IsExtension = true;
5059 for (StringRef OF : ReqOpFeatures) {
5060 // If the feature is 64bit, alter the string so it will print better in
5061 // the diagnostic.
5062 if (OF == "64bit") {
5063 assert(ReqOpFeatures.size() == 1 && "Expected '64bit' to be alone");
5064 OF = "RV64";
5065 IsExtension = false;
5067 if (OF == "32bit") {
5068 assert(ReqOpFeatures.size() == 1 && "Expected '32bit' to be alone");
5069 OF = "RV32";
5070 IsExtension = false;
5073 // Convert features like "zbr" and "experimental-zbr" to "Zbr".
5074 OF.consume_front("experimental-");
5075 std::string FeatureStr = OF.str();
5076 FeatureStr[0] = std::toupper(FeatureStr[0]);
5077 // Combine strings.
5078 FeatureStrs += FeatureStrs.empty() ? "" : ", ";
5079 FeatureStrs += "'";
5080 FeatureStrs += FeatureStr;
5081 FeatureStrs += "'";
5083 // Error message
5084 FeatureMissing = true;
5085 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
5086 << IsExtension
5087 << TheCall->getSourceRange() << StringRef(FeatureStrs);
5091 if (FeatureMissing)
5092 return true;
5094 // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
5095 // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
5096 switch (BuiltinID) {
5097 default:
5098 break;
5099 case RISCVVector::BI__builtin_rvv_vmulhsu_vv:
5100 case RISCVVector::BI__builtin_rvv_vmulhsu_vx:
5101 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu:
5102 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu:
5103 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m:
5104 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m:
5105 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu:
5106 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu:
5107 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum:
5108 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum:
5109 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu:
5110 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu:
5111 case RISCVVector::BI__builtin_rvv_vmulhu_vv:
5112 case RISCVVector::BI__builtin_rvv_vmulhu_vx:
5113 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu:
5114 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu:
5115 case RISCVVector::BI__builtin_rvv_vmulhu_vv_m:
5116 case RISCVVector::BI__builtin_rvv_vmulhu_vx_m:
5117 case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu:
5118 case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu:
5119 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum:
5120 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum:
5121 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu:
5122 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu:
5123 case RISCVVector::BI__builtin_rvv_vmulh_vv:
5124 case RISCVVector::BI__builtin_rvv_vmulh_vx:
5125 case RISCVVector::BI__builtin_rvv_vmulh_vv_tu:
5126 case RISCVVector::BI__builtin_rvv_vmulh_vx_tu:
5127 case RISCVVector::BI__builtin_rvv_vmulh_vv_m:
5128 case RISCVVector::BI__builtin_rvv_vmulh_vx_m:
5129 case RISCVVector::BI__builtin_rvv_vmulh_vv_mu:
5130 case RISCVVector::BI__builtin_rvv_vmulh_vx_mu:
5131 case RISCVVector::BI__builtin_rvv_vmulh_vv_tum:
5132 case RISCVVector::BI__builtin_rvv_vmulh_vx_tum:
5133 case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu:
5134 case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu:
5135 case RISCVVector::BI__builtin_rvv_vsmul_vv:
5136 case RISCVVector::BI__builtin_rvv_vsmul_vx:
5137 case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
5138 case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
5139 case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
5140 case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
5141 case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
5142 case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
5143 case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
5144 case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
5145 case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
5146 case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: {
5147 bool RequireV = false;
5148 for (unsigned ArgNum = 0; ArgNum < TheCall->getNumArgs(); ++ArgNum)
5149 RequireV |= TheCall->getArg(ArgNum)->getType()->isRVVType(
5150 /* Bitwidth */ 64, /* IsFloat */ false);
5152 if (RequireV && !TI.hasFeature("v"))
5153 return Diag(TheCall->getBeginLoc(),
5154 diag::err_riscv_builtin_requires_extension)
5155 << /* IsExtension */ false << TheCall->getSourceRange() << "v";
5157 break;
5161 switch (BuiltinID) {
5162 case RISCVVector::BI__builtin_rvv_vsetvli:
5163 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
5164 CheckRISCVLMUL(TheCall, 2);
5165 case RISCVVector::BI__builtin_rvv_vsetvlimax:
5166 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
5167 CheckRISCVLMUL(TheCall, 1);
5168 case RISCVVector::BI__builtin_rvv_vget_v: {
5169 ASTContext::BuiltinVectorTypeInfo ResVecInfo =
5170 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
5171 TheCall->getType().getCanonicalType().getTypePtr()));
5172 ASTContext::BuiltinVectorTypeInfo VecInfo =
5173 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
5174 TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
5175 unsigned MaxIndex;
5176 if (VecInfo.NumVectors != 1) // vget for tuple type
5177 MaxIndex = VecInfo.NumVectors;
5178 else // vget for non-tuple type
5179 MaxIndex = (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
5180 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
5181 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
5183 case RISCVVector::BI__builtin_rvv_vset_v: {
5184 ASTContext::BuiltinVectorTypeInfo ResVecInfo =
5185 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
5186 TheCall->getType().getCanonicalType().getTypePtr()));
5187 ASTContext::BuiltinVectorTypeInfo VecInfo =
5188 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
5189 TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
5190 unsigned MaxIndex;
5191 if (ResVecInfo.NumVectors != 1) // vset for tuple type
5192 MaxIndex = ResVecInfo.NumVectors;
5193 else // vset fo non-tuple type
5194 MaxIndex = (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
5195 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
5196 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
5198 // Vector Crypto
5199 case RISCVVector::BI__builtin_rvv_vaeskf1_vi_tu:
5200 case RISCVVector::BI__builtin_rvv_vaeskf2_vi_tu:
5201 case RISCVVector::BI__builtin_rvv_vaeskf2_vi:
5202 case RISCVVector::BI__builtin_rvv_vsm4k_vi_tu: {
5203 QualType Op1Type = TheCall->getArg(0)->getType();
5204 QualType Op2Type = TheCall->getArg(1)->getType();
5205 return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
5206 CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, 128) ||
5207 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
5209 case RISCVVector::BI__builtin_rvv_vsm3c_vi_tu:
5210 case RISCVVector::BI__builtin_rvv_vsm3c_vi: {
5211 QualType Op1Type = TheCall->getArg(0)->getType();
5212 return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 256) ||
5213 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
5215 case RISCVVector::BI__builtin_rvv_vaeskf1_vi:
5216 case RISCVVector::BI__builtin_rvv_vsm4k_vi: {
5217 QualType Op1Type = TheCall->getArg(0)->getType();
5218 return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
5219 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
5221 case RISCVVector::BI__builtin_rvv_vaesdf_vv:
5222 case RISCVVector::BI__builtin_rvv_vaesdf_vs:
5223 case RISCVVector::BI__builtin_rvv_vaesdm_vv:
5224 case RISCVVector::BI__builtin_rvv_vaesdm_vs:
5225 case RISCVVector::BI__builtin_rvv_vaesef_vv:
5226 case RISCVVector::BI__builtin_rvv_vaesef_vs:
5227 case RISCVVector::BI__builtin_rvv_vaesem_vv:
5228 case RISCVVector::BI__builtin_rvv_vaesem_vs:
5229 case RISCVVector::BI__builtin_rvv_vaesz_vs:
5230 case RISCVVector::BI__builtin_rvv_vsm4r_vv:
5231 case RISCVVector::BI__builtin_rvv_vsm4r_vs:
5232 case RISCVVector::BI__builtin_rvv_vaesdf_vv_tu:
5233 case RISCVVector::BI__builtin_rvv_vaesdf_vs_tu:
5234 case RISCVVector::BI__builtin_rvv_vaesdm_vv_tu:
5235 case RISCVVector::BI__builtin_rvv_vaesdm_vs_tu:
5236 case RISCVVector::BI__builtin_rvv_vaesef_vv_tu:
5237 case RISCVVector::BI__builtin_rvv_vaesef_vs_tu:
5238 case RISCVVector::BI__builtin_rvv_vaesem_vv_tu:
5239 case RISCVVector::BI__builtin_rvv_vaesem_vs_tu:
5240 case RISCVVector::BI__builtin_rvv_vaesz_vs_tu:
5241 case RISCVVector::BI__builtin_rvv_vsm4r_vv_tu:
5242 case RISCVVector::BI__builtin_rvv_vsm4r_vs_tu: {
5243 QualType Op1Type = TheCall->getArg(0)->getType();
5244 QualType Op2Type = TheCall->getArg(1)->getType();
5245 return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
5246 CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, 128);
5248 case RISCVVector::BI__builtin_rvv_vsha2ch_vv:
5249 case RISCVVector::BI__builtin_rvv_vsha2cl_vv:
5250 case RISCVVector::BI__builtin_rvv_vsha2ms_vv:
5251 case RISCVVector::BI__builtin_rvv_vsha2ch_vv_tu:
5252 case RISCVVector::BI__builtin_rvv_vsha2cl_vv_tu:
5253 case RISCVVector::BI__builtin_rvv_vsha2ms_vv_tu: {
5254 QualType Op1Type = TheCall->getArg(0)->getType();
5255 QualType Op2Type = TheCall->getArg(1)->getType();
5256 QualType Op3Type = TheCall->getArg(2)->getType();
5257 uint64_t ElemSize = Op1Type->isRVVType(32, false) ? 32 : 64;
5258 if (ElemSize == 64 && !TI.hasFeature("experimental-zvknhb"))
5259 return
5260 Diag(TheCall->getBeginLoc(), diag::err_riscv_type_requires_extension)
5261 << Op1Type << "experimental-zvknhb";
5263 return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, ElemSize << 2) ||
5264 CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, ElemSize << 2) ||
5265 CheckInvalidVLENandLMUL(TI, TheCall, *this, Op3Type, ElemSize << 2);
5268 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf8:
5269 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf4:
5270 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf2:
5271 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m1:
5272 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m2:
5273 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m4:
5274 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m8:
5275 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf4:
5276 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf2:
5277 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m1:
5278 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m2:
5279 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m4:
5280 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m8:
5281 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32mf2:
5282 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m1:
5283 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m2:
5284 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m4:
5285 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m8:
5286 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m1:
5287 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m2:
5288 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m4:
5289 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m8:
5290 // bit_27_26, bit_24_20, bit_11_7, simm5
5291 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
5292 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
5293 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31) ||
5294 SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
5295 case RISCVVector::BI__builtin_rvv_sf_vc_iv_se:
5296 // bit_27_26, bit_11_7, vs2, simm5
5297 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
5298 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
5299 SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
5300 case RISCVVector::BI__builtin_rvv_sf_vc_v_i:
5301 case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se:
5302 // bit_27_26, bit_24_20, simm5
5303 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
5304 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
5305 SemaBuiltinConstantArgRange(TheCall, 2, -16, 15);
5306 case RISCVVector::BI__builtin_rvv_sf_vc_v_iv:
5307 case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se:
5308 // bit_27_26, vs2, simm5
5309 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
5310 SemaBuiltinConstantArgRange(TheCall, 2, -16, 15);
5311 case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se:
5312 case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se:
5313 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv:
5314 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw:
5315 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se:
5316 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se:
5317 // bit_27_26, vd, vs2, simm5
5318 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
5319 SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
5320 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf8:
5321 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf4:
5322 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf2:
5323 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m1:
5324 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m2:
5325 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m4:
5326 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m8:
5327 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf4:
5328 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf2:
5329 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m1:
5330 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m2:
5331 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m4:
5332 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m8:
5333 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32mf2:
5334 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m1:
5335 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m2:
5336 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m4:
5337 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m8:
5338 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m1:
5339 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m2:
5340 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m4:
5341 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m8:
5342 // bit_27_26, bit_24_20, bit_11_7, xs1
5343 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
5344 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
5345 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
5346 case RISCVVector::BI__builtin_rvv_sf_vc_xv_se:
5347 case RISCVVector::BI__builtin_rvv_sf_vc_vv_se:
5348 // bit_27_26, bit_11_7, vs2, xs1/vs1
5349 case RISCVVector::BI__builtin_rvv_sf_vc_v_x:
5350 case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se:
5351 // bit_27_26, bit_24-20, xs1
5352 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
5353 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
5354 case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se:
5355 case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se:
5356 case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se:
5357 case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se:
5358 // bit_27_26, vd, vs2, xs1
5359 case RISCVVector::BI__builtin_rvv_sf_vc_v_xv:
5360 case RISCVVector::BI__builtin_rvv_sf_vc_v_vv:
5361 case RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se:
5362 case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se:
5363 // bit_27_26, vs2, xs1/vs1
5364 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv:
5365 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv:
5366 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw:
5367 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw:
5368 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se:
5369 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se:
5370 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se:
5371 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se:
5372 // bit_27_26, vd, vs2, xs1/vs1
5373 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
5374 case RISCVVector::BI__builtin_rvv_sf_vc_fv_se:
5375 // bit_26, bit_11_7, vs2, fs1
5376 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) ||
5377 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
5378 case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se:
5379 case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se:
5380 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv:
5381 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw:
5382 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se:
5383 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se:
5384 // bit_26, vd, vs2, fs1
5385 case RISCVVector::BI__builtin_rvv_sf_vc_v_fv:
5386 case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se:
5387 // bit_26, vs2, fs1
5388 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
5389 // Check if byteselect is in [0, 3]
5390 case RISCV::BI__builtin_riscv_aes32dsi:
5391 case RISCV::BI__builtin_riscv_aes32dsmi:
5392 case RISCV::BI__builtin_riscv_aes32esi:
5393 case RISCV::BI__builtin_riscv_aes32esmi:
5394 case RISCV::BI__builtin_riscv_sm4ks:
5395 case RISCV::BI__builtin_riscv_sm4ed:
5396 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
5397 // Check if rnum is in [0, 10]
5398 case RISCV::BI__builtin_riscv_aes64ks1i:
5399 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10);
5400 // Check if value range for vxrm is in [0, 3]
5401 case RISCVVector::BI__builtin_rvv_vaaddu_vv:
5402 case RISCVVector::BI__builtin_rvv_vaaddu_vx:
5403 case RISCVVector::BI__builtin_rvv_vaadd_vv:
5404 case RISCVVector::BI__builtin_rvv_vaadd_vx:
5405 case RISCVVector::BI__builtin_rvv_vasubu_vv:
5406 case RISCVVector::BI__builtin_rvv_vasubu_vx:
5407 case RISCVVector::BI__builtin_rvv_vasub_vv:
5408 case RISCVVector::BI__builtin_rvv_vasub_vx:
5409 case RISCVVector::BI__builtin_rvv_vsmul_vv:
5410 case RISCVVector::BI__builtin_rvv_vsmul_vx:
5411 case RISCVVector::BI__builtin_rvv_vssra_vv:
5412 case RISCVVector::BI__builtin_rvv_vssra_vx:
5413 case RISCVVector::BI__builtin_rvv_vssrl_vv:
5414 case RISCVVector::BI__builtin_rvv_vssrl_vx:
5415 case RISCVVector::BI__builtin_rvv_vnclip_wv:
5416 case RISCVVector::BI__builtin_rvv_vnclip_wx:
5417 case RISCVVector::BI__builtin_rvv_vnclipu_wv:
5418 case RISCVVector::BI__builtin_rvv_vnclipu_wx:
5419 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
5420 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu:
5421 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu:
5422 case RISCVVector::BI__builtin_rvv_vaadd_vv_tu:
5423 case RISCVVector::BI__builtin_rvv_vaadd_vx_tu:
5424 case RISCVVector::BI__builtin_rvv_vasubu_vv_tu:
5425 case RISCVVector::BI__builtin_rvv_vasubu_vx_tu:
5426 case RISCVVector::BI__builtin_rvv_vasub_vv_tu:
5427 case RISCVVector::BI__builtin_rvv_vasub_vx_tu:
5428 case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
5429 case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
5430 case RISCVVector::BI__builtin_rvv_vssra_vv_tu:
5431 case RISCVVector::BI__builtin_rvv_vssra_vx_tu:
5432 case RISCVVector::BI__builtin_rvv_vssrl_vv_tu:
5433 case RISCVVector::BI__builtin_rvv_vssrl_vx_tu:
5434 case RISCVVector::BI__builtin_rvv_vnclip_wv_tu:
5435 case RISCVVector::BI__builtin_rvv_vnclip_wx_tu:
5436 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu:
5437 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu:
5438 case RISCVVector::BI__builtin_rvv_vaaddu_vv_m:
5439 case RISCVVector::BI__builtin_rvv_vaaddu_vx_m:
5440 case RISCVVector::BI__builtin_rvv_vaadd_vv_m:
5441 case RISCVVector::BI__builtin_rvv_vaadd_vx_m:
5442 case RISCVVector::BI__builtin_rvv_vasubu_vv_m:
5443 case RISCVVector::BI__builtin_rvv_vasubu_vx_m:
5444 case RISCVVector::BI__builtin_rvv_vasub_vv_m:
5445 case RISCVVector::BI__builtin_rvv_vasub_vx_m:
5446 case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
5447 case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
5448 case RISCVVector::BI__builtin_rvv_vssra_vv_m:
5449 case RISCVVector::BI__builtin_rvv_vssra_vx_m:
5450 case RISCVVector::BI__builtin_rvv_vssrl_vv_m:
5451 case RISCVVector::BI__builtin_rvv_vssrl_vx_m:
5452 case RISCVVector::BI__builtin_rvv_vnclip_wv_m:
5453 case RISCVVector::BI__builtin_rvv_vnclip_wx_m:
5454 case RISCVVector::BI__builtin_rvv_vnclipu_wv_m:
5455 case RISCVVector::BI__builtin_rvv_vnclipu_wx_m:
5456 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
5457 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum:
5458 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu:
5459 case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu:
5460 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum:
5461 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu:
5462 case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu:
5463 case RISCVVector::BI__builtin_rvv_vaadd_vv_tum:
5464 case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu:
5465 case RISCVVector::BI__builtin_rvv_vaadd_vv_mu:
5466 case RISCVVector::BI__builtin_rvv_vaadd_vx_tum:
5467 case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu:
5468 case RISCVVector::BI__builtin_rvv_vaadd_vx_mu:
5469 case RISCVVector::BI__builtin_rvv_vasubu_vv_tum:
5470 case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu:
5471 case RISCVVector::BI__builtin_rvv_vasubu_vv_mu:
5472 case RISCVVector::BI__builtin_rvv_vasubu_vx_tum:
5473 case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu:
5474 case RISCVVector::BI__builtin_rvv_vasubu_vx_mu:
5475 case RISCVVector::BI__builtin_rvv_vasub_vv_tum:
5476 case RISCVVector::BI__builtin_rvv_vasub_vv_tumu:
5477 case RISCVVector::BI__builtin_rvv_vasub_vv_mu:
5478 case RISCVVector::BI__builtin_rvv_vasub_vx_tum:
5479 case RISCVVector::BI__builtin_rvv_vasub_vx_tumu:
5480 case RISCVVector::BI__builtin_rvv_vasub_vx_mu:
5481 case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
5482 case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
5483 case RISCVVector::BI__builtin_rvv_vssra_vv_mu:
5484 case RISCVVector::BI__builtin_rvv_vssra_vx_mu:
5485 case RISCVVector::BI__builtin_rvv_vssrl_vv_mu:
5486 case RISCVVector::BI__builtin_rvv_vssrl_vx_mu:
5487 case RISCVVector::BI__builtin_rvv_vnclip_wv_mu:
5488 case RISCVVector::BI__builtin_rvv_vnclip_wx_mu:
5489 case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu:
5490 case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu:
5491 case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
5492 case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
5493 case RISCVVector::BI__builtin_rvv_vssra_vv_tum:
5494 case RISCVVector::BI__builtin_rvv_vssra_vx_tum:
5495 case RISCVVector::BI__builtin_rvv_vssrl_vv_tum:
5496 case RISCVVector::BI__builtin_rvv_vssrl_vx_tum:
5497 case RISCVVector::BI__builtin_rvv_vnclip_wv_tum:
5498 case RISCVVector::BI__builtin_rvv_vnclip_wx_tum:
5499 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum:
5500 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum:
5501 case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
5502 case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu:
5503 case RISCVVector::BI__builtin_rvv_vssra_vv_tumu:
5504 case RISCVVector::BI__builtin_rvv_vssra_vx_tumu:
5505 case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu:
5506 case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu:
5507 case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu:
5508 case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu:
5509 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
5510 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
5511 return SemaBuiltinConstantArgRange(TheCall, 4, 0, 3);
5512 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm:
5513 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm:
5514 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm:
5515 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm:
5516 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm:
5517 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm:
5518 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm:
5519 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm:
5520 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm:
5521 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm:
5522 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm:
5523 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm:
5524 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm:
5525 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 4);
5526 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
5527 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
5528 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm:
5529 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm:
5530 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm:
5531 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm:
5532 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm:
5533 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm:
5534 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm:
5535 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm:
5536 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm:
5537 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm:
5538 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm:
5539 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm:
5540 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm:
5541 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm:
5542 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm:
5543 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm:
5544 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm:
5545 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm:
5546 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm:
5547 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm:
5548 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm:
5549 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm:
5550 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu:
5551 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu:
5552 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu:
5553 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu:
5554 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu:
5555 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu:
5556 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu:
5557 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu:
5558 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu:
5559 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu:
5560 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu:
5561 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu:
5562 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu:
5563 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m:
5564 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m:
5565 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m:
5566 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m:
5567 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m:
5568 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m:
5569 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m:
5570 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m:
5571 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m:
5572 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m:
5573 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m:
5574 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m:
5575 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m:
5576 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 4);
5577 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
5578 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
5579 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu:
5580 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu:
5581 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu:
5582 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu:
5583 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu:
5584 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu:
5585 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu:
5586 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu:
5587 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu:
5588 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu:
5589 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu:
5590 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu:
5591 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu:
5592 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu:
5593 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu:
5594 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu:
5595 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu:
5596 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu:
5597 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu:
5598 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu:
5599 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu:
5600 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu:
5601 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm:
5602 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm:
5603 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm:
5604 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm:
5605 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm:
5606 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm:
5607 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm:
5608 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm:
5609 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm:
5610 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm:
5611 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm:
5612 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm:
5613 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm:
5614 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm:
5615 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm:
5616 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm:
5617 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm:
5618 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm:
5619 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm:
5620 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm:
5621 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm:
5622 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm:
5623 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm:
5624 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm:
5625 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu:
5626 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu:
5627 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu:
5628 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu:
5629 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu:
5630 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu:
5631 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu:
5632 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu:
5633 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu:
5634 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu:
5635 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu:
5636 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu:
5637 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu:
5638 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu:
5639 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu:
5640 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu:
5641 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu:
5642 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu:
5643 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu:
5644 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu:
5645 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu:
5646 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu:
5647 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu:
5648 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu:
5649 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m:
5650 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m:
5651 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m:
5652 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m:
5653 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m:
5654 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m:
5655 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m:
5656 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m:
5657 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m:
5658 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m:
5659 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m:
5660 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m:
5661 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m:
5662 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m:
5663 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m:
5664 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m:
5665 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m:
5666 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m:
5667 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m:
5668 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m:
5669 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m:
5670 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m:
5671 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m:
5672 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m:
5673 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum:
5674 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum:
5675 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum:
5676 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum:
5677 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum:
5678 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum:
5679 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum:
5680 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum:
5681 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum:
5682 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum:
5683 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum:
5684 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum:
5685 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum:
5686 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu:
5687 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu:
5688 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu:
5689 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu:
5690 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu:
5691 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu:
5692 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu:
5693 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu:
5694 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu:
5695 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu:
5696 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu:
5697 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu:
5698 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu:
5699 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu:
5700 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu:
5701 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu:
5702 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu:
5703 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu:
5704 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu:
5705 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu:
5706 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu:
5707 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu:
5708 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu:
5709 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu:
5710 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu:
5711 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu:
5712 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 4);
5713 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m:
5714 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m:
5715 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m:
5716 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m:
5717 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m:
5718 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m:
5719 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m:
5720 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m:
5721 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m:
5722 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m:
5723 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m:
5724 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m:
5725 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m:
5726 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m:
5727 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m:
5728 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m:
5729 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m:
5730 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m:
5731 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m:
5732 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m:
5733 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m:
5734 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m:
5735 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m:
5736 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m:
5737 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
5738 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
5739 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum:
5740 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum:
5741 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum:
5742 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum:
5743 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum:
5744 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum:
5745 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum:
5746 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum:
5747 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum:
5748 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum:
5749 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum:
5750 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum:
5751 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum:
5752 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum:
5753 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum:
5754 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum:
5755 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum:
5756 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum:
5757 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum:
5758 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum:
5759 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum:
5760 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum:
5761 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum:
5762 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum:
5763 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum:
5764 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum:
5765 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum:
5766 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum:
5767 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum:
5768 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum:
5769 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum:
5770 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum:
5771 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum:
5772 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum:
5773 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum:
5774 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum:
5775 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum:
5776 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum:
5777 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum:
5778 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum:
5779 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum:
5780 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum:
5781 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum:
5782 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum:
5783 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum:
5784 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum:
5785 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu:
5786 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu:
5787 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu:
5788 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu:
5789 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu:
5790 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu:
5791 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu:
5792 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu:
5793 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu:
5794 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu:
5795 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu:
5796 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu:
5797 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu:
5798 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu:
5799 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu:
5800 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu:
5801 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu:
5802 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu:
5803 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu:
5804 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu:
5805 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu:
5806 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu:
5807 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu:
5808 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu:
5809 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu:
5810 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu:
5811 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu:
5812 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu:
5813 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu:
5814 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu:
5815 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu:
5816 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu:
5817 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu:
5818 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu:
5819 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu:
5820 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu:
5821 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu:
5822 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu:
5823 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu:
5824 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu:
5825 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu:
5826 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu:
5827 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu:
5828 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu:
5829 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
5830 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
5831 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu:
5832 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu:
5833 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu:
5834 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu:
5835 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu:
5836 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu:
5837 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu:
5838 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu:
5839 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu:
5840 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu:
5841 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu:
5842 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu:
5843 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu:
5844 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu:
5845 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu:
5846 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu:
5847 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu:
5848 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu:
5849 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu:
5850 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu:
5851 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu:
5852 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu:
5853 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu:
5854 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu:
5855 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu:
5856 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu:
5857 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu:
5858 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu:
5859 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu:
5860 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu:
5861 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu:
5862 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu:
5863 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu:
5864 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu:
5865 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu:
5866 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu:
5867 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu:
5868 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu:
5869 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu:
5870 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu:
5871 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu:
5872 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu:
5873 return SemaBuiltinConstantArgRange(TheCall, 4, 0, 4);
5874 case RISCV::BI__builtin_riscv_ntl_load:
5875 case RISCV::BI__builtin_riscv_ntl_store:
5876 DeclRefExpr *DRE =
5877 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
5878 assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store ||
5879 BuiltinID == RISCV::BI__builtin_riscv_ntl_load) &&
5880 "Unexpected RISC-V nontemporal load/store builtin!");
5881 bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store;
5882 unsigned NumArgs = IsStore ? 3 : 2;
5884 if (checkArgCountAtLeast(*this, TheCall, NumArgs - 1))
5885 return true;
5887 if (checkArgCountAtMost(*this, TheCall, NumArgs))
5888 return true;
5890 // Domain value should be compile-time constant.
5891 // 2 <= domain <= 5
5892 if (TheCall->getNumArgs() == NumArgs &&
5893 SemaBuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5))
5894 return true;
5896 Expr *PointerArg = TheCall->getArg(0);
5897 ExprResult PointerArgResult =
5898 DefaultFunctionArrayLvalueConversion(PointerArg);
5900 if (PointerArgResult.isInvalid())
5901 return true;
5902 PointerArg = PointerArgResult.get();
5904 const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>();
5905 if (!PtrType) {
5906 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
5907 << PointerArg->getType() << PointerArg->getSourceRange();
5908 return true;
5911 QualType ValType = PtrType->getPointeeType();
5912 ValType = ValType.getUnqualifiedType();
5913 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
5914 !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
5915 !ValType->isVectorType() && !ValType->isRVVType()) {
5916 Diag(DRE->getBeginLoc(),
5917 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
5918 << PointerArg->getType() << PointerArg->getSourceRange();
5919 return true;
5922 if (!IsStore) {
5923 TheCall->setType(ValType);
5924 return false;
5927 ExprResult ValArg = TheCall->getArg(1);
5928 InitializedEntity Entity = InitializedEntity::InitializeParameter(
5929 Context, ValType, /*consume*/ false);
5930 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
5931 if (ValArg.isInvalid())
5932 return true;
5934 TheCall->setArg(1, ValArg.get());
5935 TheCall->setType(Context.VoidTy);
5936 return false;
5939 return false;
bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  // __builtin_tabort: transaction-abort codes below 256 are reserved by the
  // architecture, so reject any constant argument in [0, 255]. Non-constant
  // arguments are deliberately allowed through (checked at runtime, if ever).
  if (BuiltinID == SystemZ::BI__builtin_tabort) {
    Expr *Arg = TheCall->getArg(0);
    if (std::optional<llvm::APSInt> AbortCode =
            Arg->getIntegerConstantExpr(Context))
      if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
        return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
               << Arg->getSourceRange();
  }

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here: i is the 0-based argument index of the immediate,
  // [l, u] the inclusive range it must lie in.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_verimb:
  case SystemZ::BI__builtin_s390_verimh:
  case SystemZ::BI__builtin_s390_verimf:
  case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
  case SystemZ::BI__builtin_s390_vfaeb:
  case SystemZ::BI__builtin_s390_vfaeh:
  case SystemZ::BI__builtin_s390_vfaef:
  case SystemZ::BI__builtin_s390_vfaebs:
  case SystemZ::BI__builtin_s390_vfaehs:
  case SystemZ::BI__builtin_s390_vfaefs:
  case SystemZ::BI__builtin_s390_vfaezb:
  case SystemZ::BI__builtin_s390_vfaezh:
  case SystemZ::BI__builtin_s390_vfaezf:
  case SystemZ::BI__builtin_s390_vfaezbs:
  case SystemZ::BI__builtin_s390_vfaezhs:
  case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
  // vfisb/vfidb carry two immediates; check both and fold the results.
  case SystemZ::BI__builtin_s390_vfisb:
  case SystemZ::BI__builtin_s390_vfidb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case SystemZ::BI__builtin_s390_vftcisb:
  case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
  case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vstrcb:
  case SystemZ::BI__builtin_s390_vstrch:
  case SystemZ::BI__builtin_s390_vstrcf:
  case SystemZ::BI__builtin_s390_vstrczb:
  case SystemZ::BI__builtin_s390_vstrczh:
  case SystemZ::BI__builtin_s390_vstrczf:
  case SystemZ::BI__builtin_s390_vstrcbs:
  case SystemZ::BI__builtin_s390_vstrchs:
  case SystemZ::BI__builtin_s390_vstrcfs:
  case SystemZ::BI__builtin_s390_vstrczbs:
  case SystemZ::BI__builtin_s390_vstrczhs:
  case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfminsb:
  case SystemZ::BI__builtin_s390_vfmaxsb:
  case SystemZ::BI__builtin_s390_vfmindb:
  case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vclfnhs:
  case SystemZ::BI__builtin_s390_vclfnls:
  case SystemZ::BI__builtin_s390_vcfn:
  case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
bool Sema::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
                                               unsigned BuiltinID,
                                               CallExpr *TheCall) {
  // Dispatch the reference-types / table builtins to their dedicated
  // checkers; every other WebAssembly builtin needs no extra semantic
  // checking beyond the generic prototype check, so fall through to false.
  switch (BuiltinID) {
  case WebAssembly::BI__builtin_wasm_ref_null_extern:
    return BuiltinWasmRefNullExtern(TheCall);
  case WebAssembly::BI__builtin_wasm_ref_null_func:
    return BuiltinWasmRefNullFunc(TheCall);
  case WebAssembly::BI__builtin_wasm_table_get:
    return BuiltinWasmTableGet(TheCall);
  case WebAssembly::BI__builtin_wasm_table_set:
    return BuiltinWasmTableSet(TheCall);
  case WebAssembly::BI__builtin_wasm_table_size:
    return BuiltinWasmTableSize(TheCall);
  case WebAssembly::BI__builtin_wasm_table_grow:
    return BuiltinWasmTableGrow(TheCall);
  case WebAssembly::BI__builtin_wasm_table_fill:
    return BuiltinWasmTableFill(TheCall);
  case WebAssembly::BI__builtin_wasm_table_copy:
    return BuiltinWasmTableCopy(TheCall);
  }

  return false;
}
6037 void Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D) {
6038 const TargetInfo &TI = Context.getTargetInfo();
6039 // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) requires at
6040 // least zve64x
6041 if ((Ty->isRVVType(/* Bitwidth */ 64, /* IsFloat */ false) ||
6042 Ty->isRVVType(/* ElementCount */ 1)) &&
6043 !TI.hasFeature("zve64x"))
6044 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
6045 if (Ty->isRVVType(/* Bitwidth */ 16, /* IsFloat */ true) &&
6046 !TI.hasFeature("zvfh") && !TI.hasFeature("zvfhmin"))
6047 Diag(Loc, diag::err_riscv_type_requires_extension, D)
6048 << Ty << "zvfh or zvfhmin";
6049 if (Ty->isRVVType(/* Bitwidth */ 32, /* IsFloat */ true) &&
6050 !TI.hasFeature("zve32f"))
6051 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
6052 if (Ty->isRVVType(/* Bitwidth */ 64, /* IsFloat */ true) &&
6053 !TI.hasFeature("zve64d"))
6054 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
6055 // Given that caller already checked isRVVType() before calling this function,
6056 // if we don't have at least zve32x supported, then we need to emit error.
6057 if (!TI.hasFeature("zve32x"))
6058 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
6061 bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
6062 unsigned BuiltinID,
6063 CallExpr *TheCall) {
6064 switch (BuiltinID) {
6065 case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
6066 case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
6067 case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
6068 case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
6069 return checkArgCountAtMost(*this, TheCall, 3);
6072 return false;
6075 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
6076 /// This checks that the target supports __builtin_cpu_supports and
6077 /// that the string argument is constant and valid.
6078 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI,
6079 CallExpr *TheCall) {
6080 Expr *Arg = TheCall->getArg(0);
6082 // Check if the argument is a string literal.
6083 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
6084 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
6085 << Arg->getSourceRange();
6087 // Check the contents of the string.
6088 StringRef Feature =
6089 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
6090 if (!TI.validateCpuSupports(Feature))
6091 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
6092 << Arg->getSourceRange();
6093 return false;
6096 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
6097 /// This checks that the target supports __builtin_cpu_is and
6098 /// that the string argument is constant and valid.
6099 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) {
6100 Expr *Arg = TheCall->getArg(0);
6102 // Check if the argument is a string literal.
6103 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
6104 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
6105 << Arg->getSourceRange();
6107 // Check the contents of the string.
6108 StringRef Feature =
6109 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
6110 if (!TI.validateCpuIs(Feature))
6111 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
6112 << Arg->getSourceRange();
6113 return false;
6116 // Check if the rounding mode is legal.
6117 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
6118 // Indicates if this instruction has rounding control or just SAE.
6119 bool HasRC = false;
6121 unsigned ArgNum = 0;
6122 switch (BuiltinID) {
6123 default:
6124 return false;
6125 case X86::BI__builtin_ia32_vcvttsd2si32:
6126 case X86::BI__builtin_ia32_vcvttsd2si64:
6127 case X86::BI__builtin_ia32_vcvttsd2usi32:
6128 case X86::BI__builtin_ia32_vcvttsd2usi64:
6129 case X86::BI__builtin_ia32_vcvttss2si32:
6130 case X86::BI__builtin_ia32_vcvttss2si64:
6131 case X86::BI__builtin_ia32_vcvttss2usi32:
6132 case X86::BI__builtin_ia32_vcvttss2usi64:
6133 case X86::BI__builtin_ia32_vcvttsh2si32:
6134 case X86::BI__builtin_ia32_vcvttsh2si64:
6135 case X86::BI__builtin_ia32_vcvttsh2usi32:
6136 case X86::BI__builtin_ia32_vcvttsh2usi64:
6137 ArgNum = 1;
6138 break;
6139 case X86::BI__builtin_ia32_maxpd512:
6140 case X86::BI__builtin_ia32_maxps512:
6141 case X86::BI__builtin_ia32_minpd512:
6142 case X86::BI__builtin_ia32_minps512:
6143 case X86::BI__builtin_ia32_maxph512:
6144 case X86::BI__builtin_ia32_minph512:
6145 ArgNum = 2;
6146 break;
6147 case X86::BI__builtin_ia32_vcvtph2pd512_mask:
6148 case X86::BI__builtin_ia32_vcvtph2psx512_mask:
6149 case X86::BI__builtin_ia32_cvtps2pd512_mask:
6150 case X86::BI__builtin_ia32_cvttpd2dq512_mask:
6151 case X86::BI__builtin_ia32_cvttpd2qq512_mask:
6152 case X86::BI__builtin_ia32_cvttpd2udq512_mask:
6153 case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
6154 case X86::BI__builtin_ia32_cvttps2dq512_mask:
6155 case X86::BI__builtin_ia32_cvttps2qq512_mask:
6156 case X86::BI__builtin_ia32_cvttps2udq512_mask:
6157 case X86::BI__builtin_ia32_cvttps2uqq512_mask:
6158 case X86::BI__builtin_ia32_vcvttph2w512_mask:
6159 case X86::BI__builtin_ia32_vcvttph2uw512_mask:
6160 case X86::BI__builtin_ia32_vcvttph2dq512_mask:
6161 case X86::BI__builtin_ia32_vcvttph2udq512_mask:
6162 case X86::BI__builtin_ia32_vcvttph2qq512_mask:
6163 case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
6164 case X86::BI__builtin_ia32_exp2pd_mask:
6165 case X86::BI__builtin_ia32_exp2ps_mask:
6166 case X86::BI__builtin_ia32_getexppd512_mask:
6167 case X86::BI__builtin_ia32_getexpps512_mask:
6168 case X86::BI__builtin_ia32_getexpph512_mask:
6169 case X86::BI__builtin_ia32_rcp28pd_mask:
6170 case X86::BI__builtin_ia32_rcp28ps_mask:
6171 case X86::BI__builtin_ia32_rsqrt28pd_mask:
6172 case X86::BI__builtin_ia32_rsqrt28ps_mask:
6173 case X86::BI__builtin_ia32_vcomisd:
6174 case X86::BI__builtin_ia32_vcomiss:
6175 case X86::BI__builtin_ia32_vcomish:
6176 case X86::BI__builtin_ia32_vcvtph2ps512_mask:
6177 ArgNum = 3;
6178 break;
6179 case X86::BI__builtin_ia32_cmppd512_mask:
6180 case X86::BI__builtin_ia32_cmpps512_mask:
6181 case X86::BI__builtin_ia32_cmpsd_mask:
6182 case X86::BI__builtin_ia32_cmpss_mask:
6183 case X86::BI__builtin_ia32_cmpsh_mask:
6184 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
6185 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
6186 case X86::BI__builtin_ia32_cvtss2sd_round_mask:
6187 case X86::BI__builtin_ia32_getexpsd128_round_mask:
6188 case X86::BI__builtin_ia32_getexpss128_round_mask:
6189 case X86::BI__builtin_ia32_getexpsh128_round_mask:
6190 case X86::BI__builtin_ia32_getmantpd512_mask:
6191 case X86::BI__builtin_ia32_getmantps512_mask:
6192 case X86::BI__builtin_ia32_getmantph512_mask:
6193 case X86::BI__builtin_ia32_maxsd_round_mask:
6194 case X86::BI__builtin_ia32_maxss_round_mask:
6195 case X86::BI__builtin_ia32_maxsh_round_mask:
6196 case X86::BI__builtin_ia32_minsd_round_mask:
6197 case X86::BI__builtin_ia32_minss_round_mask:
6198 case X86::BI__builtin_ia32_minsh_round_mask:
6199 case X86::BI__builtin_ia32_rcp28sd_round_mask:
6200 case X86::BI__builtin_ia32_rcp28ss_round_mask:
6201 case X86::BI__builtin_ia32_reducepd512_mask:
6202 case X86::BI__builtin_ia32_reduceps512_mask:
6203 case X86::BI__builtin_ia32_reduceph512_mask:
6204 case X86::BI__builtin_ia32_rndscalepd_mask:
6205 case X86::BI__builtin_ia32_rndscaleps_mask:
6206 case X86::BI__builtin_ia32_rndscaleph_mask:
6207 case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
6208 case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
6209 ArgNum = 4;
6210 break;
6211 case X86::BI__builtin_ia32_fixupimmpd512_mask:
6212 case X86::BI__builtin_ia32_fixupimmpd512_maskz:
6213 case X86::BI__builtin_ia32_fixupimmps512_mask:
6214 case X86::BI__builtin_ia32_fixupimmps512_maskz:
6215 case X86::BI__builtin_ia32_fixupimmsd_mask:
6216 case X86::BI__builtin_ia32_fixupimmsd_maskz:
6217 case X86::BI__builtin_ia32_fixupimmss_mask:
6218 case X86::BI__builtin_ia32_fixupimmss_maskz:
6219 case X86::BI__builtin_ia32_getmantsd_round_mask:
6220 case X86::BI__builtin_ia32_getmantss_round_mask:
6221 case X86::BI__builtin_ia32_getmantsh_round_mask:
6222 case X86::BI__builtin_ia32_rangepd512_mask:
6223 case X86::BI__builtin_ia32_rangeps512_mask:
6224 case X86::BI__builtin_ia32_rangesd128_round_mask:
6225 case X86::BI__builtin_ia32_rangess128_round_mask:
6226 case X86::BI__builtin_ia32_reducesd_mask:
6227 case X86::BI__builtin_ia32_reducess_mask:
6228 case X86::BI__builtin_ia32_reducesh_mask:
6229 case X86::BI__builtin_ia32_rndscalesd_round_mask:
6230 case X86::BI__builtin_ia32_rndscaless_round_mask:
6231 case X86::BI__builtin_ia32_rndscalesh_round_mask:
6232 ArgNum = 5;
6233 break;
6234 case X86::BI__builtin_ia32_vcvtsd2si64:
6235 case X86::BI__builtin_ia32_vcvtsd2si32:
6236 case X86::BI__builtin_ia32_vcvtsd2usi32:
6237 case X86::BI__builtin_ia32_vcvtsd2usi64:
6238 case X86::BI__builtin_ia32_vcvtss2si32:
6239 case X86::BI__builtin_ia32_vcvtss2si64:
6240 case X86::BI__builtin_ia32_vcvtss2usi32:
6241 case X86::BI__builtin_ia32_vcvtss2usi64:
6242 case X86::BI__builtin_ia32_vcvtsh2si32:
6243 case X86::BI__builtin_ia32_vcvtsh2si64:
6244 case X86::BI__builtin_ia32_vcvtsh2usi32:
6245 case X86::BI__builtin_ia32_vcvtsh2usi64:
6246 case X86::BI__builtin_ia32_sqrtpd512:
6247 case X86::BI__builtin_ia32_sqrtps512:
6248 case X86::BI__builtin_ia32_sqrtph512:
6249 ArgNum = 1;
6250 HasRC = true;
6251 break;
6252 case X86::BI__builtin_ia32_addph512:
6253 case X86::BI__builtin_ia32_divph512:
6254 case X86::BI__builtin_ia32_mulph512:
6255 case X86::BI__builtin_ia32_subph512:
6256 case X86::BI__builtin_ia32_addpd512:
6257 case X86::BI__builtin_ia32_addps512:
6258 case X86::BI__builtin_ia32_divpd512:
6259 case X86::BI__builtin_ia32_divps512:
6260 case X86::BI__builtin_ia32_mulpd512:
6261 case X86::BI__builtin_ia32_mulps512:
6262 case X86::BI__builtin_ia32_subpd512:
6263 case X86::BI__builtin_ia32_subps512:
6264 case X86::BI__builtin_ia32_cvtsi2sd64:
6265 case X86::BI__builtin_ia32_cvtsi2ss32:
6266 case X86::BI__builtin_ia32_cvtsi2ss64:
6267 case X86::BI__builtin_ia32_cvtusi2sd64:
6268 case X86::BI__builtin_ia32_cvtusi2ss32:
6269 case X86::BI__builtin_ia32_cvtusi2ss64:
6270 case X86::BI__builtin_ia32_vcvtusi2sh:
6271 case X86::BI__builtin_ia32_vcvtusi642sh:
6272 case X86::BI__builtin_ia32_vcvtsi2sh:
6273 case X86::BI__builtin_ia32_vcvtsi642sh:
6274 ArgNum = 2;
6275 HasRC = true;
6276 break;
6277 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
6278 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
6279 case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
6280 case X86::BI__builtin_ia32_vcvtps2phx512_mask:
6281 case X86::BI__builtin_ia32_cvtpd2ps512_mask:
6282 case X86::BI__builtin_ia32_cvtpd2dq512_mask:
6283 case X86::BI__builtin_ia32_cvtpd2qq512_mask:
6284 case X86::BI__builtin_ia32_cvtpd2udq512_mask:
6285 case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
6286 case X86::BI__builtin_ia32_cvtps2dq512_mask:
6287 case X86::BI__builtin_ia32_cvtps2qq512_mask:
6288 case X86::BI__builtin_ia32_cvtps2udq512_mask:
6289 case X86::BI__builtin_ia32_cvtps2uqq512_mask:
6290 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
6291 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
6292 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
6293 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
6294 case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
6295 case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
6296 case X86::BI__builtin_ia32_vcvtw2ph512_mask:
6297 case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
6298 case X86::BI__builtin_ia32_vcvtph2w512_mask:
6299 case X86::BI__builtin_ia32_vcvtph2uw512_mask:
6300 case X86::BI__builtin_ia32_vcvtph2dq512_mask:
6301 case X86::BI__builtin_ia32_vcvtph2udq512_mask:
6302 case X86::BI__builtin_ia32_vcvtph2qq512_mask:
6303 case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
6304 case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
6305 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
6306 ArgNum = 3;
6307 HasRC = true;
6308 break;
6309 case X86::BI__builtin_ia32_addsh_round_mask:
6310 case X86::BI__builtin_ia32_addss_round_mask:
6311 case X86::BI__builtin_ia32_addsd_round_mask:
6312 case X86::BI__builtin_ia32_divsh_round_mask:
6313 case X86::BI__builtin_ia32_divss_round_mask:
6314 case X86::BI__builtin_ia32_divsd_round_mask:
6315 case X86::BI__builtin_ia32_mulsh_round_mask:
6316 case X86::BI__builtin_ia32_mulss_round_mask:
6317 case X86::BI__builtin_ia32_mulsd_round_mask:
6318 case X86::BI__builtin_ia32_subsh_round_mask:
6319 case X86::BI__builtin_ia32_subss_round_mask:
6320 case X86::BI__builtin_ia32_subsd_round_mask:
6321 case X86::BI__builtin_ia32_scalefph512_mask:
6322 case X86::BI__builtin_ia32_scalefpd512_mask:
6323 case X86::BI__builtin_ia32_scalefps512_mask:
6324 case X86::BI__builtin_ia32_scalefsd_round_mask:
6325 case X86::BI__builtin_ia32_scalefss_round_mask:
6326 case X86::BI__builtin_ia32_scalefsh_round_mask:
6327 case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
6328 case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
6329 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
6330 case X86::BI__builtin_ia32_sqrtsd_round_mask:
6331 case X86::BI__builtin_ia32_sqrtss_round_mask:
6332 case X86::BI__builtin_ia32_sqrtsh_round_mask:
6333 case X86::BI__builtin_ia32_vfmaddsd3_mask:
6334 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
6335 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
6336 case X86::BI__builtin_ia32_vfmaddss3_mask:
6337 case X86::BI__builtin_ia32_vfmaddss3_maskz:
6338 case X86::BI__builtin_ia32_vfmaddss3_mask3:
6339 case X86::BI__builtin_ia32_vfmaddsh3_mask:
6340 case X86::BI__builtin_ia32_vfmaddsh3_maskz:
6341 case X86::BI__builtin_ia32_vfmaddsh3_mask3:
6342 case X86::BI__builtin_ia32_vfmaddpd512_mask:
6343 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
6344 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
6345 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
6346 case X86::BI__builtin_ia32_vfmaddps512_mask:
6347 case X86::BI__builtin_ia32_vfmaddps512_maskz:
6348 case X86::BI__builtin_ia32_vfmaddps512_mask3:
6349 case X86::BI__builtin_ia32_vfmsubps512_mask3:
6350 case X86::BI__builtin_ia32_vfmaddph512_mask:
6351 case X86::BI__builtin_ia32_vfmaddph512_maskz:
6352 case X86::BI__builtin_ia32_vfmaddph512_mask3:
6353 case X86::BI__builtin_ia32_vfmsubph512_mask3:
6354 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
6355 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
6356 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
6357 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
6358 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
6359 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
6360 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
6361 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
6362 case X86::BI__builtin_ia32_vfmaddsubph512_mask:
6363 case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
6364 case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
6365 case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
6366 case X86::BI__builtin_ia32_vfmaddcsh_mask:
6367 case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
6368 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
6369 case X86::BI__builtin_ia32_vfmaddcph512_mask:
6370 case X86::BI__builtin_ia32_vfmaddcph512_maskz:
6371 case X86::BI__builtin_ia32_vfmaddcph512_mask3:
6372 case X86::BI__builtin_ia32_vfcmaddcsh_mask:
6373 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
6374 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
6375 case X86::BI__builtin_ia32_vfcmaddcph512_mask:
6376 case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
6377 case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
6378 case X86::BI__builtin_ia32_vfmulcsh_mask:
6379 case X86::BI__builtin_ia32_vfmulcph512_mask:
6380 case X86::BI__builtin_ia32_vfcmulcsh_mask:
6381 case X86::BI__builtin_ia32_vfcmulcph512_mask:
6382 ArgNum = 4;
6383 HasRC = true;
6384 break;
6387 llvm::APSInt Result;
6389 // We can't check the value of a dependent argument.
6390 Expr *Arg = TheCall->getArg(ArgNum);
6391 if (Arg->isTypeDependent() || Arg->isValueDependent())
6392 return false;
6394 // Check constant-ness first.
6395 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6396 return true;
6398 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit
6399 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only
6400 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding
6401 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
6402 if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
6403 Result == 8/*ROUND_NO_EXC*/ ||
6404 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
6405 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
6406 return false;
6408 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
6409 << Arg->getSourceRange();
// Check if the gather/scatter scale is legal.
/// Validate the scale immediate of an x86 gather/scatter builtin.
/// The instruction encoding only accepts a scale of 1, 2, 4 or 8; any other
/// constant is diagnosed. Non-gather/scatter builtins pass trivially.
/// \returns true on error (a diagnostic has been emitted), false otherwise.
bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                             CallExpr *TheCall) {
  // Index of the scale operand within the call's argument list; set per
  // builtin below. Builtins not listed are not gather/scatter and need no
  // check.
  unsigned ArgNum = 0;
  switch (BuiltinID) {
  default:
    return false;
  // Prefetch gather/scatter forms carry the scale as argument 3.
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    ArgNum = 3;
    break;
  // Data gather/scatter forms carry the scale as argument 4.
  case X86::BI__builtin_ia32_gatherd_pd:
  case X86::BI__builtin_ia32_gatherd_pd256:
  case X86::BI__builtin_ia32_gatherq_pd:
  case X86::BI__builtin_ia32_gatherq_pd256:
  case X86::BI__builtin_ia32_gatherd_ps:
  case X86::BI__builtin_ia32_gatherd_ps256:
  case X86::BI__builtin_ia32_gatherq_ps:
  case X86::BI__builtin_ia32_gatherq_ps256:
  case X86::BI__builtin_ia32_gatherd_q:
  case X86::BI__builtin_ia32_gatherd_q256:
  case X86::BI__builtin_ia32_gatherq_q:
  case X86::BI__builtin_ia32_gatherq_q256:
  case X86::BI__builtin_ia32_gatherd_d:
  case X86::BI__builtin_ia32_gatherd_d256:
  case X86::BI__builtin_ia32_gatherq_d:
  case X86::BI__builtin_ia32_gatherq_d256:
  case X86::BI__builtin_ia32_gather3div2df:
  case X86::BI__builtin_ia32_gather3div2di:
  case X86::BI__builtin_ia32_gather3div4df:
  case X86::BI__builtin_ia32_gather3div4di:
  case X86::BI__builtin_ia32_gather3div4sf:
  case X86::BI__builtin_ia32_gather3div4si:
  case X86::BI__builtin_ia32_gather3div8sf:
  case X86::BI__builtin_ia32_gather3div8si:
  case X86::BI__builtin_ia32_gather3siv2df:
  case X86::BI__builtin_ia32_gather3siv2di:
  case X86::BI__builtin_ia32_gather3siv4df:
  case X86::BI__builtin_ia32_gather3siv4di:
  case X86::BI__builtin_ia32_gather3siv4sf:
  case X86::BI__builtin_ia32_gather3siv4si:
  case X86::BI__builtin_ia32_gather3siv8sf:
  case X86::BI__builtin_ia32_gather3siv8si:
  case X86::BI__builtin_ia32_gathersiv8df:
  case X86::BI__builtin_ia32_gathersiv16sf:
  case X86::BI__builtin_ia32_gatherdiv8df:
  case X86::BI__builtin_ia32_gatherdiv16sf:
  case X86::BI__builtin_ia32_gathersiv8di:
  case X86::BI__builtin_ia32_gathersiv16si:
  case X86::BI__builtin_ia32_gatherdiv8di:
  case X86::BI__builtin_ia32_gatherdiv16si:
  case X86::BI__builtin_ia32_scatterdiv2df:
  case X86::BI__builtin_ia32_scatterdiv2di:
  case X86::BI__builtin_ia32_scatterdiv4df:
  case X86::BI__builtin_ia32_scatterdiv4di:
  case X86::BI__builtin_ia32_scatterdiv4sf:
  case X86::BI__builtin_ia32_scatterdiv4si:
  case X86::BI__builtin_ia32_scatterdiv8sf:
  case X86::BI__builtin_ia32_scatterdiv8si:
  case X86::BI__builtin_ia32_scattersiv2df:
  case X86::BI__builtin_ia32_scattersiv2di:
  case X86::BI__builtin_ia32_scattersiv4df:
  case X86::BI__builtin_ia32_scattersiv4di:
  case X86::BI__builtin_ia32_scattersiv4sf:
  case X86::BI__builtin_ia32_scattersiv4si:
  case X86::BI__builtin_ia32_scattersiv8sf:
  case X86::BI__builtin_ia32_scattersiv8si:
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Only the scales the hardware can encode are accepted.
  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}
6515 enum { TileRegLow = 0, TileRegHigh = 7 };
6517 bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
6518 ArrayRef<int> ArgNums) {
6519 for (int ArgNum : ArgNums) {
6520 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
6521 return true;
6523 return false;
6526 bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
6527 ArrayRef<int> ArgNums) {
6528 // Because the max number of tile register is TileRegHigh + 1, so here we use
6529 // each bit to represent the usage of them in bitset.
6530 std::bitset<TileRegHigh + 1> ArgValues;
6531 for (int ArgNum : ArgNums) {
6532 Expr *Arg = TheCall->getArg(ArgNum);
6533 if (Arg->isTypeDependent() || Arg->isValueDependent())
6534 continue;
6536 llvm::APSInt Result;
6537 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6538 return true;
6539 int ArgExtValue = Result.getExtValue();
6540 assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
6541 "Incorrect tile register num.");
6542 if (ArgValues.test(ArgExtValue))
6543 return Diag(TheCall->getBeginLoc(),
6544 diag::err_x86_builtin_tile_arg_duplicate)
6545 << TheCall->getArg(ArgNum)->getSourceRange();
6546 ArgValues.set(ArgExtValue);
6548 return false;
6551 bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
6552 ArrayRef<int> ArgNums) {
6553 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
6554 CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
/// Perform semantic checking on the tile-register operands of an AMX builtin:
/// each named operand must be a constant in [TileRegLow, TileRegHigh], and
/// the three-operand compute builtins additionally require the tiles to be
/// pairwise distinct.
/// \returns true on error (a diagnostic has been emitted), false otherwise.
bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  // Load/store/zero builtins take a single tile operand at argument 0.
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  // Compute builtins take three tile operands (arguments 0, 1 and 2), which
  // must be in range and pairwise distinct.
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
  case X86::BI__builtin_ia32_tdpfp16ps:
  case X86::BI__builtin_ia32_tcmmimfp16ps:
  case X86::BI__builtin_ia32_tcmmrlfp16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}
6577 static bool isX86_32Builtin(unsigned BuiltinID) {
6578 // These builtins only work on x86-32 targets.
6579 switch (BuiltinID) {
6580 case X86::BI__builtin_ia32_readeflags_u32:
6581 case X86::BI__builtin_ia32_writeeflags_u32:
6582 return true;
6585 return false;
/// Perform x86/x86-64-specific semantic checking on a builtin call:
/// __builtin_cpu_supports/__builtin_cpu_is feature strings, 32-bit-only
/// builtins on 64-bit targets, rounding/SAE immediates, gather/scatter scale
/// immediates, AMX tile operands, and finally a generic range check on the
/// builtin's immediate operand.
/// \returns true on error (a diagnostic has been emitted), false otherwise.
bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
  const llvm::Triple &TT = TI.getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE make sure its valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate make sure its valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a tile arguments, make sure they are valid.
  if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // i = argument index of the immediate operand, [l, u] = its allowed
  // inclusive range.
  int i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_vec_ext_v2si:
  case X86::BI__builtin_ia32_vec_ext_v2di:
  case X86::BI__builtin_ia32_vextractf128_pd256:
  case X86::BI__builtin_ia32_vextractf128_ps256:
  case X86::BI__builtin_ia32_vextractf128_si256:
  case X86::BI__builtin_ia32_extract128i256:
  case X86::BI__builtin_ia32_extractf64x4_mask:
  case X86::BI__builtin_ia32_extracti64x4_mask:
  case X86::BI__builtin_ia32_extractf32x8_mask:
  case X86::BI__builtin_ia32_extracti32x8_mask:
  case X86::BI__builtin_ia32_extractf64x2_256_mask:
  case X86::BI__builtin_ia32_extracti64x2_256_mask:
  case X86::BI__builtin_ia32_extractf32x4_256_mask:
  case X86::BI__builtin_ia32_extracti32x4_256_mask:
    i = 1; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vec_set_v2di:
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
    i = 2; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vpermilpd:
  case X86::BI__builtin_ia32_vec_ext_v4hi:
  case X86::BI__builtin_ia32_vec_ext_v4si:
  case X86::BI__builtin_ia32_vec_ext_v4sf:
  case X86::BI__builtin_ia32_vec_ext_v4di:
  case X86::BI__builtin_ia32_extractf32x4_mask:
  case X86::BI__builtin_ia32_extracti32x4_mask:
  case X86::BI__builtin_ia32_extractf64x2_512_mask:
  case X86::BI__builtin_ia32_extracti64x2_512_mask:
    i = 1; l = 0; u = 3;
    break;
  case X86::BI_mm_prefetch:
  case X86::BI__builtin_ia32_vec_ext_v8hi:
  case X86::BI__builtin_ia32_vec_ext_v8si:
    i = 1; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_sha1rnds4:
  case X86::BI__builtin_ia32_blendpd:
  case X86::BI__builtin_ia32_shufpd:
  case X86::BI__builtin_ia32_vec_set_v4hi:
  case X86::BI__builtin_ia32_vec_set_v4si:
  case X86::BI__builtin_ia32_vec_set_v4di:
  case X86::BI__builtin_ia32_shuf_f32x4_256:
  case X86::BI__builtin_ia32_shuf_f64x2_256:
  case X86::BI__builtin_ia32_shuf_i32x4_256:
  case X86::BI__builtin_ia32_shuf_i64x2_256:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512:
  case X86::BI__builtin_ia32_insertf32x4:
  case X86::BI__builtin_ia32_inserti32x4:
    i = 2; l = 0; u = 3;
    break;
  case X86::BI__builtin_ia32_vpermil2pd:
  case X86::BI__builtin_ia32_vpermil2pd256:
  case X86::BI__builtin_ia32_vpermil2ps:
  case X86::BI__builtin_ia32_vpermil2ps256:
    i = 3; l = 0; u = 3;
    break;
  case X86::BI__builtin_ia32_cmpb128_mask:
  case X86::BI__builtin_ia32_cmpw128_mask:
  case X86::BI__builtin_ia32_cmpd128_mask:
  case X86::BI__builtin_ia32_cmpq128_mask:
  case X86::BI__builtin_ia32_cmpb256_mask:
  case X86::BI__builtin_ia32_cmpw256_mask:
  case X86::BI__builtin_ia32_cmpd256_mask:
  case X86::BI__builtin_ia32_cmpq256_mask:
  case X86::BI__builtin_ia32_cmpb512_mask:
  case X86::BI__builtin_ia32_cmpw512_mask:
  case X86::BI__builtin_ia32_cmpd512_mask:
  case X86::BI__builtin_ia32_cmpq512_mask:
  case X86::BI__builtin_ia32_ucmpb128_mask:
  case X86::BI__builtin_ia32_ucmpw128_mask:
  case X86::BI__builtin_ia32_ucmpd128_mask:
  case X86::BI__builtin_ia32_ucmpq128_mask:
  case X86::BI__builtin_ia32_ucmpb256_mask:
  case X86::BI__builtin_ia32_ucmpw256_mask:
  case X86::BI__builtin_ia32_ucmpd256_mask:
  case X86::BI__builtin_ia32_ucmpq256_mask:
  case X86::BI__builtin_ia32_ucmpb512_mask:
  case X86::BI__builtin_ia32_ucmpw512_mask:
  case X86::BI__builtin_ia32_ucmpd512_mask:
  case X86::BI__builtin_ia32_ucmpq512_mask:
  case X86::BI__builtin_ia32_vpcomub:
  case X86::BI__builtin_ia32_vpcomuw:
  case X86::BI__builtin_ia32_vpcomud:
  case X86::BI__builtin_ia32_vpcomuq:
  case X86::BI__builtin_ia32_vpcomb:
  case X86::BI__builtin_ia32_vpcomw:
  case X86::BI__builtin_ia32_vpcomd:
  case X86::BI__builtin_ia32_vpcomq:
  case X86::BI__builtin_ia32_vec_set_v8hi:
  case X86::BI__builtin_ia32_vec_set_v8si:
    i = 2; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_vpermilpd256:
  case X86::BI__builtin_ia32_roundps:
  case X86::BI__builtin_ia32_roundpd:
  case X86::BI__builtin_ia32_roundps256:
  case X86::BI__builtin_ia32_roundpd256:
  case X86::BI__builtin_ia32_getmantpd128_mask:
  case X86::BI__builtin_ia32_getmantpd256_mask:
  case X86::BI__builtin_ia32_getmantps128_mask:
  case X86::BI__builtin_ia32_getmantps256_mask:
  case X86::BI__builtin_ia32_getmantpd512_mask:
  case X86::BI__builtin_ia32_getmantps512_mask:
  case X86::BI__builtin_ia32_getmantph128_mask:
  case X86::BI__builtin_ia32_getmantph256_mask:
  case X86::BI__builtin_ia32_getmantph512_mask:
  case X86::BI__builtin_ia32_vec_ext_v16qi:
  case X86::BI__builtin_ia32_vec_ext_v16hi:
    i = 1; l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_pblendd128:
  case X86::BI__builtin_ia32_blendps:
  case X86::BI__builtin_ia32_blendpd256:
  case X86::BI__builtin_ia32_shufpd256:
  case X86::BI__builtin_ia32_roundss:
  case X86::BI__builtin_ia32_roundsd:
  case X86::BI__builtin_ia32_rangepd128_mask:
  case X86::BI__builtin_ia32_rangepd256_mask:
  case X86::BI__builtin_ia32_rangepd512_mask:
  case X86::BI__builtin_ia32_rangeps128_mask:
  case X86::BI__builtin_ia32_rangeps256_mask:
  case X86::BI__builtin_ia32_rangeps512_mask:
  case X86::BI__builtin_ia32_getmantsd_round_mask:
  case X86::BI__builtin_ia32_getmantss_round_mask:
  case X86::BI__builtin_ia32_getmantsh_round_mask:
  case X86::BI__builtin_ia32_vec_set_v16qi:
  case X86::BI__builtin_ia32_vec_set_v16hi:
    i = 2; l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_vec_ext_v32qi:
    i = 1; l = 0; u = 31;
    break;
  case X86::BI__builtin_ia32_cmpps:
  case X86::BI__builtin_ia32_cmpss:
  case X86::BI__builtin_ia32_cmppd:
  case X86::BI__builtin_ia32_cmpsd:
  case X86::BI__builtin_ia32_cmpps256:
  case X86::BI__builtin_ia32_cmppd256:
  case X86::BI__builtin_ia32_cmpps128_mask:
  case X86::BI__builtin_ia32_cmppd128_mask:
  case X86::BI__builtin_ia32_cmpps256_mask:
  case X86::BI__builtin_ia32_cmppd256_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmppd512_mask:
  case X86::BI__builtin_ia32_cmpsd_mask:
  case X86::BI__builtin_ia32_cmpss_mask:
  case X86::BI__builtin_ia32_vec_set_v32qi:
    i = 2; l = 0; u = 31;
    break;
  case X86::BI__builtin_ia32_permdf256:
  case X86::BI__builtin_ia32_permdi256:
  case X86::BI__builtin_ia32_permdf512:
  case X86::BI__builtin_ia32_permdi512:
  case X86::BI__builtin_ia32_vpermilps:
  case X86::BI__builtin_ia32_vpermilps256:
  case X86::BI__builtin_ia32_vpermilpd512:
  case X86::BI__builtin_ia32_vpermilps512:
  case X86::BI__builtin_ia32_pshufd:
  case X86::BI__builtin_ia32_pshufd256:
  case X86::BI__builtin_ia32_pshufd512:
  case X86::BI__builtin_ia32_pshufhw:
  case X86::BI__builtin_ia32_pshufhw256:
  case X86::BI__builtin_ia32_pshufhw512:
  case X86::BI__builtin_ia32_pshuflw:
  case X86::BI__builtin_ia32_pshuflw256:
  case X86::BI__builtin_ia32_pshuflw512:
  case X86::BI__builtin_ia32_vcvtps2ph:
  case X86::BI__builtin_ia32_vcvtps2ph_mask:
  case X86::BI__builtin_ia32_vcvtps2ph256:
  case X86::BI__builtin_ia32_vcvtps2ph256_mask:
  case X86::BI__builtin_ia32_vcvtps2ph512_mask:
  case X86::BI__builtin_ia32_rndscaleps_128_mask:
  case X86::BI__builtin_ia32_rndscalepd_128_mask:
  case X86::BI__builtin_ia32_rndscaleps_256_mask:
  case X86::BI__builtin_ia32_rndscalepd_256_mask:
  case X86::BI__builtin_ia32_rndscaleps_mask:
  case X86::BI__builtin_ia32_rndscalepd_mask:
  case X86::BI__builtin_ia32_rndscaleph_mask:
  case X86::BI__builtin_ia32_reducepd128_mask:
  case X86::BI__builtin_ia32_reducepd256_mask:
  case X86::BI__builtin_ia32_reducepd512_mask:
  case X86::BI__builtin_ia32_reduceps128_mask:
  case X86::BI__builtin_ia32_reduceps256_mask:
  case X86::BI__builtin_ia32_reduceps512_mask:
  case X86::BI__builtin_ia32_reduceph128_mask:
  case X86::BI__builtin_ia32_reduceph256_mask:
  case X86::BI__builtin_ia32_reduceph512_mask:
  case X86::BI__builtin_ia32_prold512:
  case X86::BI__builtin_ia32_prolq512:
  case X86::BI__builtin_ia32_prold128:
  case X86::BI__builtin_ia32_prold256:
  case X86::BI__builtin_ia32_prolq128:
  case X86::BI__builtin_ia32_prolq256:
  case X86::BI__builtin_ia32_prord512:
  case X86::BI__builtin_ia32_prorq512:
  case X86::BI__builtin_ia32_prord128:
  case X86::BI__builtin_ia32_prord256:
  case X86::BI__builtin_ia32_prorq128:
  case X86::BI__builtin_ia32_prorq256:
  case X86::BI__builtin_ia32_fpclasspd128_mask:
  case X86::BI__builtin_ia32_fpclasspd256_mask:
  case X86::BI__builtin_ia32_fpclassps128_mask:
  case X86::BI__builtin_ia32_fpclassps256_mask:
  case X86::BI__builtin_ia32_fpclassps512_mask:
  case X86::BI__builtin_ia32_fpclasspd512_mask:
  case X86::BI__builtin_ia32_fpclassph128_mask:
  case X86::BI__builtin_ia32_fpclassph256_mask:
  case X86::BI__builtin_ia32_fpclassph512_mask:
  case X86::BI__builtin_ia32_fpclasssd_mask:
  case X86::BI__builtin_ia32_fpclassss_mask:
  case X86::BI__builtin_ia32_fpclasssh_mask:
  case X86::BI__builtin_ia32_pslldqi128_byteshift:
  case X86::BI__builtin_ia32_pslldqi256_byteshift:
  case X86::BI__builtin_ia32_pslldqi512_byteshift:
  case X86::BI__builtin_ia32_psrldqi128_byteshift:
  case X86::BI__builtin_ia32_psrldqi256_byteshift:
  case X86::BI__builtin_ia32_psrldqi512_byteshift:
  case X86::BI__builtin_ia32_kshiftliqi:
  case X86::BI__builtin_ia32_kshiftlihi:
  case X86::BI__builtin_ia32_kshiftlisi:
  case X86::BI__builtin_ia32_kshiftlidi:
  case X86::BI__builtin_ia32_kshiftriqi:
  case X86::BI__builtin_ia32_kshiftrihi:
  case X86::BI__builtin_ia32_kshiftrisi:
  case X86::BI__builtin_ia32_kshiftridi:
    i = 1; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_vperm2f128_pd256:
  case X86::BI__builtin_ia32_vperm2f128_ps256:
  case X86::BI__builtin_ia32_vperm2f128_si256:
  case X86::BI__builtin_ia32_permti256:
  case X86::BI__builtin_ia32_pblendw128:
  case X86::BI__builtin_ia32_pblendw256:
  case X86::BI__builtin_ia32_blendps256:
  case X86::BI__builtin_ia32_pblendd256:
  case X86::BI__builtin_ia32_palignr128:
  case X86::BI__builtin_ia32_palignr256:
  case X86::BI__builtin_ia32_palignr512:
  case X86::BI__builtin_ia32_alignq512:
  case X86::BI__builtin_ia32_alignd512:
  case X86::BI__builtin_ia32_alignd128:
  case X86::BI__builtin_ia32_alignd256:
  case X86::BI__builtin_ia32_alignq128:
  case X86::BI__builtin_ia32_alignq256:
  case X86::BI__builtin_ia32_vcomisd:
  case X86::BI__builtin_ia32_vcomiss:
  case X86::BI__builtin_ia32_shuf_f32x4:
  case X86::BI__builtin_ia32_shuf_f64x2:
  case X86::BI__builtin_ia32_shuf_i32x4:
  case X86::BI__builtin_ia32_shuf_i64x2:
  case X86::BI__builtin_ia32_shufpd512:
  case X86::BI__builtin_ia32_shufps:
  case X86::BI__builtin_ia32_shufps256:
  case X86::BI__builtin_ia32_shufps512:
  case X86::BI__builtin_ia32_dbpsadbw128:
  case X86::BI__builtin_ia32_dbpsadbw256:
  case X86::BI__builtin_ia32_dbpsadbw512:
  case X86::BI__builtin_ia32_vpshldd128:
  case X86::BI__builtin_ia32_vpshldd256:
  case X86::BI__builtin_ia32_vpshldd512:
  case X86::BI__builtin_ia32_vpshldq128:
  case X86::BI__builtin_ia32_vpshldq256:
  case X86::BI__builtin_ia32_vpshldq512:
  case X86::BI__builtin_ia32_vpshldw128:
  case X86::BI__builtin_ia32_vpshldw256:
  case X86::BI__builtin_ia32_vpshldw512:
  case X86::BI__builtin_ia32_vpshrdd128:
  case X86::BI__builtin_ia32_vpshrdd256:
  case X86::BI__builtin_ia32_vpshrdd512:
  case X86::BI__builtin_ia32_vpshrdq128:
  case X86::BI__builtin_ia32_vpshrdq256:
  case X86::BI__builtin_ia32_vpshrdq512:
  case X86::BI__builtin_ia32_vpshrdw128:
  case X86::BI__builtin_ia32_vpshrdw256:
  case X86::BI__builtin_ia32_vpshrdw512:
    i = 2; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_fixupimmpd128_mask:
  case X86::BI__builtin_ia32_fixupimmpd128_maskz:
  case X86::BI__builtin_ia32_fixupimmpd256_mask:
  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
  case X86::BI__builtin_ia32_fixupimmps128_mask:
  case X86::BI__builtin_ia32_fixupimmps128_maskz:
  case X86::BI__builtin_ia32_fixupimmps256_mask:
  case X86::BI__builtin_ia32_fixupimmps256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq512_mask:
  case X86::BI__builtin_ia32_pternlogq512_maskz:
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
  case X86::BI__builtin_ia32_vsm3rnds2:
    i = 3; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    i = 4; l = 2; u = 3;
    break;
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
  case X86::BI__builtin_ia32_rndscalesh_round_mask:
  case X86::BI__builtin_ia32_reducesh_mask:
    i = 4; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_cmpccxadd32:
  case X86::BI__builtin_ia32_cmpccxadd64:
    i = 3; l = 0; u = 15;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have out-of-
  // range values. These need to code generate, but don't need to necessarily
  // make any sense. We use a warning that defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}
6972 /// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo
6973 /// parameter with the FormatAttr's correct format_idx and firstDataArg.
6974 /// Returns true when the format fits the function and the FormatStringInfo has
6975 /// been populated.
6976 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
6977 bool IsVariadic, FormatStringInfo *FSI) {
6978 if (Format->getFirstArg() == 0)
6979 FSI->ArgPassingKind = FAPK_VAList;
6980 else if (IsVariadic)
6981 FSI->ArgPassingKind = FAPK_Variadic;
6982 else
6983 FSI->ArgPassingKind = FAPK_Fixed;
6984 FSI->FormatIdx = Format->getFormatIdx() - 1;
6985 FSI->FirstDataArg =
6986 FSI->ArgPassingKind == FAPK_VAList ? 0 : Format->getFirstArg() - 1;
6988 // The way the format attribute works in GCC, the implicit this argument
6989 // of member functions is counted. However, it doesn't appear in our own
6990 // lists, so decrement format_idx in that case.
6991 if (IsCXXMember) {
6992 if(FSI->FormatIdx == 0)
6993 return false;
6994 --FSI->FormatIdx;
6995 if (FSI->FirstDataArg != 0)
6996 --FSI->FirstDataArg;
6998 return true;
7001 /// Checks if a the given expression evaluates to null.
7003 /// Returns true if the value evaluates to null.
7004 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
7005 // If the expression has non-null type, it doesn't evaluate to null.
7006 if (auto nullability = Expr->IgnoreImplicit()->getType()->getNullability()) {
7007 if (*nullability == NullabilityKind::NonNull)
7008 return false;
7011 // As a special case, transparent unions initialized with zero are
7012 // considered null for the purposes of the nonnull attribute.
7013 if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
7014 if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
7015 if (const CompoundLiteralExpr *CLE =
7016 dyn_cast<CompoundLiteralExpr>(Expr))
7017 if (const InitListExpr *ILE =
7018 dyn_cast<InitListExpr>(CLE->getInitializer()))
7019 Expr = ILE->getInit(0);
7022 bool Result;
7023 return (!Expr->isValueDependent() &&
7024 Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
7025 !Result);
7028 static void CheckNonNullArgument(Sema &S,
7029 const Expr *ArgExpr,
7030 SourceLocation CallSiteLoc) {
7031 if (CheckNonNullExpr(S, ArgExpr))
7032 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
7033 S.PDiag(diag::warn_null_arg)
7034 << ArgExpr->getSourceRange());
7037 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
7038 FormatStringInfo FSI;
7039 if ((GetFormatStringType(Format) == FST_NSString) &&
7040 getFormatStringInfo(Format, false, true, &FSI)) {
7041 Idx = FSI.FormatIdx;
7042 return true;
7044 return false;
7047 /// Diagnose use of %s directive in an NSString which is being passed
7048 /// as formatting string to formatting method.
7049 static void
7050 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
7051 const NamedDecl *FDecl,
7052 Expr **Args,
7053 unsigned NumArgs) {
7054 unsigned Idx = 0;
7055 bool Format = false;
7056 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
7057 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
7058 Idx = 2;
7059 Format = true;
7061 else
7062 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
7063 if (S.GetFormatNSStringIdx(I, Idx)) {
7064 Format = true;
7065 break;
7068 if (!Format || NumArgs <= Idx)
7069 return;
7070 const Expr *FormatExpr = Args[Idx];
7071 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
7072 FormatExpr = CSCE->getSubExpr();
7073 const StringLiteral *FormatString;
7074 if (const ObjCStringLiteral *OSL =
7075 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
7076 FormatString = OSL->getString();
7077 else
7078 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
7079 if (!FormatString)
7080 return;
7081 if (S.FormatStringHasSArg(FormatString)) {
7082 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
7083 << "%s" << 1 << 1;
7084 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
7085 << FDecl->getDeclName();
7089 /// Determine whether the given type has a non-null nullability annotation.
7090 static bool isNonNullType(QualType type) {
7091 if (auto nullability = type->getNullability())
7092 return *nullability == NullabilityKind::NonNull;
7094 return false;
7097 static void CheckNonNullArguments(Sema &S,
7098 const NamedDecl *FDecl,
7099 const FunctionProtoType *Proto,
7100 ArrayRef<const Expr *> Args,
7101 SourceLocation CallSiteLoc) {
7102 assert((FDecl || Proto) && "Need a function declaration or prototype");
7104 // Already checked by constant evaluator.
7105 if (S.isConstantEvaluatedContext())
7106 return;
7107 // Check the attributes attached to the method/function itself.
7108 llvm::SmallBitVector NonNullArgs;
7109 if (FDecl) {
7110 // Handle the nonnull attribute on the function/method declaration itself.
7111 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
7112 if (!NonNull->args_size()) {
7113 // Easy case: all pointer arguments are nonnull.
7114 for (const auto *Arg : Args)
7115 if (S.isValidPointerAttrType(Arg->getType()))
7116 CheckNonNullArgument(S, Arg, CallSiteLoc);
7117 return;
7120 for (const ParamIdx &Idx : NonNull->args()) {
7121 unsigned IdxAST = Idx.getASTIndex();
7122 if (IdxAST >= Args.size())
7123 continue;
7124 if (NonNullArgs.empty())
7125 NonNullArgs.resize(Args.size());
7126 NonNullArgs.set(IdxAST);
7131 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
7132 // Handle the nonnull attribute on the parameters of the
7133 // function/method.
7134 ArrayRef<ParmVarDecl*> parms;
7135 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
7136 parms = FD->parameters();
7137 else
7138 parms = cast<ObjCMethodDecl>(FDecl)->parameters();
7140 unsigned ParamIndex = 0;
7141 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
7142 I != E; ++I, ++ParamIndex) {
7143 const ParmVarDecl *PVD = *I;
7144 if (PVD->hasAttr<NonNullAttr>() || isNonNullType(PVD->getType())) {
7145 if (NonNullArgs.empty())
7146 NonNullArgs.resize(Args.size());
7148 NonNullArgs.set(ParamIndex);
7151 } else {
7152 // If we have a non-function, non-method declaration but no
7153 // function prototype, try to dig out the function prototype.
7154 if (!Proto) {
7155 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
7156 QualType type = VD->getType().getNonReferenceType();
7157 if (auto pointerType = type->getAs<PointerType>())
7158 type = pointerType->getPointeeType();
7159 else if (auto blockType = type->getAs<BlockPointerType>())
7160 type = blockType->getPointeeType();
7161 // FIXME: data member pointers?
7163 // Dig out the function prototype, if there is one.
7164 Proto = type->getAs<FunctionProtoType>();
7168 // Fill in non-null argument information from the nullability
7169 // information on the parameter types (if we have them).
7170 if (Proto) {
7171 unsigned Index = 0;
7172 for (auto paramType : Proto->getParamTypes()) {
7173 if (isNonNullType(paramType)) {
7174 if (NonNullArgs.empty())
7175 NonNullArgs.resize(Args.size());
7177 NonNullArgs.set(Index);
7180 ++Index;
7185 // Check for non-null arguments.
7186 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
7187 ArgIndex != ArgIndexEnd; ++ArgIndex) {
7188 if (NonNullArgs[ArgIndex])
7189 CheckNonNullArgument(S, Args[ArgIndex], Args[ArgIndex]->getExprLoc());
7193 // 16 byte ByVal alignment not due to a vector member is not honoured by XL
7194 // on AIX. Emit a warning here that users are generating binary incompatible
7195 // code to be safe.
7196 // Here we try to get information about the alignment of the struct member
7197 // from the struct passed to the caller function. We only warn when the struct
7198 // is passed byval, hence the series of checks and early returns if we are a not
7199 // passing a struct byval.
7200 void Sema::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) {
7201 const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens());
7202 if (!ICE)
7203 return;
7205 const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
7206 if (!DR)
7207 return;
7209 const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl());
7210 if (!PD || !PD->getType()->isRecordType())
7211 return;
7213 QualType ArgType = Arg->getType();
7214 for (const FieldDecl *FD :
7215 ArgType->castAs<RecordType>()->getDecl()->fields()) {
7216 if (const auto *AA = FD->getAttr<AlignedAttr>()) {
7217 CharUnits Alignment =
7218 Context.toCharUnitsFromBits(AA->getAlignment(Context));
7219 if (Alignment.getQuantity() == 16) {
7220 Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD;
7221 Diag(Loc, diag::note_misaligned_member_used_here) << PD;
7227 /// Warn if a pointer or reference argument passed to a function points to an
7228 /// object that is less aligned than the parameter. This can happen when
7229 /// creating a typedef with a lower alignment than the original type and then
7230 /// calling functions defined in terms of the original type.
7231 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
7232 StringRef ParamName, QualType ArgTy,
7233 QualType ParamTy) {
7235 // If a function accepts a pointer or reference type
7236 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
7237 return;
7239 // If the parameter is a pointer type, get the pointee type for the
7240 // argument too. If the parameter is a reference type, don't try to get
7241 // the pointee type for the argument.
7242 if (ParamTy->isPointerType())
7243 ArgTy = ArgTy->getPointeeType();
7245 // Remove reference or pointer
7246 ParamTy = ParamTy->getPointeeType();
7248 // Find expected alignment, and the actual alignment of the passed object.
7249 // getTypeAlignInChars requires complete types
7250 if (ArgTy.isNull() || ParamTy->isDependentType() ||
7251 ParamTy->isIncompleteType() || ArgTy->isIncompleteType() ||
7252 ParamTy->isUndeducedType() || ArgTy->isUndeducedType())
7253 return;
7255 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
7256 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);
7258 // If the argument is less aligned than the parameter, there is a
7259 // potential alignment issue.
7260 if (ArgAlign < ParamAlign)
7261 Diag(Loc, diag::warn_param_mismatched_alignment)
7262 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
7263 << ParamName << (FDecl != nullptr) << FDecl;
7266 /// Handles the checks for format strings, non-POD arguments to vararg
7267 /// functions, NULL arguments passed to non-NULL parameters, diagnose_if
7268 /// attributes and AArch64 SME attributes.
7269 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
7270 const Expr *ThisArg, ArrayRef<const Expr *> Args,
7271 bool IsMemberFunction, SourceLocation Loc,
7272 SourceRange Range, VariadicCallType CallType) {
7273 // FIXME: We should check as much as we can in the template definition.
7274 if (CurContext->isDependentContext())
7275 return;
7277 // Printf and scanf checking.
7278 llvm::SmallBitVector CheckedVarArgs;
7279 if (FDecl) {
7280 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
7281 // Only create vector if there are format attributes.
7282 CheckedVarArgs.resize(Args.size());
7284 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
7285 CheckedVarArgs);
7289 // Refuse POD arguments that weren't caught by the format string
7290 // checks above.
7291 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
7292 if (CallType != VariadicDoesNotApply &&
7293 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
7294 unsigned NumParams = Proto ? Proto->getNumParams()
7295 : FDecl && isa<FunctionDecl>(FDecl)
7296 ? cast<FunctionDecl>(FDecl)->getNumParams()
7297 : FDecl && isa<ObjCMethodDecl>(FDecl)
7298 ? cast<ObjCMethodDecl>(FDecl)->param_size()
7299 : 0;
7301 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
7302 // Args[ArgIdx] can be null in malformed code.
7303 if (const Expr *Arg = Args[ArgIdx]) {
7304 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
7305 checkVariadicArgument(Arg, CallType);
7310 if (FDecl || Proto) {
7311 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);
7313 // Type safety checking.
7314 if (FDecl) {
7315 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
7316 CheckArgumentWithTypeTag(I, Args, Loc);
7320 // Check that passed arguments match the alignment of original arguments.
7321 // Try to get the missing prototype from the declaration.
7322 if (!Proto && FDecl) {
7323 const auto *FT = FDecl->getFunctionType();
7324 if (isa_and_nonnull<FunctionProtoType>(FT))
7325 Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
7327 if (Proto) {
7328 // For variadic functions, we may have more args than parameters.
7329 // For some K&R functions, we may have less args than parameters.
7330 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
7331 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
7332 // Args[ArgIdx] can be null in malformed code.
7333 if (const Expr *Arg = Args[ArgIdx]) {
7334 if (Arg->containsErrors())
7335 continue;
7337 if (Context.getTargetInfo().getTriple().isOSAIX() && FDecl && Arg &&
7338 FDecl->hasLinkage() &&
7339 FDecl->getFormalLinkage() != InternalLinkage &&
7340 CallType == VariadicDoesNotApply)
7341 checkAIXMemberAlignment((Arg->getExprLoc()), Arg);
7343 QualType ParamTy = Proto->getParamType(ArgIdx);
7344 QualType ArgTy = Arg->getType();
7345 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
7346 ArgTy, ParamTy);
7350 // If the callee has an AArch64 SME attribute to indicate that it is an
7351 // __arm_streaming function, then the caller requires SME to be available.
7352 FunctionProtoType::ExtProtoInfo ExtInfo = Proto->getExtProtoInfo();
7353 if (ExtInfo.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask) {
7354 if (auto *CallerFD = dyn_cast<FunctionDecl>(CurContext)) {
7355 llvm::StringMap<bool> CallerFeatureMap;
7356 Context.getFunctionFeatureMap(CallerFeatureMap, CallerFD);
7357 if (!CallerFeatureMap.contains("sme"))
7358 Diag(Loc, diag::err_sme_call_in_non_sme_target);
7359 } else if (!Context.getTargetInfo().hasFeature("sme")) {
7360 Diag(Loc, diag::err_sme_call_in_non_sme_target);
7364 // If the callee uses AArch64 SME ZA state but the caller doesn't define
7365 // any, then this is an error.
7366 if (ExtInfo.AArch64SMEAttributes & FunctionType::SME_PStateZASharedMask) {
7367 bool CallerHasZAState = false;
7368 if (const auto *CallerFD = dyn_cast<FunctionDecl>(CurContext)) {
7369 if (CallerFD->hasAttr<ArmNewZAAttr>())
7370 CallerHasZAState = true;
7371 else if (const auto *FPT = CallerFD->getType()->getAs<FunctionProtoType>())
7372 CallerHasZAState = FPT->getExtProtoInfo().AArch64SMEAttributes &
7373 FunctionType::SME_PStateZASharedMask;
7376 if (!CallerHasZAState)
7377 Diag(Loc, diag::err_sme_za_call_no_za_state);
7381 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
7382 auto *AA = FDecl->getAttr<AllocAlignAttr>();
7383 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
7384 if (!Arg->isValueDependent()) {
7385 Expr::EvalResult Align;
7386 if (Arg->EvaluateAsInt(Align, Context)) {
7387 const llvm::APSInt &I = Align.Val.getInt();
7388 if (!I.isPowerOf2())
7389 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
7390 << Arg->getSourceRange();
7392 if (I > Sema::MaximumAlignment)
7393 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
7394 << Arg->getSourceRange() << Sema::MaximumAlignment;
7399 if (FD)
7400 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
7403 /// CheckConstructorCall - Check a constructor call for correctness and safety
7404 /// properties not enforced by the C type system.
7405 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
7406 ArrayRef<const Expr *> Args,
7407 const FunctionProtoType *Proto,
7408 SourceLocation Loc) {
7409 VariadicCallType CallType =
7410 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
7412 auto *Ctor = cast<CXXConstructorDecl>(FDecl);
7413 CheckArgAlignment(
7414 Loc, FDecl, "'this'", Context.getPointerType(ThisType),
7415 Context.getPointerType(Ctor->getFunctionObjectParameterType()));
7417 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
7418 Loc, SourceRange(), CallType);
7421 /// CheckFunctionCall - Check a direct function call for various correctness
7422 /// and safety properties not strictly enforced by the C type system.
7423 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
7424 const FunctionProtoType *Proto) {
7425 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
7426 isa<CXXMethodDecl>(FDecl);
7427 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
7428 IsMemberOperatorCall;
7429 VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
7430 TheCall->getCallee());
7431 Expr** Args = TheCall->getArgs();
7432 unsigned NumArgs = TheCall->getNumArgs();
7434 Expr *ImplicitThis = nullptr;
7435 if (IsMemberOperatorCall && !FDecl->isStatic() &&
7436 !FDecl->hasCXXExplicitFunctionObjectParameter()) {
7437 // If this is a call to a non-static member operator, hide the first
7438 // argument from checkCall.
7439 // FIXME: Our choice of AST representation here is less than ideal.
7440 ImplicitThis = Args[0];
7441 ++Args;
7442 --NumArgs;
7443 } else if (IsMemberFunction && !FDecl->isStatic() &&
7444 !FDecl->hasCXXExplicitFunctionObjectParameter())
7445 ImplicitThis =
7446 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();
7448 if (ImplicitThis) {
7449 // ImplicitThis may or may not be a pointer, depending on whether . or -> is
7450 // used.
7451 QualType ThisType = ImplicitThis->getType();
7452 if (!ThisType->isPointerType()) {
7453 assert(!ThisType->isReferenceType());
7454 ThisType = Context.getPointerType(ThisType);
7457 QualType ThisTypeFromDecl = Context.getPointerType(
7458 cast<CXXMethodDecl>(FDecl)->getFunctionObjectParameterType());
7460 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
7461 ThisTypeFromDecl);
7464 checkCall(FDecl, Proto, ImplicitThis, llvm::ArrayRef(Args, NumArgs),
7465 IsMemberFunction, TheCall->getRParenLoc(),
7466 TheCall->getCallee()->getSourceRange(), CallType);
7468 IdentifierInfo *FnInfo = FDecl->getIdentifier();
7469 // None of the checks below are needed for functions that don't have
7470 // simple names (e.g., C++ conversion functions).
7471 if (!FnInfo)
7472 return false;
7474 // Enforce TCB except for builtin calls, which are always allowed.
7475 if (FDecl->getBuiltinID() == 0)
7476 CheckTCBEnforcement(TheCall->getExprLoc(), FDecl);
7478 CheckAbsoluteValueFunction(TheCall, FDecl);
7479 CheckMaxUnsignedZero(TheCall, FDecl);
7481 if (getLangOpts().ObjC)
7482 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);
7484 unsigned CMId = FDecl->getMemoryFunctionKind();
7486 // Handle memory setting and copying functions.
7487 switch (CMId) {
7488 case 0:
7489 return false;
7490 case Builtin::BIstrlcpy: // fallthrough
7491 case Builtin::BIstrlcat:
7492 CheckStrlcpycatArguments(TheCall, FnInfo);
7493 break;
7494 case Builtin::BIstrncat:
7495 CheckStrncatArguments(TheCall, FnInfo);
7496 break;
7497 case Builtin::BIfree:
7498 CheckFreeArguments(TheCall);
7499 break;
7500 default:
7501 CheckMemaccessArguments(TheCall, CMId, FnInfo);
7504 return false;
7507 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
7508 ArrayRef<const Expr *> Args) {
7509 VariadicCallType CallType =
7510 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;
7512 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
7513 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
7514 CallType);
7516 CheckTCBEnforcement(lbrac, Method);
7518 return false;
7521 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
7522 const FunctionProtoType *Proto) {
7523 QualType Ty;
7524 if (const auto *V = dyn_cast<VarDecl>(NDecl))
7525 Ty = V->getType().getNonReferenceType();
7526 else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
7527 Ty = F->getType().getNonReferenceType();
7528 else
7529 return false;
7531 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
7532 !Ty->isFunctionProtoType())
7533 return false;
7535 VariadicCallType CallType;
7536 if (!Proto || !Proto->isVariadic()) {
7537 CallType = VariadicDoesNotApply;
7538 } else if (Ty->isBlockPointerType()) {
7539 CallType = VariadicBlock;
7540 } else { // Ty->isFunctionPointerType()
7541 CallType = VariadicFunction;
7544 checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
7545 llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
7546 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
7547 TheCall->getCallee()->getSourceRange(), CallType);
7549 return false;
7552 /// Checks function calls when a FunctionDecl or a NamedDecl is not available,
7553 /// such as function pointers returned from functions.
7554 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
7555 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
7556 TheCall->getCallee());
7557 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
7558 llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
7559 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
7560 TheCall->getCallee()->getSourceRange(), CallType);
7562 return false;
7565 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
7566 if (!llvm::isValidAtomicOrderingCABI(Ordering))
7567 return false;
7569 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
7570 switch (Op) {
7571 case AtomicExpr::AO__c11_atomic_init:
7572 case AtomicExpr::AO__opencl_atomic_init:
7573 llvm_unreachable("There is no ordering argument for an init");
7575 case AtomicExpr::AO__c11_atomic_load:
7576 case AtomicExpr::AO__opencl_atomic_load:
7577 case AtomicExpr::AO__hip_atomic_load:
7578 case AtomicExpr::AO__atomic_load_n:
7579 case AtomicExpr::AO__atomic_load:
7580 return OrderingCABI != llvm::AtomicOrderingCABI::release &&
7581 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
7583 case AtomicExpr::AO__c11_atomic_store:
7584 case AtomicExpr::AO__opencl_atomic_store:
7585 case AtomicExpr::AO__hip_atomic_store:
7586 case AtomicExpr::AO__atomic_store:
7587 case AtomicExpr::AO__atomic_store_n:
7588 return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
7589 OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
7590 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
7592 default:
7593 return true;
7597 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
7598 AtomicExpr::AtomicOp Op) {
7599 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
7600 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
7601 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
7602 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()},
7603 DRE->getSourceRange(), TheCall->getRParenLoc(), Args,
7604 Op);
7607 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
7608 SourceLocation RParenLoc, MultiExprArg Args,
7609 AtomicExpr::AtomicOp Op,
7610 AtomicArgumentOrder ArgOrder) {
7611 // All the non-OpenCL operations take one of the following forms.
7612 // The OpenCL operations take the __c11 forms with one extra argument for
7613 // synchronization scope.
7614 enum {
7615 // C __c11_atomic_init(A *, C)
7616 Init,
7618 // C __c11_atomic_load(A *, int)
7619 Load,
7621 // void __atomic_load(A *, CP, int)
7622 LoadCopy,
7624 // void __atomic_store(A *, CP, int)
7625 Copy,
7627 // C __c11_atomic_add(A *, M, int)
7628 Arithmetic,
7630 // C __atomic_exchange_n(A *, CP, int)
7631 Xchg,
7633 // void __atomic_exchange(A *, C *, CP, int)
7634 GNUXchg,
7636 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
7637 C11CmpXchg,
7639 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
7640 GNUCmpXchg
7641 } Form = Init;
7643 const unsigned NumForm = GNUCmpXchg + 1;
7644 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
7645 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
7646 // where:
7647 // C is an appropriate type,
7648 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
7649 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
7650 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
7651 // the int parameters are for orderings.
7653 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
7654 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
7655 "need to update code for modified forms");
7656 static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
7657 AtomicExpr::AO__c11_atomic_fetch_min + 1 ==
7658 AtomicExpr::AO__atomic_load,
7659 "need to update code for modified C11 atomics");
7660 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
7661 Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
7662 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load &&
7663 Op <= AtomicExpr::AO__hip_atomic_fetch_max;
7664 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
7665 Op <= AtomicExpr::AO__c11_atomic_fetch_min) ||
7666 IsOpenCL;
7667 bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
7668 Op == AtomicExpr::AO__atomic_store_n ||
7669 Op == AtomicExpr::AO__atomic_exchange_n ||
7670 Op == AtomicExpr::AO__atomic_compare_exchange_n;
7671 // Bit mask for extra allowed value types other than integers for atomic
7672 // arithmetic operations. Add/sub allow pointer and floating point. Min/max
7673 // allow floating point.
7674 enum ArithOpExtraValueType {
7675 AOEVT_None = 0,
7676 AOEVT_Pointer = 1,
7677 AOEVT_FP = 2,
7679 unsigned ArithAllows = AOEVT_None;
7681 switch (Op) {
7682 case AtomicExpr::AO__c11_atomic_init:
7683 case AtomicExpr::AO__opencl_atomic_init:
7684 Form = Init;
7685 break;
7687 case AtomicExpr::AO__c11_atomic_load:
7688 case AtomicExpr::AO__opencl_atomic_load:
7689 case AtomicExpr::AO__hip_atomic_load:
7690 case AtomicExpr::AO__atomic_load_n:
7691 Form = Load;
7692 break;
7694 case AtomicExpr::AO__atomic_load:
7695 Form = LoadCopy;
7696 break;
7698 case AtomicExpr::AO__c11_atomic_store:
7699 case AtomicExpr::AO__opencl_atomic_store:
7700 case AtomicExpr::AO__hip_atomic_store:
7701 case AtomicExpr::AO__atomic_store:
7702 case AtomicExpr::AO__atomic_store_n:
7703 Form = Copy;
7704 break;
7705 case AtomicExpr::AO__atomic_fetch_add:
7706 case AtomicExpr::AO__atomic_fetch_sub:
7707 case AtomicExpr::AO__atomic_add_fetch:
7708 case AtomicExpr::AO__atomic_sub_fetch:
7709 case AtomicExpr::AO__c11_atomic_fetch_add:
7710 case AtomicExpr::AO__c11_atomic_fetch_sub:
7711 case AtomicExpr::AO__opencl_atomic_fetch_add:
7712 case AtomicExpr::AO__opencl_atomic_fetch_sub:
7713 case AtomicExpr::AO__hip_atomic_fetch_add:
7714 case AtomicExpr::AO__hip_atomic_fetch_sub:
7715 ArithAllows = AOEVT_Pointer | AOEVT_FP;
7716 Form = Arithmetic;
7717 break;
7718 case AtomicExpr::AO__atomic_fetch_max:
7719 case AtomicExpr::AO__atomic_fetch_min:
7720 case AtomicExpr::AO__atomic_max_fetch:
7721 case AtomicExpr::AO__atomic_min_fetch:
7722 case AtomicExpr::AO__c11_atomic_fetch_max:
7723 case AtomicExpr::AO__c11_atomic_fetch_min:
7724 case AtomicExpr::AO__opencl_atomic_fetch_max:
7725 case AtomicExpr::AO__opencl_atomic_fetch_min:
7726 case AtomicExpr::AO__hip_atomic_fetch_max:
7727 case AtomicExpr::AO__hip_atomic_fetch_min:
7728 ArithAllows = AOEVT_FP;
7729 Form = Arithmetic;
7730 break;
7731 case AtomicExpr::AO__c11_atomic_fetch_and:
7732 case AtomicExpr::AO__c11_atomic_fetch_or:
7733 case AtomicExpr::AO__c11_atomic_fetch_xor:
7734 case AtomicExpr::AO__hip_atomic_fetch_and:
7735 case AtomicExpr::AO__hip_atomic_fetch_or:
7736 case AtomicExpr::AO__hip_atomic_fetch_xor:
7737 case AtomicExpr::AO__c11_atomic_fetch_nand:
7738 case AtomicExpr::AO__opencl_atomic_fetch_and:
7739 case AtomicExpr::AO__opencl_atomic_fetch_or:
7740 case AtomicExpr::AO__opencl_atomic_fetch_xor:
7741 case AtomicExpr::AO__atomic_fetch_and:
7742 case AtomicExpr::AO__atomic_fetch_or:
7743 case AtomicExpr::AO__atomic_fetch_xor:
7744 case AtomicExpr::AO__atomic_fetch_nand:
7745 case AtomicExpr::AO__atomic_and_fetch:
7746 case AtomicExpr::AO__atomic_or_fetch:
7747 case AtomicExpr::AO__atomic_xor_fetch:
7748 case AtomicExpr::AO__atomic_nand_fetch:
7749 Form = Arithmetic;
7750 break;
7752 case AtomicExpr::AO__c11_atomic_exchange:
7753 case AtomicExpr::AO__hip_atomic_exchange:
7754 case AtomicExpr::AO__opencl_atomic_exchange:
7755 case AtomicExpr::AO__atomic_exchange_n:
7756 Form = Xchg;
7757 break;
7759 case AtomicExpr::AO__atomic_exchange:
7760 Form = GNUXchg;
7761 break;
7763 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
7764 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
7765 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
7766 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
7767 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
7768 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
7769 Form = C11CmpXchg;
7770 break;
7772 case AtomicExpr::AO__atomic_compare_exchange:
7773 case AtomicExpr::AO__atomic_compare_exchange_n:
7774 Form = GNUCmpXchg;
7775 break;
7778 unsigned AdjustedNumArgs = NumArgs[Form];
7779 if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init)
7780 ++AdjustedNumArgs;
7781 // Check we have the right number of arguments.
7782 if (Args.size() < AdjustedNumArgs) {
7783 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args)
7784 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
7785 << /*is non object*/ 0 << ExprRange;
7786 return ExprError();
7787 } else if (Args.size() > AdjustedNumArgs) {
7788 Diag(Args[AdjustedNumArgs]->getBeginLoc(),
7789 diag::err_typecheck_call_too_many_args)
7790 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
7791 << /*is non object*/ 0 << ExprRange;
7792 return ExprError();
7795 // Inspect the first argument of the atomic operation.
7796 Expr *Ptr = Args[0];
7797 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr);
7798 if (ConvertedPtr.isInvalid())
7799 return ExprError();
7801 Ptr = ConvertedPtr.get();
7802 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
7803 if (!pointerType) {
7804 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
7805 << Ptr->getType() << Ptr->getSourceRange();
7806 return ExprError();
7809 // For a __c11 builtin, this should be a pointer to an _Atomic type.
7810 QualType AtomTy = pointerType->getPointeeType(); // 'A'
7811 QualType ValType = AtomTy; // 'C'
7812 if (IsC11) {
7813 if (!AtomTy->isAtomicType()) {
7814 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic)
7815 << Ptr->getType() << Ptr->getSourceRange();
7816 return ExprError();
7818 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
7819 AtomTy.getAddressSpace() == LangAS::opencl_constant) {
7820 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic)
7821 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
7822 << Ptr->getSourceRange();
7823 return ExprError();
7825 ValType = AtomTy->castAs<AtomicType>()->getValueType();
7826 } else if (Form != Load && Form != LoadCopy) {
7827 if (ValType.isConstQualified()) {
7828 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer)
7829 << Ptr->getType() << Ptr->getSourceRange();
7830 return ExprError();
7834 // For an arithmetic operation, the implied arithmetic must be well-formed.
7835 if (Form == Arithmetic) {
7836 // GCC does not enforce these rules for GNU atomics, but we do to help catch
7837 // trivial type errors.
7838 auto IsAllowedValueType = [&](QualType ValType,
7839 unsigned AllowedType) -> bool {
7840 if (ValType->isIntegerType())
7841 return true;
7842 if (ValType->isPointerType())
7843 return AllowedType & AOEVT_Pointer;
7844 if (!(ValType->isFloatingType() && (AllowedType & AOEVT_FP)))
7845 return false;
7846 // LLVM Parser does not allow atomicrmw with x86_fp80 type.
7847 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) &&
7848 &Context.getTargetInfo().getLongDoubleFormat() ==
7849 &llvm::APFloat::x87DoubleExtended())
7850 return false;
7851 return true;
7853 if (!IsAllowedValueType(ValType, ArithAllows)) {
7854 auto DID = ArithAllows & AOEVT_FP
7855 ? (ArithAllows & AOEVT_Pointer
7856 ? diag::err_atomic_op_needs_atomic_int_ptr_or_fp
7857 : diag::err_atomic_op_needs_atomic_int_or_fp)
7858 : diag::err_atomic_op_needs_atomic_int;
7859 Diag(ExprRange.getBegin(), DID)
7860 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
7861 return ExprError();
7863 if (IsC11 && ValType->isPointerType() &&
7864 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(),
7865 diag::err_incomplete_type)) {
7866 return ExprError();
7868 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
7869 // For __atomic_*_n operations, the value type must be a scalar integral or
7870 // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
7871 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr)
7872 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
7873 return ExprError();
7876 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
7877 !AtomTy->isScalarType()) {
7878 // For GNU atomics, require a trivially-copyable type. This is not part of
7879 // the GNU atomics specification but we enforce it for consistency with
7880 // other atomics which generally all require a trivially-copyable type. This
7881 // is because atomics just copy bits.
7882 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
7883 << Ptr->getType() << Ptr->getSourceRange();
7884 return ExprError();
7887 switch (ValType.getObjCLifetime()) {
7888 case Qualifiers::OCL_None:
7889 case Qualifiers::OCL_ExplicitNone:
7890 // okay
7891 break;
7893 case Qualifiers::OCL_Weak:
7894 case Qualifiers::OCL_Strong:
7895 case Qualifiers::OCL_Autoreleasing:
7896 // FIXME: Can this happen? By this point, ValType should be known
7897 // to be trivially copyable.
7898 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership)
7899 << ValType << Ptr->getSourceRange();
7900 return ExprError();
7903 // All atomic operations have an overload which takes a pointer to a volatile
7904 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
7905 // into the result or the other operands. Similarly atomic_load takes a
7906 // pointer to a const 'A'.
7907 ValType.removeLocalVolatile();
7908 ValType.removeLocalConst();
7909 QualType ResultType = ValType;
7910 if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
7911 Form == Init)
7912 ResultType = Context.VoidTy;
7913 else if (Form == C11CmpXchg || Form == GNUCmpXchg)
7914 ResultType = Context.BoolTy;
7916 // The type of a parameter passed 'by value'. In the GNU atomics, such
7917 // arguments are actually passed as pointers.
7918 QualType ByValType = ValType; // 'CP'
7919 bool IsPassedByAddress = false;
7920 if (!IsC11 && !IsHIP && !IsN) {
7921 ByValType = Ptr->getType();
7922 IsPassedByAddress = true;
7925 SmallVector<Expr *, 5> APIOrderedArgs;
7926 if (ArgOrder == Sema::AtomicArgumentOrder::AST) {
7927 APIOrderedArgs.push_back(Args[0]);
7928 switch (Form) {
7929 case Init:
7930 case Load:
7931 APIOrderedArgs.push_back(Args[1]); // Val1/Order
7932 break;
7933 case LoadCopy:
7934 case Copy:
7935 case Arithmetic:
7936 case Xchg:
7937 APIOrderedArgs.push_back(Args[2]); // Val1
7938 APIOrderedArgs.push_back(Args[1]); // Order
7939 break;
7940 case GNUXchg:
7941 APIOrderedArgs.push_back(Args[2]); // Val1
7942 APIOrderedArgs.push_back(Args[3]); // Val2
7943 APIOrderedArgs.push_back(Args[1]); // Order
7944 break;
7945 case C11CmpXchg:
7946 APIOrderedArgs.push_back(Args[2]); // Val1
7947 APIOrderedArgs.push_back(Args[4]); // Val2
7948 APIOrderedArgs.push_back(Args[1]); // Order
7949 APIOrderedArgs.push_back(Args[3]); // OrderFail
7950 break;
7951 case GNUCmpXchg:
7952 APIOrderedArgs.push_back(Args[2]); // Val1
7953 APIOrderedArgs.push_back(Args[4]); // Val2
7954 APIOrderedArgs.push_back(Args[5]); // Weak
7955 APIOrderedArgs.push_back(Args[1]); // Order
7956 APIOrderedArgs.push_back(Args[3]); // OrderFail
7957 break;
7959 } else
7960 APIOrderedArgs.append(Args.begin(), Args.end());
7962 // The first argument's non-CV pointer type is used to deduce the type of
7963 // subsequent arguments, except for:
7964 // - weak flag (always converted to bool)
7965 // - memory order (always converted to int)
7966 // - scope (always converted to int)
7967 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) {
7968 QualType Ty;
7969 if (i < NumVals[Form] + 1) {
7970 switch (i) {
7971 case 0:
7972 // The first argument is always a pointer. It has a fixed type.
7973 // It is always dereferenced, a nullptr is undefined.
7974 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
7975 // Nothing else to do: we already know all we want about this pointer.
7976 continue;
7977 case 1:
7978 // The second argument is the non-atomic operand. For arithmetic, this
7979 // is always passed by value, and for a compare_exchange it is always
7980 // passed by address. For the rest, GNU uses by-address and C11 uses
7981 // by-value.
7982 assert(Form != Load);
7983 if (Form == Arithmetic && ValType->isPointerType())
7984 Ty = Context.getPointerDiffType();
7985 else if (Form == Init || Form == Arithmetic)
7986 Ty = ValType;
7987 else if (Form == Copy || Form == Xchg) {
7988 if (IsPassedByAddress) {
7989 // The value pointer is always dereferenced, a nullptr is undefined.
7990 CheckNonNullArgument(*this, APIOrderedArgs[i],
7991 ExprRange.getBegin());
7993 Ty = ByValType;
7994 } else {
7995 Expr *ValArg = APIOrderedArgs[i];
7996 // The value pointer is always dereferenced, a nullptr is undefined.
7997 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin());
7998 LangAS AS = LangAS::Default;
7999 // Keep address space of non-atomic pointer type.
8000 if (const PointerType *PtrTy =
8001 ValArg->getType()->getAs<PointerType>()) {
8002 AS = PtrTy->getPointeeType().getAddressSpace();
8004 Ty = Context.getPointerType(
8005 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
8007 break;
8008 case 2:
8009 // The third argument to compare_exchange / GNU exchange is the desired
8010 // value, either by-value (for the C11 and *_n variant) or as a pointer.
8011 if (IsPassedByAddress)
8012 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
8013 Ty = ByValType;
8014 break;
8015 case 3:
8016 // The fourth argument to GNU compare_exchange is a 'weak' flag.
8017 Ty = Context.BoolTy;
8018 break;
8020 } else {
8021 // The order(s) and scope are always converted to int.
8022 Ty = Context.IntTy;
8025 InitializedEntity Entity =
8026 InitializedEntity::InitializeParameter(Context, Ty, false);
8027 ExprResult Arg = APIOrderedArgs[i];
8028 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
8029 if (Arg.isInvalid())
8030 return true;
8031 APIOrderedArgs[i] = Arg.get();
8034 // Permute the arguments into a 'consistent' order.
8035 SmallVector<Expr*, 5> SubExprs;
8036 SubExprs.push_back(Ptr);
8037 switch (Form) {
8038 case Init:
8039 // Note, AtomicExpr::getVal1() has a special case for this atomic.
8040 SubExprs.push_back(APIOrderedArgs[1]); // Val1
8041 break;
8042 case Load:
8043 SubExprs.push_back(APIOrderedArgs[1]); // Order
8044 break;
8045 case LoadCopy:
8046 case Copy:
8047 case Arithmetic:
8048 case Xchg:
8049 SubExprs.push_back(APIOrderedArgs[2]); // Order
8050 SubExprs.push_back(APIOrderedArgs[1]); // Val1
8051 break;
8052 case GNUXchg:
8053 // Note, AtomicExpr::getVal2() has a special case for this atomic.
8054 SubExprs.push_back(APIOrderedArgs[3]); // Order
8055 SubExprs.push_back(APIOrderedArgs[1]); // Val1
8056 SubExprs.push_back(APIOrderedArgs[2]); // Val2
8057 break;
8058 case C11CmpXchg:
8059 SubExprs.push_back(APIOrderedArgs[3]); // Order
8060 SubExprs.push_back(APIOrderedArgs[1]); // Val1
8061 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail
8062 SubExprs.push_back(APIOrderedArgs[2]); // Val2
8063 break;
8064 case GNUCmpXchg:
8065 SubExprs.push_back(APIOrderedArgs[4]); // Order
8066 SubExprs.push_back(APIOrderedArgs[1]); // Val1
8067 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail
8068 SubExprs.push_back(APIOrderedArgs[2]); // Val2
8069 SubExprs.push_back(APIOrderedArgs[3]); // Weak
8070 break;
8073 if (SubExprs.size() >= 2 && Form != Init) {
8074 if (std::optional<llvm::APSInt> Result =
8075 SubExprs[1]->getIntegerConstantExpr(Context))
8076 if (!isValidOrderingForOp(Result->getSExtValue(), Op))
8077 Diag(SubExprs[1]->getBeginLoc(),
8078 diag::warn_atomic_op_has_invalid_memory_order)
8079 << SubExprs[1]->getSourceRange();
8082 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
8083 auto *Scope = Args[Args.size() - 1];
8084 if (std::optional<llvm::APSInt> Result =
8085 Scope->getIntegerConstantExpr(Context)) {
8086 if (!ScopeModel->isValid(Result->getZExtValue()))
8087 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
8088 << Scope->getSourceRange();
8090 SubExprs.push_back(Scope);
8093 AtomicExpr *AE = new (Context)
8094 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc);
8096 if ((Op == AtomicExpr::AO__c11_atomic_load ||
8097 Op == AtomicExpr::AO__c11_atomic_store ||
8098 Op == AtomicExpr::AO__opencl_atomic_load ||
8099 Op == AtomicExpr::AO__hip_atomic_load ||
8100 Op == AtomicExpr::AO__opencl_atomic_store ||
8101 Op == AtomicExpr::AO__hip_atomic_store) &&
8102 Context.AtomicUsesUnsupportedLibcall(AE))
8103 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
8104 << ((Op == AtomicExpr::AO__c11_atomic_load ||
8105 Op == AtomicExpr::AO__opencl_atomic_load ||
8106 Op == AtomicExpr::AO__hip_atomic_load)
8108 : 1);
8110 if (ValType->isBitIntType()) {
8111 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit);
8112 return ExprError();
8115 return AE;
8118 /// checkBuiltinArgument - Given a call to a builtin function, perform
8119 /// normal type-checking on the given argument, updating the call in
8120 /// place. This is useful when a builtin function requires custom
8121 /// type-checking for some of its arguments but not necessarily all of
8122 /// them.
8124 /// Returns true on error.
8125 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
8126 FunctionDecl *Fn = E->getDirectCallee();
8127 assert(Fn && "builtin call without direct callee!");
8129 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
8130 InitializedEntity Entity =
8131 InitializedEntity::InitializeParameter(S.Context, Param);
8133 ExprResult Arg = E->getArg(ArgIndex);
8134 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
8135 if (Arg.isInvalid())
8136 return true;
8138 E->setArg(ArgIndex, Arg.get());
8139 return false;
8142 bool Sema::BuiltinWasmRefNullExtern(CallExpr *TheCall) {
8143 if (TheCall->getNumArgs() != 0)
8144 return true;
8146 TheCall->setType(Context.getWebAssemblyExternrefType());
8148 return false;
8151 bool Sema::BuiltinWasmRefNullFunc(CallExpr *TheCall) {
8152 if (TheCall->getNumArgs() != 0) {
8153 Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_many_args)
8154 << 0 /*function call*/ << /*expected*/ 0 << TheCall->getNumArgs()
8155 << /*is non object*/ 0;
8156 return true;
8159 // This custom type checking code ensures that the nodes are as expected
8160 // in order to later on generate the necessary builtin.
8161 QualType Pointee = Context.getFunctionType(Context.VoidTy, {}, {});
8162 QualType Type = Context.getPointerType(Pointee);
8163 Pointee = Context.getAddrSpaceQualType(Pointee, LangAS::wasm_funcref);
8164 Type = Context.getAttributedType(attr::WebAssemblyFuncref, Type,
8165 Context.getPointerType(Pointee));
8166 TheCall->setType(Type);
8168 return false;
/// We have a call to a function like __sync_fetch_and_add, which is an
/// overloaded function based on the pointer type of its first argument.
/// The main BuildCallExpr routines have already promoted the types of
/// arguments because all of these calls are prototyped as void(...).
///
/// This function goes through and does final semantic checking for these
/// builtins, as well as generating any warnings. It rewrites the call to
/// refer to the size-specific builtin (e.g. __sync_fetch_and_add_4) deduced
/// from the pointee size of the first argument.
ExprResult
Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
  Expr *Callee = TheCall->getCallee();
  DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // Ensure that we have at least one argument to do type inference from.
  if (TheCall->getNumArgs() < 1) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs() << /*is non object*/ 0
        << Callee->getSourceRange();
    return ExprError();
  }

  // Inspect the first argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  // FIXME: We don't allow floating point scalars as input.
  Expr *FirstArg = TheCall->getArg(0);
  ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
  if (FirstArgResult.isInvalid())
    return ExprError();
  FirstArg = FirstArgResult.get();
  TheCall->setArg(0, FirstArg);

  const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  QualType ValType = pointerType->getPointeeType();
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // The pointee must be writable: these builtins all store through it.
  if (ValType.isConstQualified()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // ARC-managed pointees cannot be updated via raw atomic stores.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << FirstArg->getSourceRange();
    return ExprError();
  }

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();

  // The majority of builtins return a value, but a few have special return
  // types, so allow them to override appropriately below.
  QualType ResultType = ValType;

  // We need to figure out which concrete builtin this maps onto. For example,
  // __sync_fetch_and_add with a 2 byte object turns into
  // __sync_fetch_and_add_2.
#define BUILTIN_ROW(x) \
  { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
    Builtin::BI##x##_8, Builtin::BI##x##_16 }

  static const unsigned BuiltinIndices[][5] = {
    BUILTIN_ROW(__sync_fetch_and_add),
    BUILTIN_ROW(__sync_fetch_and_sub),
    BUILTIN_ROW(__sync_fetch_and_or),
    BUILTIN_ROW(__sync_fetch_and_and),
    BUILTIN_ROW(__sync_fetch_and_xor),
    BUILTIN_ROW(__sync_fetch_and_nand),

    BUILTIN_ROW(__sync_add_and_fetch),
    BUILTIN_ROW(__sync_sub_and_fetch),
    BUILTIN_ROW(__sync_and_and_fetch),
    BUILTIN_ROW(__sync_or_and_fetch),
    BUILTIN_ROW(__sync_xor_and_fetch),
    BUILTIN_ROW(__sync_nand_and_fetch),

    BUILTIN_ROW(__sync_val_compare_and_swap),
    BUILTIN_ROW(__sync_bool_compare_and_swap),
    BUILTIN_ROW(__sync_lock_test_and_set),
    BUILTIN_ROW(__sync_lock_release),
    BUILTIN_ROW(__sync_swap)
  };
#undef BUILTIN_ROW

  // Determine the index of the size (column of BuiltinIndices).
  unsigned SizeIndex;
  switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
  case 1: SizeIndex = 0; break;
  case 2: SizeIndex = 1; break;
  case 4: SizeIndex = 2; break;
  case 8: SizeIndex = 3; break;
  case 16: SizeIndex = 4; break;
  default:
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Each of these builtins has one pointer argument, followed by some number of
  // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
  // that we ignore. Find out which row of BuiltinIndices to read from as well
  // as the number of fixed args.
  unsigned BuiltinID = FDecl->getBuiltinID();
  unsigned BuiltinIndex, NumFixed = 1;
  bool WarnAboutSemanticsChange = false;
  switch (BuiltinID) {
  default: llvm_unreachable("Unknown overloaded atomic builtin!");
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    BuiltinIndex = 0;
    break;

  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    BuiltinIndex = 1;
    break;

  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    BuiltinIndex = 2;
    break;

  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    BuiltinIndex = 3;
    break;

  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    BuiltinIndex = 4;
    break;

  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    BuiltinIndex = 5;
    // GCC 4.4 changed nand from ~(ptr & val) to ~ptr & val; warn below.
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    BuiltinIndex = 6;
    break;

  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    BuiltinIndex = 7;
    break;

  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    BuiltinIndex = 8;
    break;

  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    BuiltinIndex = 9;
    break;

  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    BuiltinIndex = 10;
    break;

  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    BuiltinIndex = 11;
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
    BuiltinIndex = 12;
    // Takes both an expected and a desired value.
    NumFixed = 2;
    break;

  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
    BuiltinIndex = 13;
    NumFixed = 2;
    // Returns whether the swap happened, not the old value.
    ResultType = Context.BoolTy;
    break;

  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    BuiltinIndex = 14;
    break;

  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
    BuiltinIndex = 15;
    // Takes only the pointer and returns nothing.
    NumFixed = 0;
    ResultType = Context.VoidTy;
    break;

  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    BuiltinIndex = 16;
    break;
  }

  // Now that we know how many fixed arguments we expect, first check that we
  // have at least that many.
  if (TheCall->getNumArgs() < 1+NumFixed) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 + NumFixed << TheCall->getNumArgs() << /*is non object*/ 0
        << Callee->getSourceRange();
    return ExprError();
  }

  // __sync_* builtins always imply a sequentially-consistent ordering;
  // -Watomic-implicit-seq-cst (off by default) flags that.
  Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst)
      << Callee->getSourceRange();

  if (WarnAboutSemanticsChange) {
    Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change)
        << Callee->getSourceRange();
  }

  // Get the decl for the concrete builtin from this, we can tell what the
  // concrete integer type we should convert to is.
  unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
  StringRef NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
  FunctionDecl *NewBuiltinDecl;
  if (NewBuiltinID == BuiltinID)
    NewBuiltinDecl = FDecl;
  else {
    // Perform builtin lookup to avoid redeclaring it.
    DeclarationName DN(&Context.Idents.get(NewBuiltinName));
    LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
    LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
    assert(Res.getFoundDecl());
    NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
    if (!NewBuiltinDecl)
      return ExprError();
  }

  // The first argument --- the pointer --- has a fixed type; we
  // deduce the types of the rest of the arguments accordingly. Walk
  // the remaining arguments, converting them to the deduced value type.
  for (unsigned i = 0; i != NumFixed; ++i) {
    ExprResult Arg = TheCall->getArg(i+1);

    // GCC does an implicit conversion to the pointer or integer ValType. This
    // can fail in some cases (1i -> int**), check for this error case now.
    // Initialize the argument.
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                   ValType, /*consume*/ false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();

    // Okay, we have something that *can* be converted to the right type. Check
    // to see if there is a potentially weird extension going on here. This can
    // happen when you do an atomic operation on something like an char* and
    // pass in 42. The 42 gets converted to char. This is even more strange
    // for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    TheCall->setArg(i+1, Arg.get());
  }

  // Create a new DeclRefExpr to refer to the new decl.
  DeclRefExpr *NewDRE = DeclRefExpr::Create(
      Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
      /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
      DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());

  // Set the callee in the CallExpr.
  // FIXME: This loses syntactic information.
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  // Prohibit problematic uses of bit-precise integer types with atomic
  // builtins. The arguments would have already been converted to the first
  // argument's type, so only need to check the first argument.
  const auto *BitIntValType = ValType->getAs<BitIntType>();
  if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) {
    Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
    return ExprError();
  }

  return TheCallResult;
}
8550 /// SemaBuiltinNontemporalOverloaded - We have a call to
8551 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
8552 /// overloaded function based on the pointer type of its last argument.
8554 /// This function goes through and does final semantic checking for these
8555 /// builtins.
8556 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
8557 CallExpr *TheCall = (CallExpr *)TheCallResult.get();
8558 DeclRefExpr *DRE =
8559 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
8560 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
8561 unsigned BuiltinID = FDecl->getBuiltinID();
8562 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
8563 BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
8564 "Unexpected nontemporal load/store builtin!");
8565 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
8566 unsigned numArgs = isStore ? 2 : 1;
8568 // Ensure that we have the proper number of arguments.
8569 if (checkArgCount(*this, TheCall, numArgs))
8570 return ExprError();
8572 // Inspect the last argument of the nontemporal builtin. This should always
8573 // be a pointer type, from which we imply the type of the memory access.
8574 // Because it is a pointer type, we don't have to worry about any implicit
8575 // casts here.
8576 Expr *PointerArg = TheCall->getArg(numArgs - 1);
8577 ExprResult PointerArgResult =
8578 DefaultFunctionArrayLvalueConversion(PointerArg);
8580 if (PointerArgResult.isInvalid())
8581 return ExprError();
8582 PointerArg = PointerArgResult.get();
8583 TheCall->setArg(numArgs - 1, PointerArg);
8585 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
8586 if (!pointerType) {
8587 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
8588 << PointerArg->getType() << PointerArg->getSourceRange();
8589 return ExprError();
8592 QualType ValType = pointerType->getPointeeType();
8594 // Strip any qualifiers off ValType.
8595 ValType = ValType.getUnqualifiedType();
8596 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
8597 !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
8598 !ValType->isVectorType()) {
8599 Diag(DRE->getBeginLoc(),
8600 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
8601 << PointerArg->getType() << PointerArg->getSourceRange();
8602 return ExprError();
8605 if (!isStore) {
8606 TheCall->setType(ValType);
8607 return TheCallResult;
8610 ExprResult ValArg = TheCall->getArg(0);
8611 InitializedEntity Entity = InitializedEntity::InitializeParameter(
8612 Context, ValType, /*consume*/ false);
8613 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
8614 if (ValArg.isInvalid())
8615 return ExprError();
8617 TheCall->setArg(0, ValArg.get());
8618 TheCall->setType(Context.VoidTy);
8619 return TheCallResult;
8622 /// CheckObjCString - Checks that the argument to the builtin
8623 /// CFString constructor is correct
8624 /// Note: It might also make sense to do the UTF-16 conversion here (would
8625 /// simplify the backend).
8626 bool Sema::CheckObjCString(Expr *Arg) {
8627 Arg = Arg->IgnoreParenCasts();
8628 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
8630 if (!Literal || !Literal->isOrdinary()) {
8631 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
8632 << Arg->getSourceRange();
8633 return true;
8636 if (Literal->containsNonAsciiOrNull()) {
8637 StringRef String = Literal->getString();
8638 unsigned NumBytes = String.size();
8639 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
8640 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
8641 llvm::UTF16 *ToPtr = &ToBuf[0];
8643 llvm::ConversionResult Result =
8644 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
8645 ToPtr + NumBytes, llvm::strictConversion);
8646 // Check for conversion failure.
8647 if (Result != llvm::conversionOK)
8648 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
8649 << Arg->getSourceRange();
8651 return false;
/// CheckOSLogFormatStringArg - Checks that the format string argument to the
/// os_log() and os_trace() functions is correct, and converts it to
/// const char *.
///
/// \param Arg the format-string argument expression.
/// \returns the argument converted to 'const char *', or ExprError() if it
/// is not an ordinary/UTF-8 string literal (or an ObjC string literal
/// wrapping one).
ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  auto *Literal = dyn_cast<StringLiteral>(Arg);
  if (!Literal) {
    // An Objective-C string literal (@"...") is accepted via its underlying
    // C string literal.
    if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
      Literal = ObjcLiteral->getString();
    }
  }

  if (!Literal || (!Literal->isOrdinary() && !Literal->isUTF8())) {
    return ExprError(
        Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
        << Arg->getSourceRange());
  }

  // Initialize the literal as if it were being passed to a
  // 'const char *' parameter, yielding the converted expression.
  ExprResult Result(Literal);
  QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(Context, ResultTy, false);
  Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
  return Result;
}
/// Check that the user is calling the appropriate va_start builtin for the
/// target and calling convention.
///
/// \param S         the Sema instance, used for diagnostics and target info.
/// \param BuiltinID the builtin being checked (__builtin_va_start or
///                  __builtin_ms_va_start).
/// \param Fn        the callee expression, used for diagnostic locations.
/// \returns true if a diagnostic was emitted.
static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
  const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
  bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
  bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
                    TT.getArch() == llvm::Triple::aarch64_32);
  bool IsWindows = TT.isOSWindows();
  bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
  if (IsX64 || IsAArch64) {
    // Determine the calling convention of the enclosing function; default to
    // CC_C if there is none (e.g. at block scope outside a function decl).
    CallingConv CC = CC_C;
    if (const FunctionDecl *FD = S.getCurFunctionDecl())
      CC = FD->getType()->castAs<FunctionType>()->getCallConv();
    if (IsMSVAStart) {
      // Don't allow this in System V ABI functions.
      if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_ms_va_start_used_in_sysv_function);
    } else {
      // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
      // On x64 Windows, don't allow this in System V ABI functions.
      // (Yes, that means there's no corresponding way to support variadic
      // System V ABI functions on Windows.)
      if ((IsWindows && CC == CC_X86_64SysV) ||
          (!IsWindows && CC == CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_va_start_used_in_wrong_abi_function)
               << !IsWindows;
    }
    return false;
  }

  // On all other architectures, __builtin_ms_va_start is unsupported.
  if (IsMSVAStart)
    return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
  return false;
}
/// Check that va_start is being used inside a variadic function, block, or
/// Objective-C method.
///
/// \param S         the Sema instance used for diagnostics.
/// \param Fn        the callee expression, used for diagnostic locations.
/// \param LastParam if non-null, receives the last named parameter of the
///                  enclosing context (nullptr when it has no parameters).
/// \returns true if a diagnostic was emitted.
static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
                                             ParmVarDecl **LastParam = nullptr) {
  // Determine whether the current function, block, or obj-c method is variadic
  // and get its parameter list.
  bool IsVariadic = false;
  ArrayRef<ParmVarDecl *> Params;
  DeclContext *Caller = S.CurContext;
  if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
    IsVariadic = Block->isVariadic();
    Params = Block->parameters();
  } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
    IsVariadic = FD->isVariadic();
    Params = FD->parameters();
  } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
    IsVariadic = MD->isVariadic();
    // FIXME: This isn't correct for methods (results in bogus warning).
    Params = MD->parameters();
  } else if (isa<CapturedDecl>(Caller)) {
    // We don't support va_start in a CapturedDecl.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
    return true;
  } else {
    // This must be some other declcontext that parses exprs.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
    return true;
  }

  if (!IsVariadic) {
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
    return true;
  }

  if (LastParam)
    *LastParam = Params.empty() ? nullptr : Params.back();

  return false;
}
/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
/// for validity.  Emit an error and return true on failure; return false
/// on success.
bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
  Expr *Fn = TheCall->getCallee();

  // Reject the wrong va_start flavor for the target/calling convention.
  if (checkVAStartABI(*this, BuiltinID, Fn))
    return true;

  // In C23 mode, va_start only needs one argument. However, the builtin still
  // requires two arguments (which matches the behavior of the GCC builtin),
  // <stdarg.h> passes `0` as the second argument in C23 mode.
  if (checkArgCount(*this, TheCall, 2))
    return true;

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, TheCall, 0))
    return true;

  // Check that the current function is variadic, and get its last parameter.
  ParmVarDecl *LastParam;
  if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
    return true;

  // Verify that the second argument to the builtin is the last argument of the
  // current function or method. In C23 mode, if the second argument is an
  // integer constant expression with value 0, then we don't bother with this
  // check.
  bool SecondArgIsLastNamedArgument = false;
  const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
  if (std::optional<llvm::APSInt> Val =
          TheCall->getArg(1)->getIntegerConstantExpr(Context);
      Val && LangOpts.C23 && *Val == 0)
    return false;

  // These are valid if SecondArgIsLastNamedArgument is false after the next
  // block.
  QualType Type;
  SourceLocation ParamLoc;
  bool IsCRegister = false;

  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
    if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
      SecondArgIsLastNamedArgument = PV == LastParam;

      Type = PV->getType();
      ParamLoc = PV->getLocation();
      // 'register' parameters in C make va_start undefined behavior.
      IsCRegister =
          PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
    }
  }

  if (!SecondArgIsLastNamedArgument)
    Diag(TheCall->getArg(1)->getBeginLoc(),
         diag::warn_second_arg_of_va_start_not_last_named_param);
  else if (IsCRegister || Type->isReferenceType() ||
           Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
             // Promotable integers are UB, but enumerations need a bit of
             // extra checking to see what their promotable type actually is.
             if (!Context.isPromotableIntegerType(Type))
               return false;
             if (!Type->isEnumeralType())
               return true;
             const EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
             return !(ED &&
                      Context.typesAreCompatible(ED->getPromotionType(), Type));
           }()) {
    unsigned Reason = 0;
    if (Type->isReferenceType()) Reason = 1;
    else if (IsCRegister) Reason = 2;
    Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
    Diag(ParamLoc, diag::note_parameter_type) << Type;
  }

  return false;
}
/// Check a call to the ARM/Windows '__va_start' builtin, which takes an
/// explicit address and slot size for the last named parameter in addition
/// to the va_list.  Returns true only on hard errors; the pointer/size type
/// mismatches below are diagnosed but do not abort checking.
bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
  // Returns true if Arg's type is acceptable for the named-address
  // parameters: in C++ it must point to (possibly qualified) char; C is
  // permissive because aliasing through `char *` must be allowed.
  auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool {
    const LangOptions &LO = getLangOpts();

    if (LO.CPlusPlus)
      return Arg->getType()
                 .getCanonicalType()
                 .getTypePtr()
                 ->getPointeeType()
                 .withoutLocalFastQualifiers() == Context.CharTy;

    // In C, allow aliasing through `char *`, this is required for AArch64 at
    // least.
    return true;
  };

  // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
  //                 const char *named_addr);

  Expr *Func = Call->getCallee();

  if (Call->getNumArgs() < 3)
    return Diag(Call->getEndLoc(),
                diag::err_typecheck_call_too_few_args_at_least)
           << 0 /*function call*/ << 3 << Call->getNumArgs()
           << /*is non object*/ 0;

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, Call, 0))
    return true;

  // Check that the current function is variadic.
  if (checkVAStartIsInVariadicFunction(*this, Func))
    return true;

  // __va_start on Windows does not validate the parameter qualifiers

  const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
  const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();

  const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
  const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();

  // Argument 1 must be a pointer compatible with 'const char *'.
  const QualType &ConstCharPtrTy =
      Context.getPointerType(Context.CharTy.withConst());
  if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1))
    Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
        << 0 /* qualifier difference */
        << 3 /* parameter mismatch */
        << 2 << Arg1->getType() << ConstCharPtrTy;

  // Argument 2 must have type size_t.
  const QualType SizeTy = Context.getSizeType();
  if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
    Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg2->getType() << SizeTy << 1 /* different class */
        << 0 /* qualifier difference */
        << 3 /* parameter mismatch */
        << 3 << Arg2->getType() << SizeTy;

  return false;
}
/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends.  This is declared to take (...), so we have to check everything.
///
/// \returns true if a diagnostic was emitted; on success the call's
/// arguments have been replaced with their arithmetically-converted forms.
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(
      OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  // Dependent arguments are checked at instantiation time.
  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}
/// SemaBuiltinFPClassification - Handle functions like __builtin_isnan and
/// friends.  This is declared to take (...), so we have to check everything.
///
/// \param NumArgs the number of arguments the builtin expects; NumArgs == 2
///        identifies __builtin_isfpclass, whose second argument is a
///        constant test mask.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  if (checkArgCount(*this, TheCall, NumArgs))
    return true;

  bool IsFPClass = NumArgs == 2;

  // Find out position of floating-point argument.
  unsigned FPArgNo = IsFPClass ? 0 : NumArgs - 1;

  // We can count on all parameters preceding the floating-point just being int.
  // Try all of those.
  for (unsigned i = 0; i < FPArgNo; ++i) {
    Expr *Arg = TheCall->getArg(i);

    if (Arg->isTypeDependent())
      return false;

    ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);

    if (Res.isInvalid())
      return true;
    TheCall->setArg(i, Res.get());
  }

  Expr *OrigArg = TheCall->getArg(FPArgNo);

  if (OrigArg->isTypeDependent())
    return false;

  // Usual Unary Conversions will convert half to float, which we want for
  // machines that use fp16 conversion intrinsics. Else, we want to leave the
  // type how it is, but do normal L->Rvalue conversions.
  if (Context.getTargetInfo().useFP16ConversionIntrinsics())
    OrigArg = UsualUnaryConversions(OrigArg).get();
  else
    OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
  TheCall->setArg(FPArgNo, OrigArg);

  QualType VectorResultTy;
  QualType ElementTy = OrigArg->getType();
  // TODO: When all classification function are implemented with is_fpclass,
  // vector argument can be supported in all of them.
  if (ElementTy->isVectorType() && IsFPClass) {
    VectorResultTy = GetSignedVectorType(ElementTy);
    ElementTy = ElementTy->getAs<VectorType>()->getElementType();
  }

  // This operation requires a non-_Complex floating-point number.
  if (!ElementTy->isRealFloatingType())
    return Diag(OrigArg->getBeginLoc(),
                diag::err_typecheck_call_invalid_unary_fp)
           << OrigArg->getType() << OrigArg->getSourceRange();

  // __builtin_isfpclass has integer parameter that specify test mask. It is
  // passed in (...), so it should be analyzed completely here.
  if (IsFPClass)
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, llvm::fcAllFlags))
      return true;

  // TODO: enable this code to all classification functions.
  if (IsFPClass) {
    QualType ResultTy;
    if (!VectorResultTy.isNull())
      ResultTy = VectorResultTy;
    else
      ResultTy = Context.IntTy;
    TheCall->setType(ResultTy);
  }

  return false;
}
/// Perform semantic analysis for a call to __builtin_complex.
///
/// Requires two real floating-point arguments of the same type and sets the
/// call's type to the corresponding _Complex type.  Returns true if a
/// diagnostic was emitted.
bool Sema::SemaBuiltinComplex(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  bool Dependent = false;
  for (unsigned I = 0; I != 2; ++I) {
    Expr *Arg = TheCall->getArg(I);
    QualType T = Arg->getType();
    if (T->isDependentType()) {
      Dependent = true;
      continue;
    }

    // Despite supporting _Complex int, GCC requires a real floating point type
    // for the operands of __builtin_complex.
    if (!T->isRealFloatingType()) {
      return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp)
             << Arg->getType() << Arg->getSourceRange();
    }

    ExprResult Converted = DefaultLvalueConversion(Arg);
    if (Converted.isInvalid())
      return true;
    TheCall->setArg(I, Converted.get());
  }

  // With any dependent operand, defer the remaining checks to instantiation.
  if (Dependent) {
    TheCall->setType(Context.DependentTy);
    return false;
  }

  Expr *Real = TheCall->getArg(0);
  Expr *Imag = TheCall->getArg(1);
  if (!Context.hasSameType(Real->getType(), Imag->getType())) {
    return Diag(Real->getBeginLoc(),
                diag::err_typecheck_call_different_arg_types)
           << Real->getType() << Imag->getType()
           << Real->getSourceRange() << Imag->getSourceRange();
  }

  // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers;
  // don't allow this builtin to form those types either.
  // FIXME: Should we allow these types?
  if (Real->getType()->isFloat16Type())
    return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
           << "_Float16";
  if (Real->getType()->isHalfType())
    return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
           << "half";

  TheCall->setType(Context.getComplexType(Real->getType()));
  return false;
}
// Customized Sema Checking for VSX builtins that have the following signature:
// vector [...] builtinName(vector [...], vector [...], const int);
// Which takes the same type of vectors (any legal vector type) for the first
// two arguments and takes compile time constant for the third argument.
// Example builtins are :
// vector double vec_xxpermdi(vector double, vector double, int);
// vector short vec_xxsldwi(vector short, vector short, int);
bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
  unsigned ExpectedNumArgs = 3;
  if (checkArgCount(*this, TheCall, ExpectedNumArgs))
    return true;

  // Check the third argument is a compile time constant
  if (!TheCall->getArg(2)->isIntegerConstantExpr(Context))
    return Diag(TheCall->getBeginLoc(),
                diag::err_vsx_builtin_nonconstant_argument)
           << 3 /* argument index */ << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(2)->getBeginLoc(),
                          TheCall->getArg(2)->getEndLoc());

  QualType Arg1Ty = TheCall->getArg(0)->getType();
  QualType Arg2Ty = TheCall->getArg(1)->getType();

  // Check the type of argument 1 and argument 2 are vectors.
  SourceLocation BuiltinLoc = TheCall->getBeginLoc();
  if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
      (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
    return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
           << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                          TheCall->getArg(1)->getEndLoc());
  }

  // Check the first two arguments are the same type.
  if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
    return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
           << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                          TheCall->getArg(1)->getEndLoc());
  }

  // When default clang type checking is turned off and the customized type
  // checking is used, the returning type of the function must be explicitly
  // set. Otherwise it is _Bool by default.
  TheCall->setType(Arg1Ty);

  return false;
}
/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
// This is declared to take (...), so we have to check everything.
//
// On success the CallExpr is consumed: its arguments are moved into a new
// ShuffleVectorExpr, which is returned in its place.
ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
  if (TheCall->getNumArgs() < 2)
    return ExprError(Diag(TheCall->getEndLoc(),
                          diag::err_typecheck_call_too_few_args_at_least)
                     << 0 /*function call*/ << 2 << TheCall->getNumArgs()
                     << /*is non object*/ 0 << TheCall->getSourceRange());

  // Determine which of the following types of shufflevector we're checking:
  // 1) unary, vector mask: (lhs, mask)
  // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
  QualType resType = TheCall->getArg(0)->getType();
  unsigned numElements = 0;

  if (!TheCall->getArg(0)->isTypeDependent() &&
      !TheCall->getArg(1)->isTypeDependent()) {
    QualType LHSType = TheCall->getArg(0)->getType();
    QualType RHSType = TheCall->getArg(1)->getType();

    if (!LHSType->isVectorType() || !RHSType->isVectorType())
      return ExprError(
          Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
          << TheCall->getDirectCallee()
          << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                         TheCall->getArg(1)->getEndLoc()));

    numElements = LHSType->castAs<VectorType>()->getNumElements();
    unsigned numResElements = TheCall->getNumArgs() - 2;

    // Check to see if we have a call with 2 vector arguments, the unary shuffle
    // with mask. If so, verify that RHS is an integer vector type with the
    // same number of elts as lhs.
    if (TheCall->getNumArgs() == 2) {
      if (!RHSType->hasIntegerRepresentation() ||
          RHSType->castAs<VectorType>()->getNumElements() != numElements)
        return ExprError(Diag(TheCall->getBeginLoc(),
                              diag::err_vec_builtin_incompatible_vector)
                         << TheCall->getDirectCallee()
                         << SourceRange(TheCall->getArg(1)->getBeginLoc(),
                                        TheCall->getArg(1)->getEndLoc()));
    } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_vec_builtin_incompatible_vector)
                       << TheCall->getDirectCallee()
                       << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                                      TheCall->getArg(1)->getEndLoc()));
    } else if (numElements != numResElements) {
      // The result has as many elements as there are indices; build the
      // appropriately-sized vector type with the same element type.
      QualType eltType = LHSType->castAs<VectorType>()->getElementType();
      resType =
          Context.getVectorType(eltType, numResElements, VectorKind::Generic);
    }
  }

  // Validate each index argument: it must be a constant (or dependent), and
  // either -1 or within [0, 2*numElements).
  for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
    if (TheCall->getArg(i)->isTypeDependent() ||
        TheCall->getArg(i)->isValueDependent())
      continue;

    std::optional<llvm::APSInt> Result;
    if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context)))
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_nonconstant_argument)
                       << TheCall->getArg(i)->getSourceRange());

    // Allow -1 which will be translated to undef in the IR.
    if (Result->isSigned() && Result->isAllOnes())
      continue;

    if (Result->getActiveBits() > 64 ||
        Result->getZExtValue() >= numElements * 2)
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_argument_too_large)
                       << TheCall->getArg(i)->getSourceRange());
  }

  // Transfer ownership of the arguments from the CallExpr to the new
  // ShuffleVectorExpr node.
  SmallVector<Expr*, 32> exprs;

  for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
    exprs.push_back(TheCall->getArg(i));
    TheCall->setArg(i, nullptr);
  }

  return new (Context) ShuffleVectorExpr(Context, exprs, resType,
                                         TheCall->getCallee()->getBeginLoc(),
                                         TheCall->getRParenLoc());
}
9198 /// SemaConvertVectorExpr - Handle __builtin_convertvector
9199 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
9200 SourceLocation BuiltinLoc,
9201 SourceLocation RParenLoc) {
9202 ExprValueKind VK = VK_PRValue;
9203 ExprObjectKind OK = OK_Ordinary;
9204 QualType DstTy = TInfo->getType();
9205 QualType SrcTy = E->getType();
9207 if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
9208 return ExprError(Diag(BuiltinLoc,
9209 diag::err_convertvector_non_vector)
9210 << E->getSourceRange());
9211 if (!DstTy->isVectorType() && !DstTy->isDependentType())
9212 return ExprError(Diag(BuiltinLoc, diag::err_builtin_non_vector_type)
9213 << "second"
9214 << "__builtin_convertvector");
9216 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
9217 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements();
9218 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements();
9219 if (SrcElts != DstElts)
9220 return ExprError(Diag(BuiltinLoc,
9221 diag::err_convertvector_incompatible_vector)
9222 << E->getSourceRange());
9225 return new (Context)
9226 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
9229 /// SemaBuiltinPrefetch - Handle __builtin_prefetch.
9230 // This is declared to take (const void*, ...) and can take two
9231 // optional constant int args.
9232 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
9233 unsigned NumArgs = TheCall->getNumArgs();
9235 if (NumArgs > 3)
9236 return Diag(TheCall->getEndLoc(),
9237 diag::err_typecheck_call_too_many_args_at_most)
9238 << 0 /*function call*/ << 3 << NumArgs << /*is non object*/ 0
9239 << TheCall->getSourceRange();
9241 // Argument 0 is checked for us and the remaining arguments must be
9242 // constant integers.
9243 for (unsigned i = 1; i != NumArgs; ++i)
9244 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3))
9245 return true;
9247 return false;
9250 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence.
9251 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) {
9252 if (!Context.getTargetInfo().checkArithmeticFenceSupported())
9253 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
9254 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
9255 if (checkArgCount(*this, TheCall, 1))
9256 return true;
9257 Expr *Arg = TheCall->getArg(0);
9258 if (Arg->isInstantiationDependent())
9259 return false;
9261 QualType ArgTy = Arg->getType();
9262 if (!ArgTy->hasFloatingRepresentation())
9263 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector)
9264 << ArgTy;
9265 if (Arg->isLValue()) {
9266 ExprResult FirstArg = DefaultLvalueConversion(Arg);
9267 TheCall->setArg(0, FirstArg.get());
9269 TheCall->setType(TheCall->getArg(0)->getType());
9270 return false;
9273 /// SemaBuiltinAssume - Handle __assume (MS Extension).
9274 // __assume does not evaluate its arguments, and should warn if its argument
9275 // has side effects.
9276 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
9277 Expr *Arg = TheCall->getArg(0);
9278 if (Arg->isInstantiationDependent()) return false;
9280 if (Arg->HasSideEffects(Context))
9281 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects)
9282 << Arg->getSourceRange()
9283 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();
9285 return false;
/// Handle __builtin_alloca_with_align. This is declared
/// as (size_t, size_t) where the second size_t must be a power of 2 greater
/// than 8.
///
/// \returns true if a diagnostic error was emitted (a warning about using
/// alignof does not count as failure).
bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
  // The alignment must be a constant integer.
  Expr *Arg = TheCall->getArg(1);

  // We can't check the value of a dependent argument.
  if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
    // Warn when the alignment is spelled alignof(...): the user probably
    // wanted the alignment of the allocated type, which alloca provides
    // anyway.
    if (const auto *UE =
            dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts()))
      if (UE->getKind() == UETT_AlignOf ||
          UE->getKind() == UETT_PreferredAlignOf)
        Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof)
            << Arg->getSourceRange();

    llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context);

    if (!Result.isPowerOf2())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
             << Arg->getSourceRange();

    // The alignment must be at least one byte (CHAR_BIT bits).
    if (Result < Context.getCharWidth())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small)
             << (unsigned)Context.getCharWidth() << Arg->getSourceRange();

    // Cap at INT32_MAX to match what the backend can represent.
    if (Result > std::numeric_limits<int32_t>::max())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big)
             << std::numeric_limits<int32_t>::max() << Arg->getSourceRange();
  }

  return false;
}
9322 /// Handle __builtin_assume_aligned. This is declared
9323 /// as (const void*, size_t, ...) and can take one optional constant int arg.
9324 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
9325 if (checkArgCountRange(*this, TheCall, 2, 3))
9326 return true;
9328 unsigned NumArgs = TheCall->getNumArgs();
9329 Expr *FirstArg = TheCall->getArg(0);
9332 ExprResult FirstArgResult =
9333 DefaultFunctionArrayLvalueConversion(FirstArg);
9334 if (checkBuiltinArgument(*this, TheCall, 0))
9335 return true;
9336 /// In-place updation of FirstArg by checkBuiltinArgument is ignored.
9337 TheCall->setArg(0, FirstArgResult.get());
9340 // The alignment must be a constant integer.
9341 Expr *SecondArg = TheCall->getArg(1);
9343 // We can't check the value of a dependent argument.
9344 if (!SecondArg->isValueDependent()) {
9345 llvm::APSInt Result;
9346 if (SemaBuiltinConstantArg(TheCall, 1, Result))
9347 return true;
9349 if (!Result.isPowerOf2())
9350 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
9351 << SecondArg->getSourceRange();
9353 if (Result > Sema::MaximumAlignment)
9354 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
9355 << SecondArg->getSourceRange() << Sema::MaximumAlignment;
9358 if (NumArgs > 2) {
9359 Expr *ThirdArg = TheCall->getArg(2);
9360 if (convertArgumentToType(*this, ThirdArg, Context.getSizeType()))
9361 return true;
9362 TheCall->setArg(2, ThirdArg);
9365 return false;
/// Check a call to __builtin_os_log_format or
/// __builtin_os_log_format_buffer_size: validates the buffer argument (for
/// the formatting variant), the format string, and each variadic argument,
/// then sets the call's result type.  Returns true if a diagnostic error
/// was emitted.
bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
  unsigned BuiltinID =
      cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
  // The buffer-size variant takes no buffer argument, only the format.
  bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;

  unsigned NumArgs = TheCall->getNumArgs();
  unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
  if (NumArgs < NumRequiredArgs) {
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /* function call */ << NumRequiredArgs << NumArgs
           << /*is non object*/ 0 << TheCall->getSourceRange();
  }
  // The encoding stores per-argument sizes in a byte, so at most 255 data
  // arguments can be represented.
  if (NumArgs >= NumRequiredArgs + 0x100) {
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
           << /*is non object*/ 0 << TheCall->getSourceRange();
  }
  unsigned i = 0;

  // For formatting call, check buffer arg.
  if (!IsSizeCall) {
    ExprResult Arg(TheCall->getArg(i));
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        Context, Context.VoidPtrTy, false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check string literal arg.
  unsigned FormatIdx = i;
  {
    ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Make sure variadic args are scalar.
  unsigned FirstDataArg = i;
  while (i < NumArgs) {
    ExprResult Arg = DefaultVariadicArgumentPromotion(
        TheCall->getArg(i), VariadicFunction, nullptr);
    if (Arg.isInvalid())
      return true;
    // Each argument's size must fit in the one-byte size field of the
    // os_log buffer encoding.
    CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
    if (ArgSize.getQuantity() >= 0x100) {
      return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
             << i << (int)ArgSize.getQuantity() << 0xff
             << TheCall->getSourceRange();
    }
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check formatting specifiers. NOTE: We're only doing this for the non-size
  // call to avoid duplicate diagnostics.
  if (!IsSizeCall) {
    llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
    ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
    bool Success = CheckFormatArguments(
        Args, FAPK_Variadic, FormatIdx, FirstDataArg, FST_OSLog,
        VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
        CheckedVarArgs);
    if (!Success)
      return true;
  }

  // The size variant returns size_t; the formatting variant returns the
  // buffer pointer.
  if (IsSizeCall) {
    TheCall->setType(Context.getSizeType());
  } else {
    TheCall->setType(Context.VoidPtrTy);
  }
  return false;
}
/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression.
///
/// \param Result receives the evaluated value on success.
/// \returns true if the argument is not an integer constant expression
/// (a diagnostic naming the callee is emitted); false on success or when
/// the argument is dependent (Result is then left untouched).
bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                                  llvm::APSInt &Result) {
  Expr *Arg = TheCall->getArg(ArgNum);
  // The callee is named in the diagnostic below.
  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // Dependent arguments can't be evaluated yet; defer to instantiation.
  if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;

  std::optional<llvm::APSInt> R;
  if (!(R = Arg->getIntegerConstantExpr(Context)))
    return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
           << FDecl->getDeclName() << Arg->getSourceRange();
  Result = *R;
  return false;
}
/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression in the range [Low, High].
///
/// \param RangeIsError if true, an out-of-range value is a hard error;
/// otherwise it is a deferred runtime-behavior warning.
bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                       int Low, int High, bool RangeIsError) {
  // In a constant-evaluated context the constant evaluator performs this
  // checking itself.
  if (isConstantEvaluatedContext())
    return false;
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
    if (RangeIsError)
      return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
             << toString(Result, 10) << Low << High << Arg->getSourceRange();
    else
      // Defer the warning until we know if the code will be emitted so that
      // dead code can ignore this.
      DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                          PDiag(diag::warn_argument_invalid_range)
                              << toString(Result, 10) << Low << High
                              << Arg->getSourceRange());
  }

  return false;
}
9499 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
9500 /// TheCall is a constant expression is a multiple of Num..
9501 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
9502 unsigned Num) {
9503 llvm::APSInt Result;
9505 // We can't check the value of a dependent argument.
9506 Expr *Arg = TheCall->getArg(ArgNum);
9507 if (Arg->isTypeDependent() || Arg->isValueDependent())
9508 return false;
9510 // Check constant-ness first.
9511 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
9512 return true;
9514 if (Result.getSExtValue() % Num != 0)
9515 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
9516 << Num << Arg->getSourceRange();
9518 return false;
9521 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
9522 /// constant expression representing a power of 2.
9523 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
9524 llvm::APSInt Result;
9526 // We can't check the value of a dependent argument.
9527 Expr *Arg = TheCall->getArg(ArgNum);
9528 if (Arg->isTypeDependent() || Arg->isValueDependent())
9529 return false;
9531 // Check constant-ness first.
9532 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
9533 return true;
9535 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
9536 // and only if x is a power of 2.
9537 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
9538 return false;
9540 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
9541 << Arg->getSourceRange();
9544 static bool IsShiftedByte(llvm::APSInt Value) {
9545 if (Value.isNegative())
9546 return false;
9548 // Check if it's a shifted byte, by shifting it down
9549 while (true) {
9550 // If the value fits in the bottom byte, the check passes.
9551 if (Value < 0x100)
9552 return true;
9554 // Otherwise, if the value has _any_ bits in the bottom byte, the check
9555 // fails.
9556 if ((Value & 0xFF) != 0)
9557 return false;
9559 // If the bottom 8 bits are all 0, but something above that is nonzero,
9560 // then shifting the value right by 8 bits won't affect whether it's a
9561 // shifted byte or not. So do that, and go round again.
9562 Value >>= 8;
9566 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
9567 /// a constant expression representing an arbitrary byte value shifted left by
9568 /// a multiple of 8 bits.
9569 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
9570 unsigned ArgBits) {
9571 llvm::APSInt Result;
9573 // We can't check the value of a dependent argument.
9574 Expr *Arg = TheCall->getArg(ArgNum);
9575 if (Arg->isTypeDependent() || Arg->isValueDependent())
9576 return false;
9578 // Check constant-ness first.
9579 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
9580 return true;
9582 // Truncate to the given size.
9583 Result = Result.getLoBits(ArgBits);
9584 Result.setIsUnsigned(true);
9586 if (IsShiftedByte(Result))
9587 return false;
9589 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte)
9590 << Arg->getSourceRange();
9593 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of
9594 /// TheCall is a constant expression representing either a shifted byte value,
9595 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression
9596 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
9597 /// Arm MVE intrinsics.
9598 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
9599 int ArgNum,
9600 unsigned ArgBits) {
9601 llvm::APSInt Result;
9603 // We can't check the value of a dependent argument.
9604 Expr *Arg = TheCall->getArg(ArgNum);
9605 if (Arg->isTypeDependent() || Arg->isValueDependent())
9606 return false;
9608 // Check constant-ness first.
9609 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
9610 return true;
9612 // Truncate to the given size.
9613 Result = Result.getLoBits(ArgBits);
9614 Result.setIsUnsigned(true);
9616 // Check to see if it's in either of the required forms.
9617 if (IsShiftedByte(Result) ||
9618 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF))
9619 return false;
9621 return Diag(TheCall->getBeginLoc(),
9622 diag::err_argument_not_shifted_byte_or_xxff)
9623 << Arg->getSourceRange();
/// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
///
/// Performs per-builtin semantic checking for the AArch64 MTE builtins
/// (irg, addg, gmi, ldg, stg, subp): applies the standard argument
/// conversions, verifies pointer/integer operand kinds, and derives the
/// call's result type. Returns true (after emitting a diagnostic) on
/// failure, false on success.
bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
  // __builtin_arm_irg(pointer, mask): result has the pointer's type.
  if (BuiltinID == AArch64::BI__builtin_arm_irg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    // First argument must be a pointer after the usual conversions.
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Second argument (exclusion mask) must be an integer.
    // NOTE(review): the converted second argument is not written back with
    // setArg(1, ...) — confirm this is intentional.
    ExprResult SecArg = DefaultLvalueConversion(Arg1);
    if (SecArg.isInvalid())
      return true;
    QualType SecArgType = SecArg.get()->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_addg(pointer, imm): immediate must be in [0,15].
  if (BuiltinID == AArch64::BI__builtin_arm_addg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;

    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);

    // Second arg must be an constant in range [0,15]
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  // __builtin_arm_gmi(pointer, mask): result is int.
  if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();

    // Second argument is checked on its original type; no conversion is
    // applied here (unlike the irg branch above).
    QualType SecArgType = Arg1->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();
    TheCall->setType(Context.IntTy);
    return false;
  }

  // __builtin_arm_ldg/stg(pointer): only ldg's result type is derived from
  // the pointer argument; stg keeps its declared type.
  if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg) {
    if (checkArgCount(*this, TheCall, 1))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;

    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    if (BuiltinID == AArch64::BI__builtin_arm_ldg)
      TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_subp(a, b): pointer difference; result is long long.
  if (BuiltinID == AArch64::BI__builtin_arm_subp) {
    // NOTE(review): unlike the other MTE branches this one does not call
    // checkArgCount before using getArg(0)/getArg(1) — presumably the count
    // is guaranteed by earlier prototype checking; confirm.
    Expr *ArgA = TheCall->getArg(0);
    Expr *ArgB = TheCall->getArg(1);

    ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA);
    ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB);

    if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
      return true;

    QualType ArgTypeA = ArgExprA.get()->getType();
    QualType ArgTypeB = ArgExprB.get()->getType();

    auto isNull = [&] (Expr *E) -> bool {
      return E->isNullPointerConstant(
          Context, Expr::NPC_ValueDependentIsNotNull); };

    // argument should be either a pointer or null
    if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
             << "first" << ArgTypeA << ArgA->getSourceRange();

    if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
             << "second" << ArgTypeB << ArgB->getSourceRange();

    // Ensure Pointee types are compatible
    if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
        ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
      QualType pointeeA = ArgTypeA->getPointeeType();
      QualType pointeeB = ArgTypeB->getPointeeType();
      if (!Context.typesAreCompatible(
              Context.getCanonicalType(pointeeA).getUnqualifiedType(),
              Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
        return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
               << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
               << ArgB->getSourceRange();
      }
    }

    // at least one argument should be pointer type
    if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
             << ArgTypeA << ArgTypeB << ArgA->getSourceRange();

    if (isNull(ArgA)) // adopt type of the other pointer
      ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);

    if (isNull(ArgB))
      ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);

    TheCall->setArg(0, ArgExprA.get());
    TheCall->setArg(1, ArgExprB.get());
    TheCall->setType(Context.LongLongTy);
    return false;
  }

  assert(false && "Unhandled ARM MTE intrinsic");
  return true;
}
/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
///
/// The register may be spelled either as a plain name (only when AllowName
/// is true) or as a colon-separated field encoding with exactly
/// ExpectedFieldNum fields, whose numeric parts are range-checked below.
/// For single-name AArch64 writes, known PSTATE register names additionally
/// have their immediate operand range-checked. Returns true (after emitting
/// a diagnostic) on failure, false on success.
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                    int ArgNum, unsigned ExpectedFieldNum,
                                    bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(Fields, ":");

  // Either the exact encoded field count, or (when allowed) a single name.
  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one the forms described in ACLE then we
  // can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      // 32-bit ARM encodings carry "cp"/"p" and "c" prefixes on some
      // fields; require them, then strip them in place so the numeric
      // range check below can parse the remainders.
      ValidString &= Fields[0].starts_with_insensitive("cp") ||
                     Fields[0].starts_with_insensitive("p");
      if (ValidString)
        Fields[0] = Fields[0].drop_front(
            Fields[0].starts_with_insensitive("cp") ? 2 : 1);

      ValidString &= Fields[2].starts_with_insensitive("c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(1);

      if (FiveFields) {
        ValidString &= Fields[3].starts_with_insensitive("c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(1);
      }
    }

    // Per-field upper bounds for the numeric components.
    SmallVector<int, 5> Ranges;
    if (FiveFields)
      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
    else
      Ranges.append({15, 7, 15});

    // Every field must parse as a base-10 integer within its range.
    for (unsigned i=0; i<Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(10, IntField);
      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
    }

    if (!ValidString)
      return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // This code validates writes to PSTATE registers.

    // Not a write.
    if (TheCall->getNumArgs() != 2)
      return false;

    // The 128-bit system register accesses do not touch PSTATE.
    if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
        BuiltinID == AArch64::BI__builtin_arm_wsr128)
      return false;

    // These are the named PSTATE accesses using "MSR (immediate)" instructions,
    // along with the upper limit on the immediates allowed.
    auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
      .CaseLower("spsel", 15)
      .CaseLower("daifclr", 15)
      .CaseLower("daifset", 15)
      .CaseLower("pan", 15)
      .CaseLower("uao", 15)
      .CaseLower("dit", 15)
      .CaseLower("ssbs", 15)
      .CaseLower("tco", 15)
      .CaseLower("allint", 1)
      .CaseLower("pm", 1)
      .Default(std::nullopt);

    // If this is not a named PSTATE, just continue without validating, as this
    // will be lowered to an "MSR (register)" instruction directly
    if (!MaxLimit)
      return false;

    // Here we only allow constants in the range for that pstate, as required by
    // the ACLE.
    //
    // While clang also accepts the names of system registers in its ACLE
    // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
    // as the value written via a register is different to the value used as an
    // immediate to have the same effect. e.g., for the instruction `msr tco,
    // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
    // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
    //
    // If a programmer wants to codegen the MSR (register) form of `msr tco,
    // xN`, they can still do so by specifying the register using five
    // colon-separated numbers in a string.
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, *MaxLimit);
  }

  return false;
}
/// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
/// Emit an error and return true on failure; return false on success.
/// TypeStr is a string containing the type descriptor of the value returned by
/// the builtin and the descriptors of the expected type of the arguments.
bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
                                 const char *TypeStr) {

  assert((TypeStr[0] != '\0') &&
         "Invalid types in PPC MMA builtin declaration");

  unsigned Mask = 0;
  unsigned ArgNum = 0;

  // The first type in TypeStr is the type of the value returned by the
  // builtin. So we first read that type and change the type of TheCall.
  // NOTE(review): DecodePPCMMATypeFromStr appears to advance TypeStr past
  // each decoded descriptor — the termination of the loops below depends on
  // this; confirm against its definition.
  QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
  TheCall->setType(type);

  while (*TypeStr != '\0') {
    Mask = 0;
    QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    // Fewer call arguments than descriptors: bump ArgNum (this descriptor is
    // counted too) and fall through to the count check below for a better
    // diagnostic.
    if (ArgNum >= TheCall->getNumArgs()) {
      ArgNum++;
      break;
    }

    Expr *Arg = TheCall->getArg(ArgNum);
    QualType PassedType = Arg->getType();
    QualType StrippedRVType = PassedType.getCanonicalType();

    // Strip Restrict/Volatile qualifiers.
    if (StrippedRVType.isRestrictQualified() ||
        StrippedRVType.isVolatileQualified())
      StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();

    // The only case where the argument type and expected type are allowed to
    // mismatch is if the argument type is a non-void pointer (or array) and
    // expected type is a void pointer.
    if (StrippedRVType != ExpectedType)
      if (!(ExpectedType->isVoidPointerType() &&
            (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
        return Diag(Arg->getBeginLoc(),
                    diag::err_typecheck_convert_incompatible)
               << PassedType << ExpectedType << 1 << 0 << 0;

    // If the value of the Mask is not 0, we have a constraint in the size of
    // the integer argument so here we ensure the argument is a constant that
    // is in the valid range.
    if (Mask != 0 &&
        SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
      return true;

    ArgNum++;
  }

  // In case we exited early from the previous loop, there are other types to
  // read from TypeStr. So we need to read them all to ensure we have the right
  // number of arguments in TheCall and if it is not the case, to display a
  // better error message.
  while (*TypeStr != '\0') {
    (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    ArgNum++;
  }
  if (checkArgCount(*this, TheCall, ArgNum))
    return true;

  return false;
}
9981 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
9982 /// This checks that the target supports __builtin_longjmp and
9983 /// that val is a constant 1.
9984 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
9985 if (!Context.getTargetInfo().hasSjLjLowering())
9986 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
9987 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
9989 Expr *Arg = TheCall->getArg(1);
9990 llvm::APSInt Result;
9992 // TODO: This is less than ideal. Overload this to take a value.
9993 if (SemaBuiltinConstantArg(TheCall, 1, Result))
9994 return true;
9996 if (Result != 1)
9997 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val)
9998 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc());
10000 return false;
10003 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
10004 /// This checks that the target supports __builtin_setjmp.
10005 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
10006 if (!Context.getTargetInfo().hasSjLjLowering())
10007 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
10008 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
10009 return false;
10012 namespace {
10014 class UncoveredArgHandler {
10015 enum { Unknown = -1, AllCovered = -2 };
10017 signed FirstUncoveredArg = Unknown;
10018 SmallVector<const Expr *, 4> DiagnosticExprs;
10020 public:
10021 UncoveredArgHandler() = default;
10023 bool hasUncoveredArg() const {
10024 return (FirstUncoveredArg >= 0);
10027 unsigned getUncoveredArg() const {
10028 assert(hasUncoveredArg() && "no uncovered argument");
10029 return FirstUncoveredArg;
10032 void setAllCovered() {
10033 // A string has been found with all arguments covered, so clear out
10034 // the diagnostics.
10035 DiagnosticExprs.clear();
10036 FirstUncoveredArg = AllCovered;
10039 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
10040 assert(NewFirstUncoveredArg >= 0 && "Outside range");
10042 // Don't update if a previous string covers all arguments.
10043 if (FirstUncoveredArg == AllCovered)
10044 return;
10046 // UncoveredArgHandler tracks the highest uncovered argument index
10047 // and with it all the strings that match this index.
10048 if (NewFirstUncoveredArg == FirstUncoveredArg)
10049 DiagnosticExprs.push_back(StrExpr);
10050 else if (NewFirstUncoveredArg > FirstUncoveredArg) {
10051 DiagnosticExprs.clear();
10052 DiagnosticExprs.push_back(StrExpr);
10053 FirstUncoveredArg = NewFirstUncoveredArg;
10057 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr);
10060 enum StringLiteralCheckType {
10061 SLCT_NotALiteral,
10062 SLCT_UncheckedLiteral,
10063 SLCT_CheckedLiteral
10066 } // namespace
/// Accumulate a signed constant addend into the running offset used when
/// walking pointer arithmetic over a format string, widening the computation
/// on overflow instead of wrapping.
///
/// \param Offset        running offset; updated in place.
/// \param Addend        constant to add or subtract (passed by value; may be
///                      re-signed/extended locally).
/// \param BinOpKind     BO_Add or BO_Sub; BO_Sub is only valid when the
///                      addend is the right-hand operand (ptr - int).
/// \param AddendIsRight whether the addend was the RHS of the operator.
static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
                       BinaryOperatorKind BinOpKind,
                       bool AddendIsRight) {
  unsigned BitWidth = Offset.getBitWidth();
  unsigned AddendBitWidth = Addend.getBitWidth();
  // There might be negative interim results.
  if (Addend.isUnsigned()) {
    // Widen by one bit before re-signing so the value is preserved.
    Addend = Addend.zext(++AddendBitWidth);
    Addend.setIsSigned(true);
  }
  // Adjust the bit width of the APSInts. Both operands must have the same
  // width before the overflow-checked arithmetic below.
  if (AddendBitWidth > BitWidth) {
    Offset = Offset.sext(AddendBitWidth);
    BitWidth = AddendBitWidth;
  } else if (BitWidth > AddendBitWidth) {
    Addend = Addend.sext(BitWidth);
  }

  // Perform the signed, overflow-detecting add/sub into a temporary so that
  // Offset is untouched if we need to retry at a wider bit width.
  bool Ov = false;
  llvm::APSInt ResOffset = Offset;
  if (BinOpKind == BO_Add)
    ResOffset = Offset.sadd_ov(Addend, Ov);
  else {
    assert(AddendIsRight && BinOpKind == BO_Sub &&
           "operator must be add or sub with addend on the right");
    ResOffset = Offset.ssub_ov(Addend, Ov);
  }

  // We add an offset to a pointer here so we should support an offset as big as
  // possible.
  if (Ov) {
    assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
           "index (intermediate) result too big");
    // Double the width and retry; Addend is sign-extended by the recursive
    // call's width-adjustment logic above.
    Offset = Offset.sext(2 * BitWidth);
    sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
    return;
  }

  Offset = ResOffset;
}
namespace {

// This is a wrapper class around StringLiteral to support offsetted string
// literals as format strings. It takes the offset into account when returning
// the string and its length or the source locations to display notes correctly.
class FormatStringLiteral {
  const StringLiteral *FExpr; // Underlying literal (not owned).
  int64_t Offset;             // Number of leading characters to skip.

public:
  FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
      : FExpr(fexpr), Offset(Offset) {}

  // Contents of the literal with the first Offset characters dropped.
  StringRef getString() const {
    return FExpr->getString().drop_front(Offset);
  }

  // Remaining length in bytes; the character offset is scaled by the
  // literal's per-character byte width.
  unsigned getByteLength() const {
    return FExpr->getByteLength() - getCharByteWidth() * Offset;
  }

  // Remaining length in characters.
  unsigned getLength() const { return FExpr->getLength() - Offset; }
  unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }

  StringLiteral::StringKind getKind() const { return FExpr->getKind(); }

  QualType getType() const { return FExpr->getType(); }

  // Kind queries forwarded to the underlying literal.
  bool isAscii() const { return FExpr->isOrdinary(); }
  bool isWide() const { return FExpr->isWide(); }
  bool isUTF8() const { return FExpr->isUTF8(); }
  bool isUTF16() const { return FExpr->isUTF16(); }
  bool isUTF32() const { return FExpr->isUTF32(); }
  bool isPascal() const { return FExpr->isPascal(); }

  // Location of byte ByteNo of the *offsetted* string, i.e. byte
  // ByteNo + Offset of the underlying literal.
  SourceLocation getLocationOfByte(
      unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
      const TargetInfo &Target, unsigned *StartToken = nullptr,
      unsigned *StartTokenByteOffset = nullptr) const {
    return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
                                    StartToken, StartTokenByteOffset);
  }

  // NOTE(review): this advances the literal's begin location by Offset
  // source characters, which is an approximation for escaped or multi-byte
  // source spellings — confirm acceptable for diagnostics.
  SourceLocation getBeginLoc() const LLVM_READONLY {
    return FExpr->getBeginLoc().getLocWithOffset(Offset);
  }

  SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
};

} // namespace
10161 static void CheckFormatString(
10162 Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
10163 ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
10164 unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
10165 bool inFunctionCall, Sema::VariadicCallType CallType,
10166 llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
10167 bool IgnoreStringsWithoutSpecifiers);
10169 static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
10170 const Expr *E);
10172 // Determine if an expression is a string literal or constant string.
10173 // If this function returns false on the arguments to a function expecting a
10174 // format string, we will usually need to emit a warning.
10175 // True string literals are then checked by CheckFormatString.
10176 static StringLiteralCheckType
10177 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
10178 Sema::FormatArgumentPassingKind APK, unsigned format_idx,
10179 unsigned firstDataArg, Sema::FormatStringType Type,
10180 Sema::VariadicCallType CallType, bool InFunctionCall,
10181 llvm::SmallBitVector &CheckedVarArgs,
10182 UncoveredArgHandler &UncoveredArg, llvm::APSInt Offset,
10183 bool IgnoreStringsWithoutSpecifiers = false) {
10184 if (S.isConstantEvaluatedContext())
10185 return SLCT_NotALiteral;
10186 tryAgain:
10187 assert(Offset.isSigned() && "invalid offset");
10189 if (E->isTypeDependent() || E->isValueDependent())
10190 return SLCT_NotALiteral;
10192 E = E->IgnoreParenCasts();
10194 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
10195 // Technically -Wformat-nonliteral does not warn about this case.
10196 // The behavior of printf and friends in this case is implementation
10197 // dependent. Ideally if the format string cannot be null then
10198 // it should have a 'nonnull' attribute in the function prototype.
10199 return SLCT_UncheckedLiteral;
10201 switch (E->getStmtClass()) {
10202 case Stmt::InitListExprClass:
10203 // Handle expressions like {"foobar"}.
10204 if (const clang::Expr *SLE = maybeConstEvalStringLiteral(S.Context, E)) {
10205 return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg,
10206 Type, CallType, /*InFunctionCall*/ false,
10207 CheckedVarArgs, UncoveredArg, Offset,
10208 IgnoreStringsWithoutSpecifiers);
10210 return SLCT_NotALiteral;
10211 case Stmt::BinaryConditionalOperatorClass:
10212 case Stmt::ConditionalOperatorClass: {
10213 // The expression is a literal if both sub-expressions were, and it was
10214 // completely checked only if both sub-expressions were checked.
10215 const AbstractConditionalOperator *C =
10216 cast<AbstractConditionalOperator>(E);
10218 // Determine whether it is necessary to check both sub-expressions, for
10219 // example, because the condition expression is a constant that can be
10220 // evaluated at compile time.
10221 bool CheckLeft = true, CheckRight = true;
10223 bool Cond;
10224 if (C->getCond()->EvaluateAsBooleanCondition(
10225 Cond, S.getASTContext(), S.isConstantEvaluatedContext())) {
10226 if (Cond)
10227 CheckRight = false;
10228 else
10229 CheckLeft = false;
10232 // We need to maintain the offsets for the right and the left hand side
10233 // separately to check if every possible indexed expression is a valid
10234 // string literal. They might have different offsets for different string
10235 // literals in the end.
10236 StringLiteralCheckType Left;
10237 if (!CheckLeft)
10238 Left = SLCT_UncheckedLiteral;
10239 else {
10240 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, APK, format_idx,
10241 firstDataArg, Type, CallType, InFunctionCall,
10242 CheckedVarArgs, UncoveredArg, Offset,
10243 IgnoreStringsWithoutSpecifiers);
10244 if (Left == SLCT_NotALiteral || !CheckRight) {
10245 return Left;
10249 StringLiteralCheckType Right = checkFormatStringExpr(
10250 S, C->getFalseExpr(), Args, APK, format_idx, firstDataArg, Type,
10251 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
10252 IgnoreStringsWithoutSpecifiers);
10254 return (CheckLeft && Left < Right) ? Left : Right;
10257 case Stmt::ImplicitCastExprClass:
10258 E = cast<ImplicitCastExpr>(E)->getSubExpr();
10259 goto tryAgain;
10261 case Stmt::OpaqueValueExprClass:
10262 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
10263 E = src;
10264 goto tryAgain;
10266 return SLCT_NotALiteral;
10268 case Stmt::PredefinedExprClass:
10269 // While __func__, etc., are technically not string literals, they
10270 // cannot contain format specifiers and thus are not a security
10271 // liability.
10272 return SLCT_UncheckedLiteral;
10274 case Stmt::DeclRefExprClass: {
10275 const DeclRefExpr *DR = cast<DeclRefExpr>(E);
10277 // As an exception, do not flag errors for variables binding to
10278 // const string literals.
10279 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
10280 bool isConstant = false;
10281 QualType T = DR->getType();
10283 if (const ArrayType *AT = S.Context.getAsArrayType(T)) {
10284 isConstant = AT->getElementType().isConstant(S.Context);
10285 } else if (const PointerType *PT = T->getAs<PointerType>()) {
10286 isConstant = T.isConstant(S.Context) &&
10287 PT->getPointeeType().isConstant(S.Context);
10288 } else if (T->isObjCObjectPointerType()) {
10289 // In ObjC, there is usually no "const ObjectPointer" type,
10290 // so don't check if the pointee type is constant.
10291 isConstant = T.isConstant(S.Context);
10294 if (isConstant) {
10295 if (const Expr *Init = VD->getAnyInitializer()) {
10296 // Look through initializers like const char c[] = { "foo" }
10297 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
10298 if (InitList->isStringLiteralInit())
10299 Init = InitList->getInit(0)->IgnoreParenImpCasts();
10301 return checkFormatStringExpr(
10302 S, Init, Args, APK, format_idx, firstDataArg, Type, CallType,
10303 /*InFunctionCall*/ false, CheckedVarArgs, UncoveredArg, Offset);
10307 // When the format argument is an argument of this function, and this
10308 // function also has the format attribute, there are several interactions
10309 // for which there shouldn't be a warning. For instance, when calling
10310 // v*printf from a function that has the printf format attribute, we
10311 // should not emit a warning about using `fmt`, even though it's not
10312 // constant, because the arguments have already been checked for the
10313 // caller of `logmessage`:
10315 // __attribute__((format(printf, 1, 2)))
10316 // void logmessage(char const *fmt, ...) {
10317 // va_list ap;
10318 // va_start(ap, fmt);
10319 // vprintf(fmt, ap); /* do not emit a warning about "fmt" */
10320 // ...
10321 // }
10323 // Another interaction that we need to support is calling a variadic
10324 // format function from a format function that has fixed arguments. For
10325 // instance:
10327 // __attribute__((format(printf, 1, 2)))
10328 // void logstring(char const *fmt, char const *str) {
10329 // printf(fmt, str); /* do not emit a warning about "fmt" */
10330 // }
10332 // Same (and perhaps more relatably) for the variadic template case:
10334 // template<typename... Args>
10335 // __attribute__((format(printf, 1, 2)))
10336 // void log(const char *fmt, Args&&... args) {
10337 // printf(fmt, forward<Args>(args)...);
10338 // /* do not emit a warning about "fmt" */
10339 // }
10341 // Due to implementation difficulty, we only check the format, not the
10342 // format arguments, in all cases.
10344 if (const auto *PV = dyn_cast<ParmVarDecl>(VD)) {
10345 if (const auto *D = dyn_cast<Decl>(PV->getDeclContext())) {
10346 for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) {
10347 bool IsCXXMember = false;
10348 if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
10349 IsCXXMember = MD->isInstance();
10351 bool IsVariadic = false;
10352 if (const FunctionType *FnTy = D->getFunctionType())
10353 IsVariadic = cast<FunctionProtoType>(FnTy)->isVariadic();
10354 else if (const auto *BD = dyn_cast<BlockDecl>(D))
10355 IsVariadic = BD->isVariadic();
10356 else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(D))
10357 IsVariadic = OMD->isVariadic();
10359 Sema::FormatStringInfo CallerFSI;
10360 if (Sema::getFormatStringInfo(PVFormat, IsCXXMember, IsVariadic,
10361 &CallerFSI)) {
10362 // We also check if the formats are compatible.
10363 // We can't pass a 'scanf' string to a 'printf' function.
10364 if (PV->getFunctionScopeIndex() == CallerFSI.FormatIdx &&
10365 Type == S.GetFormatStringType(PVFormat)) {
10366 // Lastly, check that argument passing kinds transition in a
10367 // way that makes sense:
10368 // from a caller with FAPK_VAList, allow FAPK_VAList
10369 // from a caller with FAPK_Fixed, allow FAPK_Fixed
10370 // from a caller with FAPK_Fixed, allow FAPK_Variadic
10371 // from a caller with FAPK_Variadic, allow FAPK_VAList
10372 switch (combineFAPK(CallerFSI.ArgPassingKind, APK)) {
10373 case combineFAPK(Sema::FAPK_VAList, Sema::FAPK_VAList):
10374 case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Fixed):
10375 case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Variadic):
10376 case combineFAPK(Sema::FAPK_Variadic, Sema::FAPK_VAList):
10377 return SLCT_UncheckedLiteral;
10386 return SLCT_NotALiteral;
10389 case Stmt::CallExprClass:
10390 case Stmt::CXXMemberCallExprClass: {
10391 const CallExpr *CE = cast<CallExpr>(E);
10392 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
10393 bool IsFirst = true;
10394 StringLiteralCheckType CommonResult;
10395 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
10396 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
10397 StringLiteralCheckType Result = checkFormatStringExpr(
10398 S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
10399 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
10400 IgnoreStringsWithoutSpecifiers);
10401 if (IsFirst) {
10402 CommonResult = Result;
10403 IsFirst = false;
10406 if (!IsFirst)
10407 return CommonResult;
10409 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
10410 unsigned BuiltinID = FD->getBuiltinID();
10411 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
10412 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
10413 const Expr *Arg = CE->getArg(0);
10414 return checkFormatStringExpr(
10415 S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
10416 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
10417 IgnoreStringsWithoutSpecifiers);
10421 if (const Expr *SLE = maybeConstEvalStringLiteral(S.Context, E))
10422 return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg,
10423 Type, CallType, /*InFunctionCall*/ false,
10424 CheckedVarArgs, UncoveredArg, Offset,
10425 IgnoreStringsWithoutSpecifiers);
10426 return SLCT_NotALiteral;
10428 case Stmt::ObjCMessageExprClass: {
10429 const auto *ME = cast<ObjCMessageExpr>(E);
10430 if (const auto *MD = ME->getMethodDecl()) {
10431 if (const auto *FA = MD->getAttr<FormatArgAttr>()) {
10432 // As a special case heuristic, if we're using the method -[NSBundle
10433 // localizedStringForKey:value:table:], ignore any key strings that lack
10434 // format specifiers. The idea is that if the key doesn't have any
10435 // format specifiers then its probably just a key to map to the
10436 // localized strings. If it does have format specifiers though, then its
10437 // likely that the text of the key is the format string in the
10438 // programmer's language, and should be checked.
10439 const ObjCInterfaceDecl *IFace;
10440 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
10441 IFace->getIdentifier()->isStr("NSBundle") &&
10442 MD->getSelector().isKeywordSelector(
10443 {"localizedStringForKey", "value", "table"})) {
10444 IgnoreStringsWithoutSpecifiers = true;
10447 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
10448 return checkFormatStringExpr(
10449 S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
10450 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
10451 IgnoreStringsWithoutSpecifiers);
10455 return SLCT_NotALiteral;
10457 case Stmt::ObjCStringLiteralClass:
10458 case Stmt::StringLiteralClass: {
10459 const StringLiteral *StrE = nullptr;
10461 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
10462 StrE = ObjCFExpr->getString();
10463 else
10464 StrE = cast<StringLiteral>(E);
10466 if (StrE) {
10467 if (Offset.isNegative() || Offset > StrE->getLength()) {
10468 // TODO: It would be better to have an explicit warning for out of
10469 // bounds literals.
10470 return SLCT_NotALiteral;
10472 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
10473 CheckFormatString(S, &FStr, E, Args, APK, format_idx, firstDataArg, Type,
10474 InFunctionCall, CallType, CheckedVarArgs, UncoveredArg,
10475 IgnoreStringsWithoutSpecifiers);
10476 return SLCT_CheckedLiteral;
10479 return SLCT_NotALiteral;
10481 case Stmt::BinaryOperatorClass: {
10482 const BinaryOperator *BinOp = cast<BinaryOperator>(E);
10484 // A string literal + an int offset is still a string literal.
10485 if (BinOp->isAdditiveOp()) {
10486 Expr::EvalResult LResult, RResult;
10488 bool LIsInt = BinOp->getLHS()->EvaluateAsInt(
10489 LResult, S.Context, Expr::SE_NoSideEffects,
10490 S.isConstantEvaluatedContext());
10491 bool RIsInt = BinOp->getRHS()->EvaluateAsInt(
10492 RResult, S.Context, Expr::SE_NoSideEffects,
10493 S.isConstantEvaluatedContext());
10495 if (LIsInt != RIsInt) {
10496 BinaryOperatorKind BinOpKind = BinOp->getOpcode();
10498 if (LIsInt) {
10499 if (BinOpKind == BO_Add) {
10500 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt);
10501 E = BinOp->getRHS();
10502 goto tryAgain;
10504 } else {
10505 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt);
10506 E = BinOp->getLHS();
10507 goto tryAgain;
10512 return SLCT_NotALiteral;
10514 case Stmt::UnaryOperatorClass: {
10515 const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
10516 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
10517 if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
10518 Expr::EvalResult IndexResult;
10519 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context,
10520 Expr::SE_NoSideEffects,
10521 S.isConstantEvaluatedContext())) {
10522 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add,
10523 /*RHS is int*/ true);
10524 E = ASE->getBase();
10525 goto tryAgain;
10529 return SLCT_NotALiteral;
10532 default:
10533 return SLCT_NotALiteral;
10537 // If this expression can be evaluated at compile-time,
10538 // check if the result is a StringLiteral and return it
10539 // otherwise return nullptr
10540 static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
10541 const Expr *E) {
10542 Expr::EvalResult Result;
10543 if (E->EvaluateAsRValue(Result, Context) && Result.Val.isLValue()) {
10544 const auto *LVE = Result.Val.getLValueBase().dyn_cast<const Expr *>();
10545 if (isa_and_nonnull<StringLiteral>(LVE))
10546 return LVE;
10548 return nullptr;
10551 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
10552 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
10553 .Case("scanf", FST_Scanf)
10554 .Cases("printf", "printf0", FST_Printf)
10555 .Cases("NSString", "CFString", FST_NSString)
10556 .Case("strftime", FST_Strftime)
10557 .Case("strfmon", FST_Strfmon)
10558 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
10559 .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
10560 .Case("os_trace", FST_OSLog)
10561 .Case("os_log", FST_OSLog)
10562 .Default(FST_Unknown);
10565 /// CheckFormatArguments - Check calls to printf and scanf (and similar
10566 /// functions) for correct use of format strings.
10567 /// Returns true if a format string has been fully checked.
10568 bool Sema::CheckFormatArguments(const FormatAttr *Format,
10569 ArrayRef<const Expr *> Args, bool IsCXXMember,
10570 VariadicCallType CallType, SourceLocation Loc,
10571 SourceRange Range,
10572 llvm::SmallBitVector &CheckedVarArgs) {
10573 FormatStringInfo FSI;
10574 if (getFormatStringInfo(Format, IsCXXMember, CallType != VariadicDoesNotApply,
10575 &FSI))
10576 return CheckFormatArguments(Args, FSI.ArgPassingKind, FSI.FormatIdx,
10577 FSI.FirstDataArg, GetFormatStringType(Format),
10578 CallType, Loc, Range, CheckedVarArgs);
10579 return false;
/// Core format-checking entry point: validates the format-string argument at
/// \p format_idx of \p Args and, where possible, the data arguments against
/// it. Returns true only when a literal format string was fully checked.
bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
                                Sema::FormatArgumentPassingKind APK,
                                unsigned format_idx, unsigned firstDataArg,
                                FormatStringType Type,
                                VariadicCallType CallType, SourceLocation Loc,
                                SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  // CHECK: printf/scanf-like function is called with no format string.
  if (format_idx >= Args.size()) {
    Diag(Loc, diag::warn_missing_format_string) << Range;
    return false;
  }

  const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();

  // CHECK: format string is not a string literal.
  //
  // Dynamically generated format strings are difficult to
  // automatically vet at compile time. Requiring that format strings
  // are string literals: (1) permits the checking of format strings by
  // the compiler and thereby (2) can practically remove the source of
  // many format string exploits.

  // Format string can be either ObjC string (e.g. @"%d") or
  // C string (e.g. "%d")
  // ObjC string uses the same format specifiers as C string, so we can use
  // the same format string checking logic for both ObjC and C strings.
  UncoveredArgHandler UncoveredArg;
  StringLiteralCheckType CT = checkFormatStringExpr(
      *this, OrigFormatExpr, Args, APK, format_idx, firstDataArg, Type,
      CallType,
      /*IsFunctionCall*/ true, CheckedVarArgs, UncoveredArg,
      /*no string offset*/ llvm::APSInt(64, false) = 0);

  // Generate a diagnostic where an uncovered argument is detected.
  if (UncoveredArg.hasUncoveredArg()) {
    unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg;
    assert(ArgIdx < Args.size() && "ArgIdx outside bounds");
    UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]);
  }

  if (CT != SLCT_NotALiteral)
    // Literal format string found, check done!
    return CT == SLCT_CheckedLiteral;

  // Strftime is particular as it always uses a single 'time' argument,
  // so it is safe to pass a non-literal string.
  if (Type == FST_Strftime)
    return false;

  // Do not emit diag when the string param is a macro expansion and the
  // format is either NSString or CFString. This is a hack to prevent
  // diag when using the NSLocalizedString and CFCopyLocalizedString macros
  // which are usually used in place of NS and CF string literals.
  SourceLocation FormatLoc = Args[format_idx]->getBeginLoc();
  if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc))
    return false;

  // If there are no arguments specified, warn with -Wformat-security, otherwise
  // warn only with -Wformat-nonliteral.
  if (Args.size() == firstDataArg) {
    Diag(FormatLoc, diag::warn_format_nonliteral_noargs)
        << OrigFormatExpr->getSourceRange();
    switch (Type) {
    default:
      break;
    case FST_Kprintf:
    case FST_FreeBSDKPrintf:
    case FST_Printf:
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
      break;
    case FST_NSString:
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", ");
      break;
    }
  } else {
    Diag(FormatLoc, diag::warn_format_nonliteral)
        << OrigFormatExpr->getSourceRange();
  }
  return false;
}
10666 namespace {
/// Base class for the printf/scanf format-string checkers. It receives
/// callbacks from the format-string parser and turns them into Sema
/// diagnostics, while tracking which data arguments have been consumed
/// by specifiers.
class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
protected:
  Sema &S;
  const FormatStringLiteral *FExpr;
  const Expr *OrigFormatExpr;
  const Sema::FormatStringType FSType;
  const unsigned FirstDataArg;  // Index into Args of the first data argument.
  const unsigned NumDataArgs;   // Number of data arguments after the format.
  const char *Beg; // Start of format string.
  const Sema::FormatArgumentPassingKind ArgPassingKind;
  ArrayRef<const Expr *> Args;  // All arguments of the checked call.
  unsigned FormatIdx;           // Index of the format-string argument.
  // One bit per data argument; set when a specifier consumes that argument.
  llvm::SmallBitVector CoveredArgs;
  bool usesPositionalArgs = false;
  bool atFirstArg = true;
  bool inFunctionCall;
  Sema::VariadicCallType CallType;
  llvm::SmallBitVector &CheckedVarArgs;
  UncoveredArgHandler &UncoveredArg;

public:
  CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, const char *beg,
                     Sema::FormatArgumentPassingKind APK,
                     ArrayRef<const Expr *> Args, unsigned formatIdx,
                     bool inFunctionCall, Sema::VariadicCallType callType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
        FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
        ArgPassingKind(APK), Args(Args), FormatIdx(formatIdx),
        inFunctionCall(inFunctionCall), CallType(callType),
        CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
    // Start with no data argument covered.
    CoveredArgs.resize(numDataArgs);
    CoveredArgs.reset();
  }

  void DoneProcessing();

  void HandleIncompleteSpecifier(const char *startSpecifier,
                                 unsigned specifierLen) override;

  void HandleInvalidLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen,
      unsigned DiagID);

  void HandleNonStandardLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const char *startSpecifier, unsigned specifierLen);

  void HandleNonStandardConversionSpecifier(
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen);

  void HandlePosition(const char *startPos, unsigned posLen) override;

  void HandleInvalidPosition(const char *startSpecifier,
                             unsigned specifierLen,
                             analyze_format_string::PositionContext p) override;

  void HandleZeroPosition(const char *startPos, unsigned posLen) override;

  void HandleNullChar(const char *nullCharacter) override;

  template <typename Range>
  static void
  EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr,
                       const PartialDiagnostic &PDiag, SourceLocation StringLoc,
                       bool IsStringLocation, Range StringRange,
                       ArrayRef<FixItHint> Fixit = std::nullopt);

protected:
  bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
                                        const char *startSpec,
                                        unsigned specifierLen,
                                        const char *csStart, unsigned csLen);

  void HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                         const char *startSpec,
                                         unsigned specifierLen);

  SourceRange getFormatStringRange();
  CharSourceRange getSpecifierRange(const char *startSpecifier,
                                    unsigned specifierLen);
  SourceLocation getLocationOfByte(const char *x);

  const Expr *getDataArg(unsigned i) const;

  bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS,
                    const analyze_format_string::ConversionSpecifier &CS,
                    const char *startSpecifier, unsigned specifierLen,
                    unsigned argIndex);

  template <typename Range>
  void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
                            bool IsStringLocation, Range StringRange,
                            ArrayRef<FixItHint> Fixit = std::nullopt);
};
10771 } // namespace
/// Source range covering the entire format-string expression.
SourceRange CheckFormatHandler::getFormatStringRange() {
  return OrigFormatExpr->getSourceRange();
}
10777 CharSourceRange CheckFormatHandler::
10778 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
10779 SourceLocation Start = getLocationOfByte(startSpecifier);
10780 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1);
10782 // Advance the end SourceLocation by one due to half-open ranges.
10783 End = End.getLocWithOffset(1);
10785 return CharSourceRange::getCharRange(Start, End);
/// Translate a pointer into the format buffer (offset relative to Beg) back
/// to the corresponding source location in the literal.
SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
  return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(),
                                  S.getLangOpts(), S.Context.getTargetInfo());
}
10793 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
10794 unsigned specifierLen){
10795 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
10796 getLocationOfByte(startSpecifier),
10797 /*IsStringLocation*/true,
10798 getSpecifierRange(startSpecifier, specifierLen));
/// Diagnose a length modifier that is invalid with the given conversion
/// specifier; \p DiagID selects the concrete warning. When a corrected
/// modifier is known, a note with a replacement fixit is attached; for the
/// "nonsensical length" warning with no known fix, removal is offered.
void CheckFormatHandler::HandleInvalidLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
  if (FixedLM) {
    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
  } else {
    FixItHint Hint;
    if (DiagID == diag::warn_format_nonsensical_length)
      Hint = FixItHint::CreateRemoval(LMRange);

    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen),
                         Hint);
  }
}
10835 void CheckFormatHandler::HandleNonStandardLengthModifier(
10836 const analyze_format_string::FormatSpecifier &FS,
10837 const char *startSpecifier, unsigned specifierLen) {
10838 using namespace analyze_format_string;
10840 const LengthModifier &LM = FS.getLengthModifier();
10841 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());
10843 // See if we know how to fix this length modifier.
10844 std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
10845 if (FixedLM) {
10846 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
10847 << LM.toString() << 0,
10848 getLocationOfByte(LM.getStart()),
10849 /*IsStringLocation*/true,
10850 getSpecifierRange(startSpecifier, specifierLen));
10852 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
10853 << FixedLM->toString()
10854 << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
10856 } else {
10857 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
10858 << LM.toString() << 0,
10859 getLocationOfByte(LM.getStart()),
10860 /*IsStringLocation*/true,
10861 getSpecifierRange(startSpecifier, specifierLen));
10865 void CheckFormatHandler::HandleNonStandardConversionSpecifier(
10866 const analyze_format_string::ConversionSpecifier &CS,
10867 const char *startSpecifier, unsigned specifierLen) {
10868 using namespace analyze_format_string;
10870 // See if we know how to fix this conversion specifier.
10871 std::optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
10872 if (FixedCS) {
10873 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
10874 << CS.toString() << /*conversion specifier*/1,
10875 getLocationOfByte(CS.getStart()),
10876 /*IsStringLocation*/true,
10877 getSpecifierRange(startSpecifier, specifierLen));
10879 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength());
10880 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
10881 << FixedCS->toString()
10882 << FixItHint::CreateReplacement(CSRange, FixedCS->toString());
10883 } else {
10884 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
10885 << CS.toString() << /*conversion specifier*/1,
10886 getLocationOfByte(CS.getStart()),
10887 /*IsStringLocation*/true,
10888 getSpecifierRange(startSpecifier, specifierLen));
/// Warn that a positional argument specifier is a non-standard extension.
void CheckFormatHandler::HandlePosition(const char *startPos,
                                        unsigned posLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
                       getLocationOfByte(startPos),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startPos, posLen));
}
/// Diagnose a positional specifier used in a context where it is not valid;
/// the context kind \p p is forwarded into the diagnostic.
void CheckFormatHandler::HandleInvalidPosition(
    const char *startSpecifier, unsigned specifierLen,
    analyze_format_string::PositionContext p) {
  EmitFormatDiagnostic(
      S.PDiag(diag::warn_format_invalid_positional_specifier) << (unsigned)p,
      getLocationOfByte(startSpecifier), /*IsStringLocation*/ true,
      getSpecifierRange(startSpecifier, specifierLen));
}
/// Diagnose an explicit argument position of zero in a positional specifier.
void CheckFormatHandler::HandleZeroPosition(const char *startPos,
                                            unsigned posLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
                       getLocationOfByte(startPos),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startPos, posLen));
}
10917 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
10918 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) {
10919 // The presence of a null character is likely an error.
10920 EmitFormatDiagnostic(
10921 S.PDiag(diag::warn_printf_format_string_contains_null_char),
10922 getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
10923 getFormatStringRange());
/// Return the i-th data argument (0-based, counted past the format string).
// Note that this may return NULL if there was an error parsing or building
// one of the argument expressions.
const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
  return Args[FirstDataArg + i];
}
/// Called when the whole format string has been parsed: report (via
/// UncoveredArg) the first data argument no specifier consumed, or mark all
/// arguments covered. Skipped entirely for va_list-style calls, where
/// individual data arguments are not visible.
void CheckFormatHandler::DoneProcessing() {
  // Does the number of data arguments exceed the number of
  // format conversions in the format string?
  if (ArgPassingKind != Sema::FAPK_VAList) {
    // Find any arguments that weren't covered.
    CoveredArgs.flip();
    signed notCoveredArg = CoveredArgs.find_first();
    if (notCoveredArg >= 0) {
      assert((unsigned)notCoveredArg < NumDataArgs);
      UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
    } else {
      UncoveredArg.setAllCovered();
    }
  }
}
10949 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
10950 const Expr *ArgExpr) {
10951 assert(hasUncoveredArg() && !DiagnosticExprs.empty() &&
10952 "Invalid state");
10954 if (!ArgExpr)
10955 return;
10957 SourceLocation Loc = ArgExpr->getBeginLoc();
10959 if (S.getSourceManager().isInSystemMacro(Loc))
10960 return;
10962 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
10963 for (auto E : DiagnosticExprs)
10964 PDiag << E->getSourceRange();
10966 CheckFormatHandler::EmitFormatDiagnostic(
10967 S, IsFunctionCall, DiagnosticExprs[0],
10968 PDiag, Loc, /*IsStringLocation*/false,
10969 DiagnosticExprs[0]->getSourceRange());
/// Diagnose an unrecognized conversion specifier. Returns whether parsing of
/// the rest of the format string should continue.
bool
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     SourceLocation Loc,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
                                                     unsigned csLen) {
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // make sense.
    CoveredArgs.set(argIndex);
  }
  else {
    // If argIndex exceeds the number of data arguments we
    // don't issue a warning because that is just a cascade of warnings (and
    // they may have intended '%%' anyway). We don't want to continue processing
    // the format string after this point, however, as we will likely just get
    // gibberish when trying to match arguments.
    keepGoing = false;
  }

  StringRef Specifier(csStart, csLen);

  // If the specifier is non-printable, it could be the first byte of a UTF-8
  // sequence. In that case, print the UTF-8 code point. If not, print the byte
  // hex value.
  std::string CodePointStr;
  if (!llvm::sys::locale::isPrint(*csStart)) {
    llvm::UTF32 CodePoint;
    const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
    const llvm::UTF8 *E =
        reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
    llvm::ConversionResult Result =
        llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion);

    // Fall back to the raw byte value when the bytes do not form a valid
    // UTF-8 sequence.
    if (Result != llvm::conversionOK) {
      unsigned char FirstChar = *csStart;
      CodePoint = (llvm::UTF32)FirstChar;
    }

    llvm::raw_string_ostream OS(CodePointStr);
    if (CodePoint < 256)
      OS << "\\x" << llvm::format("%02x", CodePoint);
    else if (CodePoint <= 0xFFFF)
      OS << "\\u" << llvm::format("%04x", CodePoint);
    else
      OS << "\\U" << llvm::format("%08x", CodePoint);
    OS.flush();
    Specifier = CodePointStr;
  }

  EmitFormatDiagnostic(
      S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
      /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));

  return keepGoing;
}
/// Diagnose a format string that mixes positional and non-positional
/// argument specifiers.
void
CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                                      const char *startSpec,
                                                      unsigned specifierLen) {
  EmitFormatDiagnostic(
      S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
      Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
}
11040 bool
11041 CheckFormatHandler::CheckNumArgs(
11042 const analyze_format_string::FormatSpecifier &FS,
11043 const analyze_format_string::ConversionSpecifier &CS,
11044 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {
11046 if (argIndex >= NumDataArgs) {
11047 PartialDiagnostic PDiag = FS.usesPositionalArg()
11048 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
11049 << (argIndex+1) << NumDataArgs)
11050 : S.PDiag(diag::warn_printf_insufficient_data_args);
11051 EmitFormatDiagnostic(
11052 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
11053 getSpecifierRange(startSpecifier, specifierLen));
11055 // Since more arguments than conversion tokens are given, by extension
11056 // all arguments are covered, so mark this as so.
11057 UncoveredArg.setAllCovered();
11058 return false;
11060 return true;
/// Convenience wrapper: forwards to the static overload, supplying this
/// handler's call context (inFunctionCall and the format-string argument).
template<typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
                                              SourceLocation Loc,
                                              bool IsStringLocation,
                                              Range StringRange,
                                              ArrayRef<FixItHint> FixIt) {
  EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
                       Loc, IsStringLocation, StringRange, FixIt);
}
11073 /// If the format string is not within the function call, emit a note
11074 /// so that the function call and string are in diagnostic messages.
11076 /// \param InFunctionCall if true, the format string is within the function
11077 /// call and only one diagnostic message will be produced. Otherwise, an
11078 /// extra note will be emitted pointing to location of the format string.
11080 /// \param ArgumentExpr the expression that is passed as the format string
11081 /// argument in the function call. Used for getting locations when two
11082 /// diagnostics are emitted.
11084 /// \param PDiag the callee should already have provided any strings for the
11085 /// diagnostic message. This function only adds locations and fixits
11086 /// to diagnostics.
11088 /// \param Loc primary location for diagnostic. If two diagnostics are
11089 /// required, one will be at Loc and a new SourceLocation will be created for
11090 /// the other one.
/// \param IsStringLocation if true, Loc points to the format string and
/// should be used for the note. Otherwise, Loc points to the argument list
/// and will be used with PDiag.
11096 /// \param StringRange some or all of the string to highlight. This is
11097 /// templated so it can accept either a CharSourceRange or a SourceRange.
11099 /// \param FixIt optional fix it hint for the format string.
template <typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(
    Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
    const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
    Range StringRange, ArrayRef<FixItHint> FixIt) {
  if (InFunctionCall) {
    // Single diagnostic: the format string is part of the call being checked.
    const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
    D << StringRange;
    D << FixIt;
  } else {
    // Primary diagnostic at the argument, plus a note pointing at where the
    // format string is defined.
    S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
        << ArgumentExpr->getSourceRange();

    const Sema::SemaDiagnosticBuilder &Note =
        S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
               diag::note_format_string_defined);

    Note << StringRange;
    Note << FixIt;
  }
}
11122 //===--- CHECK: Printf format string checking ------------------------------===//
11124 namespace {
/// Checker for printf-family format strings: validates each printf
/// conversion specifier against the corresponding data argument.
class CheckPrintfHandler : public CheckFormatHandler {
public:
  CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, bool isObjC, const char *beg,
                     Sema::FormatArgumentPassingKind APK,
                     ArrayRef<const Expr *> Args, unsigned formatIdx,
                     bool inFunctionCall, Sema::VariadicCallType CallType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, APK, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  // True when checking an NSString-style (ObjC) format string.
  bool isObjCContext() const { return FSType == Sema::FST_NSString; }

  /// Returns true if '%@' specifiers are allowed in the format string.
  bool allowsObjCArg() const {
    return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog ||
           FSType == Sema::FST_OSTrace;
  }

  bool HandleInvalidPrintfConversionSpecifier(
      const analyze_printf::PrintfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  void handleInvalidMaskType(StringRef MaskType) override;

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *startSpecifier, unsigned specifierLen,
                             const TargetInfo &Target) override;
  bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
                       const char *StartSpecifier,
                       unsigned SpecifierLen,
                       const Expr *E);

  bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k,
                    const char *startSpecifier, unsigned specifierLen);
  void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
                           const analyze_printf::OptionalAmount &Amt,
                           unsigned type,
                           const char *startSpecifier, unsigned specifierLen);
  void HandleFlag(const analyze_printf::PrintfSpecifier &FS,
                  const analyze_printf::OptionalFlag &flag,
                  const char *startSpecifier, unsigned specifierLen);
  void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS,
                         const analyze_printf::OptionalFlag &ignoredFlag,
                         const analyze_printf::OptionalFlag &flag,
                         const char *startSpecifier, unsigned specifierLen);
  bool checkForCStrMembers(const analyze_printf::ArgType &AT,
                           const Expr *E);

  void HandleEmptyObjCModifierFlag(const char *startFlag,
                                   unsigned flagLen) override;

  void HandleInvalidObjCModifierFlag(const char *startFlag,
                                     unsigned flagLen) override;

  void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart,
                                            const char *flagsEnd,
                                            const char *conversionPosition)
                                            override;
};
11193 } // namespace
11195 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
11196 const analyze_printf::PrintfSpecifier &FS,
11197 const char *startSpecifier,
11198 unsigned specifierLen) {
11199 const analyze_printf::PrintfConversionSpecifier &CS =
11200 FS.getConversionSpecifier();
11202 return HandleInvalidConversionSpecifier(FS.getArgIndex(),
11203 getLocationOfByte(CS.getStart()),
11204 startSpecifier, specifierLen,
11205 CS.getStart(), CS.getLength());
/// Report an invalid mask-type size at the location of the mask-type text
/// within the format string.
void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
  S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
}
/// Check an optional width/precision amount (\p k distinguishes which). When
/// the amount is supplied via a '*' data argument, verify the argument exists
/// and has an acceptable integer type. Returns false when checking of this
/// specifier should stop.
bool CheckPrintfHandler::HandleAmount(
    const analyze_format_string::OptionalAmount &Amt, unsigned k,
    const char *startSpecifier, unsigned specifierLen) {
  if (Amt.hasDataArgument()) {
    // With a va_list the individual arguments are not visible, so the '*'
    // argument cannot be checked.
    if (ArgPassingKind != Sema::FAPK_VAList) {
      unsigned argIndex = Amt.getArgIndex();
      if (argIndex >= NumDataArgs) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
                                 << k,
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/ true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking. We will just emit
        // spurious errors.
        return false;
      }

      // Type check the data argument. It should be an 'int'.
      // Although not in conformance with C99, we also allow the argument to be
      // an 'unsigned int' as that is a reasonably safe case. GCC also
      // doesn't emit a warning for that case.
      CoveredArgs.set(argIndex);
      const Expr *Arg = getDataArg(argIndex);
      if (!Arg)
        return false;

      QualType T = Arg->getType();

      const analyze_printf::ArgType &AT = Amt.getArgType(S.Context);
      assert(AT.isValid());

      if (!AT.matchesType(S.Context, T)) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
                                 << k << AT.getRepresentativeTypeName(S.Context)
                                 << T << Arg->getSourceRange(),
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking. We will just emit
        // spurious errors.
        return false;
      }
    }
  }
  return true;
}
11259 void CheckPrintfHandler::HandleInvalidAmount(
11260 const analyze_printf::PrintfSpecifier &FS,
11261 const analyze_printf::OptionalAmount &Amt,
11262 unsigned type,
11263 const char *startSpecifier,
11264 unsigned specifierLen) {
11265 const analyze_printf::PrintfConversionSpecifier &CS =
11266 FS.getConversionSpecifier();
11268 FixItHint fixit =
11269 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
11270 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(),
11271 Amt.getConstantLength()))
11272 : FixItHint();
11274 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
11275 << type << CS.toString(),
11276 getLocationOfByte(Amt.getStart()),
11277 /*IsStringLocation*/true,
11278 getSpecifierRange(startSpecifier, specifierLen),
11279 fixit);
11282 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
11283 const analyze_printf::OptionalFlag &flag,
11284 const char *startSpecifier,
11285 unsigned specifierLen) {
11286 // Warn about pointless flag with a fixit removal.
11287 const analyze_printf::PrintfConversionSpecifier &CS =
11288 FS.getConversionSpecifier();
11289 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
11290 << flag.toString() << CS.toString(),
11291 getLocationOfByte(flag.getPosition()),
11292 /*IsStringLocation*/true,
11293 getSpecifierRange(startSpecifier, specifierLen),
11294 FixItHint::CreateRemoval(
11295 getSpecifierRange(flag.getPosition(), 1)));
11298 void CheckPrintfHandler::HandleIgnoredFlag(
11299 const analyze_printf::PrintfSpecifier &FS,
11300 const analyze_printf::OptionalFlag &ignoredFlag,
11301 const analyze_printf::OptionalFlag &flag,
11302 const char *startSpecifier,
11303 unsigned specifierLen) {
11304 // Warn about ignored flag with a fixit removal.
11305 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
11306 << ignoredFlag.toString() << flag.toString(),
11307 getLocationOfByte(ignoredFlag.getPosition()),
11308 /*IsStringLocation*/true,
11309 getSpecifierRange(startSpecifier, specifierLen),
11310 FixItHint::CreateRemoval(
11311 getSpecifierRange(ignoredFlag.getPosition(), 1)));
11314 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag,
11315 unsigned flagLen) {
11316 // Warn about an empty flag.
11317 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag),
11318 getLocationOfByte(startFlag),
11319 /*IsStringLocation*/true,
11320 getSpecifierRange(startFlag, flagLen));
11323 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag,
11324 unsigned flagLen) {
11325 // Warn about an invalid flag.
11326 auto Range = getSpecifierRange(startFlag, flagLen);
11327 StringRef flag(startFlag, flagLen);
11328 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
11329 getLocationOfByte(startFlag),
11330 /*IsStringLocation*/true,
11331 Range, FixItHint::CreateRemoval(Range));
11334 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
11335 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
11336 // Warn about using '[...]' without a '@' conversion.
11337 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
11338 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
11339 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
11340 getLocationOfByte(conversionPosition),
11341 /*IsStringLocation*/true,
11342 Range, FixItHint::CreateRemoval(Range));
11345 // Determines if the specified is a C++ class or struct containing
11346 // a member with the specified name and kind (e.g. a CXXMethodDecl named
11347 // "c_str()").
11348 template<typename MemberKind>
11349 static llvm::SmallPtrSet<MemberKind*, 1>
11350 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
11351 const RecordType *RT = Ty->getAs<RecordType>();
11352 llvm::SmallPtrSet<MemberKind*, 1> Results;
11354 if (!RT)
11355 return Results;
11356 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
11357 if (!RD || !RD->getDefinition())
11358 return Results;
11360 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
11361 Sema::LookupMemberName);
11362 R.suppressDiagnostics();
11364 // We just need to include all members of the right kind turned up by the
11365 // filter, at this point.
11366 if (S.LookupQualifiedName(R, RT->getDecl()))
11367 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
11368 NamedDecl *decl = (*I)->getUnderlyingDecl();
11369 if (MemberKind *FK = dyn_cast<MemberKind>(decl))
11370 Results.insert(FK);
11372 return Results;
11375 /// Check if we could call '.c_str()' on an object.
11377 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
11378 /// allow the call, or if it would be ambiguous).
11379 bool Sema::hasCStrMethod(const Expr *E) {
11380 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
11382 MethodSet Results =
11383 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
11384 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
11385 MI != ME; ++MI)
11386 if ((*MI)->getMinRequiredArguments() == 0)
11387 return true;
11388 return false;
11391 // Check if a (w)string was passed when a (w)char* was needed, and offer a
11392 // better diagnostic if so. AT is assumed to be valid.
11393 // Returns true when a c_str() conversion method is found.
11394 bool CheckPrintfHandler::checkForCStrMembers(
11395 const analyze_printf::ArgType &AT, const Expr *E) {
11396 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
11398 MethodSet Results =
11399 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());
11401 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
11402 MI != ME; ++MI) {
11403 const CXXMethodDecl *Method = *MI;
11404 if (Method->getMinRequiredArguments() == 0 &&
11405 AT.matchesType(S.Context, Method->getReturnType())) {
11406 // FIXME: Suggest parens if the expression needs them.
11407 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc());
11408 S.Diag(E->getBeginLoc(), diag::note_printf_c_str)
11409 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()");
11410 return true;
11414 return false;
11417 bool CheckPrintfHandler::HandlePrintfSpecifier(
11418 const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier,
11419 unsigned specifierLen, const TargetInfo &Target) {
11420 using namespace analyze_format_string;
11421 using namespace analyze_printf;
11423 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();
11425 if (FS.consumesDataArgument()) {
11426 if (atFirstArg) {
11427 atFirstArg = false;
11428 usesPositionalArgs = FS.usesPositionalArg();
11430 else if (usesPositionalArgs != FS.usesPositionalArg()) {
11431 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
11432 startSpecifier, specifierLen);
11433 return false;
11437 // First check if the field width, precision, and conversion specifier
11438 // have matching data arguments.
11439 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
11440 startSpecifier, specifierLen)) {
11441 return false;
11444 if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
11445 startSpecifier, specifierLen)) {
11446 return false;
11449 if (!CS.consumesDataArgument()) {
11450 // FIXME: Technically specifying a precision or field width here
11451 // makes no sense. Worth issuing a warning at some point.
11452 return true;
11455 // Consume the argument.
11456 unsigned argIndex = FS.getArgIndex();
11457 if (argIndex < NumDataArgs) {
11458 // The check to see if the argIndex is valid will come later.
11459 // We set the bit here because we may exit early from this
11460 // function if we encounter some other error.
11461 CoveredArgs.set(argIndex);
11464 // FreeBSD kernel extensions.
11465 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
11466 CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
11467 // We need at least two arguments.
11468 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1))
11469 return false;
11471 // Claim the second argument.
11472 CoveredArgs.set(argIndex + 1);
11474 // Type check the first argument (int for %b, pointer for %D)
11475 const Expr *Ex = getDataArg(argIndex);
11476 const analyze_printf::ArgType &AT =
11477 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ?
11478 ArgType(S.Context.IntTy) : ArgType::CPointerTy;
11479 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
11480 EmitFormatDiagnostic(
11481 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
11482 << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
11483 << false << Ex->getSourceRange(),
11484 Ex->getBeginLoc(), /*IsStringLocation*/ false,
11485 getSpecifierRange(startSpecifier, specifierLen));
11487 // Type check the second argument (char * for both %b and %D)
11488 Ex = getDataArg(argIndex + 1);
11489 const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
11490 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
11491 EmitFormatDiagnostic(
11492 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
11493 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
11494 << false << Ex->getSourceRange(),
11495 Ex->getBeginLoc(), /*IsStringLocation*/ false,
11496 getSpecifierRange(startSpecifier, specifierLen));
11498 return true;
11501 // Check for using an Objective-C specific conversion specifier
11502 // in a non-ObjC literal.
11503 if (!allowsObjCArg() && CS.isObjCArg()) {
11504 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
11505 specifierLen);
11508 // %P can only be used with os_log.
11509 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) {
11510 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
11511 specifierLen);
11514 // %n is not allowed with os_log.
11515 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) {
11516 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg),
11517 getLocationOfByte(CS.getStart()),
11518 /*IsStringLocation*/ false,
11519 getSpecifierRange(startSpecifier, specifierLen));
11521 return true;
11524 // Only scalars are allowed for os_trace.
11525 if (FSType == Sema::FST_OSTrace &&
11526 (CS.getKind() == ConversionSpecifier::PArg ||
11527 CS.getKind() == ConversionSpecifier::sArg ||
11528 CS.getKind() == ConversionSpecifier::ObjCObjArg)) {
11529 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
11530 specifierLen);
11533 // Check for use of public/private annotation outside of os_log().
11534 if (FSType != Sema::FST_OSLog) {
11535 if (FS.isPublic().isSet()) {
11536 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
11537 << "public",
11538 getLocationOfByte(FS.isPublic().getPosition()),
11539 /*IsStringLocation*/ false,
11540 getSpecifierRange(startSpecifier, specifierLen));
11542 if (FS.isPrivate().isSet()) {
11543 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
11544 << "private",
11545 getLocationOfByte(FS.isPrivate().getPosition()),
11546 /*IsStringLocation*/ false,
11547 getSpecifierRange(startSpecifier, specifierLen));
11551 const llvm::Triple &Triple = Target.getTriple();
11552 if (CS.getKind() == ConversionSpecifier::nArg &&
11553 (Triple.isAndroid() || Triple.isOSFuchsia())) {
11554 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported),
11555 getLocationOfByte(CS.getStart()),
11556 /*IsStringLocation*/ false,
11557 getSpecifierRange(startSpecifier, specifierLen));
11560 // Check for invalid use of field width
11561 if (!FS.hasValidFieldWidth()) {
11562 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
11563 startSpecifier, specifierLen);
11566 // Check for invalid use of precision
11567 if (!FS.hasValidPrecision()) {
11568 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
11569 startSpecifier, specifierLen);
11572 // Precision is mandatory for %P specifier.
11573 if (CS.getKind() == ConversionSpecifier::PArg &&
11574 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) {
11575 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision),
11576 getLocationOfByte(startSpecifier),
11577 /*IsStringLocation*/ false,
11578 getSpecifierRange(startSpecifier, specifierLen));
11581 // Check each flag does not conflict with any other component.
11582 if (!FS.hasValidThousandsGroupingPrefix())
11583 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
11584 if (!FS.hasValidLeadingZeros())
11585 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
11586 if (!FS.hasValidPlusPrefix())
11587 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
11588 if (!FS.hasValidSpacePrefix())
11589 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
11590 if (!FS.hasValidAlternativeForm())
11591 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
11592 if (!FS.hasValidLeftJustified())
11593 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);
11595 // Check that flags are not ignored by another flag
11596 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
11597 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
11598 startSpecifier, specifierLen);
11599 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
11600 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
11601 startSpecifier, specifierLen);
11603 // Check the length modifier is valid with the given conversion specifier.
11604 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
11605 S.getLangOpts()))
11606 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
11607 diag::warn_format_nonsensical_length);
11608 else if (!FS.hasStandardLengthModifier())
11609 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
11610 else if (!FS.hasStandardLengthConversionCombination())
11611 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
11612 diag::warn_format_non_standard_conversion_spec);
11614 if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
11615 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
11617 // The remaining checks depend on the data arguments.
11618 if (ArgPassingKind == Sema::FAPK_VAList)
11619 return true;
11621 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
11622 return false;
11624 const Expr *Arg = getDataArg(argIndex);
11625 if (!Arg)
11626 return true;
11628 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
11631 static bool requiresParensToAddCast(const Expr *E) {
11632 // FIXME: We should have a general way to reason about operator
11633 // precedence and whether parens are actually needed here.
11634 // Take care of a few common cases where they aren't.
11635 const Expr *Inside = E->IgnoreImpCasts();
11636 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside))
11637 Inside = POE->getSyntacticForm()->IgnoreImpCasts();
11639 switch (Inside->getStmtClass()) {
11640 case Stmt::ArraySubscriptExprClass:
11641 case Stmt::CallExprClass:
11642 case Stmt::CharacterLiteralClass:
11643 case Stmt::CXXBoolLiteralExprClass:
11644 case Stmt::DeclRefExprClass:
11645 case Stmt::FloatingLiteralClass:
11646 case Stmt::IntegerLiteralClass:
11647 case Stmt::MemberExprClass:
11648 case Stmt::ObjCArrayLiteralClass:
11649 case Stmt::ObjCBoolLiteralExprClass:
11650 case Stmt::ObjCBoxedExprClass:
11651 case Stmt::ObjCDictionaryLiteralClass:
11652 case Stmt::ObjCEncodeExprClass:
11653 case Stmt::ObjCIvarRefExprClass:
11654 case Stmt::ObjCMessageExprClass:
11655 case Stmt::ObjCPropertyRefExprClass:
11656 case Stmt::ObjCStringLiteralClass:
11657 case Stmt::ObjCSubscriptRefExprClass:
11658 case Stmt::ParenExprClass:
11659 case Stmt::StringLiteralClass:
11660 case Stmt::UnaryOperatorClass:
11661 return false;
11662 default:
11663 return true;
11667 static std::pair<QualType, StringRef>
11668 shouldNotPrintDirectly(const ASTContext &Context,
11669 QualType IntendedTy,
11670 const Expr *E) {
11671 // Use a 'while' to peel off layers of typedefs.
11672 QualType TyTy = IntendedTy;
11673 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
11674 StringRef Name = UserTy->getDecl()->getName();
11675 QualType CastTy = llvm::StringSwitch<QualType>(Name)
11676 .Case("CFIndex", Context.getNSIntegerType())
11677 .Case("NSInteger", Context.getNSIntegerType())
11678 .Case("NSUInteger", Context.getNSUIntegerType())
11679 .Case("SInt32", Context.IntTy)
11680 .Case("UInt32", Context.UnsignedIntTy)
11681 .Default(QualType());
11683 if (!CastTy.isNull())
11684 return std::make_pair(CastTy, Name);
11686 TyTy = UserTy->desugar();
11689 // Strip parens if necessary.
11690 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
11691 return shouldNotPrintDirectly(Context,
11692 PE->getSubExpr()->getType(),
11693 PE->getSubExpr());
11695 // If this is a conditional expression, then its result type is constructed
11696 // via usual arithmetic conversions and thus there might be no necessary
11697 // typedef sugar there. Recurse to operands to check for NSInteger &
11698 // Co. usage condition.
11699 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
11700 QualType TrueTy, FalseTy;
11701 StringRef TrueName, FalseName;
11703 std::tie(TrueTy, TrueName) =
11704 shouldNotPrintDirectly(Context,
11705 CO->getTrueExpr()->getType(),
11706 CO->getTrueExpr());
11707 std::tie(FalseTy, FalseName) =
11708 shouldNotPrintDirectly(Context,
11709 CO->getFalseExpr()->getType(),
11710 CO->getFalseExpr());
11712 if (TrueTy == FalseTy)
11713 return std::make_pair(TrueTy, TrueName);
11714 else if (TrueTy.isNull())
11715 return std::make_pair(FalseTy, FalseName);
11716 else if (FalseTy.isNull())
11717 return std::make_pair(TrueTy, TrueName);
11720 return std::make_pair(QualType(), StringRef());
11723 /// Return true if \p ICE is an implicit argument promotion of an arithmetic
11724 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked
11725 /// type do not count.
11726 static bool
11727 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
11728 QualType From = ICE->getSubExpr()->getType();
11729 QualType To = ICE->getType();
11730 // It's an integer promotion if the destination type is the promoted
11731 // source type.
11732 if (ICE->getCastKind() == CK_IntegralCast &&
11733 S.Context.isPromotableIntegerType(From) &&
11734 S.Context.getPromotedIntegerType(From) == To)
11735 return true;
11736 // Look through vector types, since we do default argument promotion for
11737 // those in OpenCL.
11738 if (const auto *VecTy = From->getAs<ExtVectorType>())
11739 From = VecTy->getElementType();
11740 if (const auto *VecTy = To->getAs<ExtVectorType>())
11741 To = VecTy->getElementType();
11742 // It's a floating promotion if the source type is a lower rank.
11743 return ICE->getCastKind() == CK_FloatingCast &&
11744 S.Context.getFloatingTypeOrder(From, To) < 0;
11747 bool
11748 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
11749 const char *StartSpecifier,
11750 unsigned SpecifierLen,
11751 const Expr *E) {
11752 using namespace analyze_format_string;
11753 using namespace analyze_printf;
11755 // Now type check the data expression that matches the
11756 // format specifier.
11757 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext());
11758 if (!AT.isValid())
11759 return true;
11761 QualType ExprTy = E->getType();
11762 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) {
11763 ExprTy = TET->getUnderlyingExpr()->getType();
11766 // When using the format attribute in C++, you can receive a function or an
11767 // array that will necessarily decay to a pointer when passed to the final
11768 // format consumer. Apply decay before type comparison.
11769 if (ExprTy->canDecayToPointerType())
11770 ExprTy = S.Context.getDecayedType(ExprTy);
11772 // Diagnose attempts to print a boolean value as a character. Unlike other
11773 // -Wformat diagnostics, this is fine from a type perspective, but it still
11774 // doesn't make sense.
11775 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg &&
11776 E->isKnownToHaveBooleanValue()) {
11777 const CharSourceRange &CSR =
11778 getSpecifierRange(StartSpecifier, SpecifierLen);
11779 SmallString<4> FSString;
11780 llvm::raw_svector_ostream os(FSString);
11781 FS.toString(os);
11782 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character)
11783 << FSString,
11784 E->getExprLoc(), false, CSR);
11785 return true;
11788 ArgType::MatchKind ImplicitMatch = ArgType::NoMatch;
11789 ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy);
11790 if (Match == ArgType::Match)
11791 return true;
11793 // NoMatchPromotionTypeConfusion should be only returned in ImplictCastExpr
11794 assert(Match != ArgType::NoMatchPromotionTypeConfusion);
11796 // Look through argument promotions for our error message's reported type.
11797 // This includes the integral and floating promotions, but excludes array
11798 // and function pointer decay (seeing that an argument intended to be a
11799 // string has type 'char [6]' is probably more confusing than 'char *') and
11800 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type).
11801 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
11802 if (isArithmeticArgumentPromotion(S, ICE)) {
11803 E = ICE->getSubExpr();
11804 ExprTy = E->getType();
11806 // Check if we didn't match because of an implicit cast from a 'char'
11807 // or 'short' to an 'int'. This is done because printf is a varargs
11808 // function.
11809 if (ICE->getType() == S.Context.IntTy ||
11810 ICE->getType() == S.Context.UnsignedIntTy) {
11811 // All further checking is done on the subexpression
11812 ImplicitMatch = AT.matchesType(S.Context, ExprTy);
11813 if (ImplicitMatch == ArgType::Match)
11814 return true;
11817 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) {
11818 // Special case for 'a', which has type 'int' in C.
11819 // Note, however, that we do /not/ want to treat multibyte constants like
11820 // 'MooV' as characters! This form is deprecated but still exists. In
11821 // addition, don't treat expressions as of type 'char' if one byte length
11822 // modifier is provided.
11823 if (ExprTy == S.Context.IntTy &&
11824 FS.getLengthModifier().getKind() != LengthModifier::AsChar)
11825 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) {
11826 ExprTy = S.Context.CharTy;
11827 // To improve check results, we consider a character literal in C
11828 // to be a 'char' rather than an 'int'. 'printf("%hd", 'a');' is
11829 // more likely a type confusion situation, so we will suggest to
11830 // use '%hhd' instead by discarding the MatchPromotion.
11831 if (Match == ArgType::MatchPromotion)
11832 Match = ArgType::NoMatch;
11835 if (Match == ArgType::MatchPromotion) {
11836 // WG14 N2562 only clarified promotions in *printf
11837 // For NSLog in ObjC, just preserve -Wformat behavior
11838 if (!S.getLangOpts().ObjC &&
11839 ImplicitMatch != ArgType::NoMatchPromotionTypeConfusion &&
11840 ImplicitMatch != ArgType::NoMatchTypeConfusion)
11841 return true;
11842 Match = ArgType::NoMatch;
11844 if (ImplicitMatch == ArgType::NoMatchPedantic ||
11845 ImplicitMatch == ArgType::NoMatchTypeConfusion)
11846 Match = ImplicitMatch;
11847 assert(Match != ArgType::MatchPromotion);
11849 // Look through unscoped enums to their underlying type.
11850 bool IsEnum = false;
11851 bool IsScopedEnum = false;
11852 QualType IntendedTy = ExprTy;
11853 if (auto EnumTy = ExprTy->getAs<EnumType>()) {
11854 IntendedTy = EnumTy->getDecl()->getIntegerType();
11855 if (EnumTy->isUnscopedEnumerationType()) {
11856 ExprTy = IntendedTy;
11857 // This controls whether we're talking about the underlying type or not,
11858 // which we only want to do when it's an unscoped enum.
11859 IsEnum = true;
11860 } else {
11861 IsScopedEnum = true;
11865 // %C in an Objective-C context prints a unichar, not a wchar_t.
11866 // If the argument is an integer of some kind, believe the %C and suggest
11867 // a cast instead of changing the conversion specifier.
11868 if (isObjCContext() &&
11869 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
11870 if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
11871 !ExprTy->isCharType()) {
11872 // 'unichar' is defined as a typedef of unsigned short, but we should
11873 // prefer using the typedef if it is visible.
11874 IntendedTy = S.Context.UnsignedShortTy;
11876 // While we are here, check if the value is an IntegerLiteral that happens
11877 // to be within the valid range.
11878 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) {
11879 const llvm::APInt &V = IL->getValue();
11880 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy))
11881 return true;
11884 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(),
11885 Sema::LookupOrdinaryName);
11886 if (S.LookupName(Result, S.getCurScope())) {
11887 NamedDecl *ND = Result.getFoundDecl();
11888 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
11889 if (TD->getUnderlyingType() == IntendedTy)
11890 IntendedTy = S.Context.getTypedefType(TD);
11895 // Special-case some of Darwin's platform-independence types by suggesting
11896 // casts to primitive types that are known to be large enough.
11897 bool ShouldNotPrintDirectly = false; StringRef CastTyName;
11898 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
11899 QualType CastTy;
11900 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E);
11901 if (!CastTy.isNull()) {
11902 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int
11903 // (long in ASTContext). Only complain to pedants or when they're the
11904 // underlying type of a scoped enum (which always needs a cast).
11905 if (!IsScopedEnum &&
11906 (CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
11907 (AT.isSizeT() || AT.isPtrdiffT()) &&
11908 AT.matchesType(S.Context, CastTy))
11909 Match = ArgType::NoMatchPedantic;
11910 IntendedTy = CastTy;
11911 ShouldNotPrintDirectly = true;
11915 // We may be able to offer a FixItHint if it is a supported type.
11916 PrintfSpecifier fixedFS = FS;
11917 bool Success =
11918 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext());
11920 if (Success) {
11921 // Get the fix string from the fixed format specifier
11922 SmallString<16> buf;
11923 llvm::raw_svector_ostream os(buf);
11924 fixedFS.toString(os);
11926 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);
11928 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly && !IsScopedEnum) {
11929 unsigned Diag;
11930 switch (Match) {
11931 case ArgType::Match:
11932 case ArgType::MatchPromotion:
11933 case ArgType::NoMatchPromotionTypeConfusion:
11934 llvm_unreachable("expected non-matching");
11935 case ArgType::NoMatchPedantic:
11936 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
11937 break;
11938 case ArgType::NoMatchTypeConfusion:
11939 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
11940 break;
11941 case ArgType::NoMatch:
11942 Diag = diag::warn_format_conversion_argument_type_mismatch;
11943 break;
11946 // In this case, the specifier is wrong and should be changed to match
11947 // the argument.
11948 EmitFormatDiagnostic(S.PDiag(Diag)
11949 << AT.getRepresentativeTypeName(S.Context)
11950 << IntendedTy << IsEnum << E->getSourceRange(),
11951 E->getBeginLoc(),
11952 /*IsStringLocation*/ false, SpecRange,
11953 FixItHint::CreateReplacement(SpecRange, os.str()));
11954 } else {
11955 // The canonical type for formatting this value is different from the
11956 // actual type of the expression. (This occurs, for example, with Darwin's
11957 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but
11958 // should be printed as 'long' for 64-bit compatibility.)
11959 // Rather than emitting a normal format/argument mismatch, we want to
11960 // add a cast to the recommended type (and correct the format string
11961 // if necessary). We should also do so for scoped enumerations.
11962 SmallString<16> CastBuf;
11963 llvm::raw_svector_ostream CastFix(CastBuf);
11964 CastFix << (S.LangOpts.CPlusPlus ? "static_cast<" : "(");
11965 IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
11966 CastFix << (S.LangOpts.CPlusPlus ? ">" : ")");
11968 SmallVector<FixItHint,4> Hints;
11969 if (AT.matchesType(S.Context, IntendedTy) != ArgType::Match ||
11970 ShouldNotPrintDirectly)
11971 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));
11973 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
11974 // If there's already a cast present, just replace it.
11975 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
11976 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));
11978 } else if (!requiresParensToAddCast(E) && !S.LangOpts.CPlusPlus) {
11979 // If the expression has high enough precedence,
11980 // just write the C-style cast.
11981 Hints.push_back(
11982 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
11983 } else {
11984 // Otherwise, add parens around the expression as well as the cast.
11985 CastFix << "(";
11986 Hints.push_back(
11987 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
11989 // We don't use getLocForEndOfToken because it returns invalid source
11990 // locations for macro expansions (by design).
11991 SourceLocation EndLoc = S.SourceMgr.getSpellingLoc(E->getEndLoc());
11992 SourceLocation After = EndLoc.getLocWithOffset(
11993 Lexer::MeasureTokenLength(EndLoc, S.SourceMgr, S.LangOpts));
11994 Hints.push_back(FixItHint::CreateInsertion(After, ")"));
11997 if (ShouldNotPrintDirectly && !IsScopedEnum) {
11998 // The expression has a type that should not be printed directly.
11999 // We extract the name from the typedef because we don't want to show
12000 // the underlying type in the diagnostic.
12001 StringRef Name;
12002 if (const auto *TypedefTy = ExprTy->getAs<TypedefType>())
12003 Name = TypedefTy->getDecl()->getName();
12004 else
12005 Name = CastTyName;
12006 unsigned Diag = Match == ArgType::NoMatchPedantic
12007 ? diag::warn_format_argument_needs_cast_pedantic
12008 : diag::warn_format_argument_needs_cast;
12009 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum
12010 << E->getSourceRange(),
12011 E->getBeginLoc(), /*IsStringLocation=*/false,
12012 SpecRange, Hints);
12013 } else {
12014 // In this case, the expression could be printed using a different
12015 // specifier, but we've decided that the specifier is probably correct
12016 // and we should cast instead. Just use the normal warning message.
12017 EmitFormatDiagnostic(
12018 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
12019 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
12020 << E->getSourceRange(),
12021 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints);
12024 } else {
12025 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
12026 SpecifierLen);
12027 // Since the warning for passing non-POD types to variadic functions
12028 // was deferred until now, we emit a warning for non-POD
12029 // arguments here.
12030 bool EmitTypeMismatch = false;
12031 switch (S.isValidVarArgType(ExprTy)) {
12032 case Sema::VAK_Valid:
12033 case Sema::VAK_ValidInCXX11: {
12034 unsigned Diag;
12035 switch (Match) {
12036 case ArgType::Match:
12037 case ArgType::MatchPromotion:
12038 case ArgType::NoMatchPromotionTypeConfusion:
12039 llvm_unreachable("expected non-matching");
12040 case ArgType::NoMatchPedantic:
12041 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
12042 break;
12043 case ArgType::NoMatchTypeConfusion:
12044 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
12045 break;
12046 case ArgType::NoMatch:
12047 Diag = diag::warn_format_conversion_argument_type_mismatch;
12048 break;
12051 EmitFormatDiagnostic(
12052 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
12053 << IsEnum << CSR << E->getSourceRange(),
12054 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
12055 break;
12057 case Sema::VAK_Undefined:
12058 case Sema::VAK_MSVCUndefined:
12059 if (CallType == Sema::VariadicDoesNotApply) {
12060 EmitTypeMismatch = true;
12061 } else {
12062 EmitFormatDiagnostic(
12063 S.PDiag(diag::warn_non_pod_vararg_with_format_string)
12064 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
12065 << AT.getRepresentativeTypeName(S.Context) << CSR
12066 << E->getSourceRange(),
12067 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
12068 checkForCStrMembers(AT, E);
12070 break;
12072 case Sema::VAK_Invalid:
12073 if (CallType == Sema::VariadicDoesNotApply)
12074 EmitTypeMismatch = true;
12075 else if (ExprTy->isObjCObjectType())
12076 EmitFormatDiagnostic(
12077 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
12078 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
12079 << AT.getRepresentativeTypeName(S.Context) << CSR
12080 << E->getSourceRange(),
12081 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
12082 else
12083 // FIXME: If this is an initializer list, suggest removing the braces
12084 // or inserting a cast to the target type.
12085 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
12086 << isa<InitListExpr>(E) << ExprTy << CallType
12087 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
12088 break;
12091 if (EmitTypeMismatch) {
12092 // The function is not variadic, so we do not generate warnings about
12093 // being allowed to pass that object as a variadic argument. Instead,
12094 // since there are inherently no printf specifiers for types which cannot
12095 // be passed as variadic arguments, emit a plain old specifier mismatch
12096 // argument.
12097 EmitFormatDiagnostic(
12098 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
12099 << AT.getRepresentativeTypeName(S.Context) << ExprTy << false
12100 << E->getSourceRange(),
12101 E->getBeginLoc(), false, CSR);
12104 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
12105 "format string specifier index out of range");
12106 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
12109 return true;
12112 //===--- CHECK: Scanf format string checking ------------------------------===//
namespace {

/// Format-string checker specialized for the scanf family; the callbacks
/// below are invoked by the scanf format-string parser as it walks the
/// literal.
class CheckScanfHandler : public CheckFormatHandler {
public:
  CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
                    const Expr *origFormatExpr, Sema::FormatStringType type,
                    unsigned firstDataArg, unsigned numDataArgs,
                    const char *beg, Sema::FormatArgumentPassingKind APK,
                    ArrayRef<const Expr *> Args, unsigned formatIdx,
                    bool inFunctionCall, Sema::VariadicCallType CallType,
                    llvm::SmallBitVector &CheckedVarArgs,
                    UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, APK, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  // Called for each complete scanf conversion specifier.
  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *startSpecifier,
                            unsigned specifierLen) override;

  // Called when a specifier's conversion character is not recognized.
  bool HandleInvalidScanfConversionSpecifier(
      const analyze_scanf::ScanfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  // Called for a '[' scanlist that has no closing ']'.
  void HandleIncompleteScanList(const char *start, const char *end) override;

} // namespace
/// Diagnose a '[' scanlist in the format string that is never terminated
/// by ']'.
void CheckScanfHandler::HandleIncompleteScanList(const char *start,
                                                 const char *end) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
                       getLocationOfByte(end), /*IsStringLocation*/true,
                       getSpecifierRange(start, end - start));
/// Diagnose a conversion specifier whose conversion character is not a
/// valid scanf conversion.
bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  const analyze_scanf::ScanfConversionSpecifier &CS =
      FS.getConversionSpecifier();
  // Delegate to the handler shared with printf; it points the diagnostic at
  // the conversion character itself.
  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
/// Check one scanf conversion specifier: positional-argument consistency,
/// field width, length modifier, conversion character, and the type of the
/// data argument it consumes.  Returns false to abort parsing.
bool CheckScanfHandler::HandleScanfSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  using namespace analyze_scanf;
  using namespace analyze_format_string;

  const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();

  // Handle case where '%' and '*' don't consume an argument. These shouldn't
  // be used to decide if we are using positional arguments consistently.
  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
      // The first data-consuming specifier decides whether this format
      // string uses positional arguments.
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;

  // Check that the field width is non-zero.
  const OptionalAmount &Amt = FS.getFieldWidth();
  if (Amt.getHowSpecified() == OptionalAmount::Constant) {
    if (Amt.getConstantAmount() == 0) {
      const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
                                                   Amt.getConstantLength());
      EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
                           getLocationOfByte(Amt.getStart()),
                           /*IsStringLocation*/true, R,
                           FixItHint::CreateRemoval(R));

  if (!FS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense. Worth issuing a warning at some point.
    return true;

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (ArgPassingKind == Sema::FAPK_VAList)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  // Check that the argument type matches the format specifier.
  const Expr *Ex = getDataArg(argIndex);
  if (!Ex)
    return true;

  const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);

  if (!AT.isValid()) {
    return true;

  analyze_format_string::ArgType::MatchKind Match =
      AT.matchesType(S.Context, Ex->getType());
  bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
  if (Match == analyze_format_string::ArgType::Match)
    return true;

  // Try to rewrite the specifier to match the argument's type; on success
  // the diagnostic carries a fix-it with the corrected specifier.
  ScanfSpecifier fixedFS = FS;
  bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
                                 S.getLangOpts(), S.Context);

  unsigned Diag =
      Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
               : diag::warn_format_conversion_argument_type_mismatch;

  if (Success) {
    // Get the fix string from the fixed format specifier.
    SmallString<128> buf;
    llvm::raw_svector_ostream os(buf);
    fixedFS.toString(os);

    EmitFormatDiagnostic(
        S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
                      << Ex->getType() << false << Ex->getSourceRange(),
        Ex->getBeginLoc(),
        /*IsStringLocation*/ false,
        getSpecifierRange(startSpecifier, specifierLen),
        FixItHint::CreateReplacement(
            getSpecifierRange(startSpecifier, specifierLen), os.str()));
  } else {
    EmitFormatDiagnostic(S.PDiag(Diag)
                             << AT.getRepresentativeTypeName(S.Context)
                             << Ex->getType() << false << Ex->getSourceRange(),
                         Ex->getBeginLoc(),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));

  return true;
/// Shared entry point that validates one printf- or scanf-family format
/// string literal against the call's data arguments, then dispatches to the
/// family-specific handler.
static void CheckFormatString(
    Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
    ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
    unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
    bool inFunctionCall, Sema::VariadicCallType CallType,
    llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
    bool IgnoreStringsWithoutSpecifiers) {
  // CHECK: is the format string a wide literal?
  if (!FExpr->isAscii() && !FExpr->isUTF8()) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;

  // Str - The format string. NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T =
      S.Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  // Effective length: the declared array (minus the terminator slot) may be
  // shorter than the literal itself.
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  const unsigned numDataArgs = Args.size() - firstDataArg;

  if (IgnoreStringsWithoutSpecifiers &&
      !analyze_format_string::parseFormatStringHasFormattingSpecifiers(
          Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
    return;

  // Emit a warning if the string literal is truncated and does not contain an
  // embedded null character.
  if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_printf_format_string_not_null_terminated),
        FExpr->getBeginLoc(),
        /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
    return;

  // CHECK: empty format string?
  if (StrLen == 0 && numDataArgs > 0) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;

  // Dispatch to the printf- or scanf-specific parser and handler.
  if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
      Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
      Type == Sema::FST_OSTrace) {
    CheckPrintfHandler H(
        S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
        (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, APK,
        Args, format_idx, inFunctionCall, CallType, CheckedVarArgs,
        UncoveredArg);

    if (!analyze_format_string::ParsePrintfString(
            H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo(),
            Type == Sema::FST_FreeBSDKPrintf))
      H.DoneProcessing();
  } else if (Type == Sema::FST_Scanf) {
    CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
                        numDataArgs, Str, APK, Args, format_idx, inFunctionCall,
                        CallType, CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParseScanfString(
            H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
      H.DoneProcessing();
  } // TODO: handle other formats
/// Returns true if the given format string literal contains an 's'
/// conversion specifier (delegates to ParseFormatStringHasSArg).
bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
  // Str - The format string. NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
                                                         getLangOpts(),
                                                         Context.getTargetInfo());
12378 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
// Returns the related absolute value function that is larger, or 0 if one
// does not exist.
12382 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
12383 switch (AbsFunction) {
12384 default:
12385 return 0;
12387 case Builtin::BI__builtin_abs:
12388 return Builtin::BI__builtin_labs;
12389 case Builtin::BI__builtin_labs:
12390 return Builtin::BI__builtin_llabs;
12391 case Builtin::BI__builtin_llabs:
12392 return 0;
12394 case Builtin::BI__builtin_fabsf:
12395 return Builtin::BI__builtin_fabs;
12396 case Builtin::BI__builtin_fabs:
12397 return Builtin::BI__builtin_fabsl;
12398 case Builtin::BI__builtin_fabsl:
12399 return 0;
12401 case Builtin::BI__builtin_cabsf:
12402 return Builtin::BI__builtin_cabs;
12403 case Builtin::BI__builtin_cabs:
12404 return Builtin::BI__builtin_cabsl;
12405 case Builtin::BI__builtin_cabsl:
12406 return 0;
12408 case Builtin::BIabs:
12409 return Builtin::BIlabs;
12410 case Builtin::BIlabs:
12411 return Builtin::BIllabs;
12412 case Builtin::BIllabs:
12413 return 0;
12415 case Builtin::BIfabsf:
12416 return Builtin::BIfabs;
12417 case Builtin::BIfabs:
12418 return Builtin::BIfabsl;
12419 case Builtin::BIfabsl:
12420 return 0;
12422 case Builtin::BIcabsf:
12423 return Builtin::BIcabs;
12424 case Builtin::BIcabs:
12425 return Builtin::BIcabsl;
12426 case Builtin::BIcabsl:
12427 return 0;
12431 // Returns the argument type of the absolute value function.
12432 static QualType getAbsoluteValueArgumentType(ASTContext &Context,
12433 unsigned AbsType) {
12434 if (AbsType == 0)
12435 return QualType();
12437 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
12438 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error);
12439 if (Error != ASTContext::GE_None)
12440 return QualType();
12442 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>();
12443 if (!FT)
12444 return QualType();
12446 if (FT->getNumParams() != 1)
12447 return QualType();
12449 return FT->getParamType(0);
12452 // Returns the best absolute value function, or zero, based on type and
12453 // current absolute value function.
12454 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType,
12455 unsigned AbsFunctionKind) {
12456 unsigned BestKind = 0;
12457 uint64_t ArgSize = Context.getTypeSize(ArgType);
12458 for (unsigned Kind = AbsFunctionKind; Kind != 0;
12459 Kind = getLargerAbsoluteValueFunction(Kind)) {
12460 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind);
12461 if (Context.getTypeSize(ParamType) >= ArgSize) {
12462 if (BestKind == 0)
12463 BestKind = Kind;
12464 else if (Context.hasSameType(ParamType, ArgType)) {
12465 BestKind = Kind;
12466 break;
12470 return BestKind;
// Classification of the argument types an absolute value function can take.
enum AbsoluteValueKind {
  AVK_Integer,
  AVK_Floating,
  AVK_Complex
// Classify a type into the absolute value kind that handles it.
static AbsoluteValueKind getAbsoluteValueKind(QualType T) {
  if (T->isIntegralOrEnumerationType())
    return AVK_Integer;
  if (T->isRealFloatingType())
    return AVK_Floating;
  if (T->isAnyComplexType())
    return AVK_Complex;

  llvm_unreachable("Type not integer, floating, or complex");
12490 // Changes the absolute value function to a different type. Preserves whether
12491 // the function is a builtin.
static unsigned changeAbsFunction(unsigned AbsKind,
                                  AbsoluteValueKind ValueKind) {
  switch (ValueKind) {
  case AVK_Integer:
    // Map floating/complex functions to the integer one of the same flavor
    // (builtin vs. library).
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsl:
    case Builtin::BI__builtin_cabsf:
    case Builtin::BI__builtin_cabs:
    case Builtin::BI__builtin_cabsl:
      return Builtin::BI__builtin_abs;
    case Builtin::BIfabsf:
    case Builtin::BIfabs:
    case Builtin::BIfabsl:
    case Builtin::BIcabsf:
    case Builtin::BIcabs:
    case Builtin::BIcabsl:
      return Builtin::BIabs;
  case AVK_Floating:
    // Map to the smallest floating variant; getBestAbsFunction widens it
    // to fit the argument afterwards.
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_abs:
    case Builtin::BI__builtin_labs:
    case Builtin::BI__builtin_llabs:
    case Builtin::BI__builtin_cabsf:
    case Builtin::BI__builtin_cabs:
    case Builtin::BI__builtin_cabsl:
      return Builtin::BI__builtin_fabsf;
    case Builtin::BIabs:
    case Builtin::BIlabs:
    case Builtin::BIllabs:
    case Builtin::BIcabsf:
    case Builtin::BIcabs:
    case Builtin::BIcabsl:
      return Builtin::BIfabsf;
  case AVK_Complex:
    // Map to the smallest complex variant, same widening strategy as above.
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_abs:
    case Builtin::BI__builtin_labs:
    case Builtin::BI__builtin_llabs:
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsl:
      return Builtin::BI__builtin_cabsf;
    case Builtin::BIabs:
    case Builtin::BIlabs:
    case Builtin::BIllabs:
    case Builtin::BIfabsf:
    case Builtin::BIfabs:
    case Builtin::BIfabsl:
      return Builtin::BIcabsf;

  llvm_unreachable("Unable to convert function");
// Returns the "kind" of an absolute value function: simply its builtin ID
// when it is one of the recognized abs variants, 0 otherwise.
static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) {
  const IdentifierInfo *FnInfo = FDecl->getIdentifier();
  if (!FnInfo)
    return 0;

  switch (FDecl->getBuiltinID()) {
  default:
    return 0;
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs:
  case Builtin::BI__builtin_cabs:
  case Builtin::BI__builtin_cabsf:
  case Builtin::BI__builtin_cabsl:
  case Builtin::BIabs:
  case Builtin::BIlabs:
  case Builtin::BIllabs:
  case Builtin::BIfabs:
  case Builtin::BIfabsf:
  case Builtin::BIfabsl:
  case Builtin::BIcabs:
  case Builtin::BIcabsf:
  case Builtin::BIcabsl:
    return FDecl->getBuiltinID();

  llvm_unreachable("Unknown Builtin type");
12587 // If the replacement is valid, emit a note with replacement function.
12588 // Additionally, suggest including the proper header if not already included.
static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
                            unsigned AbsKind, QualType ArgType) {
  bool EmitHeaderHint = true;
  const char *HeaderName = nullptr;
  StringRef FunctionName;
  if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
    // In C++ (except for complex arguments) suggest the overloaded std::abs.
    FunctionName = "std::abs";
    if (ArgType->isIntegralOrEnumerationType()) {
      HeaderName = "cstdlib";
    } else if (ArgType->isRealFloatingType()) {
      HeaderName = "cmath";
    } else {
      llvm_unreachable("Invalid Type");

    // Lookup all std::abs
    if (NamespaceDecl *Std = S.getStdNamespace()) {
      LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupQualifiedName(R, Std);

      for (const auto *I : R) {
        const FunctionDecl *FDecl = nullptr;
        if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) {
          FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl());
        } else {
          FDecl = dyn_cast<FunctionDecl>(I);

        if (!FDecl)
          continue;

        // Found std::abs(), check that they are the right ones.
        if (FDecl->getNumParams() != 1)
          continue;

        // Check that the parameter type can handle the argument.
        QualType ParamType = FDecl->getParamDecl(0)->getType();
        if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) &&
            S.Context.getTypeSize(ArgType) <=
                S.Context.getTypeSize(ParamType)) {
          // Found a function, don't need the header hint.
          EmitHeaderHint = false;
          break;
  } else {
    // Otherwise suggest the specific builtin/library function by name, and
    // the header that declares it.
    FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
    HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);

    if (HeaderName) {
      DeclarationName DN(&S.Context.Idents.get(FunctionName));
      LookupResult R(S, DN, Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupName(R, S.getCurScope());

      // If the name resolves to something other than the expected builtin,
      // a fix-it using it would be wrong; bail out.
      if (R.isSingleResult()) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
        if (FD && FD->getBuiltinID() == AbsKind) {
          EmitHeaderHint = false;
        } else {
          return;
      } else if (!R.empty()) {
        return;

  S.Diag(Loc, diag::note_replace_abs_function)
      << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);

  if (!HeaderName)
    return;

  if (!EmitHeaderHint)
    return;

  S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
                                                    << FunctionName;
12671 template <std::size_t StrLen>
12672 static bool IsStdFunction(const FunctionDecl *FDecl,
12673 const char (&Str)[StrLen]) {
12674 if (!FDecl)
12675 return false;
12676 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
12677 return false;
12678 if (!FDecl->isInStdNamespace())
12679 return false;
12681 return true;
12684 // Warn when using the wrong abs() function.
void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
                                      const FunctionDecl *FDecl) {
  if (Call->getNumArgs() != 1)
    return;

  // Only proceed for recognized abs builtins/library functions or std::abs.
  unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
  bool IsStdAbs = IsStdFunction(FDecl, "abs");
  if (AbsKind == 0 && !IsStdAbs)
    return;

  QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  QualType ParamType = Call->getArg(0)->getType();

  // Unsigned types cannot be negative. Suggest removing the absolute value
  // function call.
  if (ArgType->isUnsignedIntegerType()) {
    StringRef FunctionName =
        IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
    Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
    Diag(Call->getExprLoc(), diag::note_remove_abs)
        << FunctionName
        << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
    return;

  // Taking the absolute value of a pointer is very suspicious, they probably
  // wanted to index into an array, dereference a pointer, call a function, etc.
  if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
    unsigned DiagType = 0;
    if (ArgType->isFunctionType())
      DiagType = 1;
    else if (ArgType->isArrayType())
      DiagType = 2;

    Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
    return;

  // std::abs has overloads which prevent most of the absolute value problems
  // from occurring.
  if (IsStdAbs)
    return;

  AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
  AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);

  // The argument and parameter are the same kind. Check if they are the right
  // size.
  if (ArgValueKind == ParamValueKind) {
    if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
      return;

    // Argument is wider than the parameter: suggest the larger variant.
    unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
    Diag(Call->getExprLoc(), diag::warn_abs_too_small)
        << FDecl << ArgType << ParamType;

    if (NewAbsKind == 0)
      return;

    emitReplacement(*this, Call->getExprLoc(),
                    Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
    return;

  // ArgValueKind != ParamValueKind
  // The wrong type of absolute value function was used. Attempt to find the
  // proper one.
  unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
  NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
  if (NewAbsKind == 0)
    return;

  Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
      << FDecl << ParamValueKind << ArgValueKind;

  emitReplacement(*this, Call->getExprLoc(),
                  Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
//===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
/// Diagnose std::max<unsigned-type>(0, x), which always yields x, and
/// suggest removing the call entirely.
void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
                                const FunctionDecl *FDecl) {
  if (!Call || !FDecl) return;

  // Ignore template specializations and macros.
  if (inTemplateInstantiation()) return;
  if (Call->getExprLoc().isMacroID()) return;

  // Only care about the one template argument, two function parameter std::max
  if (Call->getNumArgs() != 2) return;
  if (!IsStdFunction(FDecl, "max")) return;
  const auto * ArgList = FDecl->getTemplateSpecializationArgs();
  if (!ArgList) return;
  if (ArgList->size() != 1) return;

  // Check that template type argument is unsigned integer.
  const auto& TA = ArgList->get(0);
  if (TA.getKind() != TemplateArgument::Type) return;
  QualType ArgType = TA.getAsType();
  if (!ArgType->isUnsignedIntegerType()) return;

  // See if either argument is a literal zero.
  auto IsLiteralZeroArg = [](const Expr* E) -> bool {
    const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
    if (!MTE) return false;
    const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr());
    if (!Num) return false;
    if (Num->getValue() != 0) return false;
    return true;

  const Expr *FirstArg = Call->getArg(0);
  const Expr *SecondArg = Call->getArg(1);
  const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
  const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);

  // Only warn when exactly one argument is zero.
  if (IsFirstArgZero == IsSecondArgZero) return;

  SourceRange FirstRange = FirstArg->getSourceRange();
  SourceRange SecondRange = SecondArg->getSourceRange();

  SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;

  Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
      << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;

  // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
  SourceRange RemovalRange;
  if (IsFirstArgZero) {
    RemovalRange = SourceRange(FirstRange.getBegin(),
                               SecondRange.getBegin().getLocWithOffset(-1));
  } else {
    RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()),
                               SecondRange.getEnd());

  Diag(Call->getExprLoc(), diag::note_remove_max_call)
      << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
      << FixItHint::CreateRemoval(RemovalRange);
12827 //===--- CHECK: Standard memory functions ---------------------------------===//
12829 /// Takes the expression passed to the size_t parameter of functions
12830 /// such as memcmp, strncat, etc and warns if it's a comparison.
12832 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
                                           IdentifierInfo *FnName,
                                           SourceLocation FnLoc,
                                           SourceLocation RParenLoc) {
  const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
  if (!Size)
    return false;

  // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||:
  if (!Size->isComparisonOp() && !Size->isLogicalOp())
    return false;

  // Warn, then offer two fix-its: move the call's closing paren so the
  // comparison applies to the call result, or cast to size_t to silence.
  SourceRange SizeRange = Size->getSourceRange();
  S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
      << SizeRange << FnName;
  S.Diag(FnLoc, diag::note_memsize_comparison_paren)
      << FnName
      << FixItHint::CreateInsertion(
             S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
      << FixItHint::CreateRemoval(RParenLoc);
  S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
      << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
      << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
                                    ")");

  return true;
12861 /// Determine whether the given type is or contains a dynamic class type
12862 /// (e.g., whether it has a vtable).
static const CXXRecordDecl *getContainedDynamicClass(QualType T,
                                                     bool &IsContained) {
  // Look through array types while ignoring qualifiers.
  const Type *Ty = T->getBaseElementTypeUnsafe();
  IsContained = false;

  // Only complete, valid C++ record definitions can be dynamic.
  const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  RD = RD ? RD->getDefinition() : nullptr;
  if (!RD || RD->isInvalidDecl())
    return nullptr;

  if (RD->isDynamicClass())
    return RD;

  // Check all the fields.  If any bases were dynamic, the class is dynamic.
  // It's impossible for a class to transitively contain itself by value, so
  // infinite recursion is impossible.
  for (auto *FD : RD->fields()) {
    bool SubContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(FD->getType(), SubContained)) {
      // IsContained distinguishes "a field's type is dynamic" from "the
      // type itself is dynamic" for the caller.
      IsContained = true;
      return ContainedRD;

  return nullptr;
12892 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) {
12893 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E))
12894 if (Unary->getKind() == UETT_SizeOf)
12895 return Unary;
12896 return nullptr;
12899 /// If E is a sizeof expression, returns its argument expression,
12900 /// otherwise returns NULL.
12901 static const Expr *getSizeOfExprArg(const Expr *E) {
12902 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
12903 if (!SizeOf->isArgumentType())
12904 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
12905 return nullptr;
12908 /// If E is a sizeof expression, returns its argument type.
12909 static QualType getSizeOfArgType(const Expr *E) {
12910 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
12911 return SizeOf->getTypeOfArgument();
12912 return QualType();
namespace {

/// Type visitor that walks a struct's fields and emits a note for each field
/// that is non-trivial to default-initialize (ARC __strong/__weak members).
/// NOTE(review): presumably used by the memset/memory-function diagnostics —
/// confirm against the callers of diag().
struct SearchNonTrivialToInitializeField
    : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
  using Super =
      DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;

  SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}

  void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
                     SourceLocation SL) {
    // Arrays are visited through their element type.
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PDIK, AT, SL);
      return;

    Super::visitWithKind(PDIK, FT, SL);

  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);

  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);

  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());

  void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
                  const ArrayType *AT, SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);

  void visitTrivial(QualType FT, SourceLocation SL) {}

  // Entry point: visit all fields of the record type RT.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation());

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;
  Sema &S;
/// Visitor that walks a record type that is non-trivial to copy and emits a
/// note_nontrivial_field diagnostic at each field responsible (ARC __strong /
/// __weak fields), recursing through nested structs and arrays. Entry point
/// is the static \c diag() helper. The note's %select argument is 0 here
/// (its sibling, SearchNonTrivialToInitializeField, passes 1).
struct SearchNonTrivialToCopyField
    : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
  using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;

  SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}

  // Unwrap array types to their element type before the normal
  // kind-based dispatch.
  void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
                     SourceLocation SL) {
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PCK, AT, SL);
      return;
    }

    Super::visitWithKind(PCK, FT, SL);
  }

  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  // Recurse into every field of a nested struct.
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  // Visit the innermost element type of a (possibly multi-dimensional) array.
  void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
                  SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  // No pre-visit work or notes needed for trivially-copyable fields.
  void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
                SourceLocation SL) {}
  void visitTrivial(QualType FT, SourceLocation SL) {}
  void visitVolatileTrivial(QualType FT, SourceLocation SL) {}

  /// Emit notes for every non-trivial-to-copy field of \p RT, attaching
  /// them to the runtime-behavior context of \p E.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E; // Expression anchoring DiagRuntimeBehavior.
  Sema &S;
};
13007 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object.
13008 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
13009 SizeofExpr = SizeofExpr->IgnoreParenImpCasts();
13011 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) {
13012 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
13013 return false;
13015 return doesExprLikelyComputeSize(BO->getLHS()) ||
13016 doesExprLikelyComputeSize(BO->getRHS());
13019 return getAsSizeOfExpr(SizeofExpr) != nullptr;
13022 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
13024 /// \code
13025 /// #define MACRO 0
13026 /// foo(MACRO);
13027 /// foo(0);
13028 /// \endcode
13030 /// This should return true for the first call to foo, but not for the second
13031 /// (regardless of whether foo is a macro or function).
13032 static bool isArgumentExpandedFromMacro(SourceManager &SM,
13033 SourceLocation CallLoc,
13034 SourceLocation ArgLoc) {
13035 if (!CallLoc.isMacroID())
13036 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc);
13038 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) !=
13039 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc));
/// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
/// last two arguments transposed.
///
/// Only applies to memset and bzero; other mem-functions are ignored.
static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
  if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
    return;

  // The length argument: third for memset, second for bzero.
  const Expr *SizeArg =
      Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();

  // Matches the integer literal 0 or the character literal '\0'.
  auto isLiteralZero = [](const Expr *E) {
    return (isa<IntegerLiteral>(E) &&
            cast<IntegerLiteral>(E)->getValue() == 0) ||
           (isa<CharacterLiteral>(E) &&
            cast<CharacterLiteral>(E)->getValue() == 0);
  };

  // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
  // Skip the warning when the zero came from a macro argument, since the
  // macro may legitimately expand to 0 (see isArgumentExpandedFromMacro).
  SourceLocation CallLoc = Call->getRParenLoc();
  SourceManager &SM = S.getSourceManager();
  if (isLiteralZero(SizeArg) &&
      !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) {

    SourceLocation DiagLoc = SizeArg->getExprLoc();

    // Some platforms #define bzero to __builtin_memset. See if this is the
    // case, and if so, emit a better diagnostic.
    if (BId == Builtin::BIbzero ||
        (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
                                    CallLoc, SM, S.getLangOpts()) == "bzero")) {
      S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
      S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
    } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) {
      // memset with a non-zero fill value and zero length looks transposed.
      // (memset(p, 0, 0) is deliberately not diagnosed here.)
      S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
      S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
    }
    return;
  }

  // If the second argument to a memset is a sizeof expression and the third
  // isn't, this is also likely an error. This should catch
  // 'memset(buf, sizeof(buf), 0xff)'.
  if (BId == Builtin::BImemset &&
      doesExprLikelyComputeSize(Call->getArg(1)) &&
      !doesExprLikelyComputeSize(Call->getArg(2))) {
    SourceLocation DiagLoc = Call->getArg(1)->getExprLoc();
    S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
    S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
    return;
  }
}
/// Check for dangerous or invalid arguments to memset().
///
/// This issues warnings on known problematic, dangerous or unspecified
/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
/// function calls.
///
/// \param Call The call expression to diagnose.
/// \param BId The builtin ID of the memory function being called
///        (e.g. Builtin::BImemset); must be non-zero.
/// \param FnName The callee's name, used to word the diagnostics.
void Sema::CheckMemaccessArguments(const CallExpr *Call,
                                   unsigned BId,
                                   IdentifierInfo *FnName) {
  assert(BId != 0);

  // It is possible to have a non-standard definition of memset. Validate
  // we have enough arguments, and if not, abort further checking.
  unsigned ExpectedNumArgs =
      (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
  if (Call->getNumArgs() < ExpectedNumArgs)
    return;

  // LastArg: one past the last pointer argument to inspect (only the
  // destination for memset/bzero/strndup; both pointers otherwise).
  unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
                      BId == Builtin::BIstrndup ? 1 : 2);
  // LenArg: index of the length argument.
  unsigned LenArg =
      (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
  const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();

  if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Catch cases like 'memset(buf, sizeof(buf), 0)'.
  CheckMemaccessSize(*this, BId, Call);

  // We have special checking when the length is a sizeof expression.
  QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
  const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
  llvm::FoldingSetNodeID SizeOfArgID;

  // Although widely used, 'bzero' is not a standard function. Be more strict
  // with the argument types before allowing diagnostics and only allow the
  // form bzero(ptr, sizeof(...)).
  QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
    return;

  for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
    const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
    SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();

    QualType DestTy = Dest->getType();
    QualType PointeeTy;
    if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
      PointeeTy = DestPtrTy->getPointeeType();

      // Never warn about void type pointers. This can be used to suppress
      // false positives.
      if (PointeeTy->isVoidType())
        continue;

      // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
      // actually comparing the expressions for equality. Because computing the
      // expression IDs can be expensive, we only do this if the diagnostic is
      // enabled.
      if (SizeOfArg &&
          !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
                           SizeOfArg->getExprLoc())) {
        // We only compute IDs for expressions if the warning is enabled, and
        // cache the sizeof arg's ID.
        if (SizeOfArgID == llvm::FoldingSetNodeID())
          SizeOfArg->Profile(SizeOfArgID, Context, true);
        llvm::FoldingSetNodeID DestID;
        Dest->Profile(DestID, Context, true);
        if (DestID == SizeOfArgID) {
          // TODO: For strncpy() and friends, this could suggest sizeof(dst)
          //       over sizeof(src) as well.
          unsigned ActionIdx = 0; // Default is to suggest dereferencing.
          StringRef ReadableName = FnName->getName();

          if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
            if (UnaryOp->getOpcode() == UO_AddrOf)
              ActionIdx = 1; // If its an address-of operator, just remove it.
          if (!PointeeTy->isIncompleteType() &&
              (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
            ActionIdx = 2; // If the pointee's size is sizeof(char),
                           // suggest an explicit length.

          // If the function is defined as a builtin macro, do not show macro
          // expansion.
          SourceLocation SL = SizeOfArg->getExprLoc();
          SourceRange DSR = Dest->getSourceRange();
          SourceRange SSR = SizeOfArg->getSourceRange();
          SourceManager &SM = getSourceManager();

          if (SM.isMacroArgExpansion(SL)) {
            ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
            SL = SM.getSpellingLoc(SL);
            DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
                              SM.getSpellingLoc(DSR.getEnd()));
            SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
                              SM.getSpellingLoc(SSR.getEnd()));
          }

          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess)
                                << ReadableName
                                << PointeeTy
                                << DestTy
                                << DSR
                                << SSR);
          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
                                << ActionIdx
                                << SSR);

          break;
        }
      }

      // Also check for cases where the sizeof argument is the exact same
      // type as the memory argument, and where it points to a user-defined
      // record type.
      if (SizeOfArgTy != QualType()) {
        if (PointeeTy->isRecordType() &&
            Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
          DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
                              PDiag(diag::warn_sizeof_pointer_type_memaccess)
                                << FnName << SizeOfArgTy << ArgIdx
                                << PointeeTy << Dest->getSourceRange()
                                << LenExpr->getSourceRange());
          break;
        }
      }
    } else if (DestTy->isArrayType()) {
      PointeeTy = DestTy;
    }

    if (PointeeTy == QualType())
      continue;

    // Always complain about dynamic classes.
    bool IsContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(PointeeTy, IsContained)) {

      // OperationType selects the verb used in the diagnostic.
      unsigned OperationType = 0;
      const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
      // "overwritten" if we're warning about the destination for any call
      // but memcmp; otherwise a verb appropriate to the call.
      if (ArgIdx != 0 || IsCmp) {
        if (BId == Builtin::BImemcpy)
          OperationType = 1;
        else if(BId == Builtin::BImemmove)
          OperationType = 2;
        else if (IsCmp)
          OperationType = 3;
      }

      DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                          PDiag(diag::warn_dyn_class_memaccess)
                            << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName
                            << IsContained << ContainedRD << OperationType
                            << Call->getCallee()->getSourceRange());
    } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
             BId != Builtin::BImemset)
      DiagRuntimeBehavior(
        Dest->getExprLoc(), Dest,
        PDiag(diag::warn_arc_object_memaccess)
          << ArgIdx << FnName << PointeeTy
          << Call->getCallee()->getSourceRange());
    else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
      // C structs that are non-trivial under ARC: warn and point at the
      // offending fields via the Search* visitors above.
      if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
          RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                              << ArgIdx << FnName << PointeeTy << 0);
        SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this);
      } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
                 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                              << ArgIdx << FnName << PointeeTy << 1);
        SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this);
      } else {
        continue;
      }
    } else
      continue;

    // Any diagnosed argument gets the common "cast to void* to silence" note.
    DiagRuntimeBehavior(
      Dest->getExprLoc(), Dest,
      PDiag(diag::note_bad_memaccess_silence)
        << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
    break;
  }
}
13288 // A little helper routine: ignore addition and subtraction of integer literals.
13289 // This intentionally does not ignore all integer constant expressions because
13290 // we don't want to remove sizeof().
13291 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) {
13292 Ex = Ex->IgnoreParenCasts();
13294 while (true) {
13295 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex);
13296 if (!BO || !BO->isAdditiveOp())
13297 break;
13299 const Expr *RHS = BO->getRHS()->IgnoreParenCasts();
13300 const Expr *LHS = BO->getLHS()->IgnoreParenCasts();
13302 if (isa<IntegerLiteral>(RHS))
13303 Ex = LHS;
13304 else if (isa<IntegerLiteral>(LHS))
13305 Ex = RHS;
13306 else
13307 break;
13310 return Ex;
13313 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
13314 ASTContext &Context) {
13315 // Only handle constant-sized or VLAs, but not flexible members.
13316 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) {
13317 // Only issue the FIXIT for arrays of size > 1.
13318 if (CAT->getSize().getSExtValue() <= 1)
13319 return false;
13320 } else if (!Ty->isVariableArrayType()) {
13321 return false;
13323 return true;
// Warn if the user has made the 'size' argument to strlcpy or strlcat
// be the size of the source, instead of the destination.
//
// \param Call The strlcpy/strlcat call expression.
// \param FnName The callee's name, used to word the diagnostic.
void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
                                    IdentifierInfo *FnName) {

  // Don't crash if the user has the wrong number of arguments
  unsigned NumArgs = Call->getNumArgs();
  if ((NumArgs != 3) && (NumArgs != 4))
    return;

  // Strip "+ literal" / "- literal" adjustments so e.g. 'sizeof(x) + 2'
  // still matches.
  const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context);
  const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context);
  const Expr *CompareWithSrc = nullptr;

  if (CheckMemorySizeofForComparison(*this, SizeArg, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Look for 'strlcpy(dst, x, sizeof(x))'
  if (const Expr *Ex = getSizeOfExprArg(SizeArg))
    CompareWithSrc = Ex;
  else {
    // Look for 'strlcpy(dst, x, strlen(x))'
    if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) {
      if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen &&
          SizeCall->getNumArgs() == 1)
        CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context);
    }
  }

  if (!CompareWithSrc)
    return;

  // Determine if the argument to sizeof/strlen is equal to the source
  // argument. In principle there's all kinds of things you could do
  // here, for instance creating an == expression and evaluating it with
  // EvaluateAsBooleanCondition, but this uses a more direct technique:
  const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg);
  if (!SrcArgDRE)
    return;

  const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc);
  if (!CompareWithSrcDRE ||
      SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl())
    return;

  const Expr *OriginalSizeArg = Call->getArg(2);
  Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size)
      << OriginalSizeArg->getSourceRange() << FnName;

  // Output a FIXIT hint if the destination is an array (rather than a
  // pointer to an array). This could be enhanced to handle some
  // pointers if we know the actual size, like if DstArg is 'array+2'
  // we could say 'sizeof(array)-2'.
  const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts();
  if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context))
    return;

  // Build the replacement text "sizeof(<dst>)" for the fixit.
  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ")";

  Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size)
      << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
                                      OS.str());
}
13395 /// Check if two expressions refer to the same declaration.
13396 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
13397 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
13398 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
13399 return D1->getDecl() == D2->getDecl();
13400 return false;
13403 static const Expr *getStrlenExprArg(const Expr *E) {
13404 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
13405 const FunctionDecl *FD = CE->getDirectCallee();
13406 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
13407 return nullptr;
13408 return CE->getArg(0)->IgnoreParenCasts();
13410 return nullptr;
// Warn on anti-patterns as the 'size' argument to strncat.
// The correct size argument should look like following:
//   strncat(dst, src, sizeof(dst) - strlen(dest) - 1);
//
// \param CE The strncat call expression.
// \param FnName The callee's name, used to word the diagnostic.
void Sema::CheckStrncatArguments(const CallExpr *CE,
                                 IdentifierInfo *FnName) {
  // Don't crash if the user has the wrong number of arguments.
  if (CE->getNumArgs() < 3)
    return;
  const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
  const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
  const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();

  if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
                                     CE->getRParenLoc()))
    return;

  // Identify common expressions, which are wrongly used as the size argument
  // to strncat and may lead to buffer overflows.
  // PatternType 1: the size is derived from the destination;
  // PatternType 2: the size is derived from the source.
  unsigned PatternType = 0;
  if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
    // - sizeof(dst)
    if (referToTheSameDecl(SizeOfArg, DstArg))
      PatternType = 1;
    // - sizeof(src)
    else if (referToTheSameDecl(SizeOfArg, SrcArg))
      PatternType = 2;
  } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
    if (BE->getOpcode() == BO_Sub) {
      const Expr *L = BE->getLHS()->IgnoreParenCasts();
      const Expr *R = BE->getRHS()->IgnoreParenCasts();
      // - sizeof(dst) - strlen(dst)
      if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
          referToTheSameDecl(DstArg, getStrlenExprArg(R)))
        PatternType = 1;
      // - sizeof(src) - (anything)
      else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
        PatternType = 2;
    }
  }

  if (PatternType == 0)
    return;

  // Generate the diagnostic.
  SourceLocation SL = LenArg->getBeginLoc();
  SourceRange SR = LenArg->getSourceRange();
  SourceManager &SM = getSourceManager();

  // If the function is defined as a builtin macro, do not show macro expansion.
  if (SM.isMacroArgExpansion(SL)) {
    SL = SM.getSpellingLoc(SL);
    SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
                     SM.getSpellingLoc(SR.getEnd()));
  }

  // Check if the destination is an array (rather than a pointer to an array).
  QualType DstTy = DstArg->getType();
  bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy,
                                                                    Context);
  if (!isKnownSizeArray) {
    // No fixit can be suggested without a known destination size.
    if (PatternType == 1)
      Diag(SL, diag::warn_strncat_wrong_size) << SR;
    else
      Diag(SL, diag::warn_strncat_src_size) << SR;
    return;
  }

  if (PatternType == 1)
    Diag(SL, diag::warn_strncat_large_size) << SR;
  else
    Diag(SL, diag::warn_strncat_src_size) << SR;

  // Build the canonical replacement: sizeof(dst) - strlen(dst) - 1.
  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - ";
  OS << "strlen(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - 1";

  Diag(SL, diag::note_strncat_wrong_size)
      << FixItHint::CreateReplacement(SR, OS.str());
}
13498 namespace {
13499 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName,
13500 const UnaryOperator *UnaryExpr, const Decl *D) {
13501 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) {
13502 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object)
13503 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D);
13504 return;
13508 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName,
13509 const UnaryOperator *UnaryExpr) {
13510 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) {
13511 const Decl *D = Lvalue->getDecl();
13512 if (isa<DeclaratorDecl>(D))
13513 if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType())
13514 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D);
13517 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr()))
13518 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr,
13519 Lvalue->getMemberDecl());
13522 void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName,
13523 const UnaryOperator *UnaryExpr) {
13524 const auto *Lambda = dyn_cast<LambdaExpr>(
13525 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens());
13526 if (!Lambda)
13527 return;
13529 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object)
13530 << CalleeName << 2 /*object: lambda expression*/;
13533 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName,
13534 const DeclRefExpr *Lvalue) {
13535 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl());
13536 if (Var == nullptr)
13537 return;
13539 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object)
13540 << CalleeName << 0 /*object: */ << Var;
13543 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName,
13544 const CastExpr *Cast) {
13545 SmallString<128> SizeString;
13546 llvm::raw_svector_ostream OS(SizeString);
13548 clang::CastKind Kind = Cast->getCastKind();
13549 if (Kind == clang::CK_BitCast &&
13550 !Cast->getSubExpr()->getType()->isFunctionPointerType())
13551 return;
13552 if (Kind == clang::CK_IntegralToPointer &&
13553 !isa<IntegerLiteral>(
13554 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens()))
13555 return;
13557 switch (Cast->getCastKind()) {
13558 case clang::CK_BitCast:
13559 case clang::CK_IntegralToPointer:
13560 case clang::CK_FunctionToPointerDecay:
13561 OS << '\'';
13562 Cast->printPretty(OS, nullptr, S.getPrintingPolicy());
13563 OS << '\'';
13564 break;
13565 default:
13566 return;
13569 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object)
13570 << CalleeName << 0 /*object: */ << OS.str();
13572 } // namespace
/// Alerts the user that they are attempting to free a non-malloc'd object.
///
/// Dispatches the first argument of \p E to the helpers above based on its
/// form: address-of, unary plus (lambda), stack array, address-of-label,
/// block literal, or — as a last resort — a suspicious cast.
void Sema::CheckFreeArguments(const CallExpr *E) {
  const std::string CalleeName =
      cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();

  { // Prefer something that doesn't involve a cast to make things simpler.
    const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
    if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg))
      switch (UnaryExpr->getOpcode()) {
      case UnaryOperator::Opcode::UO_AddrOf:
        return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr);
      case UnaryOperator::Opcode::UO_Plus:
        return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr);
      default:
        break;
      }

    if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg))
      if (Lvalue->getType()->isArrayType())
        return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue);

    // free(&&label) -- GNU address-of-label.
    if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) {
      Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object)
          << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier();
      return;
    }

    if (isa<BlockExpr>(Arg)) {
      Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object)
          << CalleeName << 1 /*object: block*/;
      return;
    }
  }

  // Maybe the cast was important, check after the other cases.
  // Note: this inspects the argument *without* stripping parens/casts.
  if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0)))
    return CheckFreeArgumentsCast(*this, CalleeName, Cast);
}
13612 void
13613 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
13614 SourceLocation ReturnLoc,
13615 bool isObjCMethod,
13616 const AttrVec *Attrs,
13617 const FunctionDecl *FD) {
13618 // Check if the return value is null but should not be.
13619 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
13620 (!isObjCMethod && isNonNullType(lhsType))) &&
13621 CheckNonNullExpr(*this, RetValExp))
13622 Diag(ReturnLoc, diag::warn_null_ret)
13623 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();
13625 // C++11 [basic.stc.dynamic.allocation]p4:
13626 // If an allocation function declared with a non-throwing
13627 // exception-specification fails to allocate storage, it shall return
13628 // a null pointer. Any other allocation function that fails to allocate
13629 // storage shall indicate failure only by throwing an exception [...]
13630 if (FD) {
13631 OverloadedOperatorKind Op = FD->getOverloadedOperator();
13632 if (Op == OO_New || Op == OO_Array_New) {
13633 const FunctionProtoType *Proto
13634 = FD->getType()->castAs<FunctionProtoType>();
13635 if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
13636 CheckNonNullExpr(*this, RetValExp))
13637 Diag(ReturnLoc, diag::warn_operator_new_returns_null)
13638 << FD << getLangOpts().CPlusPlus11;
13642 if (RetValExp && RetValExp->getType()->isWebAssemblyTableType()) {
13643 Diag(ReturnLoc, diag::err_wasm_table_art) << 1;
13646 // PPC MMA non-pointer types are not allowed as return type. Checking the type
13647 // here prevent the user from using a PPC MMA type as trailing return type.
13648 if (Context.getTargetInfo().getTriple().isPPC64())
13649 CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
/// Check for comparisons of floating-point values using == and !=. Issue a
/// warning if the comparison is not likely to do what the programmer intended.
///
/// Two diagnostics can result: warn_float_compare_literal when a literal is
/// compared against a narrower-typed operand and cannot be represented in
/// that type (making == always false / != always true), and the more general
/// warn_floatingpoint_eq (-Wfloat-equal) otherwise, subject to the
/// special-case exclusions below.
void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS,
                                BinaryOperatorKind Opcode) {
  if (!BinaryOperator::isEqualityOp(Opcode))
    return;

  // Match and capture subexpressions such as "(float) X == 0.1".
  FloatingLiteral *FPLiteral;
  CastExpr *FPCast;
  auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) {
    FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens());
    FPCast = dyn_cast<CastExpr>(R->IgnoreParens());
    return FPLiteral && FPCast;
  };

  // Try both operand orders: literal == cast, or cast == literal.
  if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) {
    auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>();
    auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>();
    if (SourceTy && TargetTy && SourceTy->isFloatingPoint() &&
        TargetTy->isFloatingPoint()) {
      // Convert the literal into the cast's source type; Lossy tells us
      // whether the value survives the round trip exactly.
      bool Lossy;
      llvm::APFloat TargetC = FPLiteral->getValue();
      TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)),
                      llvm::APFloat::rmNearestTiesToEven, &Lossy);
      if (Lossy) {
        // If the literal cannot be represented in the source type, then a
        // check for == is always false and check for != is always true.
        Diag(Loc, diag::warn_float_compare_literal)
            << (Opcode == BO_EQ) << QualType(SourceTy, 0)
            << LHS->getSourceRange() << RHS->getSourceRange();
        return;
      }
    }
  }

  // Match a more general floating-point equality comparison (-Wfloat-equal).
  Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
  Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();

  // Special case: check for x == x (which is OK).
  // Do not emit warnings for such cases.
  if (auto *DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
    if (auto *DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
      if (DRL->getDecl() == DRR->getDecl())
        return;

  // Special case: check for comparisons against literals that can be exactly
  // represented by APFloat. In such cases, do not emit a warning. This
  // is a heuristic: often comparison against such literals are used to
  // detect if a value in a variable has not changed. This clearly can
  // lead to false negatives.
  if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
    if (FLL->isExact())
      return;
  } else
    if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
      if (FLR->isExact())
        return;

  // Check for comparisons with builtin types.
  if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
    if (CL->getBuiltinCallee())
      return;

  if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
    if (CR->getBuiltinCallee())
      return;

  // Emit the diagnostic.
  Diag(Loc, diag::warn_floatingpoint_eq)
    << LHS->getSourceRange() << RHS->getSourceRange();
}
13726 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
13727 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
13729 namespace {
13731 /// Structure recording the 'active' range of an integer-valued
13732 /// expression.
13733 struct IntRange {
13734 /// The number of bits active in the int. Note that this includes exactly one
13735 /// sign bit if !NonNegative.
13736 unsigned Width;
13738 /// True if the int is known not to have negative values. If so, all leading
13739 /// bits before Width are known zero, otherwise they are known to be the
13740 /// same as the MSB within Width.
13741 bool NonNegative;
13743 IntRange(unsigned Width, bool NonNegative)
13744 : Width(Width), NonNegative(NonNegative) {}
13746 /// Number of bits excluding the sign bit.
13747 unsigned valueBits() const {
13748 return NonNegative ? Width : Width - 1;
13751 /// Returns the range of the bool type.
13752 static IntRange forBoolType() {
13753 return IntRange(1, true);
13756 /// Returns the range of an opaque value of the given integral type.
13757 static IntRange forValueOfType(ASTContext &C, QualType T) {
13758 return forValueOfCanonicalType(C,
13759 T->getCanonicalTypeInternal().getTypePtr());
13762 /// Returns the range of an opaque value of a canonical integral type.
13763 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
13764 assert(T->isCanonicalUnqualified());
13766 if (const VectorType *VT = dyn_cast<VectorType>(T))
13767 T = VT->getElementType().getTypePtr();
13768 if (const ComplexType *CT = dyn_cast<ComplexType>(T))
13769 T = CT->getElementType().getTypePtr();
13770 if (const AtomicType *AT = dyn_cast<AtomicType>(T))
13771 T = AT->getValueType().getTypePtr();
13773 if (!C.getLangOpts().CPlusPlus) {
13774 // For enum types in C code, use the underlying datatype.
13775 if (const EnumType *ET = dyn_cast<EnumType>(T))
13776 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr();
13777 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) {
13778 // For enum types in C++, use the known bit width of the enumerators.
13779 EnumDecl *Enum = ET->getDecl();
13780 // In C++11, enums can have a fixed underlying type. Use this type to
13781 // compute the range.
13782 if (Enum->isFixed()) {
13783 return IntRange(C.getIntWidth(QualType(T, 0)),
13784 !ET->isSignedIntegerOrEnumerationType());
13787 unsigned NumPositive = Enum->getNumPositiveBits();
13788 unsigned NumNegative = Enum->getNumNegativeBits();
13790 if (NumNegative == 0)
13791 return IntRange(NumPositive, true/*NonNegative*/);
13792 else
13793 return IntRange(std::max(NumPositive + 1, NumNegative),
13794 false/*NonNegative*/);
13797 if (const auto *EIT = dyn_cast<BitIntType>(T))
13798 return IntRange(EIT->getNumBits(), EIT->isUnsigned());
13800 const BuiltinType *BT = cast<BuiltinType>(T);
13801 assert(BT->isInteger());
13803 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
13806 /// Returns the "target" range of a canonical integral type, i.e.
13807 /// the range of values expressible in the type.
13809 /// This matches forValueOfCanonicalType except that enums have the
13810 /// full range of their type, not the range of their enumerators.
13811 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
13812 assert(T->isCanonicalUnqualified());
13814 if (const VectorType *VT = dyn_cast<VectorType>(T))
13815 T = VT->getElementType().getTypePtr();
13816 if (const ComplexType *CT = dyn_cast<ComplexType>(T))
13817 T = CT->getElementType().getTypePtr();
13818 if (const AtomicType *AT = dyn_cast<AtomicType>(T))
13819 T = AT->getValueType().getTypePtr();
13820 if (const EnumType *ET = dyn_cast<EnumType>(T))
13821 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();
13823 if (const auto *EIT = dyn_cast<BitIntType>(T))
13824 return IntRange(EIT->getNumBits(), EIT->isUnsigned());
13826 const BuiltinType *BT = cast<BuiltinType>(T);
13827 assert(BT->isInteger());
13829 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
13832 /// Returns the supremum of two ranges: i.e. their conservative merge.
13833 static IntRange join(IntRange L, IntRange R) {
13834 bool Unsigned = L.NonNegative && R.NonNegative;
13835 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned,
13836 L.NonNegative && R.NonNegative);
13839 /// Return the range of a bitwise-AND of the two ranges.
13840 static IntRange bit_and(IntRange L, IntRange R) {
13841 unsigned Bits = std::max(L.Width, R.Width);
13842 bool NonNegative = false;
13843 if (L.NonNegative) {
13844 Bits = std::min(Bits, L.Width);
13845 NonNegative = true;
13847 if (R.NonNegative) {
13848 Bits = std::min(Bits, R.Width);
13849 NonNegative = true;
13851 return IntRange(Bits, NonNegative);
13854 /// Return the range of a sum of the two ranges.
13855 static IntRange sum(IntRange L, IntRange R) {
13856 bool Unsigned = L.NonNegative && R.NonNegative;
13857 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned,
13858 Unsigned);
13861 /// Return the range of a difference of the two ranges.
13862 static IntRange difference(IntRange L, IntRange R) {
13863 // We need a 1-bit-wider range if:
13864 // 1) LHS can be negative: least value can be reduced.
13865 // 2) RHS can be negative: greatest value can be increased.
13866 bool CanWiden = !L.NonNegative || !R.NonNegative;
13867 bool Unsigned = L.NonNegative && R.Width == 0;
13868 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden +
13869 !Unsigned,
13870 Unsigned);
13873 /// Return the range of a product of the two ranges.
13874 static IntRange product(IntRange L, IntRange R) {
13875 // If both LHS and RHS can be negative, we can form
13876 // -2^L * -2^R = 2^(L + R)
13877 // which requires L + R + 1 value bits to represent.
13878 bool CanWiden = !L.NonNegative && !R.NonNegative;
13879 bool Unsigned = L.NonNegative && R.NonNegative;
13880 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned,
13881 Unsigned);
13884 /// Return the range of a remainder operation between the two ranges.
13885 static IntRange rem(IntRange L, IntRange R) {
13886 // The result of a remainder can't be larger than the result of
13887 // either side. The sign of the result is the sign of the LHS.
13888 bool Unsigned = L.NonNegative;
13889 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned,
13890 Unsigned);
13894 } // namespace
13896 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
13897 unsigned MaxWidth) {
13898 if (value.isSigned() && value.isNegative())
13899 return IntRange(value.getSignificantBits(), false);
13901 if (value.getBitWidth() > MaxWidth)
13902 value = value.trunc(MaxWidth);
13904 // isNonNegative() just checks the sign bit without considering
13905 // signedness.
13906 return IntRange(value.getActiveBits(), true);
13909 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
13910 unsigned MaxWidth) {
13911 if (result.isInt())
13912 return GetValueRange(C, result.getInt(), MaxWidth);
13914 if (result.isVector()) {
13915 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
13916 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
13917 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
13918 R = IntRange::join(R, El);
13920 return R;
13923 if (result.isComplexInt()) {
13924 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
13925 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
13926 return IntRange::join(R, I);
13929 // This can happen with lossless casts to intptr_t of "based" lvalues.
13930 // Assume it might use arbitrary bits.
13931 // FIXME: The only reason we need to pass the type in here is to get
13932 // the sign right on this one case. It would be nice if APValue
13933 // preserved this.
13934 assert(result.isLValue() || result.isAddrLabelDiff());
13935 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
13938 static QualType GetExprType(const Expr *E) {
13939 QualType Ty = E->getType();
13940 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
13941 Ty = AtomicRHS->getValueType();
13942 return Ty;
13945 /// Pseudo-evaluate the given integer expression, estimating the
13946 /// range of values it might take.
13948 /// \param MaxWidth The width to which the value will be truncated.
13949 /// \param Approximate If \c true, return a likely range for the result: in
13950 /// particular, assume that arithmetic on narrower types doesn't leave
13951 /// those types. If \c false, return a range including all possible
13952 /// result values.
13953 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
13954 bool InConstantContext, bool Approximate) {
13955 E = E->IgnoreParens();
13957 // Try a full evaluation first.
13958 Expr::EvalResult result;
13959 if (E->EvaluateAsRValue(result, C, InConstantContext))
13960 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);
13962 // I think we only want to look through implicit casts here; if the
13963 // user has an explicit widening cast, we should treat the value as
13964 // being of the new, wider type.
13965 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
13966 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
13967 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
13968 Approximate);
13970 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));
13972 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
13973 CE->getCastKind() == CK_BooleanToSignedIntegral;
13975 // Assume that non-integer casts can span the full range of the type.
13976 if (!isIntegerCast)
13977 return OutputTypeRange;
13979 IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
13980 std::min(MaxWidth, OutputTypeRange.Width),
13981 InConstantContext, Approximate);
13983 // Bail out if the subexpr's range is as wide as the cast type.
13984 if (SubRange.Width >= OutputTypeRange.Width)
13985 return OutputTypeRange;
13987 // Otherwise, we take the smaller width, and we're non-negative if
13988 // either the output type or the subexpr is.
13989 return IntRange(SubRange.Width,
13990 SubRange.NonNegative || OutputTypeRange.NonNegative);
13993 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
13994 // If we can fold the condition, just take that operand.
13995 bool CondResult;
13996 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
13997 return GetExprRange(C,
13998 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
13999 MaxWidth, InConstantContext, Approximate);
14001 // Otherwise, conservatively merge.
14002 // GetExprRange requires an integer expression, but a throw expression
14003 // results in a void type.
14004 Expr *E = CO->getTrueExpr();
14005 IntRange L = E->getType()->isVoidType()
14006 ? IntRange{0, true}
14007 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
14008 E = CO->getFalseExpr();
14009 IntRange R = E->getType()->isVoidType()
14010 ? IntRange{0, true}
14011 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
14012 return IntRange::join(L, R);
14015 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
14016 IntRange (*Combine)(IntRange, IntRange) = IntRange::join;
14018 switch (BO->getOpcode()) {
14019 case BO_Cmp:
14020 llvm_unreachable("builtin <=> should have class type");
14022 // Boolean-valued operations are single-bit and positive.
14023 case BO_LAnd:
14024 case BO_LOr:
14025 case BO_LT:
14026 case BO_GT:
14027 case BO_LE:
14028 case BO_GE:
14029 case BO_EQ:
14030 case BO_NE:
14031 return IntRange::forBoolType();
14033 // The type of the assignments is the type of the LHS, so the RHS
14034 // is not necessarily the same type.
14035 case BO_MulAssign:
14036 case BO_DivAssign:
14037 case BO_RemAssign:
14038 case BO_AddAssign:
14039 case BO_SubAssign:
14040 case BO_XorAssign:
14041 case BO_OrAssign:
14042 // TODO: bitfields?
14043 return IntRange::forValueOfType(C, GetExprType(E));
14045 // Simple assignments just pass through the RHS, which will have
14046 // been coerced to the LHS type.
14047 case BO_Assign:
14048 // TODO: bitfields?
14049 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
14050 Approximate);
14052 // Operations with opaque sources are black-listed.
14053 case BO_PtrMemD:
14054 case BO_PtrMemI:
14055 return IntRange::forValueOfType(C, GetExprType(E));
14057 // Bitwise-and uses the *infinum* of the two source ranges.
14058 case BO_And:
14059 case BO_AndAssign:
14060 Combine = IntRange::bit_and;
14061 break;
14063 // Left shift gets black-listed based on a judgement call.
14064 case BO_Shl:
14065 // ...except that we want to treat '1 << (blah)' as logically
14066 // positive. It's an important idiom.
14067 if (IntegerLiteral *I
14068 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
14069 if (I->getValue() == 1) {
14070 IntRange R = IntRange::forValueOfType(C, GetExprType(E));
14071 return IntRange(R.Width, /*NonNegative*/ true);
14074 [[fallthrough]];
14076 case BO_ShlAssign:
14077 return IntRange::forValueOfType(C, GetExprType(E));
14079 // Right shift by a constant can narrow its left argument.
14080 case BO_Shr:
14081 case BO_ShrAssign: {
14082 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext,
14083 Approximate);
14085 // If the shift amount is a positive constant, drop the width by
14086 // that much.
14087 if (std::optional<llvm::APSInt> shift =
14088 BO->getRHS()->getIntegerConstantExpr(C)) {
14089 if (shift->isNonNegative()) {
14090 if (shift->uge(L.Width))
14091 L.Width = (L.NonNegative ? 0 : 1);
14092 else
14093 L.Width -= shift->getZExtValue();
14097 return L;
14100 // Comma acts as its right operand.
14101 case BO_Comma:
14102 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
14103 Approximate);
14105 case BO_Add:
14106 if (!Approximate)
14107 Combine = IntRange::sum;
14108 break;
14110 case BO_Sub:
14111 if (BO->getLHS()->getType()->isPointerType())
14112 return IntRange::forValueOfType(C, GetExprType(E));
14113 if (!Approximate)
14114 Combine = IntRange::difference;
14115 break;
14117 case BO_Mul:
14118 if (!Approximate)
14119 Combine = IntRange::product;
14120 break;
14122 // The width of a division result is mostly determined by the size
14123 // of the LHS.
14124 case BO_Div: {
14125 // Don't 'pre-truncate' the operands.
14126 unsigned opWidth = C.getIntWidth(GetExprType(E));
14127 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext,
14128 Approximate);
14130 // If the divisor is constant, use that.
14131 if (std::optional<llvm::APSInt> divisor =
14132 BO->getRHS()->getIntegerConstantExpr(C)) {
14133 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor))
14134 if (log2 >= L.Width)
14135 L.Width = (L.NonNegative ? 0 : 1);
14136 else
14137 L.Width = std::min(L.Width - log2, MaxWidth);
14138 return L;
14141 // Otherwise, just use the LHS's width.
14142 // FIXME: This is wrong if the LHS could be its minimal value and the RHS
14143 // could be -1.
14144 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext,
14145 Approximate);
14146 return IntRange(L.Width, L.NonNegative && R.NonNegative);
14149 case BO_Rem:
14150 Combine = IntRange::rem;
14151 break;
14153 // The default behavior is okay for these.
14154 case BO_Xor:
14155 case BO_Or:
14156 break;
14159 // Combine the two ranges, but limit the result to the type in which we
14160 // performed the computation.
14161 QualType T = GetExprType(E);
14162 unsigned opWidth = C.getIntWidth(T);
14163 IntRange L =
14164 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate);
14165 IntRange R =
14166 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate);
14167 IntRange C = Combine(L, R);
14168 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType();
14169 C.Width = std::min(C.Width, MaxWidth);
14170 return C;
14173 if (const auto *UO = dyn_cast<UnaryOperator>(E)) {
14174 switch (UO->getOpcode()) {
14175 // Boolean-valued operations are white-listed.
14176 case UO_LNot:
14177 return IntRange::forBoolType();
14179 // Operations with opaque sources are black-listed.
14180 case UO_Deref:
14181 case UO_AddrOf: // should be impossible
14182 return IntRange::forValueOfType(C, GetExprType(E));
14184 default:
14185 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext,
14186 Approximate);
14190 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
14191 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext,
14192 Approximate);
14194 if (const auto *BitField = E->getSourceBitField())
14195 return IntRange(BitField->getBitWidthValue(C),
14196 BitField->getType()->isUnsignedIntegerOrEnumerationType());
14198 return IntRange::forValueOfType(C, GetExprType(E));
14201 static IntRange GetExprRange(ASTContext &C, const Expr *E,
14202 bool InConstantContext, bool Approximate) {
14203 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext,
14204 Approximate);
14207 /// Checks whether the given value, which currently has the given
14208 /// source semantics, has the same value when coerced through the
14209 /// target semantics.
14210 static bool IsSameFloatAfterCast(const llvm::APFloat &value,
14211 const llvm::fltSemantics &Src,
14212 const llvm::fltSemantics &Tgt) {
14213 llvm::APFloat truncated = value;
14215 bool ignored;
14216 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored);
14217 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored);
14219 return truncated.bitwiseIsEqual(value);
14222 /// Checks whether the given value, which currently has the given
14223 /// source semantics, has the same value when coerced through the
14224 /// target semantics.
14226 /// The value might be a vector of floats (or a complex number).
14227 static bool IsSameFloatAfterCast(const APValue &value,
14228 const llvm::fltSemantics &Src,
14229 const llvm::fltSemantics &Tgt) {
14230 if (value.isFloat())
14231 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);
14233 if (value.isVector()) {
14234 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
14235 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
14236 return false;
14237 return true;
14240 assert(value.isComplexFloat());
14241 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
14242 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
14245 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC,
14246 bool IsListInit = false);
14248 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
14249 // Suppress cases where we are comparing against an enum constant.
14250 if (const DeclRefExpr *DR =
14251 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
14252 if (isa<EnumConstantDecl>(DR->getDecl()))
14253 return true;
14255 // Suppress cases where the value is expanded from a macro, unless that macro
14256 // is how a language represents a boolean literal. This is the case in both C
14257 // and Objective-C.
14258 SourceLocation BeginLoc = E->getBeginLoc();
14259 if (BeginLoc.isMacroID()) {
14260 StringRef MacroName = Lexer::getImmediateMacroName(
14261 BeginLoc, S.getSourceManager(), S.getLangOpts());
14262 return MacroName != "YES" && MacroName != "NO" &&
14263 MacroName != "true" && MacroName != "false";
14266 return false;
14269 static bool isKnownToHaveUnsignedValue(Expr *E) {
14270 return E->getType()->isIntegerType() &&
14271 (!E->getType()->isSignedIntegerType() ||
14272 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
14275 namespace {
14276 /// The promoted range of values of a type. In general this has the
14277 /// following structure:
14279 /// |-----------| . . . |-----------|
14280 /// ^ ^ ^ ^
14281 /// Min HoleMin HoleMax Max
14283 /// ... where there is only a hole if a signed type is promoted to unsigned
14284 /// (in which case Min and Max are the smallest and largest representable
14285 /// values).
14286 struct PromotedRange {
14287 // Min, or HoleMax if there is a hole.
14288 llvm::APSInt PromotedMin;
14289 // Max, or HoleMin if there is a hole.
14290 llvm::APSInt PromotedMax;
14292 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) {
14293 if (R.Width == 0)
14294 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned);
14295 else if (R.Width >= BitWidth && !Unsigned) {
14296 // Promotion made the type *narrower*. This happens when promoting
14297 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'.
14298 // Treat all values of 'signed int' as being in range for now.
14299 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned);
14300 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned);
14301 } else {
14302 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative)
14303 .extOrTrunc(BitWidth);
14304 PromotedMin.setIsUnsigned(Unsigned);
14306 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative)
14307 .extOrTrunc(BitWidth);
14308 PromotedMax.setIsUnsigned(Unsigned);
14312 // Determine whether this range is contiguous (has no hole).
14313 bool isContiguous() const { return PromotedMin <= PromotedMax; }
14315 // Where a constant value is within the range.
14316 enum ComparisonResult {
14317 LT = 0x1,
14318 LE = 0x2,
14319 GT = 0x4,
14320 GE = 0x8,
14321 EQ = 0x10,
14322 NE = 0x20,
14323 InRangeFlag = 0x40,
14325 Less = LE | LT | NE,
14326 Min = LE | InRangeFlag,
14327 InRange = InRangeFlag,
14328 Max = GE | InRangeFlag,
14329 Greater = GE | GT | NE,
14331 OnlyValue = LE | GE | EQ | InRangeFlag,
14332 InHole = NE
14335 ComparisonResult compare(const llvm::APSInt &Value) const {
14336 assert(Value.getBitWidth() == PromotedMin.getBitWidth() &&
14337 Value.isUnsigned() == PromotedMin.isUnsigned());
14338 if (!isContiguous()) {
14339 assert(Value.isUnsigned() && "discontiguous range for signed compare");
14340 if (Value.isMinValue()) return Min;
14341 if (Value.isMaxValue()) return Max;
14342 if (Value >= PromotedMin) return InRange;
14343 if (Value <= PromotedMax) return InRange;
14344 return InHole;
14347 switch (llvm::APSInt::compareValues(Value, PromotedMin)) {
14348 case -1: return Less;
14349 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min;
14350 case 1:
14351 switch (llvm::APSInt::compareValues(Value, PromotedMax)) {
14352 case -1: return InRange;
14353 case 0: return Max;
14354 case 1: return Greater;
14358 llvm_unreachable("impossible compare result");
14361 static std::optional<StringRef>
14362 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
14363 if (Op == BO_Cmp) {
14364 ComparisonResult LTFlag = LT, GTFlag = GT;
14365 if (ConstantOnRHS) std::swap(LTFlag, GTFlag);
14367 if (R & EQ) return StringRef("'std::strong_ordering::equal'");
14368 if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
14369 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");
14370 return std::nullopt;
14373 ComparisonResult TrueFlag, FalseFlag;
14374 if (Op == BO_EQ) {
14375 TrueFlag = EQ;
14376 FalseFlag = NE;
14377 } else if (Op == BO_NE) {
14378 TrueFlag = NE;
14379 FalseFlag = EQ;
14380 } else {
14381 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) {
14382 TrueFlag = LT;
14383 FalseFlag = GE;
14384 } else {
14385 TrueFlag = GT;
14386 FalseFlag = LE;
14388 if (Op == BO_GE || Op == BO_LE)
14389 std::swap(TrueFlag, FalseFlag);
14391 if (R & TrueFlag)
14392 return StringRef("true");
14393 if (R & FalseFlag)
14394 return StringRef("false");
14395 return std::nullopt;
14400 static bool HasEnumType(Expr *E) {
14401 // Strip off implicit integral promotions.
14402 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
14403 if (ICE->getCastKind() != CK_IntegralCast &&
14404 ICE->getCastKind() != CK_NoOp)
14405 break;
14406 E = ICE->getSubExpr();
14409 return E->getType()->isEnumeralType();
14412 static int classifyConstantValue(Expr *Constant) {
14413 // The values of this enumeration are used in the diagnostics
14414 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
14415 enum ConstantValueKind {
14416 Miscellaneous = 0,
14417 LiteralTrue,
14418 LiteralFalse
14420 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant))
14421 return BL->getValue() ? ConstantValueKind::LiteralTrue
14422 : ConstantValueKind::LiteralFalse;
14423 return ConstantValueKind::Miscellaneous;
14426 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
14427 Expr *Constant, Expr *Other,
14428 const llvm::APSInt &Value,
14429 bool RhsConstant) {
14430 if (S.inTemplateInstantiation())
14431 return false;
14433 Expr *OriginalOther = Other;
14435 Constant = Constant->IgnoreParenImpCasts();
14436 Other = Other->IgnoreParenImpCasts();
14438 // Suppress warnings on tautological comparisons between values of the same
14439 // enumeration type. There are only two ways we could warn on this:
14440 // - If the constant is outside the range of representable values of
14441 // the enumeration. In such a case, we should warn about the cast
14442 // to enumeration type, not about the comparison.
14443 // - If the constant is the maximum / minimum in-range value. For an
14444 // enumeratin type, such comparisons can be meaningful and useful.
14445 if (Constant->getType()->isEnumeralType() &&
14446 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
14447 return false;
14449 IntRange OtherValueRange = GetExprRange(
14450 S.Context, Other, S.isConstantEvaluatedContext(), /*Approximate=*/false);
14452 QualType OtherT = Other->getType();
14453 if (const auto *AT = OtherT->getAs<AtomicType>())
14454 OtherT = AT->getValueType();
14455 IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);
14457 // Special case for ObjC BOOL on targets where its a typedef for a signed char
14458 // (Namely, macOS). FIXME: IntRange::forValueOfType should do this.
14459 bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
14460 S.NSAPIObj->isObjCBOOLType(OtherT) &&
14461 OtherT->isSpecificBuiltinType(BuiltinType::SChar);
14463 // Whether we're treating Other as being a bool because of the form of
14464 // expression despite it having another type (typically 'int' in C).
14465 bool OtherIsBooleanDespiteType =
14466 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
14467 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
14468 OtherTypeRange = OtherValueRange = IntRange::forBoolType();
14470 // Check if all values in the range of possible values of this expression
14471 // lead to the same comparison outcome.
14472 PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
14473 Value.isUnsigned());
14474 auto Cmp = OtherPromotedValueRange.compare(Value);
14475 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
14476 if (!Result)
14477 return false;
14479 // Also consider the range determined by the type alone. This allows us to
14480 // classify the warning under the proper diagnostic group.
14481 bool TautologicalTypeCompare = false;
14483 PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
14484 Value.isUnsigned());
14485 auto TypeCmp = OtherPromotedTypeRange.compare(Value);
14486 if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
14487 RhsConstant)) {
14488 TautologicalTypeCompare = true;
14489 Cmp = TypeCmp;
14490 Result = TypeResult;
14494 // Don't warn if the non-constant operand actually always evaluates to the
14495 // same value.
14496 if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
14497 return false;
14499 // Suppress the diagnostic for an in-range comparison if the constant comes
14500 // from a macro or enumerator. We don't want to diagnose
14502 // some_long_value <= INT_MAX
14504 // when sizeof(int) == sizeof(long).
14505 bool InRange = Cmp & PromotedRange::InRangeFlag;
14506 if (InRange && IsEnumConstOrFromMacro(S, Constant))
14507 return false;
14509 // A comparison of an unsigned bit-field against 0 is really a type problem,
14510 // even though at the type level the bit-field might promote to 'signed int'.
14511 if (Other->refersToBitField() && InRange && Value == 0 &&
14512 Other->getType()->isUnsignedIntegerOrEnumerationType())
14513 TautologicalTypeCompare = true;
14515 // If this is a comparison to an enum constant, include that
14516 // constant in the diagnostic.
14517 const EnumConstantDecl *ED = nullptr;
14518 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant))
14519 ED = dyn_cast<EnumConstantDecl>(DR->getDecl());
14521 // Should be enough for uint128 (39 decimal digits)
14522 SmallString<64> PrettySourceValue;
14523 llvm::raw_svector_ostream OS(PrettySourceValue);
14524 if (ED) {
14525 OS << '\'' << *ED << "' (" << Value << ")";
14526 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>(
14527 Constant->IgnoreParenImpCasts())) {
14528 OS << (BL->getValue() ? "YES" : "NO");
14529 } else {
14530 OS << Value;
14533 if (!TautologicalTypeCompare) {
14534 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range)
14535 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative
14536 << E->getOpcodeStr() << OS.str() << *Result
14537 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
14538 return true;
14541 if (IsObjCSignedCharBool) {
14542 S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
14543 S.PDiag(diag::warn_tautological_compare_objc_bool)
14544 << OS.str() << *Result);
14545 return true;
14548 // FIXME: We use a somewhat different formatting for the in-range cases and
14549 // cases involving boolean values for historical reasons. We should pick a
14550 // consistent way of presenting these diagnostics.
14551 if (!InRange || Other->isKnownToHaveBooleanValue()) {
14553 S.DiagRuntimeBehavior(
14554 E->getOperatorLoc(), E,
14555 S.PDiag(!InRange ? diag::warn_out_of_range_compare
14556 : diag::warn_tautological_bool_compare)
14557 << OS.str() << classifyConstantValue(Constant) << OtherT
14558 << OtherIsBooleanDespiteType << *Result
14559 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
14560 } else {
14561 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy;
14562 unsigned Diag =
14563 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
14564 ? (HasEnumType(OriginalOther)
14565 ? diag::warn_unsigned_enum_always_true_comparison
14566 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison
14567 : diag::warn_unsigned_always_true_comparison)
14568 : diag::warn_tautological_constant_compare;
14570 S.Diag(E->getOperatorLoc(), Diag)
14571 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
14572 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
14575 return true;
14578 /// Analyze the operands of the given comparison. Implements the
14579 /// fallback case from AnalyzeComparison.
14580 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
14581 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
14582 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
14585 /// Implements -Wsign-compare.
14587 /// \param E the binary operator to check for warnings
14588 static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
14589 // The type the comparison is being performed in.
14590 QualType T = E->getLHS()->getType();
14592 // Only analyze comparison operators where both sides have been converted to
14593 // the same type.
14594 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType()))
14595 return AnalyzeImpConvsInComparison(S, E);
14597 // Don't analyze value-dependent comparisons directly.
14598 if (E->isValueDependent())
14599 return AnalyzeImpConvsInComparison(S, E);
14601 Expr *LHS = E->getLHS();
14602 Expr *RHS = E->getRHS();
14604 if (T->isIntegralType(S.Context)) {
14605 std::optional<llvm::APSInt> RHSValue =
14606 RHS->getIntegerConstantExpr(S.Context);
14607 std::optional<llvm::APSInt> LHSValue =
14608 LHS->getIntegerConstantExpr(S.Context);
14610 // We don't care about expressions whose result is a constant.
14611 if (RHSValue && LHSValue)
14612 return AnalyzeImpConvsInComparison(S, E);
14614 // We only care about expressions where just one side is literal
14615 if ((bool)RHSValue ^ (bool)LHSValue) {
14616 // Is the constant on the RHS or LHS?
14617 const bool RhsConstant = (bool)RHSValue;
14618 Expr *Const = RhsConstant ? RHS : LHS;
14619 Expr *Other = RhsConstant ? LHS : RHS;
14620 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue;
14622 // Check whether an integer constant comparison results in a value
14623 // of 'true' or 'false'.
14624 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant))
14625 return AnalyzeImpConvsInComparison(S, E);
14629 if (!T->hasUnsignedIntegerRepresentation()) {
14630 // We don't do anything special if this isn't an unsigned integral
14631 // comparison: we're only interested in integral comparisons, and
14632 // signed comparisons only happen in cases we don't care to warn about.
14633 return AnalyzeImpConvsInComparison(S, E);
14636 LHS = LHS->IgnoreParenImpCasts();
14637 RHS = RHS->IgnoreParenImpCasts();
14639 if (!S.getLangOpts().CPlusPlus) {
14640 // Avoid warning about comparison of integers with different signs when
14641 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
14642 // the type of `E`.
14643 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
14644 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
14645 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
14646 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
14649 // Check to see if one of the (unmodified) operands is of different
14650 // signedness.
14651 Expr *signedOperand, *unsignedOperand;
14652 if (LHS->getType()->hasSignedIntegerRepresentation()) {
14653 assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
14654 "unsigned comparison between two signed integer expressions?");
14655 signedOperand = LHS;
14656 unsignedOperand = RHS;
14657 } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
14658 signedOperand = RHS;
14659 unsignedOperand = LHS;
14660 } else {
14661 return AnalyzeImpConvsInComparison(S, E);
14664 // Otherwise, calculate the effective range of the signed operand.
14665 IntRange signedRange =
14666 GetExprRange(S.Context, signedOperand, S.isConstantEvaluatedContext(),
14667 /*Approximate=*/true);
14669 // Go ahead and analyze implicit conversions in the operands. Note
14670 // that we skip the implicit conversions on both sides.
14671 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
14672 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());
14674 // If the signed range is non-negative, -Wsign-compare won't fire.
14675 if (signedRange.NonNegative)
14676 return;
14678 // For (in)equality comparisons, if the unsigned operand is a
14679 // constant which cannot collide with a overflowed signed operand,
14680 // then reinterpreting the signed operand as unsigned will not
14681 // change the result of the comparison.
14682 if (E->isEqualityOp()) {
14683 unsigned comparisonWidth = S.Context.getIntWidth(T);
14684 IntRange unsignedRange =
14685 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluatedContext(),
14686 /*Approximate=*/true);
14688 // We should never be unable to prove that the unsigned operand is
14689 // non-negative.
14690 assert(unsignedRange.NonNegative && "unsigned range includes negative?");
14692 if (unsignedRange.Width < comparisonWidth)
14693 return;
14696 S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
14697 S.PDiag(diag::warn_mixed_sign_comparison)
14698 << LHS->getType() << RHS->getType()
14699 << LHS->getSourceRange() << RHS->getSourceRange());
14702 /// Analyzes an attempt to assign the given value to a bitfield.
14704 /// Returns true if there was something fishy about the attempt.
14705 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
14706 SourceLocation InitLoc) {
14707 assert(Bitfield->isBitField());
14708 if (Bitfield->isInvalidDecl())
14709 return false;
14711 // White-list bool bitfields.
14712 QualType BitfieldType = Bitfield->getType();
14713 if (BitfieldType->isBooleanType())
14714 return false;
14716 if (BitfieldType->isEnumeralType()) {
14717 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
14718 // If the underlying enum type was not explicitly specified as an unsigned
14719 // type and the enum contain only positive values, MSVC++ will cause an
14720 // inconsistency by storing this as a signed type.
14721 if (S.getLangOpts().CPlusPlus11 &&
14722 !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
14723 BitfieldEnumDecl->getNumPositiveBits() > 0 &&
14724 BitfieldEnumDecl->getNumNegativeBits() == 0) {
14725 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
14726 << BitfieldEnumDecl;
14730 // Ignore value- or type-dependent expressions.
14731 if (Bitfield->getBitWidth()->isValueDependent() ||
14732 Bitfield->getBitWidth()->isTypeDependent() ||
14733 Init->isValueDependent() ||
14734 Init->isTypeDependent())
14735 return false;
14737 Expr *OriginalInit = Init->IgnoreParenImpCasts();
14738 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
14740 Expr::EvalResult Result;
14741 if (!OriginalInit->EvaluateAsInt(Result, S.Context,
14742 Expr::SE_AllowSideEffects)) {
14743 // The RHS is not constant. If the RHS has an enum type, make sure the
14744 // bitfield is wide enough to hold all the values of the enum without
14745 // truncation.
14746 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
14747 EnumDecl *ED = EnumTy->getDecl();
14748 bool SignedBitfield = BitfieldType->isSignedIntegerType();
14750 // Enum types are implicitly signed on Windows, so check if there are any
14751 // negative enumerators to see if the enum was intended to be signed or
14752 // not.
14753 bool SignedEnum = ED->getNumNegativeBits() > 0;
14755 // Check for surprising sign changes when assigning enum values to a
14756 // bitfield of different signedness. If the bitfield is signed and we
14757 // have exactly the right number of bits to store this unsigned enum,
14758 // suggest changing the enum to an unsigned type. This typically happens
14759 // on Windows where unfixed enums always use an underlying type of 'int'.
14760 unsigned DiagID = 0;
14761 if (SignedEnum && !SignedBitfield) {
14762 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
14763 } else if (SignedBitfield && !SignedEnum &&
14764 ED->getNumPositiveBits() == FieldWidth) {
14765 DiagID = diag::warn_signed_bitfield_enum_conversion;
14768 if (DiagID) {
14769 S.Diag(InitLoc, DiagID) << Bitfield << ED;
14770 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
14771 SourceRange TypeRange =
14772 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
14773 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
14774 << SignedEnum << TypeRange;
14777 // Compute the required bitwidth. If the enum has negative values, we need
14778 // one more bit than the normal number of positive bits to represent the
14779 // sign bit.
14780 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1,
14781 ED->getNumNegativeBits())
14782 : ED->getNumPositiveBits();
14784 // Check the bitwidth.
14785 if (BitsNeeded > FieldWidth) {
14786 Expr *WidthExpr = Bitfield->getBitWidth();
14787 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum)
14788 << Bitfield << ED;
14789 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield)
14790 << BitsNeeded << ED << WidthExpr->getSourceRange();
14794 return false;
14797 llvm::APSInt Value = Result.Val.getInt();
14799 unsigned OriginalWidth = Value.getBitWidth();
14801 // In C, the macro 'true' from stdbool.h will evaluate to '1'; To reduce
14802 // false positives where the user is demonstrating they intend to use the
14803 // bit-field as a Boolean, check to see if the value is 1 and we're assigning
14804 // to a one-bit bit-field to see if the value came from a macro named 'true'.
14805 bool OneAssignedToOneBitBitfield = FieldWidth == 1 && Value == 1;
14806 if (OneAssignedToOneBitBitfield && !S.LangOpts.CPlusPlus) {
14807 SourceLocation MaybeMacroLoc = OriginalInit->getBeginLoc();
14808 if (S.SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
14809 S.findMacroSpelling(MaybeMacroLoc, "true"))
14810 return false;
14813 if (!Value.isSigned() || Value.isNegative())
14814 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit))
14815 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
14816 OriginalWidth = Value.getSignificantBits();
14818 if (OriginalWidth <= FieldWidth)
14819 return false;
14821 // Compute the value which the bitfield will contain.
14822 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
14823 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType());
14825 // Check whether the stored value is equal to the original value.
14826 TruncatedValue = TruncatedValue.extend(OriginalWidth);
14827 if (llvm::APSInt::isSameValue(Value, TruncatedValue))
14828 return false;
14830 std::string PrettyValue = toString(Value, 10);
14831 std::string PrettyTrunc = toString(TruncatedValue, 10);
14833 S.Diag(InitLoc, OneAssignedToOneBitBitfield
14834 ? diag::warn_impcast_single_bit_bitield_precision_constant
14835 : diag::warn_impcast_bitfield_precision_constant)
14836 << PrettyValue << PrettyTrunc << OriginalInit->getType()
14837 << Init->getSourceRange();
14839 return true;
14842 /// Analyze the given simple or compound assignment for warning-worthy
14843 /// operations.
14844 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
14845 // Just recurse on the LHS.
14846 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
14848 // We want to recurse on the RHS as normal unless we're assigning to
14849 // a bitfield.
14850 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) {
14851 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(),
14852 E->getOperatorLoc())) {
14853 // Recurse, ignoring any implicit conversions on the RHS.
14854 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(),
14855 E->getOperatorLoc());
14859 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
14861 // Diagnose implicitly sequentially-consistent atomic assignment.
14862 if (E->getLHS()->getType()->isAtomicType())
14863 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
14866 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
14867 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
14868 SourceLocation CContext, unsigned diag,
14869 bool pruneControlFlow = false) {
14870 if (pruneControlFlow) {
14871 S.DiagRuntimeBehavior(E->getExprLoc(), E,
14872 S.PDiag(diag)
14873 << SourceType << T << E->getSourceRange()
14874 << SourceRange(CContext));
14875 return;
14877 S.Diag(E->getExprLoc(), diag)
14878 << SourceType << T << E->getSourceRange() << SourceRange(CContext);
14881 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
14882 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
14883 SourceLocation CContext,
14884 unsigned diag, bool pruneControlFlow = false) {
14885 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow);
14888 static bool isObjCSignedCharBool(Sema &S, QualType Ty) {
14889 return Ty->isSpecificBuiltinType(BuiltinType::SChar) &&
14890 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty);
14893 static void adornObjCBoolConversionDiagWithTernaryFixit(
14894 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) {
14895 Expr *Ignored = SourceExpr->IgnoreImplicit();
14896 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored))
14897 Ignored = OVE->getSourceExpr();
14898 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) ||
14899 isa<BinaryOperator>(Ignored) ||
14900 isa<CXXOperatorCallExpr>(Ignored);
14901 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc());
14902 if (NeedsParens)
14903 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(")
14904 << FixItHint::CreateInsertion(EndLoc, ")");
14905 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO");
14908 /// Diagnose an implicit cast from a floating point value to an integer value.
14909 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
14910 SourceLocation CContext) {
14911 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool);
14912 const bool PruneWarnings = S.inTemplateInstantiation();
14914 Expr *InnerE = E->IgnoreParenImpCasts();
14915 // We also want to warn on, e.g., "int i = -1.234"
14916 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
14917 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
14918 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();
14920 const bool IsLiteral =
14921 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE);
14923 llvm::APFloat Value(0.0);
14924 bool IsConstant =
14925 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects);
14926 if (!IsConstant) {
14927 if (isObjCSignedCharBool(S, T)) {
14928 return adornObjCBoolConversionDiagWithTernaryFixit(
14929 S, E,
14930 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool)
14931 << E->getType());
14934 return DiagnoseImpCast(S, E, T, CContext,
14935 diag::warn_impcast_float_integer, PruneWarnings);
14938 bool isExact = false;
14940 llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
14941 T->hasUnsignedIntegerRepresentation());
14942 llvm::APFloat::opStatus Result = Value.convertToInteger(
14943 IntegerValue, llvm::APFloat::rmTowardZero, &isExact);
14945 // FIXME: Force the precision of the source value down so we don't print
14946 // digits which are usually useless (we don't really care here if we
14947 // truncate a digit by accident in edge cases). Ideally, APFloat::toString
14948 // would automatically print the shortest representation, but it's a bit
14949 // tricky to implement.
14950 SmallString<16> PrettySourceValue;
14951 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
14952 precision = (precision * 59 + 195) / 196;
14953 Value.toString(PrettySourceValue, precision);
14955 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) {
14956 return adornObjCBoolConversionDiagWithTernaryFixit(
14957 S, E,
14958 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool)
14959 << PrettySourceValue);
14962 if (Result == llvm::APFloat::opOK && isExact) {
14963 if (IsLiteral) return;
14964 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
14965 PruneWarnings);
14968 // Conversion of a floating-point value to a non-bool integer where the
14969 // integral part cannot be represented by the integer type is undefined.
14970 if (!IsBool && Result == llvm::APFloat::opInvalidOp)
14971 return DiagnoseImpCast(
14972 S, E, T, CContext,
14973 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
14974 : diag::warn_impcast_float_to_integer_out_of_range,
14975 PruneWarnings);
14977 unsigned DiagID = 0;
14978 if (IsLiteral) {
14979 // Warn on floating point literal to integer.
14980 DiagID = diag::warn_impcast_literal_float_to_integer;
14981 } else if (IntegerValue == 0) {
14982 if (Value.isZero()) { // Skip -0.0 to 0 conversion.
14983 return DiagnoseImpCast(S, E, T, CContext,
14984 diag::warn_impcast_float_integer, PruneWarnings);
14986 // Warn on non-zero to zero conversion.
14987 DiagID = diag::warn_impcast_float_to_integer_zero;
14988 } else {
14989 if (IntegerValue.isUnsigned()) {
14990 if (!IntegerValue.isMaxValue()) {
14991 return DiagnoseImpCast(S, E, T, CContext,
14992 diag::warn_impcast_float_integer, PruneWarnings);
14994 } else { // IntegerValue.isSigned()
14995 if (!IntegerValue.isMaxSignedValue() &&
14996 !IntegerValue.isMinSignedValue()) {
14997 return DiagnoseImpCast(S, E, T, CContext,
14998 diag::warn_impcast_float_integer, PruneWarnings);
15001 // Warn on evaluatable floating point expression to integer conversion.
15002 DiagID = diag::warn_impcast_float_to_integer;
15005 SmallString<16> PrettyTargetValue;
15006 if (IsBool)
15007 PrettyTargetValue = Value.isZero() ? "false" : "true";
15008 else
15009 IntegerValue.toString(PrettyTargetValue);
15011 if (PruneWarnings) {
15012 S.DiagRuntimeBehavior(E->getExprLoc(), E,
15013 S.PDiag(DiagID)
15014 << E->getType() << T.getUnqualifiedType()
15015 << PrettySourceValue << PrettyTargetValue
15016 << E->getSourceRange() << SourceRange(CContext));
15017 } else {
15018 S.Diag(E->getExprLoc(), DiagID)
15019 << E->getType() << T.getUnqualifiedType() << PrettySourceValue
15020 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
15024 /// Analyze the given compound assignment for the possible losing of
15025 /// floating-point precision.
15026 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
15027 assert(isa<CompoundAssignOperator>(E) &&
15028 "Must be compound assignment operation");
15029 // Recurse on the LHS and RHS in here
15030 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
15031 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
15033 if (E->getLHS()->getType()->isAtomicType())
15034 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);
15036 // Now check the outermost expression
15037 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
15038 const auto *RBT = cast<CompoundAssignOperator>(E)
15039 ->getComputationResultType()
15040 ->getAs<BuiltinType>();
15042 // The below checks assume source is floating point.
15043 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;
15045 // If source is floating point but target is an integer.
15046 if (ResultBT->isInteger())
15047 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(),
15048 E->getExprLoc(), diag::warn_impcast_float_integer);
15050 if (!ResultBT->isFloatingPoint())
15051 return;
15053 // If both source and target are floating points, warn about losing precision.
15054 int Order = S.getASTContext().getFloatingTypeSemanticOrder(
15055 QualType(ResultBT, 0), QualType(RBT, 0));
15056 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
15057 // warn about dropping FP rank.
15058 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
15059 diag::warn_impcast_float_result_precision);
15062 static std::string PrettyPrintInRange(const llvm::APSInt &Value,
15063 IntRange Range) {
15064 if (!Range.Width) return "0";
15066 llvm::APSInt ValueInRange = Value;
15067 ValueInRange.setIsSigned(!Range.NonNegative);
15068 ValueInRange = ValueInRange.trunc(Range.Width);
15069 return toString(ValueInRange, 10);
15072 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
15073 if (!isa<ImplicitCastExpr>(Ex))
15074 return false;
15076 Expr *InnerE = Ex->IgnoreParenImpCasts();
15077 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
15078 const Type *Source =
15079 S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
15080 if (Target->isDependentType())
15081 return false;
15083 const BuiltinType *FloatCandidateBT =
15084 dyn_cast<BuiltinType>(ToBool ? Source : Target);
15085 const Type *BoolCandidateType = ToBool ? Target : Source;
15087 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
15088 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
15091 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
15092 SourceLocation CC) {
15093 unsigned NumArgs = TheCall->getNumArgs();
15094 for (unsigned i = 0; i < NumArgs; ++i) {
15095 Expr *CurrA = TheCall->getArg(i);
15096 if (!IsImplicitBoolFloatConversion(S, CurrA, true))
15097 continue;
15099 bool IsSwapped = ((i > 0) &&
15100 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
15101 IsSwapped |= ((i < (NumArgs - 1)) &&
15102 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
15103 if (IsSwapped) {
15104 // Warn on this floating-point to bool conversion.
15105 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
15106 CurrA->getType(), CC,
15107 diag::warn_impcast_floating_point_to_bool);
15112 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
15113 SourceLocation CC) {
15114 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
15115 E->getExprLoc()))
15116 return;
15118 // Don't warn on functions which have return type nullptr_t.
15119 if (isa<CallExpr>(E))
15120 return;
15122 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
15123 const Expr *NewE = E->IgnoreParenImpCasts();
15124 bool IsGNUNullExpr = isa<GNUNullExpr>(NewE);
15125 bool HasNullPtrType = NewE->getType()->isNullPtrType();
15126 if (!IsGNUNullExpr && !HasNullPtrType)
15127 return;
15129 // Return if target type is a safe conversion.
15130 if (T->isAnyPointerType() || T->isBlockPointerType() ||
15131 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
15132 return;
15134 SourceLocation Loc = E->getSourceRange().getBegin();
15136 // Venture through the macro stacks to get to the source of macro arguments.
15137 // The new location is a better location than the complete location that was
15138 // passed in.
15139 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
15140 CC = S.SourceMgr.getTopMacroCallerLoc(CC);
15142 // __null is usually wrapped in a macro. Go up a macro if that is the case.
15143 if (IsGNUNullExpr && Loc.isMacroID()) {
15144 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
15145 Loc, S.SourceMgr, S.getLangOpts());
15146 if (MacroName == "NULL")
15147 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
15150 // Only warn if the null and context location are in the same macro expansion.
15151 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
15152 return;
15154 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
15155 << HasNullPtrType << T << SourceRange(CC)
15156 << FixItHint::CreateReplacement(Loc,
15157 S.getFixItZeroLiteralForType(T, Loc));
15160 static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
15161 ObjCArrayLiteral *ArrayLiteral);
15163 static void
15164 checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
15165 ObjCDictionaryLiteral *DictionaryLiteral);
15167 /// Check a single element within a collection literal against the
15168 /// target element type.
15169 static void checkObjCCollectionLiteralElement(Sema &S,
15170 QualType TargetElementType,
15171 Expr *Element,
15172 unsigned ElementKind) {
15173 // Skip a bitcast to 'id' or qualified 'id'.
15174 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
15175 if (ICE->getCastKind() == CK_BitCast &&
15176 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
15177 Element = ICE->getSubExpr();
15180 QualType ElementType = Element->getType();
15181 ExprResult ElementResult(Element);
15182 if (ElementType->getAs<ObjCObjectPointerType>() &&
15183 S.CheckSingleAssignmentConstraints(TargetElementType,
15184 ElementResult,
15185 false, false)
15186 != Sema::Compatible) {
15187 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
15188 << ElementType << ElementKind << TargetElementType
15189 << Element->getSourceRange();
15192 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
15193 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
15194 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
15195 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
15198 /// Check an Objective-C array literal being converted to the given
15199 /// target type.
15200 static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
15201 ObjCArrayLiteral *ArrayLiteral) {
15202 if (!S.NSArrayDecl)
15203 return;
15205 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
15206 if (!TargetObjCPtr)
15207 return;
15209 if (TargetObjCPtr->isUnspecialized() ||
15210 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
15211 != S.NSArrayDecl->getCanonicalDecl())
15212 return;
15214 auto TypeArgs = TargetObjCPtr->getTypeArgs();
15215 if (TypeArgs.size() != 1)
15216 return;
15218 QualType TargetElementType = TypeArgs[0];
15219 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) {
15220 checkObjCCollectionLiteralElement(S, TargetElementType,
15221 ArrayLiteral->getElement(I),
15226 /// Check an Objective-C dictionary literal being converted to the given
15227 /// target type.
15228 static void
15229 checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
15230 ObjCDictionaryLiteral *DictionaryLiteral) {
15231 if (!S.NSDictionaryDecl)
15232 return;
15234 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
15235 if (!TargetObjCPtr)
15236 return;
15238 if (TargetObjCPtr->isUnspecialized() ||
15239 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
15240 != S.NSDictionaryDecl->getCanonicalDecl())
15241 return;
15243 auto TypeArgs = TargetObjCPtr->getTypeArgs();
15244 if (TypeArgs.size() != 2)
15245 return;
15247 QualType TargetKeyType = TypeArgs[0];
15248 QualType TargetObjectType = TypeArgs[1];
15249 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
15250 auto Element = DictionaryLiteral->getKeyValueElement(I);
15251 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
15252 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
15256 // Helper function to filter out cases for constant width constant conversion.
15257 // Don't warn on char array initialization or for non-decimal values.
15258 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
15259 SourceLocation CC) {
15260 // If initializing from a constant, and the constant starts with '0',
15261 // then it is a binary, octal, or hexadecimal. Allow these constants
15262 // to fill all the bits, even if there is a sign change.
15263 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
15264 const char FirstLiteralCharacter =
15265 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
15266 if (FirstLiteralCharacter == '0')
15267 return false;
15270 // If the CC location points to a '{', and the type is char, then assume
15271 // assume it is an array initialization.
15272 if (CC.isValid() && T->isCharType()) {
15273 const char FirstContextCharacter =
15274 S.getSourceManager().getCharacterData(CC)[0];
15275 if (FirstContextCharacter == '{')
15276 return false;
15279 return true;
15282 static const IntegerLiteral *getIntegerLiteral(Expr *E) {
15283 const auto *IL = dyn_cast<IntegerLiteral>(E);
15284 if (!IL) {
15285 if (auto *UO = dyn_cast<UnaryOperator>(E)) {
15286 if (UO->getOpcode() == UO_Minus)
15287 return dyn_cast<IntegerLiteral>(UO->getSubExpr());
15291 return IL;
15294 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
15295 E = E->IgnoreParenImpCasts();
15296 SourceLocation ExprLoc = E->getExprLoc();
15298 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
15299 BinaryOperator::Opcode Opc = BO->getOpcode();
15300 Expr::EvalResult Result;
15301 // Do not diagnose unsigned shifts.
15302 if (Opc == BO_Shl) {
15303 const auto *LHS = getIntegerLiteral(BO->getLHS());
15304 const auto *RHS = getIntegerLiteral(BO->getRHS());
15305 if (LHS && LHS->getValue() == 0)
15306 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
15307 else if (!E->isValueDependent() && LHS && RHS &&
15308 RHS->getValue().isNonNegative() &&
15309 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
15310 S.Diag(ExprLoc, diag::warn_left_shift_always)
15311 << (Result.Val.getInt() != 0);
15312 else if (E->getType()->isSignedIntegerType())
15313 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
15317 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
15318 const auto *LHS = getIntegerLiteral(CO->getTrueExpr());
15319 const auto *RHS = getIntegerLiteral(CO->getFalseExpr());
15320 if (!LHS || !RHS)
15321 return;
15322 if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
15323 (RHS->getValue() == 0 || RHS->getValue() == 1))
15324 // Do not diagnose common idioms.
15325 return;
15326 if (LHS->getValue() != 0 && RHS->getValue() != 0)
15327 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
15331 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
15332 SourceLocation CC,
15333 bool *ICContext = nullptr,
15334 bool IsListInit = false) {
15335 if (E->isTypeDependent() || E->isValueDependent()) return;
15337 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
15338 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
15339 if (Source == Target) return;
15340 if (Target->isDependentType()) return;
15342 // If the conversion context location is invalid don't complain. We also
15343 // don't want to emit a warning if the issue occurs from the expansion of
15344 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
15345 // delay this check as long as possible. Once we detect we are in that
15346 // scenario, we just return.
15347 if (CC.isInvalid())
15348 return;
15350 if (Source->isAtomicType())
15351 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
15353 // Diagnose implicit casts to bool.
15354 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
15355 if (isa<StringLiteral>(E))
15356 // Warn on string literal to bool. Checks for string literals in logical
15357 // and expressions, for instance, assert(0 && "error here"), are
15358 // prevented by a check in AnalyzeImplicitConversions().
15359 return DiagnoseImpCast(S, E, T, CC,
15360 diag::warn_impcast_string_literal_to_bool);
15361 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
15362 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
15363 // This covers the literal expressions that evaluate to Objective-C
15364 // objects.
15365 return DiagnoseImpCast(S, E, T, CC,
15366 diag::warn_impcast_objective_c_literal_to_bool);
15368 if (Source->isPointerType() || Source->canDecayToPointerType()) {
15369 // Warn on pointer to bool conversion that is always true.
15370 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
15371 SourceRange(CC));
15375 // If the we're converting a constant to an ObjC BOOL on a platform where BOOL
15376 // is a typedef for signed char (macOS), then that constant value has to be 1
15377 // or 0.
15378 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) {
15379 Expr::EvalResult Result;
15380 if (E->EvaluateAsInt(Result, S.getASTContext(),
15381 Expr::SE_AllowSideEffects)) {
15382 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) {
15383 adornObjCBoolConversionDiagWithTernaryFixit(
15384 S, E,
15385 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool)
15386 << toString(Result.Val.getInt(), 10));
15388 return;
15392 // Check implicit casts from Objective-C collection literals to specialized
15393 // collection types, e.g., NSArray<NSString *> *.
15394 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E))
15395 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral);
15396 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E))
15397 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral);
15399 // Strip vector types.
15400 if (isa<VectorType>(Source)) {
15401 if (Target->isSveVLSBuiltinType() &&
15402 (S.Context.areCompatibleSveTypes(QualType(Target, 0),
15403 QualType(Source, 0)) ||
15404 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0),
15405 QualType(Source, 0))))
15406 return;
15408 if (Target->isRVVVLSBuiltinType() &&
15409 (S.Context.areCompatibleRVVTypes(QualType(Target, 0),
15410 QualType(Source, 0)) ||
15411 S.Context.areLaxCompatibleRVVTypes(QualType(Target, 0),
15412 QualType(Source, 0))))
15413 return;
15415 if (!isa<VectorType>(Target)) {
15416 if (S.SourceMgr.isInSystemMacro(CC))
15417 return;
15418 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
15421 // If the vector cast is cast between two vectors of the same size, it is
15422 // a bitcast, not a conversion.
15423 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
15424 return;
15426 Source = cast<VectorType>(Source)->getElementType().getTypePtr();
15427 Target = cast<VectorType>(Target)->getElementType().getTypePtr();
15429 if (auto VecTy = dyn_cast<VectorType>(Target))
15430 Target = VecTy->getElementType().getTypePtr();
15432 // Strip complex types.
15433 if (isa<ComplexType>(Source)) {
15434 if (!isa<ComplexType>(Target)) {
15435 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType())
15436 return;
15438 return DiagnoseImpCast(S, E, T, CC,
15439 S.getLangOpts().CPlusPlus
15440 ? diag::err_impcast_complex_scalar
15441 : diag::warn_impcast_complex_scalar);
15444 Source = cast<ComplexType>(Source)->getElementType().getTypePtr();
15445 Target = cast<ComplexType>(Target)->getElementType().getTypePtr();
15448 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
15449 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);
15451 // Strip SVE vector types
15452 if (SourceBT && SourceBT->isSveVLSBuiltinType()) {
15453 // Need the original target type for vector type checks
15454 const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr();
15455 // Handle conversion from scalable to fixed when msve-vector-bits is
15456 // specified
15457 if (S.Context.areCompatibleSveTypes(QualType(OriginalTarget, 0),
15458 QualType(Source, 0)) ||
15459 S.Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0),
15460 QualType(Source, 0)))
15461 return;
15463 // If the vector cast is cast between two vectors of the same size, it is
15464 // a bitcast, not a conversion.
15465 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
15466 return;
15468 Source = SourceBT->getSveEltType(S.Context).getTypePtr();
15471 if (TargetBT && TargetBT->isSveVLSBuiltinType())
15472 Target = TargetBT->getSveEltType(S.Context).getTypePtr();
15474 // If the source is floating point...
15475 if (SourceBT && SourceBT->isFloatingPoint()) {
15476 // ...and the target is floating point...
15477 if (TargetBT && TargetBT->isFloatingPoint()) {
15478 // ...then warn if we're dropping FP rank.
15480 int Order = S.getASTContext().getFloatingTypeSemanticOrder(
15481 QualType(SourceBT, 0), QualType(TargetBT, 0));
15482 if (Order > 0) {
15483 // Don't warn about float constants that are precisely
15484 // representable in the target type.
15485 Expr::EvalResult result;
15486 if (E->EvaluateAsRValue(result, S.Context)) {
15487 // Value might be a float, a float vector, or a float complex.
15488 if (IsSameFloatAfterCast(result.Val,
15489 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
15490 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
15491 return;
15494 if (S.SourceMgr.isInSystemMacro(CC))
15495 return;
15497 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
15499 // ... or possibly if we're increasing rank, too
15500 else if (Order < 0) {
15501 if (S.SourceMgr.isInSystemMacro(CC))
15502 return;
15504 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
15506 return;
15509 // If the target is integral, always warn.
15510 if (TargetBT && TargetBT->isInteger()) {
15511 if (S.SourceMgr.isInSystemMacro(CC))
15512 return;
15514 DiagnoseFloatingImpCast(S, E, T, CC);
15517 // Detect the case where a call result is converted from floating-point
15518 // to bool, and the final argument to the call is converted from bool, to
15519 // discover this typo:
15521 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;"
15523 // FIXME: This is an incredibly special case; is there some more general
15524 // way to detect this class of misplaced-parentheses bug?
15525 if (Target->isBooleanType() && isa<CallExpr>(E)) {
15526 // Check last argument of function call to see if it is an
15527 // implicit cast from a type matching the type the result
15528 // is being cast to.
15529 CallExpr *CEx = cast<CallExpr>(E);
15530 if (unsigned NumArgs = CEx->getNumArgs()) {
15531 Expr *LastA = CEx->getArg(NumArgs - 1);
15532 Expr *InnerE = LastA->IgnoreParenImpCasts();
15533 if (isa<ImplicitCastExpr>(LastA) &&
15534 InnerE->getType()->isBooleanType()) {
15535 // Warn on this floating-point to bool conversion
15536 DiagnoseImpCast(S, E, T, CC,
15537 diag::warn_impcast_floating_point_to_bool);
15541 return;
15544 // Valid casts involving fixed point types should be accounted for here.
15545 if (Source->isFixedPointType()) {
15546 if (Target->isUnsaturatedFixedPointType()) {
15547 Expr::EvalResult Result;
15548 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects,
15549 S.isConstantEvaluatedContext())) {
15550 llvm::APFixedPoint Value = Result.Val.getFixedPoint();
15551 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T);
15552 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T);
15553 if (Value > MaxVal || Value < MinVal) {
15554 S.DiagRuntimeBehavior(E->getExprLoc(), E,
15555 S.PDiag(diag::warn_impcast_fixed_point_range)
15556 << Value.toString() << T
15557 << E->getSourceRange()
15558 << clang::SourceRange(CC));
15559 return;
15562 } else if (Target->isIntegerType()) {
15563 Expr::EvalResult Result;
15564 if (!S.isConstantEvaluatedContext() &&
15565 E->EvaluateAsFixedPoint(Result, S.Context,
15566 Expr::SE_AllowSideEffects)) {
15567 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint();
15569 bool Overflowed;
15570 llvm::APSInt IntResult = FXResult.convertToInt(
15571 S.Context.getIntWidth(T),
15572 Target->isSignedIntegerOrEnumerationType(), &Overflowed);
15574 if (Overflowed) {
15575 S.DiagRuntimeBehavior(E->getExprLoc(), E,
15576 S.PDiag(diag::warn_impcast_fixed_point_range)
15577 << FXResult.toString() << T
15578 << E->getSourceRange()
15579 << clang::SourceRange(CC));
15580 return;
15584 } else if (Target->isUnsaturatedFixedPointType()) {
15585 if (Source->isIntegerType()) {
15586 Expr::EvalResult Result;
15587 if (!S.isConstantEvaluatedContext() &&
15588 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) {
15589 llvm::APSInt Value = Result.Val.getInt();
15591 bool Overflowed;
15592 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue(
15593 Value, S.Context.getFixedPointSemantics(T), &Overflowed);
15595 if (Overflowed) {
15596 S.DiagRuntimeBehavior(E->getExprLoc(), E,
15597 S.PDiag(diag::warn_impcast_fixed_point_range)
15598 << toString(Value, /*Radix=*/10) << T
15599 << E->getSourceRange()
15600 << clang::SourceRange(CC));
15601 return;
15607 // If we are casting an integer type to a floating point type without
15608 // initialization-list syntax, we might lose accuracy if the floating
15609 // point type has a narrower significand than the integer type.
15610 if (SourceBT && TargetBT && SourceBT->isIntegerType() &&
15611 TargetBT->isFloatingType() && !IsListInit) {
15612 // Determine the number of precision bits in the source integer type.
15613 IntRange SourceRange =
15614 GetExprRange(S.Context, E, S.isConstantEvaluatedContext(),
15615 /*Approximate=*/true);
15616 unsigned int SourcePrecision = SourceRange.Width;
15618 // Determine the number of precision bits in the
15619 // target floating point type.
15620 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision(
15621 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)));
15623 if (SourcePrecision > 0 && TargetPrecision > 0 &&
15624 SourcePrecision > TargetPrecision) {
15626 if (std::optional<llvm::APSInt> SourceInt =
15627 E->getIntegerConstantExpr(S.Context)) {
15628 // If the source integer is a constant, convert it to the target
15629 // floating point type. Issue a warning if the value changes
15630 // during the whole conversion.
15631 llvm::APFloat TargetFloatValue(
15632 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)));
15633 llvm::APFloat::opStatus ConversionStatus =
15634 TargetFloatValue.convertFromAPInt(
15635 *SourceInt, SourceBT->isSignedInteger(),
15636 llvm::APFloat::rmNearestTiesToEven);
15638 if (ConversionStatus != llvm::APFloat::opOK) {
15639 SmallString<32> PrettySourceValue;
15640 SourceInt->toString(PrettySourceValue, 10);
15641 SmallString<32> PrettyTargetValue;
15642 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision);
15644 S.DiagRuntimeBehavior(
15645 E->getExprLoc(), E,
15646 S.PDiag(diag::warn_impcast_integer_float_precision_constant)
15647 << PrettySourceValue << PrettyTargetValue << E->getType() << T
15648 << E->getSourceRange() << clang::SourceRange(CC));
15650 } else {
15651 // Otherwise, the implicit conversion may lose precision.
15652 DiagnoseImpCast(S, E, T, CC,
15653 diag::warn_impcast_integer_float_precision);
15658 DiagnoseNullConversion(S, E, T, CC);
15660 S.DiscardMisalignedMemberAddress(Target, E);
15662 if (Target->isBooleanType())
15663 DiagnoseIntInBoolContext(S, E);
15665 if (!Source->isIntegerType() || !Target->isIntegerType())
15666 return;
15668 // TODO: remove this early return once the false positives for constant->bool
15669 // in templates, macros, etc, are reduced or removed.
15670 if (Target->isSpecificBuiltinType(BuiltinType::Bool))
15671 return;
15673 if (isObjCSignedCharBool(S, T) && !Source->isCharType() &&
15674 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) {
15675 return adornObjCBoolConversionDiagWithTernaryFixit(
15676 S, E,
15677 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool)
15678 << E->getType());
15681 IntRange SourceTypeRange =
15682 IntRange::forTargetOfCanonicalType(S.Context, Source);
15683 IntRange LikelySourceRange = GetExprRange(
15684 S.Context, E, S.isConstantEvaluatedContext(), /*Approximate=*/true);
15685 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);
15687 if (LikelySourceRange.Width > TargetRange.Width) {
15688 // If the source is a constant, use a default-on diagnostic.
15689 // TODO: this should happen for bitfield stores, too.
15690 Expr::EvalResult Result;
15691 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects,
15692 S.isConstantEvaluatedContext())) {
15693 llvm::APSInt Value(32);
15694 Value = Result.Val.getInt();
15696 if (S.SourceMgr.isInSystemMacro(CC))
15697 return;
15699 std::string PrettySourceValue = toString(Value, 10);
15700 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
15702 S.DiagRuntimeBehavior(
15703 E->getExprLoc(), E,
15704 S.PDiag(diag::warn_impcast_integer_precision_constant)
15705 << PrettySourceValue << PrettyTargetValue << E->getType() << T
15706 << E->getSourceRange() << SourceRange(CC));
15707 return;
15710 // People want to build with -Wshorten-64-to-32 and not -Wconversion.
15711 if (S.SourceMgr.isInSystemMacro(CC))
15712 return;
15714 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
15715 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
15716 /* pruneControlFlow */ true);
15717 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
15720 if (TargetRange.Width > SourceTypeRange.Width) {
15721 if (auto *UO = dyn_cast<UnaryOperator>(E))
15722 if (UO->getOpcode() == UO_Minus)
15723 if (Source->isUnsignedIntegerType()) {
15724 if (Target->isUnsignedIntegerType())
15725 return DiagnoseImpCast(S, E, T, CC,
15726 diag::warn_impcast_high_order_zero_bits);
15727 if (Target->isSignedIntegerType())
15728 return DiagnoseImpCast(S, E, T, CC,
15729 diag::warn_impcast_nonnegative_result);
15733 if (TargetRange.Width == LikelySourceRange.Width &&
15734 !TargetRange.NonNegative && LikelySourceRange.NonNegative &&
15735 Source->isSignedIntegerType()) {
15736 // Warn when doing a signed to signed conversion, warn if the positive
15737 // source value is exactly the width of the target type, which will
15738 // cause a negative value to be stored.
15740 Expr::EvalResult Result;
15741 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
15742 !S.SourceMgr.isInSystemMacro(CC)) {
15743 llvm::APSInt Value = Result.Val.getInt();
15744 if (isSameWidthConstantConversion(S, E, T, CC)) {
15745 std::string PrettySourceValue = toString(Value, 10);
15746 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
15748 S.DiagRuntimeBehavior(
15749 E->getExprLoc(), E,
15750 S.PDiag(diag::warn_impcast_integer_precision_constant)
15751 << PrettySourceValue << PrettyTargetValue << E->getType() << T
15752 << E->getSourceRange() << SourceRange(CC));
15753 return;
15757 // Fall through for non-constants to give a sign conversion warning.
15760 if ((!isa<EnumType>(Target) || !isa<EnumType>(Source)) &&
15761 ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
15762 (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
15763 LikelySourceRange.Width == TargetRange.Width))) {
15764 if (S.SourceMgr.isInSystemMacro(CC))
15765 return;
15767 if (SourceBT && SourceBT->isInteger() && TargetBT &&
15768 TargetBT->isInteger() &&
15769 Source->isSignedIntegerType() == Target->isSignedIntegerType()) {
15770 return;
15773 unsigned DiagID = diag::warn_impcast_integer_sign;
15775 // Traditionally, gcc has warned about this under -Wsign-compare.
15776 // We also want to warn about it in -Wconversion.
15777 // So if -Wconversion is off, use a completely identical diagnostic
15778 // in the sign-compare group.
15779 // The conditional-checking code will
15780 if (ICContext) {
15781 DiagID = diag::warn_impcast_integer_sign_conditional;
15782 *ICContext = true;
15785 return DiagnoseImpCast(S, E, T, CC, DiagID);
15788 // Diagnose conversions between different enumeration types.
15789 // In C, we pretend that the type of an EnumConstantDecl is its enumeration
15790 // type, to give us better diagnostics.
15791 QualType SourceType = E->getType();
15792 if (!S.getLangOpts().CPlusPlus) {
15793 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
15794 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
15795 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
15796 SourceType = S.Context.getTypeDeclType(Enum);
15797 Source = S.Context.getCanonicalType(SourceType).getTypePtr();
15801 if (const EnumType *SourceEnum = Source->getAs<EnumType>())
15802 if (const EnumType *TargetEnum = Target->getAs<EnumType>())
15803 if (SourceEnum->getDecl()->hasNameForLinkage() &&
15804 TargetEnum->getDecl()->hasNameForLinkage() &&
15805 SourceEnum != TargetEnum) {
15806 if (S.SourceMgr.isInSystemMacro(CC))
15807 return;
15809 return DiagnoseImpCast(S, E, SourceType, T, CC,
15810 diag::warn_impcast_different_enum_types);
// Forward declaration: CheckConditionalOperator and CheckConditionalOperand
// below are mutually recursive for nested conditional expressions.
15814 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
15815 SourceLocation CC, QualType T);
/// Analyze one arm (true/false operand) of a conditional operator whose
/// overall result is implicitly converted to type \p T.
///
/// \param T the type of the whole conditional expression.
/// \param CC the "context" location to blame for the implicit conversion.
/// \param ICContext set by CheckImplicitConversion when it finds a suspicious
///        signedness conversion in this operand (read by the caller).
15817 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
15818 SourceLocation CC, bool &ICContext) {
15819 E = E->IgnoreParenImpCasts();
15820 // Diagnose incomplete type for second or third operand in C.
15821 if (!S.getLangOpts().CPlusPlus && E->getType()->isRecordType())
15822 S.RequireCompleteExprType(E, diag::err_incomplete_type);
// A nested conditional gets the full conditional-operator analysis instead
// of a plain conversion check.
15824 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E))
15825 return CheckConditionalOperator(S, CO, CC, T);
15827 AnalyzeImplicitConversions(S, E, CC);
// Only diagnose the conversion to T when one actually happens.
15828 if (E->getType() != T)
15829 return CheckImplicitConversion(S, E, T, CC, &ICContext);
/// Analyze implicit conversions for a (possibly GNU binary) conditional
/// operator whose result is converted to type \p T: checks the condition,
/// both arms, and the -Wsign-compare interplay described inline below.
15832 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
15833 SourceLocation CC, QualType T) {
15834 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());
15836 Expr *TrueExpr = E->getTrueExpr();
// For the GNU "?:" extension the true arm is the shared condition expression.
15837 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E))
15838 TrueExpr = BCO->getCommon();
15840 bool Suspicious = false;
15841 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious);
15842 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);
15844 if (T->isBooleanType())
15845 DiagnoseIntInBoolContext(S, E);
15847 // If -Wconversion would have warned about either of the candidates
15848 // for a signedness conversion to the context type...
15849 if (!Suspicious) return;
15851 // ...but it's currently ignored...
15852 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
15853 return;
15855 // ...then check whether it would have warned about either of the
15856 // candidates for a signedness conversion to the condition type.
15857 if (E->getType() == T) return;
15859 Suspicious = false;
15860 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(),
15861 E->getType(), CC, &Suspicious);
// Short-circuit: one suspicious arm is enough to have reported the issue.
15862 if (!Suspicious)
15863 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
15864 E->getType(), CC, &Suspicious);
15867 /// Check conversion of given expression to boolean.
15868 /// Input argument E is a logical expression.
15869 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
// Only interesting when the language has no real 'bool' type (e.g. C
// without stdbool semantics); otherwise the normal conversion paths apply.
15870 if (S.getLangOpts().Bool)
15871 return;
// Skip atomics here; converting them involves an implicit atomic load that
// is diagnosed separately (see warn_atomic_implicit_seq_cst uses below).
15872 if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
15873 return;
15874 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
15877 namespace {
// One pending expression for the data-recursive AnalyzeImplicitConversions
// below.
15878 struct AnalyzeImplicitConversionsWorkItem {
// Expression still to be analyzed.
15879 Expr *E;
// "Context" location blamed for any implicit conversion found in E.
15880 SourceLocation CC;
// True when E is (inside) a C++ list-initialization; suppresses the
// int-to-float precision warning, which C++11 narrowing already covers.
15881 bool IsListInit;
15885 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions
15886 /// that should be visited are added to WorkList.
15887 static void AnalyzeImplicitConversions(
15888 Sema &S, AnalyzeImplicitConversionsWorkItem Item,
15889 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) {
15890 Expr *OrigE = Item.E;
15891 SourceLocation CC = Item.CC;
15893 QualType T = OrigE->getType();
15894 Expr *E = OrigE->IgnoreParenImpCasts();
15896 // Propagate whether we are in a C++ list initialization expression.
15897 // If so, we do not issue warnings for implicit int-float conversion
15898 // precision loss, because C++11 narrowing already handles it.
15899 bool IsListInit = Item.IsListInit ||
15900 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus);
15902 if (E->isTypeDependent() || E->isValueDependent())
15903 return;
15905 Expr *SourceExpr = E;
15906 // Examine, but don't traverse into the source expression of an
15907 // OpaqueValueExpr, since it may have multiple parents and we don't want to
15908 // emit duplicate diagnostics. It's fine to examine the form or attempt to
15909 // evaluate it in the context of checking the specific conversion to T though.
15910 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E))
15911 if (auto *Src = OVE->getSourceExpr())
15912 SourceExpr = Src;
// Diagnose '~b' where b is known to be boolean; suggest '!' instead.
15914 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr))
15915 if (UO->getOpcode() == UO_Not &&
15916 UO->getSubExpr()->isKnownToHaveBooleanValue())
15917 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool)
15918 << OrigE->getSourceRange() << T->isBooleanType()
15919 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!");
// Diagnose bitwise '&'/'|' of two boolean operands that both have side
// effects (so short-circuiting would change behavior); suggest '&&'/'||'.
15921 if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr))
15922 if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) &&
15923 BO->getLHS()->isKnownToHaveBooleanValue() &&
15924 BO->getRHS()->isKnownToHaveBooleanValue() &&
15925 BO->getLHS()->HasSideEffects(S.Context) &&
15926 BO->getRHS()->HasSideEffects(S.Context)) {
15927 S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical)
15928 << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange()
15929 << FixItHint::CreateReplacement(
15930 BO->getOperatorLoc(),
15931 (BO->getOpcode() == BO_And ? "&&" : "||"));
15932 S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int);
15935 // For conditional operators, we analyze the arguments as if they
15936 // were being fed directly into the output.
15937 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) {
15938 CheckConditionalOperator(S, CO, CC, T);
15939 return;
15942 // Check implicit argument conversions for function calls.
15943 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr))
15944 CheckImplicitArgumentConversions(S, Call, CC);
15946 // Go ahead and check any implicit conversions we might have skipped.
15947 // The non-canonical typecheck is just an optimization;
15948 // CheckImplicitConversion will filter out dead implicit conversions.
15949 if (SourceExpr->getType() != T)
15950 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit);
15952 // Now continue drilling into this expression.
15954 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
15955 // The bound subexpressions in a PseudoObjectExpr are not reachable
15956 // as transitive children.
15957 // FIXME: Use a more uniform representation for this.
15958 for (auto *SE : POE->semantics())
15959 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
15960 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit});
15963 // Skip past explicit casts.
15964 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) {
15965 E = CE->getSubExpr()->IgnoreParenImpCasts();
15966 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
15967 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
15968 WorkList.push_back({E, CC, IsListInit});
15969 return;
15972 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
15973 // Do a somewhat different check with comparison operators.
15974 if (BO->isComparisonOp())
15975 return AnalyzeComparison(S, BO);
15977 // And with simple assignments.
15978 if (BO->getOpcode() == BO_Assign)
15979 return AnalyzeAssignment(S, BO);
15980 // And with compound assignments.
15981 if (BO->isAssignmentOp())
15982 return AnalyzeCompoundAssignment(S, BO);
15985 // These break the otherwise-useful invariant below. Fortunately,
15986 // we don't really need to recurse into them, because any internal
15987 // expressions should have been analyzed already when they were
15988 // built into statements.
15989 if (isa<StmtExpr>(E)) return;
15991 // Don't descend into unevaluated contexts.
15992 if (isa<UnaryExprOrTypeTraitExpr>(E)) return;
15994 // Now just recurse over the expression's children.
15995 CC = E->getExprLoc();
15996 BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
15997 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
15998 for (Stmt *SubStmt : E->children()) {
15999 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
16000 if (!ChildExpr)
16001 continue;
16003 if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E))
16004 if (ChildExpr == CSE->getOperand())
16005 // Do not recurse over a CoroutineSuspendExpr's operand.
16006 // The operand is also a subexpression of getCommonExpr(), and
16007 // recursing into it directly would produce duplicate diagnostics.
16008 continue;
16010 if (IsLogicalAndOperator &&
16011 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
16012 // Ignore checking string literals that are in logical and operators.
16013 // This is a common pattern for asserts.
16014 continue;
16015 WorkList.push_back({ChildExpr, CC, IsListInit});
// Operands of '&&'/'||' are used in a boolean context; check their
// bool-like conversions (again skipping assert-style string literals).
16018 if (BO && BO->isLogicalOp()) {
16019 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
16020 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
16021 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
16023 SubExpr = BO->getRHS()->IgnoreParenImpCasts();
16024 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
16025 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
16028 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
16029 if (U->getOpcode() == UO_LNot) {
// '!' also puts its operand in a boolean context.
16030 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
16031 } else if (U->getOpcode() != UO_AddrOf) {
16032 if (U->getSubExpr()->getType()->isAtomicType())
16033 S.Diag(U->getSubExpr()->getBeginLoc(),
16034 diag::warn_atomic_implicit_seq_cst);
16039 /// AnalyzeImplicitConversions - Find and report any interesting
16040 /// implicit conversions in the given expression. There are a couple
16041 /// of competing diagnostics here, -Wconversion and -Wsign-compare.
16042 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
16043 bool IsListInit/*= false*/) {
// Drive the data-recursive variant above with an explicit worklist,
// presumably to avoid deep native recursion on deeply nested expressions.
16044 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
16045 WorkList.push_back({OrigE, CC, IsListInit});
16046 while (!WorkList.empty())
16047 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
16050 /// Diagnose integer type and any valid implicit conversion to it.
/// \param IntT the integer type the argument is expected to convert to.
/// \return true if an error was diagnosed, false otherwise.
16051 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
16052 // Taking into account implicit conversions,
16053 // allow any integer.
16054 if (!E->getType()->isIntegerType()) {
16055 S.Diag(E->getBeginLoc(),
16056 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
16057 return true;
16059 // Potentially emit standard warnings for implicit conversions if enabled
16060 // using -Wconversion.
16061 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
16062 return false;
16065 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
16066 // Returns true when emitting a warning about taking the address of a reference.
// Emits PD when E is a reference-typed declaration, member, or a call
// returning a reference (whose address can therefore never be null).
16067 static bool CheckForReference(Sema &SemaRef, const Expr *E,
16068 const PartialDiagnostic &PD) {
16069 E = E->IgnoreParenImpCasts();
16071 const FunctionDecl *FD = nullptr;
16073 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
16074 if (!DRE->getDecl()->getType()->isReferenceType())
16075 return false;
16076 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
16077 if (!M->getMemberDecl()->getType()->isReferenceType())
16078 return false;
16079 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
16080 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
16081 return false;
// Remember the callee so we can note where the reference comes from.
16082 FD = Call->getDirectCallee();
16083 } else {
16084 return false;
16087 SemaRef.Diag(E->getExprLoc(), PD);
16089 // If possible, point to location of function.
16090 if (FD) {
16091 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
16094 return true;
16097 // Returns true if the SourceLocation is expanded from any macro body.
16098 // Returns false if the SourceLocation is invalid, is not in a macro
16099 // expansion, or is expanded from a top-level macro argument.
16100 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
16101 if (Loc.isInvalid())
16102 return false;
// Walk out through nested macro expansions; a macro-argument expansion
// keeps walking to its caller, a macro-body expansion is a hit.
16104 while (Loc.isMacroID()) {
16105 if (SM.isMacroBodyExpansion(Loc))
16106 return true;
16107 Loc = SM.getImmediateMacroCallerLoc(Loc);
16110 return false;
16113 /// Diagnose pointers that are always non-null.
16114 /// \param E the expression containing the pointer
16115 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
16116 /// compared to a null pointer
16117 /// \param IsEqual True when the comparison is equal to a null pointer
16118 /// \param Range Extra SourceRange to highlight in the diagnostic
16119 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
16120 Expr::NullPointerConstantKind NullKind,
16121 bool IsEqual, SourceRange Range) {
16122 if (!E)
16123 return;
16125 // Don't warn inside macros.
16126 if (E->getExprLoc().isMacroID()) {
16127 const SourceManager &SM = getSourceManager();
16128 if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
16129 IsInAnyMacroBody(SM, Range.getBegin()))
16130 return;
16132 E = E->IgnoreImpCasts();
16134 const bool IsCompare = NullKind != Expr::NPCK_NotNull;
// 'this' is never null in well-formed code; warn specially.
16136 if (isa<CXXThisExpr>(E)) {
16137 unsigned DiagID = IsCompare ? diag::warn_this_null_compare
16138 : diag::warn_this_bool_conversion;
16139 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
16140 return;
16143 bool IsAddressOf = false;
16145 if (auto *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
16146 if (UO->getOpcode() != UO_AddrOf)
16147 return;
16148 IsAddressOf = true;
16149 E = UO->getSubExpr();
16152 if (IsAddressOf) {
16153 unsigned DiagID = IsCompare
16154 ? diag::warn_address_of_reference_null_compare
16155 : diag::warn_address_of_reference_bool_conversion;
16156 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
16157 << IsEqual;
16158 if (CheckForReference(*this, E, PD)) {
16159 return;
// Shared reporting path for nonnull parameters and returns_nonnull calls.
16163 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
16164 bool IsParam = isa<NonNullAttr>(NonnullAttr);
16165 std::string Str;
16166 llvm::raw_string_ostream S(Str);
16167 E->printPretty(S, nullptr, getPrintingPolicy());
16168 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
16169 : diag::warn_cast_nonnull_to_bool;
16170 Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
16171 << E->getSourceRange() << Range << IsEqual;
16172 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
16175 // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
16176 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
16177 if (auto *Callee = Call->getDirectCallee()) {
16178 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
16179 ComplainAboutNonnullParamOrCall(A);
16180 return;
16185 // Expect to find a single Decl. Skip anything more complicated.
16186 ValueDecl *D = nullptr;
16187 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
16188 D = R->getDecl();
16189 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
16190 D = M->getMemberDecl();
16193 // Weak Decls can be null.
16194 if (!D || D->isWeak())
16195 return;
16197 // Check for parameter decl with nonnull attribute
16198 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
// Only warn if the parameter hasn't been reassigned in this function.
16199 if (getCurFunction() &&
16200 !getCurFunction()->ModifiedNonNullParams.count(PV)) {
16201 if (const Attr *A = PV->getAttr<NonNullAttr>()) {
16202 ComplainAboutNonnullParamOrCall(A);
16203 return;
16206 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
16207 // Skip function template not specialized yet.
16208 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
16209 return;
16210 auto ParamIter = llvm::find(FD->parameters(), PV);
16211 assert(ParamIter != FD->param_end());
16212 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
// A function-level nonnull attribute with no arguments covers every
// pointer parameter; otherwise look for this parameter's index.
16214 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
16215 if (!NonNull->args_size()) {
16216 ComplainAboutNonnullParamOrCall(NonNull);
16217 return;
16220 for (const ParamIdx &ArgNo : NonNull->args()) {
16221 if (ArgNo.getASTIndex() == ParamNo) {
16222 ComplainAboutNonnullParamOrCall(NonNull);
16223 return;
16231 QualType T = D->getType();
16232 const bool IsArray = T->isArrayType();
16233 const bool IsFunction = T->isFunctionType();
16235 // Address of function is used to silence the function warning.
16236 if (IsAddressOf && IsFunction) {
16237 return;
16240 // Found nothing.
16241 if (!IsAddressOf && !IsFunction && !IsArray)
16242 return;
16244 // Pretty print the expression for the diagnostic.
16245 std::string Str;
16246 llvm::raw_string_ostream S(Str);
16247 E->printPretty(S, nullptr, getPrintingPolicy());
16249 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
16250 : diag::warn_impcast_pointer_to_bool;
16251 enum {
16252 AddressOf,
16253 FunctionPointer,
16254 ArrayPointer
16255 } DiagType;
16256 if (IsAddressOf)
16257 DiagType = AddressOf;
16258 else if (IsFunction)
16259 DiagType = FunctionPointer;
16260 else if (IsArray)
16261 DiagType = ArrayPointer;
16262 else
16263 llvm_unreachable("Could not determine diagnostic.");
16264 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
16265 << Range << IsEqual;
16267 if (!IsFunction)
16268 return;
16270 // Suggest '&' to silence the function warning.
16271 Diag(E->getExprLoc(), diag::note_function_warning_silence)
16272 << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
16274 // Check to see if '()' fixit should be emitted.
16275 QualType ReturnType;
16276 UnresolvedSet<4> NonTemplateOverloads;
16277 tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
16278 if (ReturnType.isNull())
16279 return;
16281 if (IsCompare) {
16282 // There are two cases here. If there is a null constant, only suggest
16283 // for a pointer return type. If the null is 0, then suggest if the return
16284 // type is a pointer or an integer type.
16285 if (!ReturnType->isPointerType()) {
16286 if (NullKind == Expr::NPCK_ZeroExpression ||
16287 NullKind == Expr::NPCK_ZeroLiteral) {
16288 if (!ReturnType->isIntegerType())
16289 return;
16290 } else {
16291 return;
16294 } else { // !IsCompare
16295 // For function to bool, only suggest if the function pointer has bool
16296 // return type.
16297 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
16298 return;
16300 Diag(E->getExprLoc(), diag::note_function_to_function_call)
16301 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
16304 /// Diagnoses "dangerous" implicit conversions within the given
16305 /// expression (which is a full expression). Implements -Wconversion
16306 /// and -Wsign-compare.
16308 /// \param CC the "context" location of the implicit conversion, i.e.
16309 /// the location of the syntactic entity requiring the implicit
16310 /// conversion
16311 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
16312 // Don't diagnose in unevaluated contexts.
16313 if (isUnevaluatedContext())
16314 return;
16316 // Don't diagnose for value- or type-dependent expressions.
16317 if (E->isTypeDependent() || E->isValueDependent())
16318 return;
16320 // Check for array bounds violations in cases where the check isn't triggered
16321 // elsewhere for other Expr types (like BinaryOperators), e.g. when an
16322 // ArraySubscriptExpr is on the RHS of a variable initialization.
16323 CheckArrayAccess(E);
16325 // This is not the right CC for (e.g.) a variable initialization.
16326 AnalyzeImplicitConversions(*this, E, CC);
16329 /// CheckBoolLikeConversion - Check conversion of given expression to boolean.
16330 /// Input argument E is a logical expression.
16331 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
// Thin public wrapper over the file-static helper of the same name.
16332 ::CheckBoolLikeConversion(*this, E, CC);
16335 /// Diagnose when expression is an integer constant expression and its evaluation
16336 /// results in integer overflow
16337 void Sema::CheckForIntOverflow (const Expr *E) {
16338 // Use a work list to deal with nested struct initializers.
16339 SmallVector<const Expr *, 2> Exprs(1, E);
16341 do {
16342 const Expr *OriginalE = Exprs.pop_back_val();
16343 const Expr *E = OriginalE->IgnoreParenCasts();
// Arithmetic expressions are evaluated directly for overflow.
16345 if (isa<BinaryOperator, UnaryOperator>(E)) {
16346 E->EvaluateForOverflow(Context);
16347 continue;
// Otherwise, descend into subexpressions that can contain arithmetic:
// initializer lists, call/message/constructor arguments, array indices,
// compound-literal initializers, and array-new sizes.
16350 if (const auto *InitList = dyn_cast<InitListExpr>(OriginalE))
16351 Exprs.append(InitList->inits().begin(), InitList->inits().end());
16352 else if (isa<ObjCBoxedExpr>(OriginalE))
16353 E->EvaluateForOverflow(Context);
16354 else if (const auto *Call = dyn_cast<CallExpr>(E))
16355 Exprs.append(Call->arg_begin(), Call->arg_end());
16356 else if (const auto *Message = dyn_cast<ObjCMessageExpr>(E))
16357 Exprs.append(Message->arg_begin(), Message->arg_end());
16358 else if (const auto *Construct = dyn_cast<CXXConstructExpr>(E))
16359 Exprs.append(Construct->arg_begin(), Construct->arg_end());
16360 else if (const auto *Temporary = dyn_cast<CXXBindTemporaryExpr>(E))
16361 Exprs.push_back(Temporary->getSubExpr());
16362 else if (const auto *Array = dyn_cast<ArraySubscriptExpr>(E))
16363 Exprs.push_back(Array->getIdx());
16364 else if (const auto *Compound = dyn_cast<CompoundLiteralExpr>(E))
16365 Exprs.push_back(Compound->getInitializer());
16366 else if (const auto *New = dyn_cast<CXXNewExpr>(E);
16367 New && New->isArray()) {
16368 if (auto ArraySize = New->getArraySize())
16369 Exprs.push_back(*ArraySize);
16371 } while (!Exprs.empty());
namespace {

/// Visitor for expressions which looks for unsequenced operations on the
/// same object.
///
/// Conflicting unsequenced accesses to the same object are reported via the
/// warn_unsequenced_mod_mod / warn_unsequenced_mod_use diagnostics emitted
/// in checkUsage() below.
class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
  using Base = ConstEvaluatedExprVisitor<SequenceChecker>;

  /// A tree of sequenced regions within an expression. Two regions are
  /// unsequenced if one is an ancestor or a descendent of the other. When we
  /// finish processing an expression with sequencing, such as a comma
  /// expression, we fold its tree nodes into its parent, since they are
  /// unsequenced with respect to nodes we will visit later.
  class SequenceTree {
    struct Value {
      explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
      unsigned Parent : 31;
      unsigned Merged : 1;
    };
    SmallVector<Value, 8> Values;

  public:
    /// A region within an expression which may be sequenced with respect
    /// to some other region.
    class Seq {
      friend class SequenceTree;

      /// Index of this region within the Values vector of the owning tree.
      unsigned Index;

      explicit Seq(unsigned N) : Index(N) {}

    public:
      Seq() : Index(0) {}
    };

    SequenceTree() { Values.push_back(Value(0)); }
    Seq root() const { return Seq(0); }

    /// Create a new sequence of operations, which is an unsequenced
    /// subset of \p Parent. This sequence of operations is sequenced with
    /// respect to other children of \p Parent.
    Seq allocate(Seq Parent) {
      Values.push_back(Value(Parent.Index));
      return Seq(Values.size() - 1);
    }

    /// Merge a sequence of operations into its parent.
    void merge(Seq S) {
      Values[S.Index].Merged = true;
    }

    /// Determine whether two operations are unsequenced. This operation
    /// is asymmetric: \p Cur should be the more recent sequence, and \p Old
    /// should have been merged into its parent as appropriate.
    bool isUnsequenced(Seq Cur, Seq Old) {
      // Walk up the ancestor chain from Cur's representative. If we reach
      // Old's representative the two regions lie on one ancestor chain and
      // are therefore unsequenced. Children always have larger indices than
      // their parents, so we can stop once C drops below Target.
      unsigned C = representative(Cur.Index);
      unsigned Target = representative(Old.Index);
      while (C >= Target) {
        if (C == Target)
          return true;
        C = Values[C].Parent;
      }
      return false;
    }

  private:
    /// Pick a representative for a sequence.
    unsigned representative(unsigned K) {
      if (Values[K].Merged)
        // Perform path compression as we go.
        return Values[K].Parent = representative(Values[K].Parent);
      return K;
    }
  };

  /// An object for which we can track unsequenced uses.
  using Object = const NamedDecl *;

  /// Different flavors of object usage which we track. We only track the
  /// least-sequenced usage of each kind.
  enum UsageKind {
    /// A read of an object. Multiple unsequenced reads are OK.
    UK_Use,

    /// A modification of an object which is sequenced before the value
    /// computation of the expression, such as ++n in C++.
    UK_ModAsValue,

    /// A modification of an object which is not sequenced before the value
    /// computation of the expression, such as n++.
    UK_ModAsSideEffect,

    UK_Count = UK_ModAsSideEffect + 1
  };

  /// Bundle together a sequencing region and the expression corresponding
  /// to a specific usage. One Usage is stored for each usage kind in UsageInfo.
  struct Usage {
    const Expr *UsageExpr = nullptr;
    SequenceTree::Seq Seq;

    Usage() = default;
  };

  struct UsageInfo {
    Usage Uses[UK_Count];

    /// Have we issued a diagnostic for this object already?
    bool Diagnosed = false;

    UsageInfo() = default;
  };
  using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;

  Sema &SemaRef;

  /// Sequenced regions within the expression.
  SequenceTree Tree;

  /// Declaration modifications and references which we have seen.
  UsageInfoMap UsageMap;

  /// The region we are currently within.
  SequenceTree::Seq Region;

  /// Filled in with declarations which were modified as a side-effect
  /// (that is, post-increment operations).
  SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr;

  /// Expressions to check later. We defer checking these to reduce
  /// stack usage.
  SmallVectorImpl<const Expr *> &WorkList;

  /// RAII object wrapping the visitation of a sequenced subexpression of an
  /// expression. At the end of this process, the side-effects of the evaluation
  /// become sequenced with respect to the value computation of the result, so
  /// we downgrade any UK_ModAsSideEffect within the evaluation to
  /// UK_ModAsValue.
  struct SequencedSubexpression {
    SequencedSubexpression(SequenceChecker &Self)
        : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
      Self.ModAsSideEffect = &ModAsSideEffect;
    }

    ~SequencedSubexpression() {
      for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) {
        // Add a new usage with usage kind UK_ModAsValue, and then restore
        // the previous usage with UK_ModAsSideEffect (thus clearing it if
        // the previous one was empty).
        UsageInfo &UI = Self.UsageMap[M.first];
        auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect];
        Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue);
        SideEffectUsage = M.second;
      }
      Self.ModAsSideEffect = OldModAsSideEffect;
    }

    SequenceChecker &Self;
    SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
    SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect;
  };

  /// RAII object wrapping the visitation of a subexpression which we might
  /// choose to evaluate as a constant. If any subexpression is evaluated and
  /// found to be non-constant, this allows us to suppress the evaluation of
  /// the outer expression.
  class EvaluationTracker {
  public:
    EvaluationTracker(SequenceChecker &Self)
        : Self(Self), Prev(Self.EvalTracker) {
      Self.EvalTracker = this;
    }

    ~EvaluationTracker() {
      // Pop this tracker and propagate any evaluation failure outward.
      Self.EvalTracker = Prev;
      if (Prev)
        Prev->EvalOK &= EvalOK;
    }

    bool evaluate(const Expr *E, bool &Result) {
      if (!EvalOK || E->isValueDependent())
        return false;
      EvalOK = E->EvaluateAsBooleanCondition(
          Result, Self.SemaRef.Context,
          Self.SemaRef.isConstantEvaluatedContext());
      return EvalOK;
    }

  private:
    SequenceChecker &Self;
    EvaluationTracker *Prev;
    bool EvalOK = true;
  } *EvalTracker = nullptr;

  /// Find the object which is produced by the specified expression,
  /// if any.
  Object getObject(const Expr *E, bool Mod) const {
    E = E->IgnoreParenCasts();
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
        return getObject(UO->getSubExpr(), Mod);
    } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
      if (BO->getOpcode() == BO_Comma)
        return getObject(BO->getRHS(), Mod);
      if (Mod && BO->isAssignmentOp())
        return getObject(BO->getLHS(), Mod);
    } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
      // FIXME: Check for more interesting cases, like "x.n = ++x.n".
      if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
        return ME->getMemberDecl();
    } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
      // FIXME: If this is a reference, map through to its value.
      return DRE->getDecl();
    return nullptr;
  }

  /// Note that an object \p O was modified or used by an expression
  /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
  /// the object \p O as obtained via the \p UsageMap.
  void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
    // Get the old usage for the given object and usage kind.
    Usage &U = UI.Uses[UK];
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
      // If we have a modification as side effect and are in a sequenced
      // subexpression, save the old Usage so that we can restore it later
      // in SequencedSubexpression::~SequencedSubexpression.
      if (UK == UK_ModAsSideEffect && ModAsSideEffect)
        ModAsSideEffect->push_back(std::make_pair(O, U));
      // Then record the new usage with the current sequencing region.
      U.UsageExpr = UsageExpr;
      U.Seq = Region;
    }
  }

  /// Check whether a modification or use of an object \p O in an expression
  /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
  /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
  /// \p IsModMod is true when we are checking for a mod-mod unsequenced
  /// usage and false when we are checking for a mod-use unsequenced usage.
  void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
                  UsageKind OtherKind, bool IsModMod) {
    if (UI.Diagnosed)
      return;

    const Usage &U = UI.Uses[OtherKind];
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
      return;

    const Expr *Mod = U.UsageExpr;
    const Expr *ModOrUse = UsageExpr;
    if (OtherKind == UK_Use)
      std::swap(Mod, ModOrUse);

    SemaRef.DiagRuntimeBehavior(
        Mod->getExprLoc(), {Mod, ModOrUse},
        SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
                               : diag::warn_unsequenced_mod_use)
            << O << SourceRange(ModOrUse->getExprLoc()));
    UI.Diagnosed = true;
  }

  // A note on note{Pre, Post}{Use, Mod}:
  //
  // (It helps to follow the algorithm with an expression such as
  //  "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
  //  operations before C++17 and both are well-defined in C++17).
  //
  // When visiting a node which uses/modifies an object we first call notePreUse
  // or notePreMod before visiting its sub-expression(s). At this point the
  // children of the current node have not yet been visited and so the eventual
  // uses/modifications resulting from the children of the current node have not
  // been recorded yet.
  //
  // We then visit the children of the current node. After that notePostUse or
  // notePostMod is called. These will 1) detect an unsequenced modification
  // as side effect (as in "k++ + k") and 2) add a new usage with the
  // appropriate usage kind.
  //
  // We also have to be careful that some operation sequences modification as
  // side effect as well (for example: || or ,). To account for this we wrap
  // the visitation of such a sub-expression (for example: the LHS of || or ,)
  // with SequencedSubexpression. SequencedSubexpression is an RAII object
  // which records usages which are modifications as side effect, and then
  // downgrade them (or more accurately restore the previous usage which was a
  // modification as side effect) when exiting the scope of the sequenced
  // subexpression.

  void notePreUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    // Uses conflict with other modifications.
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
  }

  void notePostUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/false);
    addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use);
  }

  void notePreMod(Object O, const Expr *ModExpr) {
    UsageInfo &UI = UsageMap[O];
    // Modifications conflict with other modifications and with uses.
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
  }

  void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/true);
    addUsage(O, UI, ModExpr, /*UsageKind=*/UK);
  }

public:
  SequenceChecker(Sema &S, const Expr *E,
                  SmallVectorImpl<const Expr *> &WorkList)
      : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
    // The constructor performs the entire traversal.
    Visit(E);
    // Silence a -Wunused-private-field since WorkList is now unused.
    // TODO: Evaluate if it can be used, and if not remove it.
    (void)this->WorkList;
  }

  void VisitStmt(const Stmt *S) {
    // Skip all statements which aren't expressions for now.
  }

  void VisitExpr(const Expr *E) {
    // By default, just recurse to evaluated subexpressions.
    Base::VisitStmt(E);
  }

  void VisitCoroutineSuspendExpr(const CoroutineSuspendExpr *CSE) {
    for (auto *Sub : CSE->children()) {
      const Expr *ChildExpr = dyn_cast_or_null<Expr>(Sub);
      if (!ChildExpr)
        continue;

      if (ChildExpr == CSE->getOperand())
        // Do not recurse over a CoroutineSuspendExpr's operand.
        // The operand is also a subexpression of getCommonExpr(), and
        // recursing into it directly could confuse object management
        // for the sake of sequence tracking.
        continue;

      Visit(Sub);
    }
  }

  void VisitCastExpr(const CastExpr *E) {
    // An lvalue-to-rvalue conversion is a read of the converted object.
    Object O = Object();
    if (E->getCastKind() == CK_LValueToRValue)
      O = getObject(E->getSubExpr(), false);

    if (O)
      notePreUse(O, E);
    VisitExpr(E);
    if (O)
      notePostUse(O, E);
  }

  void VisitSequencedExpressions(const Expr *SequencedBefore,
                                 const Expr *SequencedAfter) {
    SequenceTree::Seq BeforeRegion = Tree.allocate(Region);
    SequenceTree::Seq AfterRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    {
      SequencedSubexpression SeqBefore(*this);
      Region = BeforeRegion;
      Visit(SequencedBefore);
    }

    Region = AfterRegion;
    Visit(SequencedAfter);

    Region = OldRegion;

    Tree.merge(BeforeRegion);
    Tree.merge(AfterRegion);
  }

  void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) {
    // C++17 [expr.sub]p1:
    //   The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
    //   expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS());
    else {
      Visit(ASE->getLHS());
      Visit(ASE->getRHS());
    }
  }

  void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMem(const BinaryOperator *BO) {
    // C++17 [expr.mptr.oper]p4:
    //  Abbreviating pm-expression.*cast-expression as E1.*E2, [...]
    //  the expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShlShr(const BinaryOperator *BO) {
    // C++17 [expr.shift]p4:
    //  The expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinComma(const BinaryOperator *BO) {
    // C++11 [expr.comma]p1:
    //   Every value computation and side effect associated with the left
    //   expression is sequenced before every value computation and side
    //   effect associated with the right expression.
    VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
  }

  void VisitBinAssign(const BinaryOperator *BO) {
    SequenceTree::Seq RHSRegion;
    SequenceTree::Seq LHSRegion;
    if (SemaRef.getLangOpts().CPlusPlus17) {
      RHSRegion = Tree.allocate(Region);
      LHSRegion = Tree.allocate(Region);
    } else {
      RHSRegion = Region;
      LHSRegion = Region;
    }
    SequenceTree::Seq OldRegion = Region;

    // C++11 [expr.ass]p1:
    //  [...] the assignment is sequenced after the value computation
    //  of the right and left operands, [...]
    //
    // so check it before inspecting the operands and update the
    // map afterwards.
    Object O = getObject(BO->getLHS(), /*Mod=*/true);
    if (O)
      notePreMod(O, BO);

    if (SemaRef.getLangOpts().CPlusPlus17) {
      // C++17 [expr.ass]p1:
      //  [...] The right operand is sequenced before the left operand. [...]
      {
        SequencedSubexpression SeqBefore(*this);
        Region = RHSRegion;
        Visit(BO->getRHS());
      }

      Region = LHSRegion;
      Visit(BO->getLHS());

      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

    } else {
      // C++11 does not specify any sequencing between the LHS and RHS.
      Region = LHSRegion;
      Visit(BO->getLHS());

      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    // C++11 [expr.ass]p1:
    //  the assignment is sequenced [...] before the value computation of the
    //  assignment expression.
    // C11 6.5.16/3 has no such rule.
    Region = OldRegion;
    if (O)
      notePostMod(O, BO,
                  SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                  : UK_ModAsSideEffect);
    if (SemaRef.getLangOpts().CPlusPlus17) {
      Tree.merge(RHSRegion);
      Tree.merge(LHSRegion);
    }
  }

  void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) {
    VisitBinAssign(CAO);
  }

  void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    // C++11 [expr.pre.incr]p1:
    //   the expression ++x is equivalent to x+=1
    notePostMod(O, UO,
                SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                : UK_ModAsSideEffect);
  }

  void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    // Post-increment/decrement is a modification as side effect.
    notePostMod(O, UO, UK_ModAsSideEffect);
  }

  void VisitBinLOr(const BinaryOperator *BO) {
    // C++11 [expr.log.or]p2:
    //  If the second expression is evaluated, every value computation and
    //  side effect associated with the first expression is sequenced before
    //  every value computation and side effect associated with the
    //  second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.or]p1:
    //  [...] the second operand is not evaluated if the first operand
    //  evaluates to true.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitBinLAnd(const BinaryOperator *BO) {
    // C++11 [expr.log.and]p2:
    //  If the second expression is evaluated, every value computation and
    //  side effect associated with the first expression is sequenced before
    //  every value computation and side effect associated with the
    //  second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.and]p1:
    //  [...] the second operand is not evaluated if the first operand is false.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
    // C++11 [expr.cond]p1:
    //  [...] Every value computation and side effect associated with the first
    //  expression is sequenced before every value computation and side effect
    //  associated with the second or third expression.
    SequenceTree::Seq ConditionRegion = Tree.allocate(Region);

    // No sequencing is specified between the true and false expression.
    // However since exactly one of both is going to be evaluated we can
    // consider them to be sequenced. This is needed to avoid warning on
    // something like "x ? y+= 1 : y += 2;" in the case where we will visit
    // both the true and false expressions because we can't evaluate x.
    // This will still allow us to detect an expression like (pre C++17)
    // "(x ? y += 1 : y += 2) = y".
    //
    // We don't wrap the visitation of the true and false expression with
    // SequencedSubexpression because we don't want to downgrade modifications
    // as side effect in the true and false expressions after the visitation
    // is done. (for example in the expression "(x ? y++ : y++) + y" we should
    // not warn between the two "y++", but we should warn between the "y++"
    // and the "y".
    SequenceTree::Seq TrueRegion = Tree.allocate(Region);
    SequenceTree::Seq FalseRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = ConditionRegion;
      Visit(CO->getCond());
    }

    // C++11 [expr.cond]p1:
    // [...] The first expression is contextually converted to bool (Clause 4).
    // It is evaluated and if it is true, the result of the conditional
    // expression is the value of the second expression, otherwise that of the
    // third expression. Only one of the second and third expressions is
    // evaluated. [...]
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
    bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
    bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitTrueExpr) {
      Region = TrueRegion;
      Visit(CO->getTrueExpr());
    }
    if (ShouldVisitFalseExpr) {
      Region = FalseRegion;
      Visit(CO->getFalseExpr());
    }

    Region = OldRegion;
    Tree.merge(ConditionRegion);
    Tree.merge(TrueRegion);
    Tree.merge(FalseRegion);
  }

  void VisitCallExpr(const CallExpr *CE) {
    // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.

    if (CE->isUnevaluatedBuiltinCall(Context))
      return;

    // C++11 [intro.execution]p15:
    //   When calling a function [...], every value computation and side effect
    //   associated with any argument expression, or with the postfix expression
    //   designating the called function, is sequenced before execution of every
    //   expression or statement in the body of the function [and thus before
    //   the value computation of its result].
    SequencedSubexpression Sequenced(*this);
    SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] {
      // C++17 [expr.call]p5
      //   The postfix-expression is sequenced before each expression in the
      //   expression-list and any default argument. [...]
      SequenceTree::Seq CalleeRegion;
      SequenceTree::Seq OtherRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        CalleeRegion = Tree.allocate(Region);
        OtherRegion = Tree.allocate(Region);
      } else {
        CalleeRegion = Region;
        OtherRegion = Region;
      }
      SequenceTree::Seq OldRegion = Region;

      // Visit the callee expression first.
      Region = CalleeRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        SequencedSubexpression Sequenced(*this);
        Visit(CE->getCallee());
      } else {
        Visit(CE->getCallee());
      }

      // Then visit the argument expressions.
      Region = OtherRegion;
      for (const Expr *Argument : CE->arguments())
        Visit(Argument);

      Region = OldRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        Tree.merge(CalleeRegion);
        Tree.merge(OtherRegion);
      }
    });
  }

  void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) {
    // C++17 [over.match.oper]p2:
    //   [...] the operator notation is first transformed to the equivalent
    //   function-call notation as summarized in Table 12 (where @ denotes one
    //   of the operators covered in the specified subclause). However, the
    //   operands are sequenced in the order prescribed for the built-in
    //   operator (Clause 8).
    //
    // From the above only overloaded binary operators and overloaded call
    // operators have sequencing rules in C++17 that we need to handle
    // separately.
    if (!SemaRef.getLangOpts().CPlusPlus17 ||
        (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call))
      return VisitCallExpr(CXXOCE);

    enum {
      /// No sequencing rule applies; treat like a regular call.
      NoSequencing,
      LHSBeforeRHS,
      RHSBeforeLHS,
      LHSBeforeRest
    } SequencingKind;
    switch (CXXOCE->getOperator()) {
    case OO_Equal:
    case OO_PlusEqual:
    case OO_MinusEqual:
    case OO_StarEqual:
    case OO_SlashEqual:
    case OO_PercentEqual:
    case OO_CaretEqual:
    case OO_AmpEqual:
    case OO_PipeEqual:
    case OO_LessLessEqual:
    case OO_GreaterGreaterEqual:
      SequencingKind = RHSBeforeLHS;
      break;

    case OO_LessLess:
    case OO_GreaterGreater:
    case OO_AmpAmp:
    case OO_PipePipe:
    case OO_Comma:
    case OO_ArrowStar:
    case OO_Subscript:
      SequencingKind = LHSBeforeRHS;
      break;

    case OO_Call:
      SequencingKind = LHSBeforeRest;
      break;

    default:
      SequencingKind = NoSequencing;
      break;
    }

    if (SequencingKind == NoSequencing)
      return VisitCallExpr(CXXOCE);

    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] {
      assert(SemaRef.getLangOpts().CPlusPlus17 &&
             "Should only get there with C++17 and above!");
      assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) &&
             "Should only get there with an overloaded binary operator"
             " or an overloaded call operator!");

      if (SequencingKind == LHSBeforeRest) {
        assert(CXXOCE->getOperator() == OO_Call &&
               "We should only have an overloaded call operator here!");

        // This is very similar to VisitCallExpr, except that we only have the
        // C++17 case. The postfix-expression is the first argument of the
        // CXXOperatorCallExpr. The expressions in the expression-list, if any,
        // are in the following arguments.
        //
        // Note that we intentionally do not visit the callee expression since
        // it is just a decayed reference to a function.
        SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region);
        SequenceTree::Seq ArgsRegion = Tree.allocate(Region);
        SequenceTree::Seq OldRegion = Region;

        assert(CXXOCE->getNumArgs() >= 1 &&
               "An overloaded call operator must have at least one argument"
               " for the postfix-expression!");
        const Expr *PostfixExpr = CXXOCE->getArgs()[0];
        llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1,
                                          CXXOCE->getNumArgs() - 1);

        // Visit the postfix-expression first.
        {
          Region = PostfixExprRegion;
          SequencedSubexpression Sequenced(*this);
          Visit(PostfixExpr);
        }

        // Then visit the argument expressions.
        Region = ArgsRegion;
        for (const Expr *Arg : Args)
          Visit(Arg);

        Region = OldRegion;
        Tree.merge(PostfixExprRegion);
        Tree.merge(ArgsRegion);
      } else {
        assert(CXXOCE->getNumArgs() == 2 &&
               "Should only have two arguments here!");
        assert((SequencingKind == LHSBeforeRHS ||
                SequencingKind == RHSBeforeLHS) &&
               "Unexpected sequencing kind!");

        // We do not visit the callee expression since it is just a decayed
        // reference to a function.
        const Expr *E1 = CXXOCE->getArg(0);
        const Expr *E2 = CXXOCE->getArg(1);
        if (SequencingKind == RHSBeforeLHS)
          std::swap(E1, E2);

        return VisitSequencedExpressions(E1, E2);
      }
    });
  }

  void VisitCXXConstructExpr(const CXXConstructExpr *CCE) {
    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    if (!CCE->isListInitialization())
      return VisitExpr(CCE);

    // In C++11, list initializations are sequenced.
    SmallVector<SequenceTree::Seq, 32> Elts;
    SequenceTree::Seq Parent = Region;
    for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
                                              E = CCE->arg_end();
         I != E; ++I) {
      Region = Tree.allocate(Parent);
      Elts.push_back(Region);
      Visit(*I);
    }

    // Forget that the initializers are sequenced.
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }

  void VisitInitListExpr(const InitListExpr *ILE) {
    if (!SemaRef.getLangOpts().CPlusPlus11)
      return VisitExpr(ILE);

    // In C++11, list initializations are sequenced.
    SmallVector<SequenceTree::Seq, 32> Elts;
    SequenceTree::Seq Parent = Region;
    for (unsigned I = 0; I < ILE->getNumInits(); ++I) {
      const Expr *E = ILE->getInit(I);
      if (!E)
        continue;
      Region = Tree.allocate(Parent);
      Elts.push_back(Region);
      Visit(E);
    }

    // Forget that the initializers are sequenced.
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }
};

} // namespace
17244 void Sema::CheckUnsequencedOperations(const Expr *E) {
17245 SmallVector<const Expr *, 8> WorkList;
17246 WorkList.push_back(E);
17247 while (!WorkList.empty()) {
17248 const Expr *Item = WorkList.pop_back_val();
17249 SequenceChecker(*this, Item, WorkList);
// Run the final set of warnings on a completed full-expression:
// implicit-conversion checks, unsequenced-operation checks, integer-overflow
// checks, plus flushing any deferred misaligned-member diagnostics.
void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
                              bool IsConstexpr) {
  // Treat the expression as constant-evaluated while checking when it is
  // constexpr or already wrapped in a ConstantExpr; restored on scope exit.
  llvm::SaveAndRestore ConstantContext(isConstantEvaluatedOverride,
                                       IsConstexpr || isa<ConstantExpr>(E));
  CheckImplicitConversions(E, CheckLoc);
  // Sequencing analysis is meaningless on instantiation-dependent trees.
  if (!E->isInstantiationDependent())
    CheckUnsequencedOperations(E);
  // Constant evaluation already diagnoses overflow for constexpr contexts.
  if (!IsConstexpr && !E->isValueDependent())
    CheckForIntOverflow(E);
  DiagnoseMisalignedMembers();
}
17265 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
17266 FieldDecl *BitField,
17267 Expr *Init) {
17268 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc);
/// Recursively search a variably-modified parameter type (looking through
/// pointers, references, and parentheses) for an array declared with the
/// [*] size modifier, which is invalid in a function definition, and emit
/// the corresponding error at \p Loc.
static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
                                         SourceLocation Loc) {
  // [*] only occurs inside variably-modified types; bail out early otherwise.
  if (!PType->isVariablyModifiedType())
    return;
  if (const auto *PointerTy = dyn_cast<PointerType>(PType)) {
    diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc);
    return;
  }
  if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) {
    diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc);
    return;
  }
  if (const auto *ParenTy = dyn_cast<ParenType>(PType)) {
    diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc);
    return;
  }

  const ArrayType *AT = S.Context.getAsArrayType(PType);
  if (!AT)
    return;

  if (AT->getSizeModifier() != ArraySizeModifier::Star) {
    // Not itself a [*] array, but one may still appear in the element type.
    diagnoseArrayStarInParamType(S, AT->getElementType(), Loc);
    return;
  }

  S.Diag(Loc, diag::err_array_star_in_function_definition);
}
/// CheckParmsForFunctionDef - Check that the parameters of the given
/// function are appropriate for the definition of a function. This
/// takes care of any checks that cannot be performed on the
/// declaration itself, e.g., that the types of each of the function
/// parameters are complete.
///
/// \param Parameters the function's parameters, checked in order.
/// \param CheckParameterNames whether to require that each parameter is
///        named (pre-C23 C only diagnoses this as an extension).
/// \returns true if any parameter was marked invalid.
bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                                    bool CheckParameterNames) {
  bool HasInvalidParm = false;
  for (ParmVarDecl *Param : Parameters) {
    assert(Param && "null in a parameter list");
    // C99 6.7.5.3p4: the parameters in a parameter type list in a
    // function declarator that is part of a function definition of
    // that function shall not have incomplete type.
    //
    // C++23 [dcl.fct.def.general]/p2
    // The type of a parameter [...] for a function definition
    // shall not be a (possibly cv-qualified) class type that is incomplete
    // or abstract within the function body unless the function is deleted.
    if (!Param->isInvalidDecl() &&
        (RequireCompleteType(Param->getLocation(), Param->getType(),
                             diag::err_typecheck_decl_incomplete_type) ||
         RequireNonAbstractType(Param->getBeginLoc(), Param->getOriginalType(),
                                diag::err_abstract_type_in_decl,
                                AbstractParamType))) {
      Param->setInvalidDecl();
      HasInvalidParm = true;
    }

    // C99 6.9.1p5: If the declarator includes a parameter type list, the
    // declaration of each parameter shall include an identifier.
    if (CheckParameterNames && Param->getIdentifier() == nullptr &&
        !Param->isImplicit() && !getLangOpts().CPlusPlus) {
      // Diagnose this as an extension in C17 and earlier.
      if (!getLangOpts().C23)
        Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c23);
    }

    // C99 6.7.5.3p12:
    //   If the function declarator is not part of a definition of that
    //   function, parameters may have incomplete type and may use the [*]
    //   notation in their sequences of declarator specifiers to specify
    //   variable length array types.
    QualType PType = Param->getOriginalType();
    // FIXME: This diagnostic should point the '[*]' if source-location
    // information is added for it.
    diagnoseArrayStarInParamType(*this, PType, Param->getLocation());

    // If the parameter is a c++ class type and it has to be destructed in the
    // callee function, declare the destructor so that it can be called by the
    // callee function. Do not perform any direct access check on the dtor here.
    if (!Param->isInvalidDecl()) {
      if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
        if (!ClassDecl->isInvalidDecl() &&
            !ClassDecl->hasIrrelevantDestructor() &&
            !ClassDecl->isDependentContext() &&
            ClassDecl->isParamDestroyedInCallee()) {
          CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
          MarkFunctionReferenced(Param->getLocation(), Destructor);
          DiagnoseUseOfDecl(Destructor, Param->getLocation());
        }
      }
    }

    // Parameters with the pass_object_size attribute only need to be marked
    // constant at function definitions. Because we lack information about
    // whether we're on a declaration or definition when we're instantiating the
    // attribute, we need to check for constness here.
    if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
      if (!Param->getType().isConstQualified())
        Diag(Param->getLocation(), diag::err_attribute_pointers_only)
            << Attr->getSpelling() << 1;

    // Check for parameter names shadowing fields from the class.
    if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
      // The owning context for the parameter should be the function, but we
      // want to see if this function's declaration context is a record.
      DeclContext *DC = Param->getDeclContext();
      if (DC && DC->isFunctionOrMethod()) {
        if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
          CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(),
                                     RD, /*DeclIsField*/ false);
      }
    }

    // WebAssembly tables cannot be passed by value.
    if (!Param->isInvalidDecl() &&
        Param->getOriginalType()->isWebAssemblyTableType()) {
      Param->setInvalidDecl();
      HasInvalidParm = true;
      Diag(Param->getLocation(), diag::err_wasm_table_as_function_parameter);
    }
  }

  return HasInvalidParm;
}
17395 std::optional<std::pair<
17396 CharUnits, CharUnits>> static getBaseAlignmentAndOffsetFromPtr(const Expr
17398 ASTContext
17399 &Ctx);
/// Compute the alignment and offset of the base class object given the
/// derived-to-base cast expression and the alignment and offset of the derived
/// class object.
///
/// Walks the cast's inheritance path one base specifier at a time, updating
/// (BaseAlignment, Offset) as it goes, and returns the pair for the final
/// base subobject.
static std::pair<CharUnits, CharUnits>
getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType,
                                   CharUnits BaseAlignment, CharUnits Offset,
                                   ASTContext &Ctx) {
  for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE;
       ++PathI) {
    const CXXBaseSpecifier *Base = *PathI;
    const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
    if (Base->isVirtual()) {
      // The complete object may have a lower alignment than the non-virtual
      // alignment of the base, in which case the base may be misaligned. Choose
      // the smaller of the non-virtual alignment and BaseAlignment, which is a
      // conservative lower bound of the complete object alignment.
      CharUnits NonVirtualAlignment =
          Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment();
      BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment);
      // A virtual base's placement is unknown statically, so any accumulated
      // constant offset no longer applies.
      Offset = CharUnits::Zero();
    } else {
      const ASTRecordLayout &RL =
          Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl());
      Offset += RL.getBaseClassOffset(BaseDecl);
    }
    // The current base becomes the "derived" type for the next path step.
    DerivedType = Base->getType();
  }

  return std::make_pair(BaseAlignment, Offset);
}
/// Compute the alignment and offset of a binary additive operator.
///
/// \p PtrE is the pointer operand, \p IntE the integer operand; \p IsSub is
/// true for subtraction.  Returns std::nullopt when the pointee size or the
/// base alignment cannot be determined.
static std::optional<std::pair<CharUnits, CharUnits>>
getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE,
                                     bool IsSub, ASTContext &Ctx) {
  QualType PointeeType = PtrE->getType()->getPointeeType();

  // Pointer arithmetic on a variably-sized pointee cannot be folded to a
  // constant offset.
  if (!PointeeType->isConstantSizeType())
    return std::nullopt;

  auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx);

  if (!P)
    return std::nullopt;

  CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType);
  if (std::optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) {
    // Constant index: fold it into the offset directly.
    CharUnits Offset = EltSize * IdxRes->getExtValue();
    if (IsSub)
      Offset = -Offset;
    return std::make_pair(P->first, P->second + Offset);
  }

  // If the integer expression isn't a constant expression, compute the lower
  // bound of the alignment using the alignment and offset of the pointer
  // expression and the element size.
  return std::make_pair(
      P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize),
      CharUnits::Zero());
}
/// This helper function takes an lvalue expression and returns the alignment of
/// a VarDecl and a constant offset from the VarDecl.
///
/// Mutually recursive with getBaseAlignmentAndOffsetFromPtr; returns
/// std::nullopt for any expression form it cannot analyze.
std::optional<std::pair<
    CharUnits,
    CharUnits>> static getBaseAlignmentAndOffsetFromLValue(const Expr *E,
                                                           ASTContext &Ctx) {
  E = E->IgnoreParens();
  switch (E->getStmtClass()) {
  default:
    break;
  case Stmt::CStyleCastExprClass:
  case Stmt::CXXStaticCastExprClass:
  case Stmt::ImplicitCastExprClass: {
    auto *CE = cast<CastExpr>(E);
    const Expr *From = CE->getSubExpr();
    switch (CE->getCastKind()) {
    default:
      break;
    case CK_NoOp:
      // No-op casts don't change the object; look through.
      return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx);
      if (!P)
        break;
      return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first,
                                                P->second, Ctx);
    }
    }
    break;
  }
  case Stmt::ArraySubscriptExprClass: {
    // a[i] is equivalent to *(a + i): reuse the additive-operator logic.
    auto *ASE = cast<ArraySubscriptExpr>(E);
    return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(),
                                                false, Ctx);
  }
  case Stmt::DeclRefExprClass: {
    if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) {
      // FIXME: If VD is captured by copy or is an escaping __block variable,
      // use the alignment of VD's type.
      if (!VD->getType()->isReferenceType()) {
        // Dependent alignment cannot be resolved -> bail out.
        if (VD->hasDependentAlignment())
          break;
        return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero());
      }
      // A reference: analyze what it was bound to, if visible.
      if (VD->hasInit())
        return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx);
    }
    break;
  }
  case Stmt::MemberExprClass: {
    auto *ME = cast<MemberExpr>(E);
    auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
    if (!FD || FD->getType()->isReferenceType() ||
        FD->getParent()->isInvalidDecl())
      break;
    std::optional<std::pair<CharUnits, CharUnits>> P;
    if (ME->isArrow())
      P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx);
    else
      P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx);
    if (!P)
      break;
    // Add the field's offset within its record to the accumulated offset.
    const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent());
    uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex());
    return std::make_pair(P->first,
                          P->second + CharUnits::fromQuantity(Offset));
  }
  case Stmt::UnaryOperatorClass: {
    auto *UO = cast<UnaryOperator>(E);
    switch (UO->getOpcode()) {
    default:
      break;
    case UO_Deref:
      // *p: the lvalue's alignment is that of the pointer's target.
      return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx);
    }
    break;
  }
  case Stmt::BinaryOperatorClass: {
    auto *BO = cast<BinaryOperator>(E);
    auto Opcode = BO->getOpcode();
    switch (Opcode) {
    default:
      break;
    case BO_Comma:
      // (a, b): the result is b.
      return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx);
    }
    break;
  }
  }
  return std::nullopt;
}
/// This helper function takes a pointer expression and returns the alignment of
/// a VarDecl and a constant offset from the VarDecl.
///
/// Mutually recursive with getBaseAlignmentAndOffsetFromLValue; returns
/// std::nullopt for any expression form it cannot analyze.
std::optional<std::pair<
    CharUnits, CharUnits>> static getBaseAlignmentAndOffsetFromPtr(const Expr
                                                                       *E,
                                                                   ASTContext
                                                                       &Ctx) {
  E = E->IgnoreParens();
  switch (E->getStmtClass()) {
  default:
    break;
  case Stmt::CStyleCastExprClass:
  case Stmt::CXXStaticCastExprClass:
  case Stmt::ImplicitCastExprClass: {
    auto *CE = cast<CastExpr>(E);
    const Expr *From = CE->getSubExpr();
    switch (CE->getCastKind()) {
    default:
      break;
    case CK_NoOp:
      return getBaseAlignmentAndOffsetFromPtr(From, Ctx);
    case CK_ArrayToPointerDecay:
      // The decayed pointer has the alignment of the array lvalue.
      return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx);
      if (!P)
        break;
      return getDerivedToBaseAlignmentAndOffset(
          CE, From->getType()->getPointeeType(), P->first, P->second, Ctx);
    }
    }
    break;
  }
  case Stmt::CXXThisExprClass: {
    // 'this' is at least as aligned as the class's non-virtual alignment.
    auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl();
    CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment();
    return std::make_pair(Alignment, CharUnits::Zero());
  }
  case Stmt::UnaryOperatorClass: {
    auto *UO = cast<UnaryOperator>(E);
    if (UO->getOpcode() == UO_AddrOf)
      // &lv: the pointer's alignment is the lvalue's alignment.
      return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx);
    break;
  }
  case Stmt::BinaryOperatorClass: {
    auto *BO = cast<BinaryOperator>(E);
    auto Opcode = BO->getOpcode();
    switch (Opcode) {
    default:
      break;
    case BO_Add:
    case BO_Sub: {
      const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS();
      // For addition the pointer may be on either side; normalize so that
      // LHS is the pointer operand.
      if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType())
        std::swap(LHS, RHS);
      return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub,
                                                  Ctx);
    }
    case BO_Comma:
      return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx);
    }
    break;
  }
  }
  return std::nullopt;
}
17624 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) {
17625 // See if we can compute the alignment of a VarDecl and an offset from it.
17626 std::optional<std::pair<CharUnits, CharUnits>> P =
17627 getBaseAlignmentAndOffsetFromPtr(E, S.Context);
17629 if (P)
17630 return P->first.alignmentAtOffset(P->second);
17632 // If that failed, return the type's alignment.
17633 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType());
/// CheckCastAlign - Implements -Wcast-align, which warns when a
/// pointer cast increases the alignment requirements.
///
/// \param Op the source expression of the cast.
/// \param T the destination type.
/// \param TRange the source range of the written destination type.
void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
  // This is actually a lot of work to potentially be doing on every
  // cast; don't do it if we're ignoring -Wcast_align (as is the default).
  if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
    return;

  // Ignore dependent types.
  if (T->isDependentType() || Op->getType()->isDependentType())
    return;

  // Require that the destination be a pointer type.
  const PointerType *DestPtr = T->getAs<PointerType>();
  if (!DestPtr) return;

  // If the destination has alignment 1, we're done.
  QualType DestPointee = DestPtr->getPointeeType();
  if (DestPointee->isIncompleteType()) return;
  CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee);
  if (DestAlign.isOne()) return;

  // Require that the source be a pointer type.
  const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
  if (!SrcPtr) return;
  QualType SrcPointee = SrcPtr->getPointeeType();

  // Explicitly allow casts from cv void*. We already implicitly
  // allowed casts to cv void*, since they have alignment 1.
  // Also allow casts involving incomplete types, which implicitly
  // includes 'void'.
  if (SrcPointee->isIncompleteType()) return;

  CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this);

  // Only warn when the cast *increases* the required alignment.
  if (SrcAlign >= DestAlign) return;

  Diag(TRange.getBegin(), diag::warn_cast_align)
      << Op->getType() << T
      << static_cast<unsigned>(SrcAlign.getQuantity())
      << static_cast<unsigned>(DestAlign.getQuantity())
      << TRange << Op->getSourceRange();
}
/// Check a single array subscript / pointer-arithmetic access against the
/// statically-known bounds of the accessed object and warn on out-of-bounds
/// accesses (-Warray-bounds and friends).
///
/// \param BaseExpr the base array/pointer expression.
/// \param IndexExpr the index (or pointer-arithmetic offset) expression.
/// \param ASE the enclosing subscript expression, or null for plain pointer
///        arithmetic (selects between the subscript/arith diagnostic forms).
/// \param AllowOnePastEnd whether forming the one-past-the-end address is
///        permitted (e.g. under unary '&').
/// \param IndexNegated whether the index has already been syntactically
///        negated and must be negated again before checking.
void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                            const ArraySubscriptExpr *ASE,
                            bool AllowOnePastEnd, bool IndexNegated) {
  // Already diagnosed by the constant evaluator.
  if (isConstantEvaluatedContext())
    return;

  IndexExpr = IndexExpr->IgnoreParenImpCasts();
  if (IndexExpr->isValueDependent())
    return;

  // The element type the pointer arithmetic is actually performed on, before
  // looking through casts on the base.
  const Type *EffectiveType =
      BaseExpr->getType()->getPointeeOrArrayElementType();
  BaseExpr = BaseExpr->IgnoreParenCasts();
  const ConstantArrayType *ArrayTy =
      Context.getAsConstantArrayType(BaseExpr->getType());

  LangOptions::StrictFlexArraysLevelKind
    StrictFlexArraysLevel = getLangOpts().getStrictFlexArraysLevel();

  const Type *BaseType =
      ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr();
  // "Unbounded": not a constant-size array, or a flexible-array-member-like
  // access; then we can only check against the address-space limit.
  bool IsUnboundedArray =
      BaseType == nullptr || BaseExpr->isFlexibleArrayMemberLike(
                                 Context, StrictFlexArraysLevel,
                                 /*IgnoreTemplateOrMacroSubstitution=*/true);
  if (EffectiveType->isDependentType() ||
      (!IsUnboundedArray && BaseType->isDependentType()))
    return;

  Expr::EvalResult Result;
  if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects))
    return;

  llvm::APSInt index = Result.Val.getInt();
  if (IndexNegated) {
    index.setIsUnsigned(false);
    index = -index;
  }

  if (IsUnboundedArray) {
    if (EffectiveType->isFunctionType())
      return;
    if (index.isUnsigned() || !index.isNegative()) {
      const auto &ASTC = getASTContext();
      unsigned AddrBits = ASTC.getTargetInfo().getPointerWidth(
          EffectiveType->getCanonicalTypeInternal().getAddressSpace());
      if (index.getBitWidth() < AddrBits)
        index = index.zext(AddrBits);
      std::optional<CharUnits> ElemCharUnits =
          ASTC.getTypeSizeInCharsIfKnown(EffectiveType);
      // PR50741 - If EffectiveType has unknown size (e.g., if it's a void
      // pointer) bounds-checking isn't meaningful.
      if (!ElemCharUnits || ElemCharUnits->isZero())
        return;
      llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity());
      // If index has more active bits than address space, we already know
      // we have a bounds violation to warn about. Otherwise, compute
      // address of (index + 1)th element, and warn about bounds violation
      // only if that address exceeds address space.
      if (index.getActiveBits() <= AddrBits) {
        bool Overflow;
        llvm::APInt Product(index);
        Product += 1;
        Product = Product.umul_ov(ElemBytes, Overflow);
        if (!Overflow && Product.getActiveBits() <= AddrBits)
          return;
      }

      // Need to compute max possible elements in address space, since that
      // is included in diag message.
      llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits);
      MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth()));
      MaxElems += 1;
      ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth());
      MaxElems = MaxElems.udiv(ElemBytes);

      unsigned DiagID =
          ASE ? diag::warn_array_index_exceeds_max_addressable_bounds
              : diag::warn_ptr_arith_exceeds_max_addressable_bounds;

      // Diag message shows element size in bits and in "bytes" (platform-
      // dependent CharUnits)
      DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                          PDiag(DiagID)
                              << toString(index, 10, true) << AddrBits
                              << (unsigned)ASTC.toBits(*ElemCharUnits)
                              << toString(ElemBytes, 10, false)
                              << toString(MaxElems, 10, false)
                              << (unsigned)MaxElems.getLimitedValue(~0U)
                              << IndexExpr->getSourceRange());

      const NamedDecl *ND = nullptr;
      // Try harder to find a NamedDecl to point at in the note.
      while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
        BaseExpr = ASE->getBase()->IgnoreParenCasts();
      if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
        ND = DRE->getDecl();
      if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
        ND = ME->getMemberDecl();

      if (ND)
        DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
                            PDiag(diag::note_array_declared_here) << ND);
    }
    return;
  }

  if (index.isUnsigned() || !index.isNegative()) {
    // It is possible that the type of the base expression after
    // IgnoreParenCasts is incomplete, even though the type of the base
    // expression before IgnoreParenCasts is complete (see PR39746 for an
    // example). In this case we have no information about whether the array
    // access exceeds the array bounds. However we can still diagnose an array
    // access which precedes the array bounds.
    if (BaseType->isIncompleteType())
      return;

    llvm::APInt size = ArrayTy->getSize();

    if (BaseType != EffectiveType) {
      // Make sure we're comparing apples to apples when comparing index to
      // size.
      uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
      uint64_t array_typesize = Context.getTypeSize(BaseType);

      // Handle ptrarith_typesize being zero, such as when casting to void*.
      // Use the size in bits (what "getTypeSize()" returns) rather than bytes.
      if (!ptrarith_typesize)
        ptrarith_typesize = Context.getCharWidth();

      if (ptrarith_typesize != array_typesize) {
        // There's a cast to a different size type involved.
        uint64_t ratio = array_typesize / ptrarith_typesize;

        // TODO: Be smarter about handling cases where array_typesize is not a
        // multiple of ptrarith_typesize.
        if (ptrarith_typesize * ratio == array_typesize)
          size *= llvm::APInt(size.getBitWidth(), ratio);
      }
    }

    // Widen whichever of index/size is narrower so they can be compared.
    if (size.getBitWidth() > index.getBitWidth())
      index = index.zext(size.getBitWidth());
    else if (size.getBitWidth() < index.getBitWidth())
      size = size.zext(index.getBitWidth());

    // For array subscripting the index must be less than size, but for pointer
    // arithmetic also allow the index (offset) to be equal to size since
    // computing the next address after the end of the array is legal and
    // commonly done e.g. in C++ iterators and range-based for loops.
    if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
      return;

    // Suppress the warning if the subscript expression (as identified by the
    // ']' location) and the index expression are both from macro expansions
    // within a system header.
    if (ASE) {
      SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
          ASE->getRBracketLoc());
      if (SourceMgr.isInSystemHeader(RBracketLoc)) {
        SourceLocation IndexLoc =
            SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc());
        if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc))
          return;
      }
    }

    unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds
                          : diag::warn_ptr_arith_exceeds_bounds;
    unsigned CastMsg = (!ASE || BaseType == EffectiveType) ? 0 : 1;
    QualType CastMsgTy = ASE ? ASE->getLHS()->getType() : QualType();

    DiagRuntimeBehavior(
        BaseExpr->getBeginLoc(), BaseExpr,
        PDiag(DiagID) << toString(index, 10, true) << ArrayTy->desugar()
                      << CastMsg << CastMsgTy << IndexExpr->getSourceRange());
  } else {
    unsigned DiagID = diag::warn_array_index_precedes_bounds;
    if (!ASE) {
      DiagID = diag::warn_ptr_arith_precedes_bounds;
      if (index.isNegative()) index = -index;
    }

    DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                        PDiag(DiagID) << toString(index, 10, true)
                                      << IndexExpr->getSourceRange());
  }

  const NamedDecl *ND = nullptr;
  // Try harder to find a NamedDecl to point at in the note.
  while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
    BaseExpr = ASE->getBase()->IgnoreParenCasts();
  if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
    ND = DRE->getDecl();
  if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
    ND = ME->getMemberDecl();

  if (ND)
    DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
                        PDiag(diag::note_array_declared_here) << ND);
}
/// Walk an expression looking for array subscripts and OpenMP array sections
/// to bounds-check.  AllowOnePastEnd counts the nesting of unary '&' (which
/// permits forming a one-past-the-end address) against unary '*' (which
/// dereferences, and therefore forbids it).
void Sema::CheckArrayAccess(const Expr *expr) {
  int AllowOnePastEnd = 0;
  while (expr) {
    expr = expr->IgnoreParenImpCasts();
    switch (expr->getStmtClass()) {
    case Stmt::ArraySubscriptExprClass: {
      const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
      CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
                       AllowOnePastEnd > 0);
      // Continue walking into the base for nested subscripts (a[i][j]).
      expr = ASE->getBase();
      break;
    }
    case Stmt::MemberExprClass: {
      expr = cast<MemberExpr>(expr)->getBase();
      break;
    }
    case Stmt::OMPArraySectionExprClass: {
      const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
      // Only the lower bound of the section is a single checkable index.
      if (ASE->getLowerBound())
        CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
                         /*ASE=*/nullptr, AllowOnePastEnd > 0);
      return;
    }
    case Stmt::UnaryOperatorClass: {
      // Only unwrap the * and & unary operators
      const UnaryOperator *UO = cast<UnaryOperator>(expr);
      expr = UO->getSubExpr();
      switch (UO->getOpcode()) {
      case UO_AddrOf:
        AllowOnePastEnd++;
        break;
      case UO_Deref:
        AllowOnePastEnd--;
        break;
      default:
        return;
      }
      break;
    }
    case Stmt::ConditionalOperatorClass: {
      // Check both arms independently; &(c ? a[i] : b[j]) etc.
      const ConditionalOperator *cond = cast<ConditionalOperator>(expr);
      if (const Expr *lhs = cond->getLHS())
        CheckArrayAccess(lhs);
      if (const Expr *rhs = cond->getRHS())
        CheckArrayAccess(rhs);
      return;
    }
    case Stmt::CXXOperatorCallExprClass: {
      const auto *OCE = cast<CXXOperatorCallExpr>(expr);
      for (const auto *Arg : OCE->arguments())
        CheckArrayAccess(Arg);
      return;
    }
    default:
      return;
    }
  }
}
17942 //===--- CHECK: Objective-C retain cycles ----------------------------------//
namespace {

/// Describes the "owner" side of a potential Objective-C retain cycle: the
/// strongly-retained entity that a block capture could keep alive.
struct RetainCycleOwner {
  VarDecl *Variable = nullptr;  // the owning variable, once identified
  SourceRange Range;            // range to highlight in the owner note
  SourceLocation Loc;           // location for the owner note
  bool Indirect = false;        // owned via an ivar/property, not directly

  RetainCycleOwner() = default;

  // Record e's location and range as the place to point the diagnostic at.
  void setLocsFrom(Expr *e) {
    Loc = e->getExprLoc();
    Range = e->getSourceRange();
  }
};

} // namespace
17962 /// Consider whether capturing the given variable can possibly lead to
17963 /// a retain cycle.
17964 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
17965 // In ARC, it's captured strongly iff the variable has __strong
17966 // lifetime. In MRR, it's captured strongly if the variable is
17967 // __block and has an appropriate type.
17968 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
17969 return false;
17971 owner.Variable = var;
17972 if (ref)
17973 owner.setLocsFrom(ref);
17974 return true;
/// Walk the receiver expression of a message send looking for the strongly
/// retained object that could own the retain cycle, filling in \p owner.
/// Returns true if an owner was found.
static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
  while (true) {
    e = e->IgnoreParens();
    if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
      switch (cast->getCastKind()) {
      // Casts that don't change the referenced object; look through them.
      case CK_BitCast:
      case CK_LValueBitCast:
      case CK_LValueToRValue:
      case CK_ARCReclaimReturnedObject:
        e = cast->getSubExpr();
        continue;

      default:
        return false;
      }
    }

    if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
      ObjCIvarDecl *ivar = ref->getDecl();
      // Only a __strong ivar can retain the captured block.
      if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
        return false;

      // Try to find a retain cycle in the base.
      if (!findRetainCycleOwner(S, ref->getBase(), owner))
        return false;

      if (ref->isFreeIvar()) owner.setLocsFrom(ref);
      owner.Indirect = true;
      return true;
    }

    if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
      VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
      if (!var) return false;
      return considerVariable(var, ref, owner);
    }

    if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
      if (member->isArrow()) return false;

      // Don't count this as an indirect ownership.
      e = member->getBase();
      continue;
    }

    if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
      // Only pay attention to pseudo-objects on property references.
      ObjCPropertyRefExpr *pre
        = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
                                          ->IgnoreParens());
      if (!pre) return false;
      if (pre->isImplicitProperty()) return false;
      ObjCPropertyDecl *property = pre->getExplicitProperty();
      // The property must retain (directly, or via a __strong backing ivar).
      if (!property->isRetaining() &&
          !(property->getPropertyIvarDecl() &&
            property->getPropertyIvarDecl()->getType()
              .getObjCLifetime() == Qualifiers::OCL_Strong))
        return false;

      owner.Indirect = true;
      if (pre->isSuperReceiver()) {
        // The owner is the method's implicit 'self'.
        owner.Variable = S.getCurMethodDecl()->getSelfDecl();
        if (!owner.Variable)
          return false;
        owner.Loc = pre->getLocation();
        owner.Range = pre->getSourceRange();
        return true;
      }
      e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
                              ->getSourceExpr());
      continue;
    }

    // Array ivars?

    return false;
  }
}
namespace {

/// AST visitor that searches a block body for an expression that captures
/// Variable.  The first such reference found is recorded in Capturer;
/// VarWillBeReased is set if the block assigns 0/nil to the variable
/// (which breaks the cycle, so no warning should be issued).
struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
  VarDecl *Variable;             // the variable we are looking for
  Expr *Capturer = nullptr;      // first capturing reference found, if any
  bool VarWillBeReased = false;  // true if the block zeroes the variable

  FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
      : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
        Variable(variable) {}

  void VisitDeclRefExpr(DeclRefExpr *ref) {
    if (ref->getDecl() == Variable && !Capturer)
      Capturer = ref;
  }

  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
    if (Capturer) return;
    Visit(ref->getBase());
    // A free ivar reference implicitly captures 'self'.
    if (Capturer && ref->isFreeIvar())
      Capturer = ref;
  }

  void VisitBlockExpr(BlockExpr *block) {
    // Look inside nested blocks
    if (block->getBlockDecl()->capturesVariable(Variable))
      Visit(block->getBlockDecl()->getBody());
  }

  void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
    if (Capturer) return;
    if (OVE->getSourceExpr())
      Visit(OVE->getSourceExpr());
  }

  void VisitBinaryOperator(BinaryOperator *BinOp) {
    // Watch for "Variable = 0;" inside the block.
    if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
      return;
    Expr *LHS = BinOp->getLHS();
    if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) {
      if (DRE->getDecl() != Variable)
        return;
      if (Expr *RHS = BinOp->getRHS()) {
        RHS = RHS->IgnoreParenCasts();
        std::optional<llvm::APSInt> Value;
        VarWillBeReased =
            (RHS && (Value = RHS->getIntegerConstantExpr(Context)) &&
             *Value == 0);
      }
    }
  }
};

} // namespace
/// Check whether the given argument is a block which captures a
/// variable.  Returns the capturing expression within the block, or null if
/// the argument is not a capturing block (or the block resets the variable,
/// breaking the cycle).
static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
  assert(owner.Variable && owner.Loc.isValid());

  e = e->IgnoreParenCasts();

  // Look through [^{...} copy] and Block_copy(^{...}).
  if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
    Selector Cmd = ME->getSelector();
    if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
      e = ME->getInstanceReceiver();
      if (!e)
        return nullptr;
      e = e->IgnoreParenCasts();
    }
  } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
    if (CE->getNumArgs() == 1) {
      FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
      if (Fn) {
        const IdentifierInfo *FnI = Fn->getIdentifier();
        if (FnI && FnI->isStr("_Block_copy")) {
          e = CE->getArg(0)->IgnoreParenCasts();
        }
      }
    }
  }

  BlockExpr *block = dyn_cast<BlockExpr>(e);
  if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
    return nullptr;

  FindCaptureVisitor visitor(S.Context, owner.Variable);
  visitor.Visit(block->getBlockDecl()->getBody());
  // If the block reassigns the variable to zero, the cycle is broken.
  return visitor.VarWillBeReased ? nullptr : visitor.Capturer;
}
/// Emit the retain-cycle warning at the capturing expression, followed by a
/// note pointing at the owning variable.
static void diagnoseRetainCycle(Sema &S, Expr *capturer,
                                RetainCycleOwner &owner) {
  assert(capturer);
  assert(owner.Variable && owner.Loc.isValid());

  S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
      << owner.Variable << capturer->getSourceRange();
  S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
      << owner.Indirect << owner.Range;
}
18159 /// Check for a keyword selector that starts with the word 'add' or
18160 /// 'set'.
18161 static bool isSetterLikeSelector(Selector sel) {
18162 if (sel.isUnarySelector()) return false;
18164 StringRef str = sel.getNameForSlot(0);
18165 while (!str.empty() && str.front() == '_') str = str.substr(1);
18166 if (str.startswith("set"))
18167 str = str.substr(3);
18168 else if (str.startswith("add")) {
18169 // Specially allow 'addOperationWithBlock:'.
18170 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
18171 return false;
18172 str = str.substr(3);
18174 else
18175 return false;
18177 if (str.empty()) return true;
18178 return !isLowercase(str.front());
/// If \p Message is a mutating NSMutableArray method, return the index of
/// the argument that is inserted into the array; otherwise std::nullopt.
static std::optional<int>
GetNSMutableArrayArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
  bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
                                                Message->getReceiverInterface(),
                                                NSAPI::ClassId_NSMutableArray);
  if (!IsMutableArray) {
    return std::nullopt;
  }

  Selector Sel = Message->getSelector();

  std::optional<NSAPI::NSArrayMethodKind> MKOpt =
      S.NSAPIObj->getNSArrayMethodKind(Sel);
  if (!MKOpt) {
    return std::nullopt;
  }

  NSAPI::NSArrayMethodKind MK = *MKOpt;

  switch (MK) {
    // For these methods the inserted object is the first argument.
    case NSAPI::NSMutableArr_addObject:
    case NSAPI::NSMutableArr_insertObjectAtIndex:
    case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
      return 0;
    // replaceObjectAtIndex:withObject: takes the object second.
    case NSAPI::NSMutableArr_replaceObjectAtIndex:
      return 1;

    default:
      return std::nullopt;
  }

  return std::nullopt;
}
/// If \p Message is a mutating NSMutableDictionary method, return the
/// index of the argument that is stored into the dictionary; otherwise
/// std::nullopt.
static std::optional<int>
GetNSMutableDictionaryArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
  bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
                                            Message->getReceiverInterface(),
                                            NSAPI::ClassId_NSMutableDictionary);
  if (!IsMutableDictionary) {
    return std::nullopt;
  }

  Selector Sel = Message->getSelector();

  std::optional<NSAPI::NSDictionaryMethodKind> MKOpt =
      S.NSAPIObj->getNSDictionaryMethodKind(Sel);
  if (!MKOpt) {
    return std::nullopt;
  }

  NSAPI::NSDictionaryMethodKind MK = *MKOpt;

  switch (MK) {
    // For all recognized setters the stored object is the first argument.
    case NSAPI::NSMutableDict_setObjectForKey:
    case NSAPI::NSMutableDict_setValueForKey:
    case NSAPI::NSMutableDict_setObjectForKeyedSubscript:
      return 0;

    default:
      return std::nullopt;
  }

  return std::nullopt;
}
/// If \p Message is a mutating NSMutableSet or NSMutableOrderedSet method,
/// return the index of the argument that is inserted into the set;
/// otherwise std::nullopt.
static std::optional<int> GetNSSetArgumentIndex(Sema &S,
                                                ObjCMessageExpr *Message) {
  bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
                                                Message->getReceiverInterface(),
                                                NSAPI::ClassId_NSMutableSet);

  bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
                                            Message->getReceiverInterface(),
                                            NSAPI::ClassId_NSMutableOrderedSet);
  if (!IsMutableSet && !IsMutableOrderedSet) {
    return std::nullopt;
  }

  Selector Sel = Message->getSelector();

  std::optional<NSAPI::NSSetMethodKind> MKOpt =
      S.NSAPIObj->getNSSetMethodKind(Sel);
  if (!MKOpt) {
    return std::nullopt;
  }

  NSAPI::NSSetMethodKind MK = *MKOpt;

  switch (MK) {
    // The inserted object is the first argument for these methods...
    case NSAPI::NSMutableSet_addObject:
    case NSAPI::NSOrderedSet_setObjectAtIndex:
    case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript:
    case NSAPI::NSOrderedSet_insertObjectAtIndex:
      return 0;
    // ...and the second for replaceObjectAtIndex:withObject:.
    case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject:
      return 1;
  }

  return std::nullopt;
}
/// Warn when a Foundation mutable container is asked to insert itself
/// (e.g. [array addObject:array]), which creates a circular container and
/// typically an infinite retain cycle or crash at runtime.
void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
  if (!Message->isInstanceMessage()) {
    return;
  }

  // Find which argument (if any) is the object being inserted into the
  // receiver container.
  std::optional<int> ArgOpt;

  if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
    return;
  }

  int ArgIndex = *ArgOpt;

  Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
  if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
    Arg = OE->getSourceExpr()->IgnoreImpCasts();
  }

  if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
    // [super addObject:self] inserts the same underlying object.
    if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
      if (ArgRE->isObjCSelfExpr()) {
        Diag(Message->getSourceRange().getBegin(),
             diag::warn_objc_circular_container)
            << ArgRE->getDecl() << StringRef("'super'");
      }
    }
  } else {
    Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();

    if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
      Receiver = OE->getSourceExpr()->IgnoreImpCasts();
    }

    // Receiver and argument referring to the same variable declaration.
    if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
      if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
        if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
          ValueDecl *Decl = ReceiverRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          // 'self' has no interesting declaration site to point at.
          if (!ArgRE->isObjCSelfExpr()) {
            Diag(Decl->getLocation(),
                 diag::note_objc_circular_container_declared_here)
                << Decl;
          }
        }
      }
    } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
      // Receiver and argument referring to the same instance variable.
      if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
        if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
          ObjCIvarDecl *Decl = IvarRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          Diag(Decl->getLocation(),
               diag::note_objc_circular_container_declared_here)
              << Decl;
        }
      }
    }
  }
}
/// Check a message send to see if it's likely to cause a retain cycle.
///
/// Fires for setter-like instance messages ([owner setCallback:^{ ...
/// [owner ...] ... }]) where a block argument captures the variable that
/// strongly owns the receiver.
void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
  // Only check instance methods whose selector looks like a setter.
  if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
    return;

  // Try to find a variable that the receiver is strongly owned by.
  RetainCycleOwner owner;
  if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
    if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
      return;
  } else {
    // Messages to 'super' implicitly have 'self' as their owner.
    assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
    owner.Variable = getCurMethodDecl()->getSelfDecl();
    owner.Loc = msg->getSuperLoc();
    owner.Range = msg->getSuperLoc();
  }

  // Check whether the receiver is captured by any of the arguments.
  const ObjCMethodDecl *MD = msg->getMethodDecl();
  for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
    if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) {
      // noescape blocks should not be retained by the method.
      if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
        continue;
      return diagnoseRetainCycle(*this, capturer, owner);
    }
  }
}
/// Check a property assign to see if it's likely to cause a retain cycle.
///
/// \param receiver the object the property belongs to
/// \param argument the value being assigned to the property
void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
  RetainCycleOwner owner;
  if (!findRetainCycleOwner(*this, receiver, owner))
    return;

  if (Expr *capturer = findCapturingExpr(*this, argument, owner))
    diagnoseRetainCycle(*this, capturer, owner);
}
/// Check a variable initialization (Var = ^{...}) for a retain cycle where
/// the initializer block captures the variable being initialized.
void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) {
  RetainCycleOwner Owner;
  if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner))
    return;

  // Because we don't have an expression for the variable, we have to set the
  // location explicitly here.
  Owner.Loc = Var->getLocation();
  Owner.Range = Var->getSourceRange();

  if (Expr *Capturer = findCapturingExpr(*this, Init, Owner))
    diagnoseRetainCycle(*this, Capturer, Owner);
}
/// Diagnose assignment of an Objective-C object literal (array, dict,
/// boxed expression, ...) to a weak variable/property, where the literal
/// may be deallocated immediately.
///
/// \returns true if a diagnostic was emitted.
static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc,
                                     Expr *RHS, bool isProperty) {
  // Check if RHS is an Objective-C object literal, which also can get
  // immediately zapped in a weak reference.  Note that we explicitly
  // allow ObjCStringLiterals, since those are designed to never really die.
  RHS = RHS->IgnoreParenImpCasts();

  // This enum needs to match with the 'select' in
  // warn_objc_arc_literal_assign (off-by-1).
  Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS);
  if (Kind == Sema::LK_String || Kind == Sema::LK_None)
    return false;

  S.Diag(Loc, diag::warn_arc_literal_assign)
    << (unsigned) Kind
    << (isProperty ? 0 : 1)
    << RHS->getSourceRange();

  return true;
}
/// Diagnose assignment of a +1 (retained) object or an ObjC literal to a
/// weak or unsafe_unretained lvalue, where the object may be released
/// immediately after the assignment.
///
/// \returns true if a diagnostic was emitted.
static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc,
                                    Qualifiers::ObjCLifetime LT,
                                    Expr *RHS, bool isProperty) {
  // Strip off any implicit cast added to get to the one ARC-specific.
  while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
    if (cast->getCastKind() == CK_ARCConsumeObject) {
      // The RHS is a +1 value; assigning it to a non-strong lvalue leaks
      // or dangles.
      S.Diag(Loc, diag::warn_arc_retained_assign)
        << (LT == Qualifiers::OCL_ExplicitNone)
        << (isProperty ? 0 : 1)
        << RHS->getSourceRange();
      return true;
    }
    RHS = cast->getSubExpr();
  }

  if (LT == Qualifiers::OCL_Weak &&
      checkUnsafeAssignLiteral(S, Loc, RHS, isProperty))
    return true;

  return false;
}
/// Check an assignment to an lvalue of weak or unsafe_unretained lifetime
/// for unsafe +1 / literal RHS values.
///
/// \returns true if a diagnostic was emitted.
bool Sema::checkUnsafeAssigns(SourceLocation Loc,
                              QualType LHS, Expr *RHS) {
  Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime();

  // Only weak and unsafe_unretained destinations are at risk.
  if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone)
    return false;

  if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false))
    return true;

  return false;
}
/// Check an assignment expression (including property assignments) for
/// unsafe ARC semantics: +1 values or literals stored into weak/assign
/// destinations, and repeated-use-of-weak bookkeeping.
void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
                                  Expr *LHS, Expr *RHS) {
  QualType LHSType;
  // PropertyRef on LHS type need be directly obtained from
  // its declaration as it has a PseudoType.
  ObjCPropertyRefExpr *PRE
    = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens());
  if (PRE && !PRE->isImplicitProperty()) {
    const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
    if (PD)
      LHSType = PD->getType();
  }

  if (LHSType.isNull())
    LHSType = LHS->getType();

  Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();

  if (LT == Qualifiers::OCL_Weak) {
    // Writing to a weak lvalue makes subsequent reads of it "safe" for the
    // -Warc-repeated-use-of-weak analysis.
    if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
      getCurFunction()->markSafeWeakUse(LHS);
  }

  if (checkUnsafeAssigns(Loc, LHSType, RHS))
    return;

  // FIXME. Check for other life times.
  if (LT != Qualifiers::OCL_None)
    return;

  if (PRE) {
    if (PRE->isImplicitProperty())
      return;
    const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
    if (!PD)
      return;

    unsigned Attributes = PD->getPropertyAttributes();
    if (Attributes & ObjCPropertyAttribute::kind_assign) {
      // when 'assign' attribute was not explicitly specified
      // by user, ignore it and rely on property type itself
      // for lifetime info.
      unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
      if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) &&
          LHSType->isObjCRetainableType())
        return;

      // A +1 value assigned to an 'assign' property leaks.
      while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
        if (cast->getCastKind() == CK_ARCConsumeObject) {
          Diag(Loc, diag::warn_arc_retained_property_assign)
            << RHS->getSourceRange();
          return;
        }
        RHS = cast->getSubExpr();
      }
    } else if (Attributes & ObjCPropertyAttribute::kind_weak) {
      if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true))
        return;
    }
  }
}
18520 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===//
/// Decide whether a null-statement body deserves an empty-body warning:
/// only when the trailing semicolon sits on the same line as the
/// controlling statement and was not produced by an empty macro.
static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr,
                                        SourceLocation StmtLoc,
                                        const NullStmt *Body) {
  // Do not warn if the body is a macro that expands to nothing, e.g:
  //
  // #define CALL(x)
  // if (condition)
  //   CALL(0);
  if (Body->hasLeadingEmptyMacro())
    return false;

  // Get line numbers of statement and body.
  bool StmtLineInvalid;
  unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc,
                                                      &StmtLineInvalid);
  if (StmtLineInvalid)
    return false;

  bool BodyLineInvalid;
  unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(),
                                                      &BodyLineInvalid);
  if (BodyLineInvalid)
    return false;

  // Warn if null statement and body are on the same line.
  if (StmtLine != BodyLine)
    return false;

  return true;
}
/// Emit \p DiagID (plus a note suggesting a separate line) when \p Body is
/// a suspicious null statement attached to the construct at \p StmtLoc.
void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
                                 const Stmt *Body,
                                 unsigned DiagID) {
  // Since this is a syntactic check, don't emit diagnostic for template
  // instantiations, this just adds noise.
  if (CurrentInstantiationScope)
    return;

  // The body should be a null statement.
  const NullStmt *NBody = dyn_cast<NullStmt>(Body);
  if (!NBody)
    return;

  // Do the usual checks.
  if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
    return;

  Diag(NBody->getSemiLoc(), DiagID);
  Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
}
/// Warn about an empty for/while body (`for (...);`) when the following
/// statement's layout suggests it was intended to be the loop body.
///
/// \param S the for/while statement
/// \param PossibleBody the statement that follows the loop in the source
void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
                                 const Stmt *PossibleBody) {
  assert(!CurrentInstantiationScope); // Ensured by caller

  SourceLocation StmtLoc;
  const Stmt *Body;
  unsigned DiagID;
  if (const ForStmt *FS = dyn_cast<ForStmt>(S)) {
    StmtLoc = FS->getRParenLoc();
    Body = FS->getBody();
    DiagID = diag::warn_empty_for_body;
  } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
    StmtLoc = WS->getRParenLoc();
    Body = WS->getBody();
    DiagID = diag::warn_empty_while_body;
  } else
    return; // Neither `for' nor `while'.

  // The body should be a null statement.
  const NullStmt *NBody = dyn_cast<NullStmt>(Body);
  if (!NBody)
    return;

  // Skip expensive checks if diagnostic is disabled.
  if (Diags.isIgnored(DiagID, NBody->getSemiLoc()))
    return;

  // Do the usual checks.
  if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
    return;

  // `for(...);' and `while(...);' are popular idioms, so in order to keep
  // noise level low, emit diagnostics only if for/while is followed by a
  // CompoundStmt, e.g.:
  //    for (int i = 0; i < n; i++);
  //    {
  //      a(i);
  //    }
  // or if for/while is followed by a statement with more indentation
  // than for/while itself:
  //    for (int i = 0; i < n; i++);
  //      a(i);
  bool ProbableTypo = isa<CompoundStmt>(PossibleBody);
  if (!ProbableTypo) {
    bool BodyColInvalid;
    unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
        PossibleBody->getBeginLoc(), &BodyColInvalid);
    if (BodyColInvalid)
      return;

    bool StmtColInvalid;
    unsigned StmtCol =
        SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid);
    if (StmtColInvalid)
      return;

    if (BodyCol > StmtCol)
      ProbableTypo = true;
  }

  if (ProbableTypo) {
    Diag(NBody->getSemiLoc(), DiagID);
    Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
  }
}
18640 //===--- CHECK: Warn on self move with std::move. -------------------------===//
18642 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
18643 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
18644 SourceLocation OpLoc) {
18645 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc))
18646 return;
18648 if (inTemplateInstantiation())
18649 return;
18651 // Strip parens and casts away.
18652 LHSExpr = LHSExpr->IgnoreParenImpCasts();
18653 RHSExpr = RHSExpr->IgnoreParenImpCasts();
18655 // Check for a call expression
18656 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
18657 if (!CE || CE->getNumArgs() != 1)
18658 return;
18660 // Check for a call to std::move
18661 if (!CE->isCallToStdMove())
18662 return;
18664 // Get argument from std::move
18665 RHSExpr = CE->getArg(0);
18667 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
18668 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
18670 // Two DeclRefExpr's, check that the decls are the same.
18671 if (LHSDeclRef && RHSDeclRef) {
18672 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
18673 return;
18674 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
18675 RHSDeclRef->getDecl()->getCanonicalDecl())
18676 return;
18678 auto D = Diag(OpLoc, diag::warn_self_move)
18679 << LHSExpr->getType() << LHSExpr->getSourceRange()
18680 << RHSExpr->getSourceRange();
18681 if (const FieldDecl *F =
18682 getSelfAssignmentClassMemberCandidate(RHSDeclRef->getDecl()))
18683 D << 1 << F
18684 << FixItHint::CreateInsertion(LHSDeclRef->getBeginLoc(), "this->");
18685 else
18686 D << 0;
18687 return;
18690 // Member variables require a different approach to check for self moves.
18691 // MemberExpr's are the same if every nested MemberExpr refers to the same
18692 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or
18693 // the base Expr's are CXXThisExpr's.
18694 const Expr *LHSBase = LHSExpr;
18695 const Expr *RHSBase = RHSExpr;
18696 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
18697 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
18698 if (!LHSME || !RHSME)
18699 return;
18701 while (LHSME && RHSME) {
18702 if (LHSME->getMemberDecl()->getCanonicalDecl() !=
18703 RHSME->getMemberDecl()->getCanonicalDecl())
18704 return;
18706 LHSBase = LHSME->getBase();
18707 RHSBase = RHSME->getBase();
18708 LHSME = dyn_cast<MemberExpr>(LHSBase);
18709 RHSME = dyn_cast<MemberExpr>(RHSBase);
18712 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
18713 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
18714 if (LHSDeclRef && RHSDeclRef) {
18715 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
18716 return;
18717 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
18718 RHSDeclRef->getDecl()->getCanonicalDecl())
18719 return;
18721 Diag(OpLoc, diag::warn_self_move)
18722 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange()
18723 << RHSExpr->getSourceRange();
18724 return;
18727 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
18728 Diag(OpLoc, diag::warn_self_move)
18729 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange()
18730 << RHSExpr->getSourceRange();
18733 //===--- Layout compatibility ----------------------------------------------//
18735 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
/// Check if two enumeration types are layout-compatible.
static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
  // C++11 [dcl.enum] p8:
  // Two enumeration types are layout-compatible if they have the same
  // underlying type.
  // Incomplete enums have no underlying type yet, so they never match.
  return ED1->isComplete() && ED2->isComplete() &&
         C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
}
/// Check if two fields are layout-compatible: compatible types, and for
/// bit-fields, identical widths.
static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
                               FieldDecl *Field2) {
  if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
    return false;

  // A bit-field never matches a non-bit-field.
  if (Field1->isBitField() != Field2->isBitField())
    return false;

  if (Field1->isBitField()) {
    // Make sure that the bit-fields are the same length.
    unsigned Bits1 = Field1->getBitWidthValue(C);
    unsigned Bits2 = Field2->getBitWidthValue(C);

    if (Bits1 != Bits2)
      return false;
  }

  return true;
}
/// Check if two standard-layout structs are layout-compatible.
/// (C++11 [class.mem] p17)
///
/// Requires matching base classes (pairwise, in order) and matching fields
/// (pairwise, in declaration order, same count).
static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
                                     RecordDecl *RD2) {
  // If both records are C++ classes, check that base classes match.
  if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) {
    // If one of records is a CXXRecordDecl we are in C++ mode,
    // thus the other one is a CXXRecordDecl, too.
    const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2);
    // Check number of base classes.
    if (D1CXX->getNumBases() != D2CXX->getNumBases())
      return false;

    // Check the base classes.
    for (CXXRecordDecl::base_class_const_iterator
               Base1 = D1CXX->bases_begin(),
           BaseEnd1 = D1CXX->bases_end(),
              Base2 = D2CXX->bases_begin();
         Base1 != BaseEnd1;
         ++Base1, ++Base2) {
      if (!isLayoutCompatible(C, Base1->getType(), Base2->getType()))
        return false;
    }
  } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) {
    // If only RD2 is a C++ class, it should have zero base classes.
    if (D2CXX->getNumBases() > 0)
      return false;
  }

  // Check the fields.
  RecordDecl::field_iterator Field2 = RD2->field_begin(),
                             Field2End = RD2->field_end(),
                             Field1 = RD1->field_begin(),
                             Field1End = RD1->field_end();
  for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
    if (!isLayoutCompatible(C, *Field1, *Field2))
      return false;
  }
  // Both records must have been exhausted (same number of fields).
  if (Field1 != Field1End || Field2 != Field2End)
    return false;

  return true;
}
/// Check if two standard-layout unions are layout-compatible.
/// (C++11 [class.mem] p18)
///
/// Unlike structs, union members may match in any order: each field of RD1
/// must pair with some distinct layout-compatible field of RD2.
static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
                                    RecordDecl *RD2) {
  llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
  for (auto *Field2 : RD2->fields())
    UnmatchedFields.insert(Field2);

  for (auto *Field1 : RD1->fields()) {
    llvm::SmallPtrSet<FieldDecl *, 8>::iterator
        I = UnmatchedFields.begin(),
        E = UnmatchedFields.end();

    for ( ; I != E; ++I) {
      if (isLayoutCompatible(C, Field1, *I)) {
        // Each RD2 field can only be consumed once.
        bool Result = UnmatchedFields.erase(*I);
        (void) Result;
        assert(Result);
        break;
      }
    }
    if (I == E)
      return false;
  }

  // Every RD2 field must have been matched as well.
  return UnmatchedFields.empty();
}
18839 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
18840 RecordDecl *RD2) {
18841 if (RD1->isUnion() != RD2->isUnion())
18842 return false;
18844 if (RD1->isUnion())
18845 return isLayoutCompatibleUnion(C, RD1, RD2);
18846 else
18847 return isLayoutCompatibleStruct(C, RD1, RD2);
/// Check if two types are layout-compatible in C++11 sense.
///
/// Identical types are trivially compatible; otherwise qualifiers are
/// dropped and enum/record pairs are compared structurally.
static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
  if (T1.isNull() || T2.isNull())
    return false;

  // C++11 [basic.types] p11:
  // If two types T1 and T2 are the same type, then T1 and T2 are
  // layout-compatible types.
  if (C.hasSameType(T1, T2))
    return true;

  T1 = T1.getCanonicalType().getUnqualifiedType();
  T2 = T2.getCanonicalType().getUnqualifiedType();

  const Type::TypeClass TC1 = T1->getTypeClass();
  const Type::TypeClass TC2 = T2->getTypeClass();

  // Different kinds of type (e.g. enum vs record) never match.
  if (TC1 != TC2)
    return false;

  if (TC1 == Type::Enum) {
    return isLayoutCompatible(C,
                              cast<EnumType>(T1)->getDecl(),
                              cast<EnumType>(T2)->getDecl());
  } else if (TC1 == Type::Record) {
    // Only standard-layout records participate in layout compatibility.
    if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
      return false;

    return isLayoutCompatible(C,
                              cast<RecordType>(T1)->getDecl(),
                              cast<RecordType>(T2)->getDecl());
  }

  return false;
}
18886 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
/// Given a type tag expression find the type tag itself.
///
/// \param TypeExpr Type tag expression, as it appears in user's code.
///
/// \param VD Declaration of an identifier that appears in a type tag.
///
/// \param MagicValue Type tag magic value.
///
/// \param isConstantEvaluated whether the evalaution should be performed in
/// constant context.
///
/// \returns true and fills in either \p *VD (an identifier tag) or
/// \p *MagicValue (an integer tag); false if no tag could be extracted.
static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
                            const ValueDecl **VD, uint64_t *MagicValue,
                            bool isConstantEvaluated) {
  // Iteratively peel wrappers (&, *, parens, casts, comma, ?:) until we
  // reach a DeclRefExpr or an integer literal.
  while(true) {
    if (!TypeExpr)
      return false;

    TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();

    switch (TypeExpr->getStmtClass()) {
    case Stmt::UnaryOperatorClass: {
      const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
      if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
        TypeExpr = UO->getSubExpr();
        continue;
      }
      return false;
    }

    case Stmt::DeclRefExprClass: {
      const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
      *VD = DRE->getDecl();
      return true;
    }

    case Stmt::IntegerLiteralClass: {
      const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
      llvm::APInt MagicValueAPInt = IL->getValue();
      // Magic values wider than 64 bits cannot be represented.
      if (MagicValueAPInt.getActiveBits() <= 64) {
        *MagicValue = MagicValueAPInt.getZExtValue();
        return true;
      } else
        return false;
    }

    case Stmt::BinaryConditionalOperatorClass:
    case Stmt::ConditionalOperatorClass: {
      // Follow whichever arm a constant-foldable condition selects.
      const AbstractConditionalOperator *ACO =
          cast<AbstractConditionalOperator>(TypeExpr);
      bool Result;
      if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
                                                     isConstantEvaluated)) {
        if (Result)
          TypeExpr = ACO->getTrueExpr();
        else
          TypeExpr = ACO->getFalseExpr();
        continue;
      }
      return false;
    }

    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
      if (BO->getOpcode() == BO_Comma) {
        TypeExpr = BO->getRHS();
        continue;
      }
      return false;
    }

    default:
      return false;
    }
  }
}
/// Retrieve the C type corresponding to type tag TypeExpr.
///
/// \param TypeExpr Expression that specifies a type tag.
///
/// \param MagicValues Registered magic values.
///
/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
///        kind.
///
/// \param TypeInfo Information about the corresponding C type.
///
/// \param isConstantEvaluated whether the evalaution should be performed in
/// constant context.
///
/// \returns true if the corresponding C type was found.
static bool GetMatchingCType(
    const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
    const ASTContext &Ctx,
    const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
        *MagicValues,
    bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
    bool isConstantEvaluated) {
  FoundWrongKind = false;

  // Variable declaration that has type_tag_for_datatype attribute.
  const ValueDecl *VD = nullptr;

  uint64_t MagicValue;

  if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
    return false;

  if (VD) {
    // Identifier tag: read the type info off its attribute.
    if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
      if (I->getArgumentKind() != ArgumentKind) {
        FoundWrongKind = true;
        return false;
      }
      TypeInfo.Type = I->getMatchingCType();
      TypeInfo.LayoutCompatible = I->getLayoutCompatible();
      TypeInfo.MustBeNull = I->getMustBeNull();
      return true;
    }
    return false;
  }

  // Integer tag: look it up among the registered magic values.
  if (!MagicValues)
    return false;

  llvm::DenseMap<Sema::TypeTagMagicValue,
                 Sema::TypeTagData>::const_iterator I =
      MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
  if (I == MagicValues->end())
    return false;

  TypeInfo = I->second;
  return true;
}
/// Register a (kind, magic value) -> type mapping used by
/// CheckArgumentWithTypeTag, lazily allocating the map on first use.
void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                      uint64_t MagicValue, QualType Type,
                                      bool LayoutCompatible,
                                      bool MustBeNull) {
  if (!TypeTagForDatatypeMagicValues)
    TypeTagForDatatypeMagicValues.reset(
        new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);

  TypeTagMagicValue Magic(ArgumentKind, MagicValue);
  (*TypeTagForDatatypeMagicValues)[Magic] =
      TypeTagData(Type, LayoutCompatible, MustBeNull);
}
19037 static bool IsSameCharType(QualType T1, QualType T2) {
19038 const BuiltinType *BT1 = T1->getAs<BuiltinType>();
19039 if (!BT1)
19040 return false;
19042 const BuiltinType *BT2 = T2->getAs<BuiltinType>();
19043 if (!BT2)
19044 return false;
19046 BuiltinType::Kind T1Kind = BT1->getKind();
19047 BuiltinType::Kind T2Kind = BT2->getKind();
19049 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
19050 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
19051 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
19052 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
/// Enforce an argument_with_type_tag attribute on a call: the argument at
/// the attribute's arg_idx must have the C type (or a layout-compatible
/// one) associated with the type tag at type_tag_idx, or be null when the
/// tag demands it.
void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                    const ArrayRef<const Expr *> ExprArgs,
                                    SourceLocation CallSiteLoc) {
  const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
  bool IsPointerAttr = Attr->getIsPointer();

  // Retrieve the argument representing the 'type_tag'.
  unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
  if (TypeTagIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 0 << Attr->getTypeTagIdx().getSourceIndex();
    return;
  }
  const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
  bool FoundWrongKind;
  TypeTagData TypeInfo;
  if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
                        TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
                        TypeInfo, isConstantEvaluatedContext())) {
    if (FoundWrongKind)
      Diag(TypeTagExpr->getExprLoc(),
           diag::warn_type_tag_for_datatype_wrong_kind)
          << TypeTagExpr->getSourceRange();
    return;
  }

  // Retrieve the argument representing the 'arg_idx'.
  unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
  if (ArgumentIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 1 << Attr->getArgumentIdx().getSourceIndex();
    return;
  }
  const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
  if (IsPointerAttr) {
    // Skip implicit cast of pointer to `void *' (as a function argument).
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
      if (ICE->getType()->isVoidPointerType() &&
          ICE->getCastKind() == CK_BitCast)
        ArgumentExpr = ICE->getSubExpr();
  }
  QualType ArgumentType = ArgumentExpr->getType();

  // Passing a `void*' pointer shouldn't trigger a warning.
  if (IsPointerAttr && ArgumentType->isVoidPointerType())
    return;

  if (TypeInfo.MustBeNull) {
    // Type tag with matching void type requires a null pointer.
    if (!ArgumentExpr->isNullPointerConstant(Context,
                                             Expr::NPC_ValueDependentIsNotNull)) {
      Diag(ArgumentExpr->getExprLoc(),
           diag::warn_type_safety_null_pointer_required)
          << ArgumentKind->getName()
          << ArgumentExpr->getSourceRange()
          << TypeTagExpr->getSourceRange();
    }
    return;
  }

  QualType RequiredType = TypeInfo.Type;
  if (IsPointerAttr)
    RequiredType = Context.getPointerType(RequiredType);

  bool mismatch = false;
  if (!TypeInfo.LayoutCompatible) {
    mismatch = !Context.hasSameType(ArgumentType, RequiredType);

    // C++11 [basic.fundamental] p1:
    // Plain char, signed char, and unsigned char are three distinct types.
    //
    // But we treat plain `char' as equivalent to `signed char' or `unsigned
    // char' depending on the current char signedness mode.
    if (mismatch)
      if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
                                           RequiredType->getPointeeType())) ||
          (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
        mismatch = false;
  } else
    if (IsPointerAttr)
      mismatch = !isLayoutCompatible(Context,
                                     ArgumentType->getPointeeType(),
                                     RequiredType->getPointeeType());
    else
      mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);

  if (mismatch)
    Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
        << ArgumentType << ArgumentKind
        << TypeInfo.LayoutCompatible << RequiredType
        << ArgumentExpr->getSourceRange()
        << TypeTagExpr->getSourceRange();
}
19149 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
19150 CharUnits Alignment) {
19151 MisalignedMembers.emplace_back(E, RD, MD, Alignment);
19154 void Sema::DiagnoseMisalignedMembers() {
19155 for (MisalignedMember &m : MisalignedMembers) {
19156 const NamedDecl *ND = m.RD;
19157 if (ND->getName().empty()) {
19158 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
19159 ND = TD;
19161 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
19162 << m.MD << ND << m.E->getSourceRange();
19164 MisalignedMembers.clear();
19167 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
19168 E = E->IgnoreParens();
19169 if (!T->isPointerType() && !T->isIntegerType() && !T->isDependentType())
19170 return;
19171 if (isa<UnaryOperator>(E) &&
19172 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
19173 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
19174 if (isa<MemberExpr>(Op)) {
19175 auto *MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
19176 if (MA != MisalignedMembers.end() &&
19177 (T->isDependentType() || T->isIntegerType() ||
19178 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
19179 Context.getTypeAlignInChars(
19180 T->getPointeeType()) <= MA->Alignment))))
19181 MisalignedMembers.erase(MA);
/// Walk a (possibly chained) member access expression \p E and, if any record
/// or member in the chain is marked `packed` and the synthesized offset or
/// the complete object's alignment cannot satisfy the alignment the full
/// expression's type expects, invoke \p Action with the expression, the
/// record containing the culprit field, the culprit FieldDecl, and the
/// (reduced) alignment actually available.
void Sema::RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action) {
  const auto *ME = dyn_cast<MemberExpr>(E);
  if (!ME)
    return;

  // No need to check expressions with an __unaligned-qualified type.
  if (E->getType().getQualifiers().hasUnaligned())
    return;

  // For a chain of MemberExpr like "a.b.c.d" this list
  // will keep FieldDecl's like [d, c, b].
  SmallVector<FieldDecl *, 4> ReverseMemberChain;
  const MemberExpr *TopME = nullptr;
  bool AnyIsPacked = false;
  do {
    QualType BaseType = ME->getBase()->getType();
    if (BaseType->isDependentType())
      return;
    if (ME->isArrow())
      BaseType = BaseType->getPointeeType();
    RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
    if (RD->isInvalidDecl())
      return;

    ValueDecl *MD = ME->getMemberDecl();
    auto *FD = dyn_cast<FieldDecl>(MD);
    // We do not care about non-data members.
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize offset of the whole access.
  CharUnits Offset;
  for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain))
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD));

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    // A concrete object declaration may be over-aligned relative to its
    // type; references are excluded because the referent's alignment is
    // not knowable from the declaration alone.
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // It may fulfill the offset it but the effective alignment may still be
      // lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit of this.
    // Intuitively, watching the chain of member expressions from right to
    // left, we start with the required alignment (as required by the field
    // type) but some packed attribute in that chain has reduced the alignment.
    // It may happen that another packed structure increases it again. But if
    // we are here such increase has not been enough. So pointing the first
    // FieldDecl that either is packed or else its RecordDecl is,
    // seems reasonable.
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}
19295 void Sema::CheckAddressOfPackedMember(Expr *rhs) {
19296 using namespace std::placeholders;
19298 RefersToMemberWithReducedAlignment(
19299 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
19300 _2, _3, _4));
19303 bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
19304 if (checkArgCount(*this, TheCall, 1))
19305 return true;
19307 ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
19308 if (A.isInvalid())
19309 return true;
19311 TheCall->setArg(0, A.get());
19312 QualType TyA = A.get()->getType();
19314 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
19315 return true;
19317 TheCall->setType(TyA);
19318 return false;
19321 bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) {
19322 if (checkArgCount(*this, TheCall, 2))
19323 return true;
19325 ExprResult A = TheCall->getArg(0);
19326 ExprResult B = TheCall->getArg(1);
19327 // Do standard promotions between the two arguments, returning their common
19328 // type.
19329 QualType Res =
19330 UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison);
19331 if (A.isInvalid() || B.isInvalid())
19332 return true;
19334 QualType TyA = A.get()->getType();
19335 QualType TyB = B.get()->getType();
19337 if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType())
19338 return Diag(A.get()->getBeginLoc(),
19339 diag::err_typecheck_call_different_arg_types)
19340 << TyA << TyB;
19342 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
19343 return true;
19345 TheCall->setArg(0, A.get());
19346 TheCall->setArg(1, B.get());
19347 TheCall->setType(Res);
19348 return false;
19351 bool Sema::SemaBuiltinElementwiseTernaryMath(CallExpr *TheCall) {
19352 if (checkArgCount(*this, TheCall, 3))
19353 return true;
19355 Expr *Args[3];
19356 for (int I = 0; I < 3; ++I) {
19357 ExprResult Converted = UsualUnaryConversions(TheCall->getArg(I));
19358 if (Converted.isInvalid())
19359 return true;
19360 Args[I] = Converted.get();
19363 int ArgOrdinal = 1;
19364 for (Expr *Arg : Args) {
19365 if (checkFPMathBuiltinElementType(*this, Arg->getBeginLoc(), Arg->getType(),
19366 ArgOrdinal++))
19367 return true;
19370 for (int I = 1; I < 3; ++I) {
19371 if (Args[0]->getType().getCanonicalType() !=
19372 Args[I]->getType().getCanonicalType()) {
19373 return Diag(Args[0]->getBeginLoc(),
19374 diag::err_typecheck_call_different_arg_types)
19375 << Args[0]->getType() << Args[I]->getType();
19378 TheCall->setArg(I, Args[I]);
19381 TheCall->setType(Args[0]->getType());
19382 return false;
19385 bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
19386 if (checkArgCount(*this, TheCall, 1))
19387 return true;
19389 ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
19390 if (A.isInvalid())
19391 return true;
19393 TheCall->setArg(0, A.get());
19394 return false;
19397 bool Sema::SemaBuiltinNonDeterministicValue(CallExpr *TheCall) {
19398 if (checkArgCount(*this, TheCall, 1))
19399 return true;
19401 ExprResult Arg = TheCall->getArg(0);
19402 QualType TyArg = Arg.get()->getType();
19404 if (!TyArg->isBuiltinType() && !TyArg->isVectorType())
19405 return Diag(TheCall->getArg(0)->getBeginLoc(), diag::err_builtin_invalid_arg_type)
19406 << 1 << /*vector, integer or floating point ty*/ 0 << TyArg;
19408 TheCall->setType(TyArg);
19409 return false;
19412 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
19413 ExprResult CallResult) {
19414 if (checkArgCount(*this, TheCall, 1))
19415 return ExprError();
19417 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
19418 if (MatrixArg.isInvalid())
19419 return MatrixArg;
19420 Expr *Matrix = MatrixArg.get();
19422 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>();
19423 if (!MType) {
19424 Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type)
19425 << 1 << /* matrix ty*/ 1 << Matrix->getType();
19426 return ExprError();
19429 // Create returned matrix type by swapping rows and columns of the argument
19430 // matrix type.
19431 QualType ResultType = Context.getConstantMatrixType(
19432 MType->getElementType(), MType->getNumColumns(), MType->getNumRows());
19434 // Change the return type to the type of the returned matrix.
19435 TheCall->setType(ResultType);
19437 // Update call argument to use the possibly converted matrix argument.
19438 TheCall->setArg(0, Matrix);
19439 return CallResult;
19442 // Get and verify the matrix dimensions.
19443 static std::optional<unsigned>
19444 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
19445 SourceLocation ErrorPos;
19446 std::optional<llvm::APSInt> Value =
19447 Expr->getIntegerConstantExpr(S.Context, &ErrorPos);
19448 if (!Value) {
19449 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
19450 << Name;
19451 return {};
19453 uint64_t Dim = Value->getZExtValue();
19454 if (!ConstantMatrixType::isDimensionValid(Dim)) {
19455 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
19456 << Name << ConstantMatrixType::getMaxElementsPerDimension();
19457 return {};
19459 return Dim;
/// Type-check __builtin_matrix_column_major_load(ptr, rows, cols, stride):
/// the pointer must point to a valid matrix element type, rows/cols must be
/// valid constant dimensions, and the stride must be at least the row count.
/// The call's result type becomes the corresponding ConstantMatrixType.
ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
                                                  ExprResult CallResult) {
  if (!getLangOpts().MatrixTypes) {
    Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
    return ExprError();
  }

  if (checkArgCount(*this, TheCall, 4))
    return ExprError();

  unsigned PtrArgIdx = 0;
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *RowsExpr = TheCall->getArg(1);
  Expr *ColumnsExpr = TheCall->getArg(2);
  Expr *StrideExpr = TheCall->getArg(3);

  bool ArgError = false;

  // Check pointer argument.
  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(0, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      // Defer checking until instantiation.
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  QualType ElementTy;
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    ElementTy = PtrTy->getPointeeType().getUnqualifiedType();

    if (!ConstantMatrixType::isValidElementType(ElementTy)) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << PtrArgIdx + 1 << /* pointer to element ty*/ 2
          << PtrExpr->getType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the expression to size_t.
  auto ApplyArgumentConversions = [this](Expr *E) {
    ExprResult Conv = DefaultLvalueConversion(E);
    if (Conv.isInvalid())
      return Conv;

    return tryConvertExprToType(Conv.get(), Context.getSizeType());
  };

  // Apply conversion to row and column expressions.
  ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
  if (!RowsConv.isInvalid()) {
    RowsExpr = RowsConv.get();
    TheCall->setArg(1, RowsExpr);
  } else
    RowsExpr = nullptr;

  ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
  if (!ColumnsConv.isInvalid()) {
    ColumnsExpr = ColumnsConv.get();
    TheCall->setArg(2, ColumnsExpr);
  } else
    ColumnsExpr = nullptr;

  // If any part of the result matrix type is still pending, just use
  // Context.DependentTy, until all parts are resolved.
  if ((RowsExpr && RowsExpr->isTypeDependent()) ||
      (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
    TheCall->setType(Context.DependentTy);
    return CallResult;
  }

  // Check row and column dimensions.
  std::optional<unsigned> MaybeRows;
  if (RowsExpr)
    MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);

  std::optional<unsigned> MaybeColumns;
  if (ColumnsExpr)
    MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);

  // Check stride argument.
  ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
  if (StrideConv.isInvalid())
    return ExprError();
  StrideExpr = StrideConv.get();
  TheCall->setArg(3, StrideExpr);

  // A constant stride smaller than the row count would read overlapping /
  // out-of-range elements; only checkable when the stride is a constant.
  if (MaybeRows) {
    if (std::optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < *MaybeRows) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError || !MaybeRows || !MaybeColumns)
    return ExprError();

  TheCall->setType(
      Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
  return CallResult;
}
/// Type-check __builtin_matrix_column_major_store(matrix, ptr, stride): the
/// first argument must be a constant matrix, the pointer must point to a
/// non-const element type matching the matrix's element type, and a constant
/// stride must be at least the matrix's row count.
ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
                                                   ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 3))
    return ExprError();

  unsigned PtrArgIdx = 1;
  Expr *MatrixExpr = TheCall->getArg(0);
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *StrideExpr = TheCall->getArg(2);

  bool ArgError = false;

  {
    ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
    if (MatrixConv.isInvalid())
      return MatrixConv;
    MatrixExpr = MatrixConv.get();
    TheCall->setArg(0, MatrixExpr);

    if (MatrixExpr->isTypeDependent()) {
      // Defer checking until instantiation.
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
  if (!MatrixTy) {
    Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /*matrix ty */ 1 << MatrixExpr->getType();
    ArgError = true;
  }

  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(1, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  // Check pointer argument.
  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    QualType ElementTy = PtrTy->getPointeeType();
    // Storing through a pointer-to-const is always an error.
    if (ElementTy.isConstQualified()) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
      ArgError = true;
    }
    ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
    if (MatrixTy &&
        !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
      Diag(PtrExpr->getBeginLoc(),
           diag::err_builtin_matrix_pointer_arg_mismatch)
          << ElementTy << MatrixTy->getElementType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the stride expression to
  // size_t.
  {
    ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
    if (StrideConv.isInvalid())
      return StrideConv;

    StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
    if (StrideConv.isInvalid())
      return StrideConv;
    StrideExpr = StrideConv.get();
    TheCall->setArg(2, StrideExpr);
  }

  // Check stride argument.
  if (MatrixTy) {
    if (std::optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < MatrixTy->getNumRows()) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError)
    return ExprError();

  return CallResult;
}
19676 /// Checks the argument at the given index is a WebAssembly table and if it
19677 /// is, sets ElTy to the element type.
19678 static bool CheckWasmBuiltinArgIsTable(Sema &S, CallExpr *E, unsigned ArgIndex,
19679 QualType &ElTy) {
19680 Expr *ArgExpr = E->getArg(ArgIndex);
19681 const auto *ATy = dyn_cast<ArrayType>(ArgExpr->getType());
19682 if (!ATy || !ATy->getElementType().isWebAssemblyReferenceType()) {
19683 return S.Diag(ArgExpr->getBeginLoc(),
19684 diag::err_wasm_builtin_arg_must_be_table_type)
19685 << ArgIndex + 1 << ArgExpr->getSourceRange();
19687 ElTy = ATy->getElementType();
19688 return false;
19691 /// Checks the argument at the given index is an integer.
19692 static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E,
19693 unsigned ArgIndex) {
19694 Expr *ArgExpr = E->getArg(ArgIndex);
19695 if (!ArgExpr->getType()->isIntegerType()) {
19696 return S.Diag(ArgExpr->getBeginLoc(),
19697 diag::err_wasm_builtin_arg_must_be_integer_type)
19698 << ArgIndex + 1 << ArgExpr->getSourceRange();
19700 return false;
19703 /// Check that the first argument is a WebAssembly table, and the second
19704 /// is an index to use as index into the table.
19705 bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) {
19706 if (checkArgCount(*this, TheCall, 2))
19707 return true;
19709 QualType ElTy;
19710 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
19711 return true;
19713 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
19714 return true;
19716 // If all is well, we set the type of TheCall to be the type of the
19717 // element of the table.
19718 // i.e. a table.get on an externref table has type externref,
19719 // or whatever the type of the table element is.
19720 TheCall->setType(ElTy);
19722 return false;
19725 /// Check that the first argumnet is a WebAssembly table, the second is
19726 /// an index to use as index into the table and the third is the reference
19727 /// type to set into the table.
19728 bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) {
19729 if (checkArgCount(*this, TheCall, 3))
19730 return true;
19732 QualType ElTy;
19733 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
19734 return true;
19736 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
19737 return true;
19739 if (!Context.hasSameType(ElTy, TheCall->getArg(2)->getType()))
19740 return true;
19742 return false;
19745 /// Check that the argument is a WebAssembly table.
19746 bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) {
19747 if (checkArgCount(*this, TheCall, 1))
19748 return true;
19750 QualType ElTy;
19751 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
19752 return true;
19754 return false;
19757 /// Check that the first argument is a WebAssembly table, the second is the
19758 /// value to use for new elements (of a type matching the table type), the
19759 /// third value is an integer.
19760 bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) {
19761 if (checkArgCount(*this, TheCall, 3))
19762 return true;
19764 QualType ElTy;
19765 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
19766 return true;
19768 Expr *NewElemArg = TheCall->getArg(1);
19769 if (!Context.hasSameType(ElTy, NewElemArg->getType())) {
19770 return Diag(NewElemArg->getBeginLoc(),
19771 diag::err_wasm_builtin_arg_must_match_table_element_type)
19772 << 2 << 1 << NewElemArg->getSourceRange();
19775 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 2))
19776 return true;
19778 return false;
19781 /// Check that the first argument is a WebAssembly table, the second is an
19782 /// integer, the third is the value to use to fill the table (of a type
19783 /// matching the table type), and the fourth is an integer.
19784 bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) {
19785 if (checkArgCount(*this, TheCall, 4))
19786 return true;
19788 QualType ElTy;
19789 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
19790 return true;
19792 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
19793 return true;
19795 Expr *NewElemArg = TheCall->getArg(2);
19796 if (!Context.hasSameType(ElTy, NewElemArg->getType())) {
19797 return Diag(NewElemArg->getBeginLoc(),
19798 diag::err_wasm_builtin_arg_must_match_table_element_type)
19799 << 3 << 1 << NewElemArg->getSourceRange();
19802 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 3))
19803 return true;
19805 return false;
19808 /// Check that the first argument is a WebAssembly table, the second is also a
19809 /// WebAssembly table (of the same element type), and the third to fifth
19810 /// arguments are integers.
19811 bool Sema::BuiltinWasmTableCopy(CallExpr *TheCall) {
19812 if (checkArgCount(*this, TheCall, 5))
19813 return true;
19815 QualType XElTy;
19816 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, XElTy))
19817 return true;
19819 QualType YElTy;
19820 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 1, YElTy))
19821 return true;
19823 Expr *TableYArg = TheCall->getArg(1);
19824 if (!Context.hasSameType(XElTy, YElTy)) {
19825 return Diag(TableYArg->getBeginLoc(),
19826 diag::err_wasm_builtin_arg_must_match_table_element_type)
19827 << 2 << 1 << TableYArg->getSourceRange();
19830 for (int I = 2; I <= 4; I++) {
19831 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, I))
19832 return true;
19835 return false;
19838 /// \brief Enforce the bounds of a TCB
19839 /// CheckTCBEnforcement - Enforces that every function in a named TCB only
19840 /// directly calls other functions in the same TCB as marked by the enforce_tcb
19841 /// and enforce_tcb_leaf attributes.
19842 void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc,
19843 const NamedDecl *Callee) {
19844 // This warning does not make sense in code that has no runtime behavior.
19845 if (isUnevaluatedContext())
19846 return;
19848 const NamedDecl *Caller = getCurFunctionOrMethodDecl();
19850 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>())
19851 return;
19853 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find
19854 // all TCBs the callee is a part of.
19855 llvm::StringSet<> CalleeTCBs;
19856 for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>())
19857 CalleeTCBs.insert(A->getTCBName());
19858 for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>())
19859 CalleeTCBs.insert(A->getTCBName());
19861 // Go through the TCBs the caller is a part of and emit warnings if Caller
19862 // is in a TCB that the Callee is not.
19863 for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) {
19864 StringRef CallerTCB = A->getTCBName();
19865 if (CalleeTCBs.count(CallerTCB) == 0) {
19866 this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation)
19867 << Callee << CallerTCB;