1 //===------ SemaARM.cpp ---------- ARM target-specific routines -----------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements semantic analysis functions specific to ARM.
11 //===----------------------------------------------------------------------===//
13 #include "clang/Sema/SemaARM.h"
14 #include "clang/Basic/DiagnosticSema.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "clang/Sema/Initialization.h"
18 #include "clang/Sema/ParsedAttr.h"
19 #include "clang/Sema/Sema.h"
// Constructor: wires this ARM-specific Sema helper to the main Sema instance
// via the SemaBase interface (gives access to Diag(), SemaRef, etc.).
SemaARM::SemaARM(Sema &S) : SemaBase(S) {}
25 /// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
26 bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID
,
28 ASTContext
&Context
= getASTContext();
30 if (BuiltinID
== AArch64::BI__builtin_arm_irg
) {
31 if (SemaRef
.checkArgCount(TheCall
, 2))
33 Expr
*Arg0
= TheCall
->getArg(0);
34 Expr
*Arg1
= TheCall
->getArg(1);
36 ExprResult FirstArg
= SemaRef
.DefaultFunctionArrayLvalueConversion(Arg0
);
37 if (FirstArg
.isInvalid())
39 QualType FirstArgType
= FirstArg
.get()->getType();
40 if (!FirstArgType
->isAnyPointerType())
41 return Diag(TheCall
->getBeginLoc(), diag::err_memtag_arg_must_be_pointer
)
42 << "first" << FirstArgType
<< Arg0
->getSourceRange();
43 TheCall
->setArg(0, FirstArg
.get());
45 ExprResult SecArg
= SemaRef
.DefaultLvalueConversion(Arg1
);
46 if (SecArg
.isInvalid())
48 QualType SecArgType
= SecArg
.get()->getType();
49 if (!SecArgType
->isIntegerType())
50 return Diag(TheCall
->getBeginLoc(), diag::err_memtag_arg_must_be_integer
)
51 << "second" << SecArgType
<< Arg1
->getSourceRange();
53 // Derive the return type from the pointer argument.
54 TheCall
->setType(FirstArgType
);
58 if (BuiltinID
== AArch64::BI__builtin_arm_addg
) {
59 if (SemaRef
.checkArgCount(TheCall
, 2))
62 Expr
*Arg0
= TheCall
->getArg(0);
63 ExprResult FirstArg
= SemaRef
.DefaultFunctionArrayLvalueConversion(Arg0
);
64 if (FirstArg
.isInvalid())
66 QualType FirstArgType
= FirstArg
.get()->getType();
67 if (!FirstArgType
->isAnyPointerType())
68 return Diag(TheCall
->getBeginLoc(), diag::err_memtag_arg_must_be_pointer
)
69 << "first" << FirstArgType
<< Arg0
->getSourceRange();
70 TheCall
->setArg(0, FirstArg
.get());
72 // Derive the return type from the pointer argument.
73 TheCall
->setType(FirstArgType
);
75 // Second arg must be an constant in range [0,15]
76 return SemaRef
.BuiltinConstantArgRange(TheCall
, 1, 0, 15);
79 if (BuiltinID
== AArch64::BI__builtin_arm_gmi
) {
80 if (SemaRef
.checkArgCount(TheCall
, 2))
82 Expr
*Arg0
= TheCall
->getArg(0);
83 Expr
*Arg1
= TheCall
->getArg(1);
85 ExprResult FirstArg
= SemaRef
.DefaultFunctionArrayLvalueConversion(Arg0
);
86 if (FirstArg
.isInvalid())
88 QualType FirstArgType
= FirstArg
.get()->getType();
89 if (!FirstArgType
->isAnyPointerType())
90 return Diag(TheCall
->getBeginLoc(), diag::err_memtag_arg_must_be_pointer
)
91 << "first" << FirstArgType
<< Arg0
->getSourceRange();
93 QualType SecArgType
= Arg1
->getType();
94 if (!SecArgType
->isIntegerType())
95 return Diag(TheCall
->getBeginLoc(), diag::err_memtag_arg_must_be_integer
)
96 << "second" << SecArgType
<< Arg1
->getSourceRange();
97 TheCall
->setType(Context
.IntTy
);
101 if (BuiltinID
== AArch64::BI__builtin_arm_ldg
||
102 BuiltinID
== AArch64::BI__builtin_arm_stg
) {
103 if (SemaRef
.checkArgCount(TheCall
, 1))
105 Expr
*Arg0
= TheCall
->getArg(0);
106 ExprResult FirstArg
= SemaRef
.DefaultFunctionArrayLvalueConversion(Arg0
);
107 if (FirstArg
.isInvalid())
110 QualType FirstArgType
= FirstArg
.get()->getType();
111 if (!FirstArgType
->isAnyPointerType())
112 return Diag(TheCall
->getBeginLoc(), diag::err_memtag_arg_must_be_pointer
)
113 << "first" << FirstArgType
<< Arg0
->getSourceRange();
114 TheCall
->setArg(0, FirstArg
.get());
116 // Derive the return type from the pointer argument.
117 if (BuiltinID
== AArch64::BI__builtin_arm_ldg
)
118 TheCall
->setType(FirstArgType
);
122 if (BuiltinID
== AArch64::BI__builtin_arm_subp
) {
123 Expr
*ArgA
= TheCall
->getArg(0);
124 Expr
*ArgB
= TheCall
->getArg(1);
126 ExprResult ArgExprA
= SemaRef
.DefaultFunctionArrayLvalueConversion(ArgA
);
127 ExprResult ArgExprB
= SemaRef
.DefaultFunctionArrayLvalueConversion(ArgB
);
129 if (ArgExprA
.isInvalid() || ArgExprB
.isInvalid())
132 QualType ArgTypeA
= ArgExprA
.get()->getType();
133 QualType ArgTypeB
= ArgExprB
.get()->getType();
135 auto isNull
= [&](Expr
*E
) -> bool {
136 return E
->isNullPointerConstant(Context
,
137 Expr::NPC_ValueDependentIsNotNull
);
140 // argument should be either a pointer or null
141 if (!ArgTypeA
->isAnyPointerType() && !isNull(ArgA
))
142 return Diag(TheCall
->getBeginLoc(), diag::err_memtag_arg_null_or_pointer
)
143 << "first" << ArgTypeA
<< ArgA
->getSourceRange();
145 if (!ArgTypeB
->isAnyPointerType() && !isNull(ArgB
))
146 return Diag(TheCall
->getBeginLoc(), diag::err_memtag_arg_null_or_pointer
)
147 << "second" << ArgTypeB
<< ArgB
->getSourceRange();
149 // Ensure Pointee types are compatible
150 if (ArgTypeA
->isAnyPointerType() && !isNull(ArgA
) &&
151 ArgTypeB
->isAnyPointerType() && !isNull(ArgB
)) {
152 QualType pointeeA
= ArgTypeA
->getPointeeType();
153 QualType pointeeB
= ArgTypeB
->getPointeeType();
154 if (!Context
.typesAreCompatible(
155 Context
.getCanonicalType(pointeeA
).getUnqualifiedType(),
156 Context
.getCanonicalType(pointeeB
).getUnqualifiedType())) {
157 return Diag(TheCall
->getBeginLoc(),
158 diag::err_typecheck_sub_ptr_compatible
)
159 << ArgTypeA
<< ArgTypeB
<< ArgA
->getSourceRange()
160 << ArgB
->getSourceRange();
164 // at least one argument should be pointer type
165 if (!ArgTypeA
->isAnyPointerType() && !ArgTypeB
->isAnyPointerType())
166 return Diag(TheCall
->getBeginLoc(), diag::err_memtag_any2arg_pointer
)
167 << ArgTypeA
<< ArgTypeB
<< ArgA
->getSourceRange();
169 if (isNull(ArgA
)) // adopt type of the other pointer
171 SemaRef
.ImpCastExprToType(ArgExprA
.get(), ArgTypeB
, CK_NullToPointer
);
175 SemaRef
.ImpCastExprToType(ArgExprB
.get(), ArgTypeA
, CK_NullToPointer
);
177 TheCall
->setArg(0, ArgExprA
.get());
178 TheCall
->setArg(1, ArgExprB
.get());
179 TheCall
->setType(Context
.LongLongTy
);
182 assert(false && "Unhandled ARM MTE intrinsic");
186 /// BuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
187 /// TheCall is an ARM/AArch64 special register string literal.
188 bool SemaARM::BuiltinARMSpecialReg(unsigned BuiltinID
, CallExpr
*TheCall
,
189 int ArgNum
, unsigned ExpectedFieldNum
,
191 bool IsARMBuiltin
= BuiltinID
== ARM::BI__builtin_arm_rsr64
||
192 BuiltinID
== ARM::BI__builtin_arm_wsr64
||
193 BuiltinID
== ARM::BI__builtin_arm_rsr
||
194 BuiltinID
== ARM::BI__builtin_arm_rsrp
||
195 BuiltinID
== ARM::BI__builtin_arm_wsr
||
196 BuiltinID
== ARM::BI__builtin_arm_wsrp
;
197 bool IsAArch64Builtin
= BuiltinID
== AArch64::BI__builtin_arm_rsr64
||
198 BuiltinID
== AArch64::BI__builtin_arm_wsr64
||
199 BuiltinID
== AArch64::BI__builtin_arm_rsr128
||
200 BuiltinID
== AArch64::BI__builtin_arm_wsr128
||
201 BuiltinID
== AArch64::BI__builtin_arm_rsr
||
202 BuiltinID
== AArch64::BI__builtin_arm_rsrp
||
203 BuiltinID
== AArch64::BI__builtin_arm_wsr
||
204 BuiltinID
== AArch64::BI__builtin_arm_wsrp
;
205 assert((IsARMBuiltin
|| IsAArch64Builtin
) && "Unexpected ARM builtin.");
207 // We can't check the value of a dependent argument.
208 Expr
*Arg
= TheCall
->getArg(ArgNum
);
209 if (Arg
->isTypeDependent() || Arg
->isValueDependent())
212 // Check if the argument is a string literal.
213 if (!isa
<StringLiteral
>(Arg
->IgnoreParenImpCasts()))
214 return Diag(TheCall
->getBeginLoc(), diag::err_expr_not_string_literal
)
215 << Arg
->getSourceRange();
217 // Check the type of special register given.
218 StringRef Reg
= cast
<StringLiteral
>(Arg
->IgnoreParenImpCasts())->getString();
219 SmallVector
<StringRef
, 6> Fields
;
220 Reg
.split(Fields
, ":");
222 if (Fields
.size() != ExpectedFieldNum
&& !(AllowName
&& Fields
.size() == 1))
223 return Diag(TheCall
->getBeginLoc(), diag::err_arm_invalid_specialreg
)
224 << Arg
->getSourceRange();
226 // If the string is the name of a register then we cannot check that it is
227 // valid here but if the string is of one the forms described in ACLE then we
228 // can check that the supplied fields are integers and within the valid
230 if (Fields
.size() > 1) {
231 bool FiveFields
= Fields
.size() == 5;
233 bool ValidString
= true;
235 ValidString
&= Fields
[0].starts_with_insensitive("cp") ||
236 Fields
[0].starts_with_insensitive("p");
238 Fields
[0] = Fields
[0].drop_front(
239 Fields
[0].starts_with_insensitive("cp") ? 2 : 1);
241 ValidString
&= Fields
[2].starts_with_insensitive("c");
243 Fields
[2] = Fields
[2].drop_front(1);
246 ValidString
&= Fields
[3].starts_with_insensitive("c");
248 Fields
[3] = Fields
[3].drop_front(1);
252 SmallVector
<int, 5> Ranges
;
254 Ranges
.append({IsAArch64Builtin
? 1 : 15, 7, 15, 15, 7});
256 Ranges
.append({15, 7, 15});
258 for (unsigned i
= 0; i
< Fields
.size(); ++i
) {
260 ValidString
&= !Fields
[i
].getAsInteger(10, IntField
);
261 ValidString
&= (IntField
>= 0 && IntField
<= Ranges
[i
]);
265 return Diag(TheCall
->getBeginLoc(), diag::err_arm_invalid_specialreg
)
266 << Arg
->getSourceRange();
267 } else if (IsAArch64Builtin
&& Fields
.size() == 1) {
268 // This code validates writes to PSTATE registers.
271 if (TheCall
->getNumArgs() != 2)
274 // The 128-bit system register accesses do not touch PSTATE.
275 if (BuiltinID
== AArch64::BI__builtin_arm_rsr128
||
276 BuiltinID
== AArch64::BI__builtin_arm_wsr128
)
279 // These are the named PSTATE accesses using "MSR (immediate)" instructions,
280 // along with the upper limit on the immediates allowed.
281 auto MaxLimit
= llvm::StringSwitch
<std::optional
<unsigned>>(Reg
)
282 .CaseLower("spsel", 15)
283 .CaseLower("daifclr", 15)
284 .CaseLower("daifset", 15)
285 .CaseLower("pan", 15)
286 .CaseLower("uao", 15)
287 .CaseLower("dit", 15)
288 .CaseLower("ssbs", 15)
289 .CaseLower("tco", 15)
290 .CaseLower("allint", 1)
292 .Default(std::nullopt
);
294 // If this is not a named PSTATE, just continue without validating, as this
295 // will be lowered to an "MSR (register)" instruction directly
299 // Here we only allow constants in the range for that pstate, as required by
302 // While clang also accepts the names of system registers in its ACLE
303 // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
304 // as the value written via a register is different to the value used as an
305 // immediate to have the same effect. e.g., for the instruction `msr tco,
306 // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
307 // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
309 // If a programmer wants to codegen the MSR (register) form of `msr tco,
310 // xN`, they can still do so by specifying the register using five
311 // colon-separated numbers in a string.
312 return SemaRef
.BuiltinConstantArgRange(TheCall
, 1, 0, *MaxLimit
);
318 /// getNeonEltType - Return the QualType corresponding to the elements of
319 /// the vector type specified by the NeonTypeFlags. This is used to check
320 /// the pointer arguments for Neon load/store intrinsics.
321 static QualType
getNeonEltType(NeonTypeFlags Flags
, ASTContext
&Context
,
322 bool IsPolyUnsigned
, bool IsInt64Long
) {
323 switch (Flags
.getEltType()) {
324 case NeonTypeFlags::Int8
:
325 return Flags
.isUnsigned() ? Context
.UnsignedCharTy
: Context
.SignedCharTy
;
326 case NeonTypeFlags::Int16
:
327 return Flags
.isUnsigned() ? Context
.UnsignedShortTy
: Context
.ShortTy
;
328 case NeonTypeFlags::Int32
:
329 return Flags
.isUnsigned() ? Context
.UnsignedIntTy
: Context
.IntTy
;
330 case NeonTypeFlags::Int64
:
332 return Flags
.isUnsigned() ? Context
.UnsignedLongTy
: Context
.LongTy
;
334 return Flags
.isUnsigned() ? Context
.UnsignedLongLongTy
335 : Context
.LongLongTy
;
336 case NeonTypeFlags::Poly8
:
337 return IsPolyUnsigned
? Context
.UnsignedCharTy
: Context
.SignedCharTy
;
338 case NeonTypeFlags::Poly16
:
339 return IsPolyUnsigned
? Context
.UnsignedShortTy
: Context
.ShortTy
;
340 case NeonTypeFlags::Poly64
:
342 return Context
.UnsignedLongTy
;
344 return Context
.UnsignedLongLongTy
;
345 case NeonTypeFlags::Poly128
:
347 case NeonTypeFlags::Float16
:
348 return Context
.HalfTy
;
349 case NeonTypeFlags::Float32
:
350 return Context
.FloatTy
;
351 case NeonTypeFlags::Float64
:
352 return Context
.DoubleTy
;
353 case NeonTypeFlags::BFloat16
:
354 return Context
.BFloat16Ty
;
355 case NeonTypeFlags::MFloat8
:
356 return Context
.MFloat8Ty
;
358 llvm_unreachable("Invalid NeonTypeFlag!");
// Bitmask describing which SME state (ZA array / ZT0 register) a builtin
// reads and/or writes. The low two bits encode ZA access, the next two ZT0.
enum ArmSMEState : unsigned {
  ArmNoState = 0,

  ArmInZA = 0b01,
  ArmOutZA = 0b10,
  ArmInOutZA = 0b11,
  ArmZAMask = 0b11,

  ArmInZT0 = 0b01 << 2,
  ArmOutZT0 = 0b10 << 2,
  ArmInOutZT0 = 0b11 << 2,
  ArmZT0Mask = 0b11 << 2
};
375 bool SemaARM::CheckImmediateArg(CallExpr
*TheCall
, unsigned CheckTy
,
376 unsigned ArgIdx
, unsigned EltBitWidth
,
377 unsigned ContainerBitWidth
) {
378 // Function that checks whether the operand (ArgIdx) is an immediate
379 // that is one of a given set of values.
380 auto CheckImmediateInSet
= [&](std::initializer_list
<int64_t> Set
,
381 int ErrDiag
) -> bool {
382 // We can't check the value of a dependent argument.
383 Expr
*Arg
= TheCall
->getArg(ArgIdx
);
384 if (Arg
->isTypeDependent() || Arg
->isValueDependent())
387 // Check constant-ness first.
389 if (SemaRef
.BuiltinConstantArg(TheCall
, ArgIdx
, Imm
))
392 if (std::find(Set
.begin(), Set
.end(), Imm
.getSExtValue()) == Set
.end())
393 return Diag(TheCall
->getBeginLoc(), ErrDiag
) << Arg
->getSourceRange();
397 switch ((ImmCheckType
)CheckTy
) {
398 case ImmCheckType::ImmCheck0_31
:
399 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 0, 31))
402 case ImmCheckType::ImmCheck0_13
:
403 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 0, 13))
406 case ImmCheckType::ImmCheck0_63
:
407 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 0, 63))
410 case ImmCheckType::ImmCheck1_16
:
411 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 1, 16))
414 case ImmCheckType::ImmCheck0_7
:
415 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 0, 7))
418 case ImmCheckType::ImmCheck1_1
:
419 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 1, 1))
422 case ImmCheckType::ImmCheck1_3
:
423 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 1, 3))
426 case ImmCheckType::ImmCheck1_7
:
427 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 1, 7))
430 case ImmCheckType::ImmCheckExtract
:
431 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 0,
432 (2048 / EltBitWidth
) - 1))
435 case ImmCheckType::ImmCheckCvt
:
436 case ImmCheckType::ImmCheckShiftRight
:
437 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 1, EltBitWidth
))
440 case ImmCheckType::ImmCheckShiftRightNarrow
:
441 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 1, EltBitWidth
/ 2))
444 case ImmCheckType::ImmCheckShiftLeft
:
445 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 0, EltBitWidth
- 1))
448 case ImmCheckType::ImmCheckLaneIndex
:
449 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 0,
450 (ContainerBitWidth
/ EltBitWidth
) - 1))
453 case ImmCheckType::ImmCheckLaneIndexCompRotate
:
454 if (SemaRef
.BuiltinConstantArgRange(
455 TheCall
, ArgIdx
, 0, (ContainerBitWidth
/ (2 * EltBitWidth
)) - 1))
458 case ImmCheckType::ImmCheckLaneIndexDot
:
459 if (SemaRef
.BuiltinConstantArgRange(
460 TheCall
, ArgIdx
, 0, (ContainerBitWidth
/ (4 * EltBitWidth
)) - 1))
463 case ImmCheckType::ImmCheckComplexRot90_270
:
464 if (CheckImmediateInSet({90, 270}, diag::err_rotation_argument_to_cadd
))
467 case ImmCheckType::ImmCheckComplexRotAll90
:
468 if (CheckImmediateInSet({0, 90, 180, 270},
469 diag::err_rotation_argument_to_cmla
))
472 case ImmCheckType::ImmCheck0_1
:
473 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 0, 1))
476 case ImmCheckType::ImmCheck0_2
:
477 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 0, 2))
480 case ImmCheckType::ImmCheck0_3
:
481 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 0, 3))
484 case ImmCheckType::ImmCheck0_0
:
485 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 0, 0))
488 case ImmCheckType::ImmCheck0_15
:
489 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 0, 15))
492 case ImmCheckType::ImmCheck0_255
:
493 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 0, 255))
496 case ImmCheckType::ImmCheck1_32
:
497 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 1, 32))
500 case ImmCheckType::ImmCheck1_64
:
501 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 1, 64))
504 case ImmCheckType::ImmCheck2_4_Mul2
:
505 if (SemaRef
.BuiltinConstantArgRange(TheCall
, ArgIdx
, 2, 4) ||
506 SemaRef
.BuiltinConstantArgMultiple(TheCall
, ArgIdx
, 2))
513 bool SemaARM::PerformNeonImmChecks(
515 SmallVectorImpl
<std::tuple
<int, int, int, int>> &ImmChecks
,
517 bool HasError
= false;
519 for (const auto &I
: ImmChecks
) {
520 auto [ArgIdx
, CheckTy
, ElementBitWidth
, VecBitWidth
] = I
;
522 if (OverloadType
>= 0)
523 ElementBitWidth
= NeonTypeFlags(OverloadType
).getEltSizeInBits();
525 HasError
|= CheckImmediateArg(TheCall
, CheckTy
, ArgIdx
, ElementBitWidth
,
532 bool SemaARM::PerformSVEImmChecks(
533 CallExpr
*TheCall
, SmallVectorImpl
<std::tuple
<int, int, int>> &ImmChecks
) {
534 bool HasError
= false;
536 for (const auto &I
: ImmChecks
) {
537 auto [ArgIdx
, CheckTy
, ElementBitWidth
] = I
;
539 CheckImmediateArg(TheCall
, CheckTy
, ArgIdx
, ElementBitWidth
, 128);
545 SemaARM::ArmStreamingType
getArmStreamingFnType(const FunctionDecl
*FD
) {
546 if (FD
->hasAttr
<ArmLocallyStreamingAttr
>())
547 return SemaARM::ArmStreaming
;
548 if (const Type
*Ty
= FD
->getType().getTypePtrOrNull()) {
549 if (const auto *FPT
= Ty
->getAs
<FunctionProtoType
>()) {
550 if (FPT
->getAArch64SMEAttributes() &
551 FunctionType::SME_PStateSMEnabledMask
)
552 return SemaARM::ArmStreaming
;
553 if (FPT
->getAArch64SMEAttributes() &
554 FunctionType::SME_PStateSMCompatibleMask
)
555 return SemaARM::ArmStreamingCompatible
;
558 return SemaARM::ArmNonStreaming
;
561 static bool checkArmStreamingBuiltin(Sema
&S
, CallExpr
*TheCall
,
562 const FunctionDecl
*FD
,
563 SemaARM::ArmStreamingType BuiltinType
,
564 unsigned BuiltinID
) {
565 SemaARM::ArmStreamingType FnType
= getArmStreamingFnType(FD
);
567 // Check if the intrinsic is available in the right mode, i.e.
568 // * When compiling for SME only, the caller must be in streaming mode.
569 // * When compiling for SVE only, the caller must be in non-streaming mode.
570 // * When compiling for both SVE and SME, the caller can be in either mode.
571 if (BuiltinType
== SemaARM::VerifyRuntimeMode
) {
572 llvm::StringMap
<bool> CallerFeatureMapWithoutSVE
;
573 S
.Context
.getFunctionFeatureMap(CallerFeatureMapWithoutSVE
, FD
);
574 CallerFeatureMapWithoutSVE
["sve"] = false;
576 // Avoid emitting diagnostics for a function that can never compile.
577 if (FnType
== SemaARM::ArmStreaming
&& !CallerFeatureMapWithoutSVE
["sme"])
580 llvm::StringMap
<bool> CallerFeatureMapWithoutSME
;
581 S
.Context
.getFunctionFeatureMap(CallerFeatureMapWithoutSME
, FD
);
582 CallerFeatureMapWithoutSME
["sme"] = false;
584 // We know the builtin requires either some combination of SVE flags, or
585 // some combination of SME flags, but we need to figure out which part
586 // of the required features is satisfied by the target features.
588 // For a builtin with target guard 'sve2p1|sme2', if we compile with
589 // '+sve2p1,+sme', then we know that it satisfies the 'sve2p1' part if we
590 // evaluate the features for '+sve2p1,+sme,+nosme'.
592 // Similarly, if we compile with '+sve2,+sme2', then we know it satisfies
593 // the 'sme2' part if we evaluate the features for '+sve2,+sme2,+nosve'.
594 StringRef
BuiltinTargetGuards(
595 S
.Context
.BuiltinInfo
.getRequiredFeatures(BuiltinID
));
596 bool SatisfiesSVE
= Builtin::evaluateRequiredTargetFeatures(
597 BuiltinTargetGuards
, CallerFeatureMapWithoutSME
);
598 bool SatisfiesSME
= Builtin::evaluateRequiredTargetFeatures(
599 BuiltinTargetGuards
, CallerFeatureMapWithoutSVE
);
601 if ((SatisfiesSVE
&& SatisfiesSME
) ||
602 (SatisfiesSVE
&& FnType
== SemaARM::ArmStreamingCompatible
))
604 else if (SatisfiesSVE
)
605 BuiltinType
= SemaARM::ArmNonStreaming
;
606 else if (SatisfiesSME
)
607 BuiltinType
= SemaARM::ArmStreaming
;
609 // This should be diagnosed by CodeGen
613 if (FnType
!= SemaARM::ArmNonStreaming
&&
614 BuiltinType
== SemaARM::ArmNonStreaming
)
615 S
.Diag(TheCall
->getBeginLoc(), diag::err_attribute_arm_sm_incompat_builtin
)
616 << TheCall
->getSourceRange() << "non-streaming";
617 else if (FnType
!= SemaARM::ArmStreaming
&&
618 BuiltinType
== SemaARM::ArmStreaming
)
619 S
.Diag(TheCall
->getBeginLoc(), diag::err_attribute_arm_sm_incompat_builtin
)
620 << TheCall
->getSourceRange() << "streaming";
627 static ArmSMEState
getSMEState(unsigned BuiltinID
) {
631 #define GET_SME_BUILTIN_GET_STATE
632 #include "clang/Basic/arm_sme_builtins_za_state.inc"
633 #undef GET_SME_BUILTIN_GET_STATE
637 bool SemaARM::CheckSMEBuiltinFunctionCall(unsigned BuiltinID
,
639 if (const FunctionDecl
*FD
=
640 SemaRef
.getCurFunctionDecl(/*AllowLambda=*/true)) {
641 std::optional
<ArmStreamingType
> BuiltinType
;
644 #define GET_SME_STREAMING_ATTRS
645 #include "clang/Basic/arm_sme_streaming_attrs.inc"
646 #undef GET_SME_STREAMING_ATTRS
650 checkArmStreamingBuiltin(SemaRef
, TheCall
, FD
, *BuiltinType
, BuiltinID
))
653 if ((getSMEState(BuiltinID
) & ArmZAMask
) && !hasArmZAState(FD
))
654 Diag(TheCall
->getBeginLoc(),
655 diag::warn_attribute_arm_za_builtin_no_za_state
)
656 << TheCall
->getSourceRange();
658 if ((getSMEState(BuiltinID
) & ArmZT0Mask
) && !hasArmZT0State(FD
))
659 Diag(TheCall
->getBeginLoc(),
660 diag::warn_attribute_arm_zt0_builtin_no_zt0_state
)
661 << TheCall
->getSourceRange();
664 // Range check SME intrinsics that take immediate values.
665 SmallVector
<std::tuple
<int, int, int>, 3> ImmChecks
;
670 #define GET_SME_IMMEDIATE_CHECK
671 #include "clang/Basic/arm_sme_sema_rangechecks.inc"
672 #undef GET_SME_IMMEDIATE_CHECK
675 return PerformSVEImmChecks(TheCall
, ImmChecks
);
678 bool SemaARM::CheckSVEBuiltinFunctionCall(unsigned BuiltinID
,
680 if (const FunctionDecl
*FD
=
681 SemaRef
.getCurFunctionDecl(/*AllowLambda=*/true)) {
682 std::optional
<ArmStreamingType
> BuiltinType
;
685 #define GET_SVE_STREAMING_ATTRS
686 #include "clang/Basic/arm_sve_streaming_attrs.inc"
687 #undef GET_SVE_STREAMING_ATTRS
690 checkArmStreamingBuiltin(SemaRef
, TheCall
, FD
, *BuiltinType
, BuiltinID
))
693 // Range check SVE intrinsics that take immediate values.
694 SmallVector
<std::tuple
<int, int, int>, 3> ImmChecks
;
699 #define GET_SVE_IMMEDIATE_CHECK
700 #include "clang/Basic/arm_sve_sema_rangechecks.inc"
701 #undef GET_SVE_IMMEDIATE_CHECK
704 return PerformSVEImmChecks(TheCall
, ImmChecks
);
707 bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo
&TI
,
710 if (const FunctionDecl
*FD
=
711 SemaRef
.getCurFunctionDecl(/*AllowLambda=*/true)) {
716 #define GET_NEON_BUILTINS
717 #define TARGET_BUILTIN(id, ...) case NEON::BI##id:
718 #define BUILTIN(id, ...) case NEON::BI##id:
719 #include "clang/Basic/arm_neon.inc"
720 if (checkArmStreamingBuiltin(SemaRef
, TheCall
, FD
, ArmNonStreaming
,
724 #undef TARGET_BUILTIN
726 #undef GET_NEON_BUILTINS
734 bool HasConstPtr
= false;
736 #define GET_NEON_OVERLOAD_CHECK
737 #include "clang/Basic/arm_fp16.inc"
738 #include "clang/Basic/arm_neon.inc"
739 #undef GET_NEON_OVERLOAD_CHECK
742 // For NEON intrinsics which are overloaded on vector element type, validate
743 // the immediate which specifies which variant to emit.
744 unsigned ImmArg
= TheCall
->getNumArgs() - 1;
746 if (SemaRef
.BuiltinConstantArg(TheCall
, ImmArg
, Result
))
749 TV
= Result
.getLimitedValue(64);
750 if ((TV
> 63) || (mask
& (1ULL << TV
)) == 0)
751 return Diag(TheCall
->getBeginLoc(), diag::err_invalid_neon_type_code
)
752 << TheCall
->getArg(ImmArg
)->getSourceRange();
755 if (PtrArgNum
>= 0) {
756 // Check that pointer arguments have the specified type.
757 Expr
*Arg
= TheCall
->getArg(PtrArgNum
);
758 if (ImplicitCastExpr
*ICE
= dyn_cast
<ImplicitCastExpr
>(Arg
))
759 Arg
= ICE
->getSubExpr();
760 ExprResult RHS
= SemaRef
.DefaultFunctionArrayLvalueConversion(Arg
);
761 QualType RHSTy
= RHS
.get()->getType();
763 llvm::Triple::ArchType Arch
= TI
.getTriple().getArch();
764 bool IsPolyUnsigned
= Arch
== llvm::Triple::aarch64
||
765 Arch
== llvm::Triple::aarch64_32
||
766 Arch
== llvm::Triple::aarch64_be
;
767 bool IsInt64Long
= TI
.getInt64Type() == TargetInfo::SignedLong
;
768 QualType EltTy
= getNeonEltType(NeonTypeFlags(TV
), getASTContext(),
769 IsPolyUnsigned
, IsInt64Long
);
771 EltTy
= EltTy
.withConst();
772 QualType LHSTy
= getASTContext().getPointerType(EltTy
);
773 Sema::AssignConvertType ConvTy
;
774 ConvTy
= SemaRef
.CheckSingleAssignmentConstraints(LHSTy
, RHS
);
777 if (SemaRef
.DiagnoseAssignmentResult(ConvTy
, Arg
->getBeginLoc(), LHSTy
,
779 AssignmentAction::Assigning
))
783 // For NEON intrinsics which take an immediate value as part of the
784 // instruction, range check them here.
785 SmallVector
<std::tuple
<int, int, int, int>, 2> ImmChecks
;
789 #define GET_NEON_IMMEDIATE_CHECK
790 #include "clang/Basic/arm_fp16.inc"
791 #include "clang/Basic/arm_neon.inc"
792 #undef GET_NEON_IMMEDIATE_CHECK
795 return PerformNeonImmChecks(TheCall
, ImmChecks
, TV
);
798 bool SemaARM::CheckMVEBuiltinFunctionCall(unsigned BuiltinID
,
803 #include "clang/Basic/arm_mve_builtin_sema.inc"
807 bool SemaARM::CheckCDEBuiltinFunctionCall(const TargetInfo
&TI
,
814 #include "clang/Basic/arm_cde_builtin_sema.inc"
820 return CheckARMCoprocessorImmediate(TI
, TheCall
->getArg(0), /*WantCDE*/ true);
823 bool SemaARM::CheckARMCoprocessorImmediate(const TargetInfo
&TI
,
824 const Expr
*CoprocArg
,
826 ASTContext
&Context
= getASTContext();
827 if (SemaRef
.isConstantEvaluatedContext())
830 // We can't check the value of a dependent argument.
831 if (CoprocArg
->isTypeDependent() || CoprocArg
->isValueDependent())
834 llvm::APSInt CoprocNoAP
= *CoprocArg
->getIntegerConstantExpr(Context
);
835 int64_t CoprocNo
= CoprocNoAP
.getExtValue();
836 assert(CoprocNo
>= 0 && "Coprocessor immediate must be non-negative");
838 uint32_t CDECoprocMask
= TI
.getARMCDECoprocMask();
839 bool IsCDECoproc
= CoprocNo
<= 7 && (CDECoprocMask
& (1 << CoprocNo
));
841 if (IsCDECoproc
!= WantCDE
)
842 return Diag(CoprocArg
->getBeginLoc(), diag::err_arm_invalid_coproc
)
843 << (int)CoprocNo
<< (int)WantCDE
<< CoprocArg
->getSourceRange();
848 bool SemaARM::CheckARMBuiltinExclusiveCall(unsigned BuiltinID
,
851 assert((BuiltinID
== ARM::BI__builtin_arm_ldrex
||
852 BuiltinID
== ARM::BI__builtin_arm_ldaex
||
853 BuiltinID
== ARM::BI__builtin_arm_strex
||
854 BuiltinID
== ARM::BI__builtin_arm_stlex
||
855 BuiltinID
== AArch64::BI__builtin_arm_ldrex
||
856 BuiltinID
== AArch64::BI__builtin_arm_ldaex
||
857 BuiltinID
== AArch64::BI__builtin_arm_strex
||
858 BuiltinID
== AArch64::BI__builtin_arm_stlex
) &&
859 "unexpected ARM builtin");
860 bool IsLdrex
= BuiltinID
== ARM::BI__builtin_arm_ldrex
||
861 BuiltinID
== ARM::BI__builtin_arm_ldaex
||
862 BuiltinID
== AArch64::BI__builtin_arm_ldrex
||
863 BuiltinID
== AArch64::BI__builtin_arm_ldaex
;
865 ASTContext
&Context
= getASTContext();
867 cast
<DeclRefExpr
>(TheCall
->getCallee()->IgnoreParenCasts());
869 // Ensure that we have the proper number of arguments.
870 if (SemaRef
.checkArgCount(TheCall
, IsLdrex
? 1 : 2))
873 // Inspect the pointer argument of the atomic builtin. This should always be
874 // a pointer type, whose element is an integral scalar or pointer type.
875 // Because it is a pointer type, we don't have to worry about any implicit
877 Expr
*PointerArg
= TheCall
->getArg(IsLdrex
? 0 : 1);
878 ExprResult PointerArgRes
=
879 SemaRef
.DefaultFunctionArrayLvalueConversion(PointerArg
);
880 if (PointerArgRes
.isInvalid())
882 PointerArg
= PointerArgRes
.get();
884 const PointerType
*pointerType
= PointerArg
->getType()->getAs
<PointerType
>();
886 Diag(DRE
->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer
)
887 << PointerArg
->getType() << 0 << PointerArg
->getSourceRange();
891 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
892 // task is to insert the appropriate casts into the AST. First work out just
893 // what the appropriate type is.
894 QualType ValType
= pointerType
->getPointeeType();
895 QualType AddrType
= ValType
.getUnqualifiedType().withVolatile();
899 // Issue a warning if the cast is dodgy.
900 CastKind CastNeeded
= CK_NoOp
;
901 if (!AddrType
.isAtLeastAsQualifiedAs(ValType
, getASTContext())) {
902 CastNeeded
= CK_BitCast
;
903 Diag(DRE
->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers
)
904 << PointerArg
->getType() << Context
.getPointerType(AddrType
)
905 << AssignmentAction::Passing
<< PointerArg
->getSourceRange();
908 // Finally, do the cast and replace the argument with the corrected version.
909 AddrType
= Context
.getPointerType(AddrType
);
910 PointerArgRes
= SemaRef
.ImpCastExprToType(PointerArg
, AddrType
, CastNeeded
);
911 if (PointerArgRes
.isInvalid())
913 PointerArg
= PointerArgRes
.get();
915 TheCall
->setArg(IsLdrex
? 0 : 1, PointerArg
);
917 // In general, we allow ints, floats and pointers to be loaded and stored.
918 if (!ValType
->isIntegerType() && !ValType
->isAnyPointerType() &&
919 !ValType
->isBlockPointerType() && !ValType
->isFloatingType()) {
920 Diag(DRE
->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr
)
921 << PointerArg
->getType() << 0 << PointerArg
->getSourceRange();
925 // But ARM doesn't have instructions to deal with 128-bit versions.
926 if (Context
.getTypeSize(ValType
) > MaxWidth
) {
927 assert(MaxWidth
== 64 && "Diagnostic unexpectedly inaccurate");
928 Diag(DRE
->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size
)
929 << PointerArg
->getType() << PointerArg
->getSourceRange();
933 switch (ValType
.getObjCLifetime()) {
934 case Qualifiers::OCL_None
:
935 case Qualifiers::OCL_ExplicitNone
:
939 case Qualifiers::OCL_Weak
:
940 case Qualifiers::OCL_Strong
:
941 case Qualifiers::OCL_Autoreleasing
:
942 Diag(DRE
->getBeginLoc(), diag::err_arc_atomic_ownership
)
943 << ValType
<< PointerArg
->getSourceRange();
948 TheCall
->setType(ValType
);
952 // Initialize the argument to be stored.
953 ExprResult ValArg
= TheCall
->getArg(0);
954 InitializedEntity Entity
= InitializedEntity::InitializeParameter(
955 Context
, ValType
, /*consume*/ false);
956 ValArg
= SemaRef
.PerformCopyInitialization(Entity
, SourceLocation(), ValArg
);
957 if (ValArg
.isInvalid())
959 TheCall
->setArg(0, ValArg
.get());
961 // __builtin_arm_strex always returns an int. It's marked as such in the .def,
962 // but the custom checker bypasses all default analysis.
963 TheCall
->setType(Context
.IntTy
);
967 bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo
&TI
,
970 if (BuiltinID
== ARM::BI__builtin_arm_ldrex
||
971 BuiltinID
== ARM::BI__builtin_arm_ldaex
||
972 BuiltinID
== ARM::BI__builtin_arm_strex
||
973 BuiltinID
== ARM::BI__builtin_arm_stlex
) {
974 return CheckARMBuiltinExclusiveCall(BuiltinID
, TheCall
, 64);
977 if (BuiltinID
== ARM::BI__builtin_arm_prefetch
) {
978 return SemaRef
.BuiltinConstantArgRange(TheCall
, 1, 0, 1) ||
979 SemaRef
.BuiltinConstantArgRange(TheCall
, 2, 0, 1);
982 if (BuiltinID
== ARM::BI__builtin_arm_rsr64
||
983 BuiltinID
== ARM::BI__builtin_arm_wsr64
)
984 return BuiltinARMSpecialReg(BuiltinID
, TheCall
, 0, 3, false);
986 if (BuiltinID
== ARM::BI__builtin_arm_rsr
||
987 BuiltinID
== ARM::BI__builtin_arm_rsrp
||
988 BuiltinID
== ARM::BI__builtin_arm_wsr
||
989 BuiltinID
== ARM::BI__builtin_arm_wsrp
)
990 return BuiltinARMSpecialReg(BuiltinID
, TheCall
, 0, 5, true);
992 if (CheckNeonBuiltinFunctionCall(TI
, BuiltinID
, TheCall
))
994 if (CheckMVEBuiltinFunctionCall(BuiltinID
, TheCall
))
996 if (CheckCDEBuiltinFunctionCall(TI
, BuiltinID
, TheCall
))
999 // For intrinsics which take an immediate value as part of the instruction,
1000 // range check them here.
1001 // FIXME: VFP Intrinsics should error if VFP not present.
1002 switch (BuiltinID
) {
1005 case ARM::BI__builtin_arm_ssat
:
1006 return SemaRef
.BuiltinConstantArgRange(TheCall
, 1, 1, 32);
1007 case ARM::BI__builtin_arm_usat
:
1008 return SemaRef
.BuiltinConstantArgRange(TheCall
, 1, 0, 31);
1009 case ARM::BI__builtin_arm_ssat16
:
1010 return SemaRef
.BuiltinConstantArgRange(TheCall
, 1, 1, 16);
1011 case ARM::BI__builtin_arm_usat16
:
1012 return SemaRef
.BuiltinConstantArgRange(TheCall
, 1, 0, 15);
1013 case ARM::BI__builtin_arm_vcvtr_f
:
1014 case ARM::BI__builtin_arm_vcvtr_d
:
1015 return SemaRef
.BuiltinConstantArgRange(TheCall
, 1, 0, 1);
1016 case ARM::BI__builtin_arm_dmb
:
1017 case ARM::BI__builtin_arm_dsb
:
1018 case ARM::BI__builtin_arm_isb
:
1019 case ARM::BI__builtin_arm_dbg
:
1020 return SemaRef
.BuiltinConstantArgRange(TheCall
, 0, 0, 15);
1021 case ARM::BI__builtin_arm_cdp
:
1022 case ARM::BI__builtin_arm_cdp2
:
1023 case ARM::BI__builtin_arm_mcr
:
1024 case ARM::BI__builtin_arm_mcr2
:
1025 case ARM::BI__builtin_arm_mrc
:
1026 case ARM::BI__builtin_arm_mrc2
:
1027 case ARM::BI__builtin_arm_mcrr
:
1028 case ARM::BI__builtin_arm_mcrr2
:
1029 case ARM::BI__builtin_arm_mrrc
:
1030 case ARM::BI__builtin_arm_mrrc2
:
1031 case ARM::BI__builtin_arm_ldc
:
1032 case ARM::BI__builtin_arm_ldcl
:
1033 case ARM::BI__builtin_arm_ldc2
:
1034 case ARM::BI__builtin_arm_ldc2l
:
1035 case ARM::BI__builtin_arm_stc
:
1036 case ARM::BI__builtin_arm_stcl
:
1037 case ARM::BI__builtin_arm_stc2
:
1038 case ARM::BI__builtin_arm_stc2l
:
1039 return SemaRef
.BuiltinConstantArgRange(TheCall
, 0, 0, 15) ||
1040 CheckARMCoprocessorImmediate(TI
, TheCall
->getArg(0),
1045 bool SemaARM::CheckAArch64BuiltinFunctionCall(const TargetInfo
&TI
,
1047 CallExpr
*TheCall
) {
1048 if (BuiltinID
== AArch64::BI__builtin_arm_ldrex
||
1049 BuiltinID
== AArch64::BI__builtin_arm_ldaex
||
1050 BuiltinID
== AArch64::BI__builtin_arm_strex
||
1051 BuiltinID
== AArch64::BI__builtin_arm_stlex
) {
1052 return CheckARMBuiltinExclusiveCall(BuiltinID
, TheCall
, 128);
1055 if (BuiltinID
== AArch64::BI__builtin_arm_prefetch
) {
1056 return SemaRef
.BuiltinConstantArgRange(TheCall
, 1, 0, 1) ||
1057 SemaRef
.BuiltinConstantArgRange(TheCall
, 2, 0, 3) ||
1058 SemaRef
.BuiltinConstantArgRange(TheCall
, 3, 0, 1) ||
1059 SemaRef
.BuiltinConstantArgRange(TheCall
, 4, 0, 1);
1062 if (BuiltinID
== AArch64::BI__builtin_arm_rsr64
||
1063 BuiltinID
== AArch64::BI__builtin_arm_wsr64
||
1064 BuiltinID
== AArch64::BI__builtin_arm_rsr128
||
1065 BuiltinID
== AArch64::BI__builtin_arm_wsr128
)
1066 return BuiltinARMSpecialReg(BuiltinID
, TheCall
, 0, 5, true);
1068 // Memory Tagging Extensions (MTE) Intrinsics
1069 if (BuiltinID
== AArch64::BI__builtin_arm_irg
||
1070 BuiltinID
== AArch64::BI__builtin_arm_addg
||
1071 BuiltinID
== AArch64::BI__builtin_arm_gmi
||
1072 BuiltinID
== AArch64::BI__builtin_arm_ldg
||
1073 BuiltinID
== AArch64::BI__builtin_arm_stg
||
1074 BuiltinID
== AArch64::BI__builtin_arm_subp
) {
1075 return BuiltinARMMemoryTaggingCall(BuiltinID
, TheCall
);
1078 if (BuiltinID
== AArch64::BI__builtin_arm_rsr
||
1079 BuiltinID
== AArch64::BI__builtin_arm_rsrp
||
1080 BuiltinID
== AArch64::BI__builtin_arm_wsr
||
1081 BuiltinID
== AArch64::BI__builtin_arm_wsrp
)
1082 return BuiltinARMSpecialReg(BuiltinID
, TheCall
, 0, 5, true);
1084 // Only check the valid encoding range. Any constant in this range would be
1085 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
1086 // an exception for incorrect registers. This matches MSVC behavior.
1087 if (BuiltinID
== AArch64::BI_ReadStatusReg
||
1088 BuiltinID
== AArch64::BI_WriteStatusReg
)
1089 return SemaRef
.BuiltinConstantArgRange(TheCall
, 0, 0, 0x7fff);
1091 if (BuiltinID
== AArch64::BI__getReg
)
1092 return SemaRef
.BuiltinConstantArgRange(TheCall
, 0, 0, 31);
1094 if (BuiltinID
== AArch64::BI__break
)
1095 return SemaRef
.BuiltinConstantArgRange(TheCall
, 0, 0, 0xffff);
1097 if (BuiltinID
== AArch64::BI__hlt
)
1098 return SemaRef
.BuiltinConstantArgRange(TheCall
, 0, 0, 0xffff);
1100 if (CheckNeonBuiltinFunctionCall(TI
, BuiltinID
, TheCall
))
1103 if (CheckSVEBuiltinFunctionCall(BuiltinID
, TheCall
))
1106 if (CheckSMEBuiltinFunctionCall(BuiltinID
, TheCall
))
1109 // For intrinsics which take an immediate value as part of the instruction,
1110 // range check them here.
1111 unsigned i
= 0, l
= 0, u
= 0;
1112 switch (BuiltinID
) {
1113 default: return false;
1114 case AArch64::BI__builtin_arm_dmb
:
1115 case AArch64::BI__builtin_arm_dsb
:
1116 case AArch64::BI__builtin_arm_isb
: l
= 0; u
= 15; break;
1117 case AArch64::BI__builtin_arm_tcancel
: l
= 0; u
= 65535; break;
1120 return SemaRef
.BuiltinConstantArgRange(TheCall
, i
, l
, u
+ l
);
namespace {
/// Maps a builtin ID to the offsets of its full (and optional short) alias
/// names inside a target-generated string table (see BuiltinAliasValid, which
/// binary-searches arrays of these sorted by Id).
struct IntrinToName {
  uint32_t Id;        // Builtin ID (sort key for llvm::lower_bound).
  int32_t FullName;   // Offset of the full intrinsic name in IntrinNames.
  int32_t ShortName;  // Offset of the short alias, or -1 if there is none.
};
} // unnamed namespace
1131 static bool BuiltinAliasValid(unsigned BuiltinID
, StringRef AliasName
,
1132 ArrayRef
<IntrinToName
> Map
,
1133 const char *IntrinNames
) {
1134 AliasName
.consume_front("__arm_");
1135 const IntrinToName
*It
=
1136 llvm::lower_bound(Map
, BuiltinID
, [](const IntrinToName
&L
, unsigned Id
) {
1139 if (It
== Map
.end() || It
->Id
!= BuiltinID
)
1141 StringRef
FullName(&IntrinNames
[It
->FullName
]);
1142 if (AliasName
== FullName
)
1144 if (It
->ShortName
== -1)
1146 StringRef
ShortName(&IntrinNames
[It
->ShortName
]);
1147 return AliasName
== ShortName
;
1150 bool SemaARM::MveAliasValid(unsigned BuiltinID
, StringRef AliasName
) {
1151 #include "clang/Basic/arm_mve_builtin_aliases.inc"
1152 // The included file defines:
1153 // - ArrayRef<IntrinToName> Map
1154 // - const char IntrinNames[]
1155 return BuiltinAliasValid(BuiltinID
, AliasName
, Map
, IntrinNames
);
1158 bool SemaARM::CdeAliasValid(unsigned BuiltinID
, StringRef AliasName
) {
1159 #include "clang/Basic/arm_cde_builtin_aliases.inc"
1160 return BuiltinAliasValid(BuiltinID
, AliasName
, Map
, IntrinNames
);
1163 bool SemaARM::SveAliasValid(unsigned BuiltinID
, StringRef AliasName
) {
1164 if (getASTContext().BuiltinInfo
.isAuxBuiltinID(BuiltinID
))
1165 BuiltinID
= getASTContext().BuiltinInfo
.getAuxBuiltinID(BuiltinID
);
1166 return BuiltinID
>= AArch64::FirstSVEBuiltin
&&
1167 BuiltinID
<= AArch64::LastSVEBuiltin
;
1170 bool SemaARM::SmeAliasValid(unsigned BuiltinID
, StringRef AliasName
) {
1171 if (getASTContext().BuiltinInfo
.isAuxBuiltinID(BuiltinID
))
1172 BuiltinID
= getASTContext().BuiltinInfo
.getAuxBuiltinID(BuiltinID
);
1173 return BuiltinID
>= AArch64::FirstSMEBuiltin
&&
1174 BuiltinID
<= AArch64::LastSMEBuiltin
;
1177 void SemaARM::handleBuiltinAliasAttr(Decl
*D
, const ParsedAttr
&AL
) {
1178 ASTContext
&Context
= getASTContext();
1179 if (!AL
.isArgIdent(0)) {
1180 Diag(AL
.getLoc(), diag::err_attribute_argument_n_type
)
1181 << AL
<< 1 << AANT_ArgumentIdentifier
;
1185 IdentifierInfo
*Ident
= AL
.getArgAsIdent(0)->Ident
;
1186 unsigned BuiltinID
= Ident
->getBuiltinID();
1187 StringRef AliasName
= cast
<FunctionDecl
>(D
)->getIdentifier()->getName();
1189 bool IsAArch64
= Context
.getTargetInfo().getTriple().isAArch64();
1190 if ((IsAArch64
&& !SveAliasValid(BuiltinID
, AliasName
) &&
1191 !SmeAliasValid(BuiltinID
, AliasName
)) ||
1192 (!IsAArch64
&& !MveAliasValid(BuiltinID
, AliasName
) &&
1193 !CdeAliasValid(BuiltinID
, AliasName
))) {
1194 Diag(AL
.getLoc(), diag::err_attribute_arm_builtin_alias
);
1198 D
->addAttr(::new (Context
) ArmBuiltinAliasAttr(Context
, AL
, Ident
));
1201 static bool checkNewAttrMutualExclusion(
1202 Sema
&S
, const ParsedAttr
&AL
, const FunctionProtoType
*FPT
,
1203 FunctionType::ArmStateValue CurrentState
, StringRef StateName
) {
1204 auto CheckForIncompatibleAttr
=
1205 [&](FunctionType::ArmStateValue IncompatibleState
,
1206 StringRef IncompatibleStateName
) {
1207 if (CurrentState
== IncompatibleState
) {
1208 S
.Diag(AL
.getLoc(), diag::err_attributes_are_not_compatible
)
1209 << (std::string("'__arm_new(\"") + StateName
.str() + "\")'")
1210 << (std::string("'") + IncompatibleStateName
.str() + "(\"" +
1211 StateName
.str() + "\")'")
1217 CheckForIncompatibleAttr(FunctionType::ARM_In
, "__arm_in");
1218 CheckForIncompatibleAttr(FunctionType::ARM_Out
, "__arm_out");
1219 CheckForIncompatibleAttr(FunctionType::ARM_InOut
, "__arm_inout");
1220 CheckForIncompatibleAttr(FunctionType::ARM_Preserves
, "__arm_preserves");
1221 return AL
.isInvalid();
1224 void SemaARM::handleNewAttr(Decl
*D
, const ParsedAttr
&AL
) {
1225 if (!AL
.getNumArgs()) {
1226 Diag(AL
.getLoc(), diag::err_missing_arm_state
) << AL
;
1231 std::vector
<StringRef
> NewState
;
1232 if (const auto *ExistingAttr
= D
->getAttr
<ArmNewAttr
>()) {
1233 for (StringRef S
: ExistingAttr
->newArgs())
1234 NewState
.push_back(S
);
1238 bool HasZT0
= false;
1239 for (unsigned I
= 0, E
= AL
.getNumArgs(); I
!= E
; ++I
) {
1240 StringRef StateName
;
1241 SourceLocation LiteralLoc
;
1242 if (!SemaRef
.checkStringLiteralArgumentAttr(AL
, I
, StateName
, &LiteralLoc
))
1245 if (StateName
== "za")
1247 else if (StateName
== "zt0")
1250 Diag(LiteralLoc
, diag::err_unknown_arm_state
) << StateName
;
1255 if (!llvm::is_contained(NewState
, StateName
)) // Avoid adding duplicates.
1256 NewState
.push_back(StateName
);
1259 if (auto *FPT
= dyn_cast
<FunctionProtoType
>(D
->getFunctionType())) {
1260 FunctionType::ArmStateValue ZAState
=
1261 FunctionType::getArmZAState(FPT
->getAArch64SMEAttributes());
1262 if (HasZA
&& ZAState
!= FunctionType::ARM_None
&&
1263 checkNewAttrMutualExclusion(SemaRef
, AL
, FPT
, ZAState
, "za"))
1265 FunctionType::ArmStateValue ZT0State
=
1266 FunctionType::getArmZT0State(FPT
->getAArch64SMEAttributes());
1267 if (HasZT0
&& ZT0State
!= FunctionType::ARM_None
&&
1268 checkNewAttrMutualExclusion(SemaRef
, AL
, FPT
, ZT0State
, "zt0"))
1272 D
->dropAttr
<ArmNewAttr
>();
1273 D
->addAttr(::new (getASTContext()) ArmNewAttr(
1274 getASTContext(), AL
, NewState
.data(), NewState
.size()));
1277 void SemaARM::handleCmseNSEntryAttr(Decl
*D
, const ParsedAttr
&AL
) {
1278 if (getLangOpts().CPlusPlus
&& !D
->getDeclContext()->isExternCContext()) {
1279 Diag(AL
.getLoc(), diag::err_attribute_not_clinkage
) << AL
;
1283 const auto *FD
= cast
<FunctionDecl
>(D
);
1284 if (!FD
->isExternallyVisible()) {
1285 Diag(AL
.getLoc(), diag::warn_attribute_cmse_entry_static
);
1289 D
->addAttr(::new (getASTContext()) CmseNSEntryAttr(getASTContext(), AL
));
1292 void SemaARM::handleInterruptAttr(Decl
*D
, const ParsedAttr
&AL
) {
1293 // Check the attribute arguments.
1294 if (AL
.getNumArgs() > 1) {
1295 Diag(AL
.getLoc(), diag::err_attribute_too_many_arguments
) << AL
<< 1;
1300 SourceLocation ArgLoc
;
1302 if (AL
.getNumArgs() == 0)
1304 else if (!SemaRef
.checkStringLiteralArgumentAttr(AL
, 0, Str
, &ArgLoc
))
1307 ARMInterruptAttr::InterruptType Kind
;
1308 if (!ARMInterruptAttr::ConvertStrToInterruptType(Str
, Kind
)) {
1309 Diag(AL
.getLoc(), diag::warn_attribute_type_not_supported
)
1310 << AL
<< Str
<< ArgLoc
;
1314 const TargetInfo
&TI
= getASTContext().getTargetInfo();
1315 if (TI
.hasFeature("vfp"))
1316 Diag(D
->getLocation(), diag::warn_arm_interrupt_vfp_clobber
);
1318 D
->addAttr(::new (getASTContext())
1319 ARMInterruptAttr(getASTContext(), AL
, Kind
));
1322 // Check if the function definition uses any AArch64 SME features without
1323 // having the '+sme' feature enabled and warn user if sme locally streaming
1324 // function returns or uses arguments with VL-based types.
1325 void SemaARM::CheckSMEFunctionDefAttributes(const FunctionDecl
*FD
) {
1326 const auto *Attr
= FD
->getAttr
<ArmNewAttr
>();
1327 bool UsesSM
= FD
->hasAttr
<ArmLocallyStreamingAttr
>();
1328 bool UsesZA
= Attr
&& Attr
->isNewZA();
1329 bool UsesZT0
= Attr
&& Attr
->isNewZT0();
1331 if (UsesZA
|| UsesZT0
) {
1332 if (const auto *FPT
= FD
->getType()->getAs
<FunctionProtoType
>()) {
1333 FunctionProtoType::ExtProtoInfo EPI
= FPT
->getExtProtoInfo();
1334 if (EPI
.AArch64SMEAttributes
& FunctionType::SME_AgnosticZAStateMask
)
1335 Diag(FD
->getLocation(), diag::err_sme_unsupported_agnostic_new
);
1339 if (FD
->hasAttr
<ArmLocallyStreamingAttr
>()) {
1340 if (FD
->getReturnType()->isSizelessVectorType())
1341 Diag(FD
->getLocation(),
1342 diag::warn_sme_locally_streaming_has_vl_args_returns
)
1344 if (llvm::any_of(FD
->parameters(), [](ParmVarDecl
*P
) {
1345 return P
->getOriginalType()->isSizelessVectorType();
1347 Diag(FD
->getLocation(),
1348 diag::warn_sme_locally_streaming_has_vl_args_returns
)
1351 if (const auto *FPT
= FD
->getType()->getAs
<FunctionProtoType
>()) {
1352 FunctionProtoType::ExtProtoInfo EPI
= FPT
->getExtProtoInfo();
1353 UsesSM
|= EPI
.AArch64SMEAttributes
& FunctionType::SME_PStateSMEnabledMask
;
1354 UsesZA
|= FunctionType::getArmZAState(EPI
.AArch64SMEAttributes
) !=
1355 FunctionType::ARM_None
;
1356 UsesZT0
|= FunctionType::getArmZT0State(EPI
.AArch64SMEAttributes
) !=
1357 FunctionType::ARM_None
;
1360 ASTContext
&Context
= getASTContext();
1361 if (UsesSM
|| UsesZA
) {
1362 llvm::StringMap
<bool> FeatureMap
;
1363 Context
.getFunctionFeatureMap(FeatureMap
, FD
);
1364 if (!FeatureMap
.contains("sme")) {
1366 Diag(FD
->getLocation(),
1367 diag::err_sme_definition_using_sm_in_non_sme_target
);
1369 Diag(FD
->getLocation(),
1370 diag::err_sme_definition_using_za_in_non_sme_target
);
1374 llvm::StringMap
<bool> FeatureMap
;
1375 Context
.getFunctionFeatureMap(FeatureMap
, FD
);
1376 if (!FeatureMap
.contains("sme2")) {
1377 Diag(FD
->getLocation(),
1378 diag::err_sme_definition_using_zt0_in_non_sme2_target
);
1383 } // namespace clang