1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 #include "MCTargetDesc/AArch64AddressingModes.h"
10 #include "MCTargetDesc/AArch64MCExpr.h"
11 #include "MCTargetDesc/AArch64MCTargetDesc.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "TargetInfo/AArch64TargetInfo.h"
14 #include "AArch64InstrInfo.h"
15 #include "Utils/AArch64BaseInfo.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/ADT/StringMap.h"
23 #include "llvm/ADT/StringRef.h"
24 #include "llvm/ADT/StringSwitch.h"
25 #include "llvm/ADT/Twine.h"
26 #include "llvm/MC/MCContext.h"
27 #include "llvm/MC/MCExpr.h"
28 #include "llvm/MC/MCInst.h"
29 #include "llvm/MC/MCLinkerOptimizationHint.h"
30 #include "llvm/MC/MCObjectFileInfo.h"
31 #include "llvm/MC/MCParser/MCAsmLexer.h"
32 #include "llvm/MC/MCParser/MCAsmParser.h"
33 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
34 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
35 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
36 #include "llvm/MC/MCRegisterInfo.h"
37 #include "llvm/MC/MCStreamer.h"
38 #include "llvm/MC/MCSubtargetInfo.h"
39 #include "llvm/MC/MCSymbol.h"
40 #include "llvm/MC/MCTargetOptions.h"
41 #include "llvm/MC/SubtargetFeature.h"
42 #include "llvm/MC/MCValue.h"
43 #include "llvm/Support/Casting.h"
44 #include "llvm/Support/Compiler.h"
45 #include "llvm/Support/ErrorHandling.h"
46 #include "llvm/Support/MathExtras.h"
47 #include "llvm/Support/SMLoc.h"
48 #include "llvm/Support/TargetParser.h"
49 #include "llvm/Support/TargetRegistry.h"
50 #include "llvm/Support/raw_ostream.h"
71 enum RegConstraintEqualityTy
{
77 class AArch64AsmParser
: public MCTargetAsmParser
{
79 StringRef Mnemonic
; ///< Instruction mnemonic.
81 // Map of register aliases registers via the .req directive.
82 StringMap
<std::pair
<RegKind
, unsigned>> RegisterReqs
;
86 static PrefixInfo
CreateFromInst(const MCInst
&Inst
, uint64_t TSFlags
) {
88 switch (Inst
.getOpcode()) {
89 case AArch64::MOVPRFX_ZZ
:
91 Prefix
.Dst
= Inst
.getOperand(0).getReg();
93 case AArch64::MOVPRFX_ZPmZ_B
:
94 case AArch64::MOVPRFX_ZPmZ_H
:
95 case AArch64::MOVPRFX_ZPmZ_S
:
96 case AArch64::MOVPRFX_ZPmZ_D
:
98 Prefix
.Predicated
= true;
99 Prefix
.ElementSize
= TSFlags
& AArch64::ElementSizeMask
;
100 assert(Prefix
.ElementSize
!= AArch64::ElementSizeNone
&&
101 "No destructive element size set for movprfx");
102 Prefix
.Dst
= Inst
.getOperand(0).getReg();
103 Prefix
.Pg
= Inst
.getOperand(2).getReg();
105 case AArch64::MOVPRFX_ZPzZ_B
:
106 case AArch64::MOVPRFX_ZPzZ_H
:
107 case AArch64::MOVPRFX_ZPzZ_S
:
108 case AArch64::MOVPRFX_ZPzZ_D
:
109 Prefix
.Active
= true;
110 Prefix
.Predicated
= true;
111 Prefix
.ElementSize
= TSFlags
& AArch64::ElementSizeMask
;
112 assert(Prefix
.ElementSize
!= AArch64::ElementSizeNone
&&
113 "No destructive element size set for movprfx");
114 Prefix
.Dst
= Inst
.getOperand(0).getReg();
115 Prefix
.Pg
= Inst
.getOperand(1).getReg();
124 PrefixInfo() : Active(false), Predicated(false) {}
125 bool isActive() const { return Active
; }
126 bool isPredicated() const { return Predicated
; }
127 unsigned getElementSize() const {
131 unsigned getDstReg() const { return Dst
; }
132 unsigned getPgReg() const {
140 unsigned ElementSize
;
145 AArch64TargetStreamer
&getTargetStreamer() {
146 MCTargetStreamer
&TS
= *getParser().getStreamer().getTargetStreamer();
147 return static_cast<AArch64TargetStreamer
&>(TS
);
150 SMLoc
getLoc() const { return getParser().getTok().getLoc(); }
152 bool parseSysAlias(StringRef Name
, SMLoc NameLoc
, OperandVector
&Operands
);
153 void createSysAlias(uint16_t Encoding
, OperandVector
&Operands
, SMLoc S
);
154 AArch64CC::CondCode
parseCondCodeString(StringRef Cond
);
155 bool parseCondCode(OperandVector
&Operands
, bool invertCondCode
);
156 unsigned matchRegisterNameAlias(StringRef Name
, RegKind Kind
);
157 bool parseRegister(OperandVector
&Operands
);
158 bool parseSymbolicImmVal(const MCExpr
*&ImmVal
);
159 bool parseNeonVectorList(OperandVector
&Operands
);
160 bool parseOptionalMulOperand(OperandVector
&Operands
);
161 bool parseOperand(OperandVector
&Operands
, bool isCondCode
,
162 bool invertCondCode
);
164 bool showMatchError(SMLoc Loc
, unsigned ErrCode
, uint64_t ErrorInfo
,
165 OperandVector
&Operands
);
167 bool parseDirectiveArch(SMLoc L
);
168 bool parseDirectiveArchExtension(SMLoc L
);
169 bool parseDirectiveCPU(SMLoc L
);
170 bool parseDirectiveInst(SMLoc L
);
172 bool parseDirectiveTLSDescCall(SMLoc L
);
174 bool parseDirectiveLOH(StringRef LOH
, SMLoc L
);
175 bool parseDirectiveLtorg(SMLoc L
);
177 bool parseDirectiveReq(StringRef Name
, SMLoc L
);
178 bool parseDirectiveUnreq(SMLoc L
);
179 bool parseDirectiveCFINegateRAState();
180 bool parseDirectiveCFIBKeyFrame();
182 bool validateInstruction(MCInst
&Inst
, SMLoc
&IDLoc
,
183 SmallVectorImpl
<SMLoc
> &Loc
);
184 bool MatchAndEmitInstruction(SMLoc IDLoc
, unsigned &Opcode
,
185 OperandVector
&Operands
, MCStreamer
&Out
,
187 bool MatchingInlineAsm
) override
;
188 /// @name Auto-generated Match Functions
191 #define GET_ASSEMBLER_HEADER
192 #include "AArch64GenAsmMatcher.inc"
196 OperandMatchResultTy
tryParseScalarRegister(unsigned &Reg
);
197 OperandMatchResultTy
tryParseVectorRegister(unsigned &Reg
, StringRef
&Kind
,
199 OperandMatchResultTy
tryParseOptionalShiftExtend(OperandVector
&Operands
);
200 OperandMatchResultTy
tryParseBarrierOperand(OperandVector
&Operands
);
201 OperandMatchResultTy
tryParseMRSSystemRegister(OperandVector
&Operands
);
202 OperandMatchResultTy
tryParseSysReg(OperandVector
&Operands
);
203 OperandMatchResultTy
tryParseSysCROperand(OperandVector
&Operands
);
204 template <bool IsSVEPrefetch
= false>
205 OperandMatchResultTy
tryParsePrefetch(OperandVector
&Operands
);
206 OperandMatchResultTy
tryParsePSBHint(OperandVector
&Operands
);
207 OperandMatchResultTy
tryParseBTIHint(OperandVector
&Operands
);
208 OperandMatchResultTy
tryParseAdrpLabel(OperandVector
&Operands
);
209 OperandMatchResultTy
tryParseAdrLabel(OperandVector
&Operands
);
210 template<bool AddFPZeroAsLiteral
>
211 OperandMatchResultTy
tryParseFPImm(OperandVector
&Operands
);
212 OperandMatchResultTy
tryParseImmWithOptionalShift(OperandVector
&Operands
);
213 OperandMatchResultTy
tryParseGPR64sp0Operand(OperandVector
&Operands
);
214 bool tryParseNeonVectorRegister(OperandVector
&Operands
);
215 OperandMatchResultTy
tryParseVectorIndex(OperandVector
&Operands
);
216 OperandMatchResultTy
tryParseGPRSeqPair(OperandVector
&Operands
);
217 template <bool ParseShiftExtend
,
218 RegConstraintEqualityTy EqTy
= RegConstraintEqualityTy::EqualsReg
>
219 OperandMatchResultTy
tryParseGPROperand(OperandVector
&Operands
);
220 template <bool ParseShiftExtend
, bool ParseSuffix
>
221 OperandMatchResultTy
tryParseSVEDataVector(OperandVector
&Operands
);
222 OperandMatchResultTy
tryParseSVEPredicateVector(OperandVector
&Operands
);
223 template <RegKind VectorKind
>
224 OperandMatchResultTy
tryParseVectorList(OperandVector
&Operands
,
225 bool ExpectMatch
= false);
226 OperandMatchResultTy
tryParseSVEPattern(OperandVector
&Operands
);
229 enum AArch64MatchResultTy
{
230 Match_InvalidSuffix
= FIRST_TARGET_MATCH_RESULT_TY
,
231 #define GET_OPERAND_DIAGNOSTIC_TYPES
232 #include "AArch64GenAsmMatcher.inc"
236 AArch64AsmParser(const MCSubtargetInfo
&STI
, MCAsmParser
&Parser
,
237 const MCInstrInfo
&MII
, const MCTargetOptions
&Options
)
238 : MCTargetAsmParser(Options
, STI
, MII
) {
239 IsILP32
= Options
.getABIName() == "ilp32";
240 MCAsmParserExtension::Initialize(Parser
);
241 MCStreamer
&S
= getParser().getStreamer();
242 if (S
.getTargetStreamer() == nullptr)
243 new AArch64TargetStreamer(S
);
245 // Alias .hword/.word/.[dx]word to the target-independent
246 // .2byte/.4byte/.8byte directives as they have the same form and
248 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
249 Parser
.addAliasForDirective(".hword", ".2byte");
250 Parser
.addAliasForDirective(".word", ".4byte");
251 Parser
.addAliasForDirective(".dword", ".8byte");
252 Parser
.addAliasForDirective(".xword", ".8byte");
254 // Initialize the set of available features.
255 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
258 bool regsEqual(const MCParsedAsmOperand
&Op1
,
259 const MCParsedAsmOperand
&Op2
) const override
;
260 bool ParseInstruction(ParseInstructionInfo
&Info
, StringRef Name
,
261 SMLoc NameLoc
, OperandVector
&Operands
) override
;
262 bool ParseRegister(unsigned &RegNo
, SMLoc
&StartLoc
, SMLoc
&EndLoc
) override
;
263 bool ParseDirective(AsmToken DirectiveID
) override
;
264 unsigned validateTargetOperandClass(MCParsedAsmOperand
&Op
,
265 unsigned Kind
) override
;
267 static bool classifySymbolRef(const MCExpr
*Expr
,
268 AArch64MCExpr::VariantKind
&ELFRefKind
,
269 MCSymbolRefExpr::VariantKind
&DarwinRefKind
,
273 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
275 class AArch64Operand
: public MCParsedAsmOperand
{
295 SMLoc StartLoc
, EndLoc
;
300 bool IsSuffix
; // Is the operand actually a suffix on the mnemonic.
303 // Separate shift/extend operand.
304 struct ShiftExtendOp
{
305 AArch64_AM::ShiftExtendType Type
;
307 bool HasExplicitAmount
;
315 // The register may be allowed as a different register class,
316 // e.g. for GPR64as32 or GPR32as64.
317 RegConstraintEqualityTy EqualityTy
;
319 // In some cases the shift/extend needs to be explicitly parsed together
320 // with the register, rather than as a separate operand. This is needed
321 // for addressing modes where the instruction as a whole dictates the
322 // scaling/extend, rather than specific bits in the instruction.
323 // By parsing them as a single operand, we avoid the need to pass an
324 // extra operand in all CodeGen patterns (because all operands need to
325 // have an associated value), and we avoid the need to update TableGen to
326 // accept operands that have no associated bits in the instruction.
328 // An added benefit of parsing them together is that the assembler
329 // can give a sensible diagnostic if the scaling is not correct.
331 // The default is 'lsl #0' (HasExplicitAmount = false) if no
332 // ShiftExtend is specified.
333 ShiftExtendOp ShiftExtend
;
336 struct VectorListOp
{
339 unsigned NumElements
;
340 unsigned ElementWidth
;
341 RegKind RegisterKind
;
344 struct VectorIndexOp
{
352 struct ShiftedImmOp
{
354 unsigned ShiftAmount
;
358 AArch64CC::CondCode Code
;
362 uint64_t Val
; // APFloat value bitcasted to uint64_t.
363 bool IsExact
; // describes whether parsed value was exact.
369 unsigned Val
; // Not the enum since not all values have names.
377 uint32_t PStateField
;
409 struct VectorListOp VectorList
;
410 struct VectorIndexOp VectorIndex
;
412 struct ShiftedImmOp ShiftedImm
;
413 struct CondCodeOp CondCode
;
414 struct FPImmOp FPImm
;
415 struct BarrierOp Barrier
;
416 struct SysRegOp SysReg
;
417 struct SysCRImmOp SysCRImm
;
418 struct PrefetchOp Prefetch
;
419 struct PSBHintOp PSBHint
;
420 struct BTIHintOp BTIHint
;
421 struct ShiftExtendOp ShiftExtend
;
424 // Keep the MCContext around as the MCExprs may need manipulated during
425 // the add<>Operands() calls.
429 AArch64Operand(KindTy K
, MCContext
&Ctx
) : Kind(K
), Ctx(Ctx
) {}
431 AArch64Operand(const AArch64Operand
&o
) : MCParsedAsmOperand(), Ctx(o
.Ctx
) {
433 StartLoc
= o
.StartLoc
;
443 ShiftedImm
= o
.ShiftedImm
;
446 CondCode
= o
.CondCode
;
458 VectorList
= o
.VectorList
;
461 VectorIndex
= o
.VectorIndex
;
467 SysCRImm
= o
.SysCRImm
;
470 Prefetch
= o
.Prefetch
;
479 ShiftExtend
= o
.ShiftExtend
;
484 /// getStartLoc - Get the location of the first token of this operand.
485 SMLoc
getStartLoc() const override
{ return StartLoc
; }
486 /// getEndLoc - Get the location of the last token of this operand.
487 SMLoc
getEndLoc() const override
{ return EndLoc
; }
489 StringRef
getToken() const {
490 assert(Kind
== k_Token
&& "Invalid access!");
491 return StringRef(Tok
.Data
, Tok
.Length
);
494 bool isTokenSuffix() const {
495 assert(Kind
== k_Token
&& "Invalid access!");
499 const MCExpr
*getImm() const {
500 assert(Kind
== k_Immediate
&& "Invalid access!");
504 const MCExpr
*getShiftedImmVal() const {
505 assert(Kind
== k_ShiftedImm
&& "Invalid access!");
506 return ShiftedImm
.Val
;
509 unsigned getShiftedImmShift() const {
510 assert(Kind
== k_ShiftedImm
&& "Invalid access!");
511 return ShiftedImm
.ShiftAmount
;
514 AArch64CC::CondCode
getCondCode() const {
515 assert(Kind
== k_CondCode
&& "Invalid access!");
516 return CondCode
.Code
;
519 APFloat
getFPImm() const {
520 assert (Kind
== k_FPImm
&& "Invalid access!");
521 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm
.Val
, true));
524 bool getFPImmIsExact() const {
525 assert (Kind
== k_FPImm
&& "Invalid access!");
526 return FPImm
.IsExact
;
529 unsigned getBarrier() const {
530 assert(Kind
== k_Barrier
&& "Invalid access!");
534 StringRef
getBarrierName() const {
535 assert(Kind
== k_Barrier
&& "Invalid access!");
536 return StringRef(Barrier
.Data
, Barrier
.Length
);
539 unsigned getReg() const override
{
540 assert(Kind
== k_Register
&& "Invalid access!");
544 RegConstraintEqualityTy
getRegEqualityTy() const {
545 assert(Kind
== k_Register
&& "Invalid access!");
546 return Reg
.EqualityTy
;
549 unsigned getVectorListStart() const {
550 assert(Kind
== k_VectorList
&& "Invalid access!");
551 return VectorList
.RegNum
;
554 unsigned getVectorListCount() const {
555 assert(Kind
== k_VectorList
&& "Invalid access!");
556 return VectorList
.Count
;
559 unsigned getVectorIndex() const {
560 assert(Kind
== k_VectorIndex
&& "Invalid access!");
561 return VectorIndex
.Val
;
564 StringRef
getSysReg() const {
565 assert(Kind
== k_SysReg
&& "Invalid access!");
566 return StringRef(SysReg
.Data
, SysReg
.Length
);
569 unsigned getSysCR() const {
570 assert(Kind
== k_SysCR
&& "Invalid access!");
574 unsigned getPrefetch() const {
575 assert(Kind
== k_Prefetch
&& "Invalid access!");
579 unsigned getPSBHint() const {
580 assert(Kind
== k_PSBHint
&& "Invalid access!");
584 StringRef
getPSBHintName() const {
585 assert(Kind
== k_PSBHint
&& "Invalid access!");
586 return StringRef(PSBHint
.Data
, PSBHint
.Length
);
589 unsigned getBTIHint() const {
590 assert(Kind
== k_BTIHint
&& "Invalid access!");
594 StringRef
getBTIHintName() const {
595 assert(Kind
== k_BTIHint
&& "Invalid access!");
596 return StringRef(BTIHint
.Data
, BTIHint
.Length
);
599 StringRef
getPrefetchName() const {
600 assert(Kind
== k_Prefetch
&& "Invalid access!");
601 return StringRef(Prefetch
.Data
, Prefetch
.Length
);
604 AArch64_AM::ShiftExtendType
getShiftExtendType() const {
605 if (Kind
== k_ShiftExtend
)
606 return ShiftExtend
.Type
;
607 if (Kind
== k_Register
)
608 return Reg
.ShiftExtend
.Type
;
609 llvm_unreachable("Invalid access!");
612 unsigned getShiftExtendAmount() const {
613 if (Kind
== k_ShiftExtend
)
614 return ShiftExtend
.Amount
;
615 if (Kind
== k_Register
)
616 return Reg
.ShiftExtend
.Amount
;
617 llvm_unreachable("Invalid access!");
620 bool hasShiftExtendAmount() const {
621 if (Kind
== k_ShiftExtend
)
622 return ShiftExtend
.HasExplicitAmount
;
623 if (Kind
== k_Register
)
624 return Reg
.ShiftExtend
.HasExplicitAmount
;
625 llvm_unreachable("Invalid access!");
628 bool isImm() const override
{ return Kind
== k_Immediate
; }
629 bool isMem() const override
{ return false; }
631 bool isUImm6() const {
634 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(getImm());
637 int64_t Val
= MCE
->getValue();
638 return (Val
>= 0 && Val
< 64);
641 template <int Width
> bool isSImm() const { return isSImmScaled
<Width
, 1>(); }
643 template <int Bits
, int Scale
> DiagnosticPredicate
isSImmScaled() const {
644 return isImmScaled
<Bits
, Scale
>(true);
647 template <int Bits
, int Scale
> DiagnosticPredicate
isUImmScaled() const {
648 return isImmScaled
<Bits
, Scale
>(false);
651 template <int Bits
, int Scale
>
652 DiagnosticPredicate
isImmScaled(bool Signed
) const {
654 return DiagnosticPredicateTy::NoMatch
;
656 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(getImm());
658 return DiagnosticPredicateTy::NoMatch
;
660 int64_t MinVal
, MaxVal
;
662 int64_t Shift
= Bits
- 1;
663 MinVal
= (int64_t(1) << Shift
) * -Scale
;
664 MaxVal
= ((int64_t(1) << Shift
) - 1) * Scale
;
667 MaxVal
= ((int64_t(1) << Bits
) - 1) * Scale
;
670 int64_t Val
= MCE
->getValue();
671 if (Val
>= MinVal
&& Val
<= MaxVal
&& (Val
% Scale
) == 0)
672 return DiagnosticPredicateTy::Match
;
674 return DiagnosticPredicateTy::NearMatch
;
677 DiagnosticPredicate
isSVEPattern() const {
679 return DiagnosticPredicateTy::NoMatch
;
680 auto *MCE
= dyn_cast
<MCConstantExpr
>(getImm());
682 return DiagnosticPredicateTy::NoMatch
;
683 int64_t Val
= MCE
->getValue();
684 if (Val
>= 0 && Val
< 32)
685 return DiagnosticPredicateTy::Match
;
686 return DiagnosticPredicateTy::NearMatch
;
689 bool isSymbolicUImm12Offset(const MCExpr
*Expr
) const {
690 AArch64MCExpr::VariantKind ELFRefKind
;
691 MCSymbolRefExpr::VariantKind DarwinRefKind
;
693 if (!AArch64AsmParser::classifySymbolRef(Expr
, ELFRefKind
, DarwinRefKind
,
695 // If we don't understand the expression, assume the best and
696 // let the fixup and relocation code deal with it.
700 if (DarwinRefKind
== MCSymbolRefExpr::VK_PAGEOFF
||
701 ELFRefKind
== AArch64MCExpr::VK_LO12
||
702 ELFRefKind
== AArch64MCExpr::VK_GOT_LO12
||
703 ELFRefKind
== AArch64MCExpr::VK_DTPREL_LO12
||
704 ELFRefKind
== AArch64MCExpr::VK_DTPREL_LO12_NC
||
705 ELFRefKind
== AArch64MCExpr::VK_TPREL_LO12
||
706 ELFRefKind
== AArch64MCExpr::VK_TPREL_LO12_NC
||
707 ELFRefKind
== AArch64MCExpr::VK_GOTTPREL_LO12_NC
||
708 ELFRefKind
== AArch64MCExpr::VK_TLSDESC_LO12
||
709 ELFRefKind
== AArch64MCExpr::VK_SECREL_LO12
||
710 ELFRefKind
== AArch64MCExpr::VK_SECREL_HI12
) {
711 // Note that we don't range-check the addend. It's adjusted modulo page
712 // size when converted, so there is no "out of range" condition when using
715 } else if (DarwinRefKind
== MCSymbolRefExpr::VK_GOTPAGEOFF
||
716 DarwinRefKind
== MCSymbolRefExpr::VK_TLVPPAGEOFF
) {
717 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
724 template <int Scale
> bool isUImm12Offset() const {
728 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(getImm());
730 return isSymbolicUImm12Offset(getImm());
732 int64_t Val
= MCE
->getValue();
733 return (Val
% Scale
) == 0 && Val
>= 0 && (Val
/ Scale
) < 0x1000;
736 template <int N
, int M
>
737 bool isImmInRange() const {
740 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(getImm());
743 int64_t Val
= MCE
->getValue();
744 return (Val
>= N
&& Val
<= M
);
747 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
748 // a logical immediate can always be represented when inverted.
749 template <typename T
>
750 bool isLogicalImm() const {
753 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(getImm());
757 int64_t Val
= MCE
->getValue();
758 int64_t SVal
= typename
std::make_signed
<T
>::type(Val
);
759 int64_t UVal
= typename
std::make_unsigned
<T
>::type(Val
);
760 if (Val
!= SVal
&& Val
!= UVal
)
763 return AArch64_AM::isLogicalImmediate(UVal
, sizeof(T
) * 8);
766 bool isShiftedImm() const { return Kind
== k_ShiftedImm
; }
768 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
769 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
770 /// immediate that can be shifted by 'Shift'.
771 template <unsigned Width
>
772 Optional
<std::pair
<int64_t, unsigned> > getShiftedVal() const {
773 if (isShiftedImm() && Width
== getShiftedImmShift())
774 if (auto *CE
= dyn_cast
<MCConstantExpr
>(getShiftedImmVal()))
775 return std::make_pair(CE
->getValue(), Width
);
778 if (auto *CE
= dyn_cast
<MCConstantExpr
>(getImm())) {
779 int64_t Val
= CE
->getValue();
780 if ((Val
!= 0) && (uint64_t(Val
>> Width
) << Width
) == uint64_t(Val
))
781 return std::make_pair(Val
>> Width
, Width
);
783 return std::make_pair(Val
, 0u);
789 bool isAddSubImm() const {
790 if (!isShiftedImm() && !isImm())
795 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
796 if (isShiftedImm()) {
797 unsigned Shift
= ShiftedImm
.ShiftAmount
;
798 Expr
= ShiftedImm
.Val
;
799 if (Shift
!= 0 && Shift
!= 12)
805 AArch64MCExpr::VariantKind ELFRefKind
;
806 MCSymbolRefExpr::VariantKind DarwinRefKind
;
808 if (AArch64AsmParser::classifySymbolRef(Expr
, ELFRefKind
,
809 DarwinRefKind
, Addend
)) {
810 return DarwinRefKind
== MCSymbolRefExpr::VK_PAGEOFF
811 || DarwinRefKind
== MCSymbolRefExpr::VK_TLVPPAGEOFF
812 || (DarwinRefKind
== MCSymbolRefExpr::VK_GOTPAGEOFF
&& Addend
== 0)
813 || ELFRefKind
== AArch64MCExpr::VK_LO12
814 || ELFRefKind
== AArch64MCExpr::VK_DTPREL_HI12
815 || ELFRefKind
== AArch64MCExpr::VK_DTPREL_LO12
816 || ELFRefKind
== AArch64MCExpr::VK_DTPREL_LO12_NC
817 || ELFRefKind
== AArch64MCExpr::VK_TPREL_HI12
818 || ELFRefKind
== AArch64MCExpr::VK_TPREL_LO12
819 || ELFRefKind
== AArch64MCExpr::VK_TPREL_LO12_NC
820 || ELFRefKind
== AArch64MCExpr::VK_TLSDESC_LO12
821 || ELFRefKind
== AArch64MCExpr::VK_SECREL_HI12
822 || ELFRefKind
== AArch64MCExpr::VK_SECREL_LO12
;
825 // If it's a constant, it should be a real immediate in range.
826 if (auto ShiftedVal
= getShiftedVal
<12>())
827 return ShiftedVal
->first
>= 0 && ShiftedVal
->first
<= 0xfff;
829 // If it's an expression, we hope for the best and let the fixup/relocation
830 // code deal with it.
834 bool isAddSubImmNeg() const {
835 if (!isShiftedImm() && !isImm())
838 // Otherwise it should be a real negative immediate in range.
839 if (auto ShiftedVal
= getShiftedVal
<12>())
840 return ShiftedVal
->first
< 0 && -ShiftedVal
->first
<= 0xfff;
845 // Signed value in the range -128 to +127. For element widths of
846 // 16 bits or higher it may also be a signed multiple of 256 in the
847 // range -32768 to +32512.
848 // For element-width of 8 bits a range of -128 to 255 is accepted,
849 // since a copy of a byte can be either signed/unsigned.
850 template <typename T
>
851 DiagnosticPredicate
isSVECpyImm() const {
852 if (!isShiftedImm() && (!isImm() || !isa
<MCConstantExpr
>(getImm())))
853 return DiagnosticPredicateTy::NoMatch
;
856 std::is_same
<int8_t, typename
std::make_signed
<T
>::type
>::value
;
857 if (auto ShiftedImm
= getShiftedVal
<8>())
858 if (!(IsByte
&& ShiftedImm
->second
) &&
859 AArch64_AM::isSVECpyImm
<T
>(uint64_t(ShiftedImm
->first
)
860 << ShiftedImm
->second
))
861 return DiagnosticPredicateTy::Match
;
863 return DiagnosticPredicateTy::NearMatch
;
866 // Unsigned value in the range 0 to 255. For element widths of
867 // 16 bits or higher it may also be a signed multiple of 256 in the
869 template <typename T
> DiagnosticPredicate
isSVEAddSubImm() const {
870 if (!isShiftedImm() && (!isImm() || !isa
<MCConstantExpr
>(getImm())))
871 return DiagnosticPredicateTy::NoMatch
;
874 std::is_same
<int8_t, typename
std::make_signed
<T
>::type
>::value
;
875 if (auto ShiftedImm
= getShiftedVal
<8>())
876 if (!(IsByte
&& ShiftedImm
->second
) &&
877 AArch64_AM::isSVEAddSubImm
<T
>(ShiftedImm
->first
878 << ShiftedImm
->second
))
879 return DiagnosticPredicateTy::Match
;
881 return DiagnosticPredicateTy::NearMatch
;
884 template <typename T
> DiagnosticPredicate
isSVEPreferredLogicalImm() const {
885 if (isLogicalImm
<T
>() && !isSVECpyImm
<T
>())
886 return DiagnosticPredicateTy::Match
;
887 return DiagnosticPredicateTy::NoMatch
;
890 bool isCondCode() const { return Kind
== k_CondCode
; }
892 bool isSIMDImmType10() const {
895 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(getImm());
898 return AArch64_AM::isAdvSIMDModImmType10(MCE
->getValue());
902 bool isBranchTarget() const {
905 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(getImm());
908 int64_t Val
= MCE
->getValue();
911 assert(N
> 0 && "Branch target immediate cannot be 0 bits!");
912 return (Val
>= -((1<<(N
-1)) << 2) && Val
<= (((1<<(N
-1))-1) << 2));
916 isMovWSymbol(ArrayRef
<AArch64MCExpr::VariantKind
> AllowedModifiers
) const {
920 AArch64MCExpr::VariantKind ELFRefKind
;
921 MCSymbolRefExpr::VariantKind DarwinRefKind
;
923 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind
,
924 DarwinRefKind
, Addend
)) {
927 if (DarwinRefKind
!= MCSymbolRefExpr::VK_None
)
930 for (unsigned i
= 0; i
!= AllowedModifiers
.size(); ++i
) {
931 if (ELFRefKind
== AllowedModifiers
[i
])
938 bool isMovWSymbolG3() const {
939 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3
, AArch64MCExpr::VK_PREL_G3
});
942 bool isMovWSymbolG2() const {
944 {AArch64MCExpr::VK_ABS_G2
, AArch64MCExpr::VK_ABS_G2_S
,
945 AArch64MCExpr::VK_ABS_G2_NC
, AArch64MCExpr::VK_PREL_G2
,
946 AArch64MCExpr::VK_PREL_G2_NC
, AArch64MCExpr::VK_TPREL_G2
,
947 AArch64MCExpr::VK_DTPREL_G2
});
950 bool isMovWSymbolG1() const {
952 {AArch64MCExpr::VK_ABS_G1
, AArch64MCExpr::VK_ABS_G1_S
,
953 AArch64MCExpr::VK_ABS_G1_NC
, AArch64MCExpr::VK_PREL_G1
,
954 AArch64MCExpr::VK_PREL_G1_NC
, AArch64MCExpr::VK_GOTTPREL_G1
,
955 AArch64MCExpr::VK_TPREL_G1
, AArch64MCExpr::VK_TPREL_G1_NC
,
956 AArch64MCExpr::VK_DTPREL_G1
, AArch64MCExpr::VK_DTPREL_G1_NC
});
959 bool isMovWSymbolG0() const {
961 {AArch64MCExpr::VK_ABS_G0
, AArch64MCExpr::VK_ABS_G0_S
,
962 AArch64MCExpr::VK_ABS_G0_NC
, AArch64MCExpr::VK_PREL_G0
,
963 AArch64MCExpr::VK_PREL_G0_NC
, AArch64MCExpr::VK_GOTTPREL_G0_NC
,
964 AArch64MCExpr::VK_TPREL_G0
, AArch64MCExpr::VK_TPREL_G0_NC
,
965 AArch64MCExpr::VK_DTPREL_G0
, AArch64MCExpr::VK_DTPREL_G0_NC
});
968 template<int RegWidth
, int Shift
>
969 bool isMOVZMovAlias() const {
970 if (!isImm()) return false;
972 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
973 if (!CE
) return false;
974 uint64_t Value
= CE
->getValue();
976 return AArch64_AM::isMOVZMovAlias(Value
, Shift
, RegWidth
);
979 template<int RegWidth
, int Shift
>
980 bool isMOVNMovAlias() const {
981 if (!isImm()) return false;
983 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
984 if (!CE
) return false;
985 uint64_t Value
= CE
->getValue();
987 return AArch64_AM::isMOVNMovAlias(Value
, Shift
, RegWidth
);
990 bool isFPImm() const {
991 return Kind
== k_FPImm
&&
992 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
995 bool isBarrier() const { return Kind
== k_Barrier
; }
996 bool isSysReg() const { return Kind
== k_SysReg
; }
998 bool isMRSSystemRegister() const {
999 if (!isSysReg()) return false;
1001 return SysReg
.MRSReg
!= -1U;
1004 bool isMSRSystemRegister() const {
1005 if (!isSysReg()) return false;
1006 return SysReg
.MSRReg
!= -1U;
1009 bool isSystemPStateFieldWithImm0_1() const {
1010 if (!isSysReg()) return false;
1011 return (SysReg
.PStateField
== AArch64PState::PAN
||
1012 SysReg
.PStateField
== AArch64PState::DIT
||
1013 SysReg
.PStateField
== AArch64PState::UAO
||
1014 SysReg
.PStateField
== AArch64PState::SSBS
);
1017 bool isSystemPStateFieldWithImm0_15() const {
1018 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1019 return SysReg
.PStateField
!= -1U;
1022 bool isReg() const override
{
1023 return Kind
== k_Register
;
1026 bool isScalarReg() const {
1027 return Kind
== k_Register
&& Reg
.Kind
== RegKind::Scalar
;
1030 bool isNeonVectorReg() const {
1031 return Kind
== k_Register
&& Reg
.Kind
== RegKind::NeonVector
;
1034 bool isNeonVectorRegLo() const {
1035 return Kind
== k_Register
&& Reg
.Kind
== RegKind::NeonVector
&&
1036 AArch64MCRegisterClasses
[AArch64::FPR128_loRegClassID
].contains(
1040 template <unsigned Class
> bool isSVEVectorReg() const {
1043 case AArch64::ZPRRegClassID
:
1044 case AArch64::ZPR_3bRegClassID
:
1045 case AArch64::ZPR_4bRegClassID
:
1046 RK
= RegKind::SVEDataVector
;
1048 case AArch64::PPRRegClassID
:
1049 case AArch64::PPR_3bRegClassID
:
1050 RK
= RegKind::SVEPredicateVector
;
1053 llvm_unreachable("Unsupport register class");
1056 return (Kind
== k_Register
&& Reg
.Kind
== RK
) &&
1057 AArch64MCRegisterClasses
[Class
].contains(getReg());
1060 template <unsigned Class
> bool isFPRasZPR() const {
1061 return Kind
== k_Register
&& Reg
.Kind
== RegKind::Scalar
&&
1062 AArch64MCRegisterClasses
[Class
].contains(getReg());
1065 template <int ElementWidth
, unsigned Class
>
1066 DiagnosticPredicate
isSVEPredicateVectorRegOfWidth() const {
1067 if (Kind
!= k_Register
|| Reg
.Kind
!= RegKind::SVEPredicateVector
)
1068 return DiagnosticPredicateTy::NoMatch
;
1070 if (isSVEVectorReg
<Class
>() && (Reg
.ElementWidth
== ElementWidth
))
1071 return DiagnosticPredicateTy::Match
;
1073 return DiagnosticPredicateTy::NearMatch
;
1076 template <int ElementWidth
, unsigned Class
>
1077 DiagnosticPredicate
isSVEDataVectorRegOfWidth() const {
1078 if (Kind
!= k_Register
|| Reg
.Kind
!= RegKind::SVEDataVector
)
1079 return DiagnosticPredicateTy::NoMatch
;
1081 if (isSVEVectorReg
<Class
>() && Reg
.ElementWidth
== ElementWidth
)
1082 return DiagnosticPredicateTy::Match
;
1084 return DiagnosticPredicateTy::NearMatch
;
1087 template <int ElementWidth
, unsigned Class
,
1088 AArch64_AM::ShiftExtendType ShiftExtendTy
, int ShiftWidth
,
1089 bool ShiftWidthAlwaysSame
>
1090 DiagnosticPredicate
isSVEDataVectorRegWithShiftExtend() const {
1091 auto VectorMatch
= isSVEDataVectorRegOfWidth
<ElementWidth
, Class
>();
1092 if (!VectorMatch
.isMatch())
1093 return DiagnosticPredicateTy::NoMatch
;
1095 // Give a more specific diagnostic when the user has explicitly typed in
1096 // a shift-amount that does not match what is expected, but for which
1097 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1098 bool MatchShift
= getShiftExtendAmount() == Log2_32(ShiftWidth
/ 8);
1099 if (!MatchShift
&& (ShiftExtendTy
== AArch64_AM::UXTW
||
1100 ShiftExtendTy
== AArch64_AM::SXTW
) &&
1101 !ShiftWidthAlwaysSame
&& hasShiftExtendAmount() && ShiftWidth
== 8)
1102 return DiagnosticPredicateTy::NoMatch
;
1104 if (MatchShift
&& ShiftExtendTy
== getShiftExtendType())
1105 return DiagnosticPredicateTy::Match
;
1107 return DiagnosticPredicateTy::NearMatch
;
1110 bool isGPR32as64() const {
1111 return Kind
== k_Register
&& Reg
.Kind
== RegKind::Scalar
&&
1112 AArch64MCRegisterClasses
[AArch64::GPR64RegClassID
].contains(Reg
.RegNum
);
1115 bool isGPR64as32() const {
1116 return Kind
== k_Register
&& Reg
.Kind
== RegKind::Scalar
&&
1117 AArch64MCRegisterClasses
[AArch64::GPR32RegClassID
].contains(Reg
.RegNum
);
1120 bool isWSeqPair() const {
1121 return Kind
== k_Register
&& Reg
.Kind
== RegKind::Scalar
&&
1122 AArch64MCRegisterClasses
[AArch64::WSeqPairsClassRegClassID
].contains(
1126 bool isXSeqPair() const {
1127 return Kind
== k_Register
&& Reg
.Kind
== RegKind::Scalar
&&
1128 AArch64MCRegisterClasses
[AArch64::XSeqPairsClassRegClassID
].contains(
1132 template<int64_t Angle
, int64_t Remainder
>
1133 DiagnosticPredicate
isComplexRotation() const {
1134 if (!isImm()) return DiagnosticPredicateTy::NoMatch
;
1136 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1137 if (!CE
) return DiagnosticPredicateTy::NoMatch
;
1138 uint64_t Value
= CE
->getValue();
1140 if (Value
% Angle
== Remainder
&& Value
<= 270)
1141 return DiagnosticPredicateTy::Match
;
1142 return DiagnosticPredicateTy::NearMatch
;
1145 template <unsigned RegClassID
> bool isGPR64() const {
1146 return Kind
== k_Register
&& Reg
.Kind
== RegKind::Scalar
&&
1147 AArch64MCRegisterClasses
[RegClassID
].contains(getReg());
1150 template <unsigned RegClassID
, int ExtWidth
>
1151 DiagnosticPredicate
isGPR64WithShiftExtend() const {
1152 if (Kind
!= k_Register
|| Reg
.Kind
!= RegKind::Scalar
)
1153 return DiagnosticPredicateTy::NoMatch
;
1155 if (isGPR64
<RegClassID
>() && getShiftExtendType() == AArch64_AM::LSL
&&
1156 getShiftExtendAmount() == Log2_32(ExtWidth
/ 8))
1157 return DiagnosticPredicateTy::Match
;
1158 return DiagnosticPredicateTy::NearMatch
;
1161 /// Is this a vector list with the type implicit (presumably attached to the
1162 /// instruction itself)?
1163 template <RegKind VectorKind
, unsigned NumRegs
>
1164 bool isImplicitlyTypedVectorList() const {
1165 return Kind
== k_VectorList
&& VectorList
.Count
== NumRegs
&&
1166 VectorList
.NumElements
== 0 &&
1167 VectorList
.RegisterKind
== VectorKind
;
1170 template <RegKind VectorKind
, unsigned NumRegs
, unsigned NumElements
,
1171 unsigned ElementWidth
>
1172 bool isTypedVectorList() const {
1173 if (Kind
!= k_VectorList
)
1175 if (VectorList
.Count
!= NumRegs
)
1177 if (VectorList
.RegisterKind
!= VectorKind
)
1179 if (VectorList
.ElementWidth
!= ElementWidth
)
1181 return VectorList
.NumElements
== NumElements
;
1184 template <int Min
, int Max
>
1185 DiagnosticPredicate
isVectorIndex() const {
1186 if (Kind
!= k_VectorIndex
)
1187 return DiagnosticPredicateTy::NoMatch
;
1188 if (VectorIndex
.Val
>= Min
&& VectorIndex
.Val
<= Max
)
1189 return DiagnosticPredicateTy::Match
;
1190 return DiagnosticPredicateTy::NearMatch
;
1193 bool isToken() const override
{ return Kind
== k_Token
; }
1195 bool isTokenEqual(StringRef Str
) const {
1196 return Kind
== k_Token
&& getToken() == Str
;
1198 bool isSysCR() const { return Kind
== k_SysCR
; }
1199 bool isPrefetch() const { return Kind
== k_Prefetch
; }
1200 bool isPSBHint() const { return Kind
== k_PSBHint
; }
1201 bool isBTIHint() const { return Kind
== k_BTIHint
; }
1202 bool isShiftExtend() const { return Kind
== k_ShiftExtend
; }
1203 bool isShifter() const {
1204 if (!isShiftExtend())
1207 AArch64_AM::ShiftExtendType ST
= getShiftExtendType();
1208 return (ST
== AArch64_AM::LSL
|| ST
== AArch64_AM::LSR
||
1209 ST
== AArch64_AM::ASR
|| ST
== AArch64_AM::ROR
||
1210 ST
== AArch64_AM::MSL
);
1213 template <unsigned ImmEnum
> DiagnosticPredicate
isExactFPImm() const {
1214 if (Kind
!= k_FPImm
)
1215 return DiagnosticPredicateTy::NoMatch
;
1217 if (getFPImmIsExact()) {
1218 // Lookup the immediate from table of supported immediates.
1219 auto *Desc
= AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum
);
1220 assert(Desc
&& "Unknown enum value");
1222 // Calculate its FP value.
1223 APFloat
RealVal(APFloat::IEEEdouble());
1225 RealVal
.convertFromString(Desc
->Repr
, APFloat::rmTowardZero
);
1226 if (errorToBool(StatusOrErr
.takeError()) || *StatusOrErr
!= APFloat::opOK
)
1227 llvm_unreachable("FP immediate is not exact");
1229 if (getFPImm().bitwiseIsEqual(RealVal
))
1230 return DiagnosticPredicateTy::Match
;
1233 return DiagnosticPredicateTy::NearMatch
;
1236 template <unsigned ImmA
, unsigned ImmB
>
1237 DiagnosticPredicate
isExactFPImm() const {
1238 DiagnosticPredicate Res
= DiagnosticPredicateTy::NoMatch
;
1239 if ((Res
= isExactFPImm
<ImmA
>()))
1240 return DiagnosticPredicateTy::Match
;
1241 if ((Res
= isExactFPImm
<ImmB
>()))
1242 return DiagnosticPredicateTy::Match
;
1246 bool isExtend() const {
1247 if (!isShiftExtend())
1250 AArch64_AM::ShiftExtendType ET
= getShiftExtendType();
1251 return (ET
== AArch64_AM::UXTB
|| ET
== AArch64_AM::SXTB
||
1252 ET
== AArch64_AM::UXTH
|| ET
== AArch64_AM::SXTH
||
1253 ET
== AArch64_AM::UXTW
|| ET
== AArch64_AM::SXTW
||
1254 ET
== AArch64_AM::UXTX
|| ET
== AArch64_AM::SXTX
||
1255 ET
== AArch64_AM::LSL
) &&
1256 getShiftExtendAmount() <= 4;
1259 bool isExtend64() const {
1262 // Make sure the extend expects a 32-bit source register.
1263 AArch64_AM::ShiftExtendType ET
= getShiftExtendType();
1264 return ET
== AArch64_AM::UXTB
|| ET
== AArch64_AM::SXTB
||
1265 ET
== AArch64_AM::UXTH
|| ET
== AArch64_AM::SXTH
||
1266 ET
== AArch64_AM::UXTW
|| ET
== AArch64_AM::SXTW
;
1269 bool isExtendLSL64() const {
1272 AArch64_AM::ShiftExtendType ET
= getShiftExtendType();
1273 return (ET
== AArch64_AM::UXTX
|| ET
== AArch64_AM::SXTX
||
1274 ET
== AArch64_AM::LSL
) &&
1275 getShiftExtendAmount() <= 4;
1278 template<int Width
> bool isMemXExtend() const {
1281 AArch64_AM::ShiftExtendType ET
= getShiftExtendType();
1282 return (ET
== AArch64_AM::LSL
|| ET
== AArch64_AM::SXTX
) &&
1283 (getShiftExtendAmount() == Log2_32(Width
/ 8) ||
1284 getShiftExtendAmount() == 0);
1287 template<int Width
> bool isMemWExtend() const {
1290 AArch64_AM::ShiftExtendType ET
= getShiftExtendType();
1291 return (ET
== AArch64_AM::UXTW
|| ET
== AArch64_AM::SXTW
) &&
1292 (getShiftExtendAmount() == Log2_32(Width
/ 8) ||
1293 getShiftExtendAmount() == 0);
1296 template <unsigned width
>
1297 bool isArithmeticShifter() const {
1301 // An arithmetic shifter is LSL, LSR, or ASR.
1302 AArch64_AM::ShiftExtendType ST
= getShiftExtendType();
1303 return (ST
== AArch64_AM::LSL
|| ST
== AArch64_AM::LSR
||
1304 ST
== AArch64_AM::ASR
) && getShiftExtendAmount() < width
;
1307 template <unsigned width
>
1308 bool isLogicalShifter() const {
1312 // A logical shifter is LSL, LSR, ASR or ROR.
1313 AArch64_AM::ShiftExtendType ST
= getShiftExtendType();
1314 return (ST
== AArch64_AM::LSL
|| ST
== AArch64_AM::LSR
||
1315 ST
== AArch64_AM::ASR
|| ST
== AArch64_AM::ROR
) &&
1316 getShiftExtendAmount() < width
;
1319 bool isMovImm32Shifter() const {
1323 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1324 AArch64_AM::ShiftExtendType ST
= getShiftExtendType();
1325 if (ST
!= AArch64_AM::LSL
)
1327 uint64_t Val
= getShiftExtendAmount();
1328 return (Val
== 0 || Val
== 16);
1331 bool isMovImm64Shifter() const {
1335 // A MOVi shifter is LSL of 0 or 16.
1336 AArch64_AM::ShiftExtendType ST
= getShiftExtendType();
1337 if (ST
!= AArch64_AM::LSL
)
1339 uint64_t Val
= getShiftExtendAmount();
1340 return (Val
== 0 || Val
== 16 || Val
== 32 || Val
== 48);
1343 bool isLogicalVecShifter() const {
1347 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1348 unsigned Shift
= getShiftExtendAmount();
1349 return getShiftExtendType() == AArch64_AM::LSL
&&
1350 (Shift
== 0 || Shift
== 8 || Shift
== 16 || Shift
== 24);
1353 bool isLogicalVecHalfWordShifter() const {
1354 if (!isLogicalVecShifter())
1357 // A logical vector shifter is a left shift by 0 or 8.
1358 unsigned Shift
= getShiftExtendAmount();
1359 return getShiftExtendType() == AArch64_AM::LSL
&&
1360 (Shift
== 0 || Shift
== 8);
1363 bool isMoveVecShifter() const {
1364 if (!isShiftExtend())
1367 // A logical vector shifter is a left shift by 8 or 16.
1368 unsigned Shift
= getShiftExtendAmount();
1369 return getShiftExtendType() == AArch64_AM::MSL
&&
1370 (Shift
== 8 || Shift
== 16);
1373 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1374 // to LDUR/STUR when the offset is not legal for the former but is for
1375 // the latter. As such, in addition to checking for being a legal unscaled
1376 // address, also check that it is not a legal scaled address. This avoids
1377 // ambiguity in the matcher.
1379 bool isSImm9OffsetFB() const {
1380 return isSImm
<9>() && !isUImm12Offset
<Width
/ 8>();
1383 bool isAdrpLabel() const {
1384 // Validation was handled during parsing, so we just sanity check that
1385 // something didn't go haywire.
1389 if (const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(Imm
.Val
)) {
1390 int64_t Val
= CE
->getValue();
1391 int64_t Min
= - (4096 * (1LL << (21 - 1)));
1392 int64_t Max
= 4096 * ((1LL << (21 - 1)) - 1);
1393 return (Val
% 4096) == 0 && Val
>= Min
&& Val
<= Max
;
1399 bool isAdrLabel() const {
1400 // Validation was handled during parsing, so we just sanity check that
1401 // something didn't go haywire.
1405 if (const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(Imm
.Val
)) {
1406 int64_t Val
= CE
->getValue();
1407 int64_t Min
= - (1LL << (21 - 1));
1408 int64_t Max
= ((1LL << (21 - 1)) - 1);
1409 return Val
>= Min
&& Val
<= Max
;
1415 void addExpr(MCInst
&Inst
, const MCExpr
*Expr
) const {
1416 // Add as immediates when possible. Null MCExpr = 0.
1418 Inst
.addOperand(MCOperand::createImm(0));
1419 else if (const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(Expr
))
1420 Inst
.addOperand(MCOperand::createImm(CE
->getValue()));
1422 Inst
.addOperand(MCOperand::createExpr(Expr
));
1425 void addRegOperands(MCInst
&Inst
, unsigned N
) const {
1426 assert(N
== 1 && "Invalid number of operands!");
1427 Inst
.addOperand(MCOperand::createReg(getReg()));
1430 void addGPR32as64Operands(MCInst
&Inst
, unsigned N
) const {
1431 assert(N
== 1 && "Invalid number of operands!");
1433 AArch64MCRegisterClasses
[AArch64::GPR64RegClassID
].contains(getReg()));
1435 const MCRegisterInfo
*RI
= Ctx
.getRegisterInfo();
1436 uint32_t Reg
= RI
->getRegClass(AArch64::GPR32RegClassID
).getRegister(
1437 RI
->getEncodingValue(getReg()));
1439 Inst
.addOperand(MCOperand::createReg(Reg
));
1442 void addGPR64as32Operands(MCInst
&Inst
, unsigned N
) const {
1443 assert(N
== 1 && "Invalid number of operands!");
1445 AArch64MCRegisterClasses
[AArch64::GPR32RegClassID
].contains(getReg()));
1447 const MCRegisterInfo
*RI
= Ctx
.getRegisterInfo();
1448 uint32_t Reg
= RI
->getRegClass(AArch64::GPR64RegClassID
).getRegister(
1449 RI
->getEncodingValue(getReg()));
1451 Inst
.addOperand(MCOperand::createReg(Reg
));
1454 template <int Width
>
1455 void addFPRasZPRRegOperands(MCInst
&Inst
, unsigned N
) const {
1458 case 8: Base
= AArch64::B0
; break;
1459 case 16: Base
= AArch64::H0
; break;
1460 case 32: Base
= AArch64::S0
; break;
1461 case 64: Base
= AArch64::D0
; break;
1462 case 128: Base
= AArch64::Q0
; break;
1464 llvm_unreachable("Unsupported width");
1466 Inst
.addOperand(MCOperand::createReg(AArch64::Z0
+ getReg() - Base
));
1469 void addVectorReg64Operands(MCInst
&Inst
, unsigned N
) const {
1470 assert(N
== 1 && "Invalid number of operands!");
1472 AArch64MCRegisterClasses
[AArch64::FPR128RegClassID
].contains(getReg()));
1473 Inst
.addOperand(MCOperand::createReg(AArch64::D0
+ getReg() - AArch64::Q0
));
1476 void addVectorReg128Operands(MCInst
&Inst
, unsigned N
) const {
1477 assert(N
== 1 && "Invalid number of operands!");
1479 AArch64MCRegisterClasses
[AArch64::FPR128RegClassID
].contains(getReg()));
1480 Inst
.addOperand(MCOperand::createReg(getReg()));
1483 void addVectorRegLoOperands(MCInst
&Inst
, unsigned N
) const {
1484 assert(N
== 1 && "Invalid number of operands!");
1485 Inst
.addOperand(MCOperand::createReg(getReg()));
1488 enum VecListIndexType
{
1489 VecListIdx_DReg
= 0,
1490 VecListIdx_QReg
= 1,
1491 VecListIdx_ZReg
= 2,
1494 template <VecListIndexType RegTy
, unsigned NumRegs
>
1495 void addVectorListOperands(MCInst
&Inst
, unsigned N
) const {
1496 assert(N
== 1 && "Invalid number of operands!");
1497 static const unsigned FirstRegs
[][5] = {
1498 /* DReg */ { AArch64::Q0
,
1499 AArch64::D0
, AArch64::D0_D1
,
1500 AArch64::D0_D1_D2
, AArch64::D0_D1_D2_D3
},
1501 /* QReg */ { AArch64::Q0
,
1502 AArch64::Q0
, AArch64::Q0_Q1
,
1503 AArch64::Q0_Q1_Q2
, AArch64::Q0_Q1_Q2_Q3
},
1504 /* ZReg */ { AArch64::Z0
,
1505 AArch64::Z0
, AArch64::Z0_Z1
,
1506 AArch64::Z0_Z1_Z2
, AArch64::Z0_Z1_Z2_Z3
}
1509 assert((RegTy
!= VecListIdx_ZReg
|| NumRegs
<= 4) &&
1510 " NumRegs must be <= 4 for ZRegs");
1512 unsigned FirstReg
= FirstRegs
[(unsigned)RegTy
][NumRegs
];
1513 Inst
.addOperand(MCOperand::createReg(FirstReg
+ getVectorListStart() -
1514 FirstRegs
[(unsigned)RegTy
][0]));
1517 void addVectorIndexOperands(MCInst
&Inst
, unsigned N
) const {
1518 assert(N
== 1 && "Invalid number of operands!");
1519 Inst
.addOperand(MCOperand::createImm(getVectorIndex()));
1522 template <unsigned ImmIs0
, unsigned ImmIs1
>
1523 void addExactFPImmOperands(MCInst
&Inst
, unsigned N
) const {
1524 assert(N
== 1 && "Invalid number of operands!");
1525 assert(bool(isExactFPImm
<ImmIs0
, ImmIs1
>()) && "Invalid operand");
1526 Inst
.addOperand(MCOperand::createImm(bool(isExactFPImm
<ImmIs1
>())));
1529 void addImmOperands(MCInst
&Inst
, unsigned N
) const {
1530 assert(N
== 1 && "Invalid number of operands!");
1531 // If this is a pageoff symrefexpr with an addend, adjust the addend
1532 // to be only the page-offset portion. Otherwise, just add the expr
1534 addExpr(Inst
, getImm());
1537 template <int Shift
>
1538 void addImmWithOptionalShiftOperands(MCInst
&Inst
, unsigned N
) const {
1539 assert(N
== 2 && "Invalid number of operands!");
1540 if (auto ShiftedVal
= getShiftedVal
<Shift
>()) {
1541 Inst
.addOperand(MCOperand::createImm(ShiftedVal
->first
));
1542 Inst
.addOperand(MCOperand::createImm(ShiftedVal
->second
));
1543 } else if (isShiftedImm()) {
1544 addExpr(Inst
, getShiftedImmVal());
1545 Inst
.addOperand(MCOperand::createImm(getShiftedImmShift()));
1547 addExpr(Inst
, getImm());
1548 Inst
.addOperand(MCOperand::createImm(0));
1552 template <int Shift
>
1553 void addImmNegWithOptionalShiftOperands(MCInst
&Inst
, unsigned N
) const {
1554 assert(N
== 2 && "Invalid number of operands!");
1555 if (auto ShiftedVal
= getShiftedVal
<Shift
>()) {
1556 Inst
.addOperand(MCOperand::createImm(-ShiftedVal
->first
));
1557 Inst
.addOperand(MCOperand::createImm(ShiftedVal
->second
));
1559 llvm_unreachable("Not a shifted negative immediate");
1562 void addCondCodeOperands(MCInst
&Inst
, unsigned N
) const {
1563 assert(N
== 1 && "Invalid number of operands!");
1564 Inst
.addOperand(MCOperand::createImm(getCondCode()));
1567 void addAdrpLabelOperands(MCInst
&Inst
, unsigned N
) const {
1568 assert(N
== 1 && "Invalid number of operands!");
1569 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(getImm());
1571 addExpr(Inst
, getImm());
1573 Inst
.addOperand(MCOperand::createImm(MCE
->getValue() >> 12));
1576 void addAdrLabelOperands(MCInst
&Inst
, unsigned N
) const {
1577 addImmOperands(Inst
, N
);
1581 void addUImm12OffsetOperands(MCInst
&Inst
, unsigned N
) const {
1582 assert(N
== 1 && "Invalid number of operands!");
1583 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(getImm());
1586 Inst
.addOperand(MCOperand::createExpr(getImm()));
1589 Inst
.addOperand(MCOperand::createImm(MCE
->getValue() / Scale
));
1592 void addUImm6Operands(MCInst
&Inst
, unsigned N
) const {
1593 assert(N
== 1 && "Invalid number of operands!");
1594 const MCConstantExpr
*MCE
= cast
<MCConstantExpr
>(getImm());
1595 Inst
.addOperand(MCOperand::createImm(MCE
->getValue()));
1598 template <int Scale
>
1599 void addImmScaledOperands(MCInst
&Inst
, unsigned N
) const {
1600 assert(N
== 1 && "Invalid number of operands!");
1601 const MCConstantExpr
*MCE
= cast
<MCConstantExpr
>(getImm());
1602 Inst
.addOperand(MCOperand::createImm(MCE
->getValue() / Scale
));
1605 template <typename T
>
1606 void addLogicalImmOperands(MCInst
&Inst
, unsigned N
) const {
1607 assert(N
== 1 && "Invalid number of operands!");
1608 const MCConstantExpr
*MCE
= cast
<MCConstantExpr
>(getImm());
1609 typename
std::make_unsigned
<T
>::type Val
= MCE
->getValue();
1610 uint64_t encoding
= AArch64_AM::encodeLogicalImmediate(Val
, sizeof(T
) * 8);
1611 Inst
.addOperand(MCOperand::createImm(encoding
));
1614 template <typename T
>
1615 void addLogicalImmNotOperands(MCInst
&Inst
, unsigned N
) const {
1616 assert(N
== 1 && "Invalid number of operands!");
1617 const MCConstantExpr
*MCE
= cast
<MCConstantExpr
>(getImm());
1618 typename
std::make_unsigned
<T
>::type Val
= ~MCE
->getValue();
1619 uint64_t encoding
= AArch64_AM::encodeLogicalImmediate(Val
, sizeof(T
) * 8);
1620 Inst
.addOperand(MCOperand::createImm(encoding
));
1623 void addSIMDImmType10Operands(MCInst
&Inst
, unsigned N
) const {
1624 assert(N
== 1 && "Invalid number of operands!");
1625 const MCConstantExpr
*MCE
= cast
<MCConstantExpr
>(getImm());
1626 uint64_t encoding
= AArch64_AM::encodeAdvSIMDModImmType10(MCE
->getValue());
1627 Inst
.addOperand(MCOperand::createImm(encoding
));
1630 void addBranchTarget26Operands(MCInst
&Inst
, unsigned N
) const {
1631 // Branch operands don't encode the low bits, so shift them off
1632 // here. If it's a label, however, just put it on directly as there's
1633 // not enough information now to do anything.
1634 assert(N
== 1 && "Invalid number of operands!");
1635 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(getImm());
1637 addExpr(Inst
, getImm());
1640 assert(MCE
&& "Invalid constant immediate operand!");
1641 Inst
.addOperand(MCOperand::createImm(MCE
->getValue() >> 2));
1644 void addPCRelLabel19Operands(MCInst
&Inst
, unsigned N
) const {
1645 // Branch operands don't encode the low bits, so shift them off
1646 // here. If it's a label, however, just put it on directly as there's
1647 // not enough information now to do anything.
1648 assert(N
== 1 && "Invalid number of operands!");
1649 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(getImm());
1651 addExpr(Inst
, getImm());
1654 assert(MCE
&& "Invalid constant immediate operand!");
1655 Inst
.addOperand(MCOperand::createImm(MCE
->getValue() >> 2));
1658 void addBranchTarget14Operands(MCInst
&Inst
, unsigned N
) const {
1659 // Branch operands don't encode the low bits, so shift them off
1660 // here. If it's a label, however, just put it on directly as there's
1661 // not enough information now to do anything.
1662 assert(N
== 1 && "Invalid number of operands!");
1663 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(getImm());
1665 addExpr(Inst
, getImm());
1668 assert(MCE
&& "Invalid constant immediate operand!");
1669 Inst
.addOperand(MCOperand::createImm(MCE
->getValue() >> 2));
1672 void addFPImmOperands(MCInst
&Inst
, unsigned N
) const {
1673 assert(N
== 1 && "Invalid number of operands!");
1674 Inst
.addOperand(MCOperand::createImm(
1675 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1678 void addBarrierOperands(MCInst
&Inst
, unsigned N
) const {
1679 assert(N
== 1 && "Invalid number of operands!");
1680 Inst
.addOperand(MCOperand::createImm(getBarrier()));
1683 void addMRSSystemRegisterOperands(MCInst
&Inst
, unsigned N
) const {
1684 assert(N
== 1 && "Invalid number of operands!");
1686 Inst
.addOperand(MCOperand::createImm(SysReg
.MRSReg
));
1689 void addMSRSystemRegisterOperands(MCInst
&Inst
, unsigned N
) const {
1690 assert(N
== 1 && "Invalid number of operands!");
1692 Inst
.addOperand(MCOperand::createImm(SysReg
.MSRReg
));
1695 void addSystemPStateFieldWithImm0_1Operands(MCInst
&Inst
, unsigned N
) const {
1696 assert(N
== 1 && "Invalid number of operands!");
1698 Inst
.addOperand(MCOperand::createImm(SysReg
.PStateField
));
1701 void addSystemPStateFieldWithImm0_15Operands(MCInst
&Inst
, unsigned N
) const {
1702 assert(N
== 1 && "Invalid number of operands!");
1704 Inst
.addOperand(MCOperand::createImm(SysReg
.PStateField
));
1707 void addSysCROperands(MCInst
&Inst
, unsigned N
) const {
1708 assert(N
== 1 && "Invalid number of operands!");
1709 Inst
.addOperand(MCOperand::createImm(getSysCR()));
1712 void addPrefetchOperands(MCInst
&Inst
, unsigned N
) const {
1713 assert(N
== 1 && "Invalid number of operands!");
1714 Inst
.addOperand(MCOperand::createImm(getPrefetch()));
1717 void addPSBHintOperands(MCInst
&Inst
, unsigned N
) const {
1718 assert(N
== 1 && "Invalid number of operands!");
1719 Inst
.addOperand(MCOperand::createImm(getPSBHint()));
1722 void addBTIHintOperands(MCInst
&Inst
, unsigned N
) const {
1723 assert(N
== 1 && "Invalid number of operands!");
1724 Inst
.addOperand(MCOperand::createImm(getBTIHint()));
1727 void addShifterOperands(MCInst
&Inst
, unsigned N
) const {
1728 assert(N
== 1 && "Invalid number of operands!");
1730 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1731 Inst
.addOperand(MCOperand::createImm(Imm
));
1734 void addExtendOperands(MCInst
&Inst
, unsigned N
) const {
1735 assert(N
== 1 && "Invalid number of operands!");
1736 AArch64_AM::ShiftExtendType ET
= getShiftExtendType();
1737 if (ET
== AArch64_AM::LSL
) ET
= AArch64_AM::UXTW
;
1738 unsigned Imm
= AArch64_AM::getArithExtendImm(ET
, getShiftExtendAmount());
1739 Inst
.addOperand(MCOperand::createImm(Imm
));
1742 void addExtend64Operands(MCInst
&Inst
, unsigned N
) const {
1743 assert(N
== 1 && "Invalid number of operands!");
1744 AArch64_AM::ShiftExtendType ET
= getShiftExtendType();
1745 if (ET
== AArch64_AM::LSL
) ET
= AArch64_AM::UXTX
;
1746 unsigned Imm
= AArch64_AM::getArithExtendImm(ET
, getShiftExtendAmount());
1747 Inst
.addOperand(MCOperand::createImm(Imm
));
1750 void addMemExtendOperands(MCInst
&Inst
, unsigned N
) const {
1751 assert(N
== 2 && "Invalid number of operands!");
1752 AArch64_AM::ShiftExtendType ET
= getShiftExtendType();
1753 bool IsSigned
= ET
== AArch64_AM::SXTW
|| ET
== AArch64_AM::SXTX
;
1754 Inst
.addOperand(MCOperand::createImm(IsSigned
));
1755 Inst
.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1758 // For 8-bit load/store instructions with a register offset, both the
1759 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1760 // they're disambiguated by whether the shift was explicit or implicit rather
1762 void addMemExtend8Operands(MCInst
&Inst
, unsigned N
) const {
1763 assert(N
== 2 && "Invalid number of operands!");
1764 AArch64_AM::ShiftExtendType ET
= getShiftExtendType();
1765 bool IsSigned
= ET
== AArch64_AM::SXTW
|| ET
== AArch64_AM::SXTX
;
1766 Inst
.addOperand(MCOperand::createImm(IsSigned
));
1767 Inst
.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1771 void addMOVZMovAliasOperands(MCInst
&Inst
, unsigned N
) const {
1772 assert(N
== 1 && "Invalid number of operands!");
1774 const MCConstantExpr
*CE
= cast
<MCConstantExpr
>(getImm());
1775 uint64_t Value
= CE
->getValue();
1776 Inst
.addOperand(MCOperand::createImm((Value
>> Shift
) & 0xffff));
1780 void addMOVNMovAliasOperands(MCInst
&Inst
, unsigned N
) const {
1781 assert(N
== 1 && "Invalid number of operands!");
1783 const MCConstantExpr
*CE
= cast
<MCConstantExpr
>(getImm());
1784 uint64_t Value
= CE
->getValue();
1785 Inst
.addOperand(MCOperand::createImm((~Value
>> Shift
) & 0xffff));
1788 void addComplexRotationEvenOperands(MCInst
&Inst
, unsigned N
) const {
1789 assert(N
== 1 && "Invalid number of operands!");
1790 const MCConstantExpr
*MCE
= cast
<MCConstantExpr
>(getImm());
1791 Inst
.addOperand(MCOperand::createImm(MCE
->getValue() / 90));
1794 void addComplexRotationOddOperands(MCInst
&Inst
, unsigned N
) const {
1795 assert(N
== 1 && "Invalid number of operands!");
1796 const MCConstantExpr
*MCE
= cast
<MCConstantExpr
>(getImm());
1797 Inst
.addOperand(MCOperand::createImm((MCE
->getValue() - 90) / 180));
1800 void print(raw_ostream
&OS
) const override
;
1802 static std::unique_ptr
<AArch64Operand
>
1803 CreateToken(StringRef Str
, bool IsSuffix
, SMLoc S
, MCContext
&Ctx
) {
1804 auto Op
= std::make_unique
<AArch64Operand
>(k_Token
, Ctx
);
1805 Op
->Tok
.Data
= Str
.data();
1806 Op
->Tok
.Length
= Str
.size();
1807 Op
->Tok
.IsSuffix
= IsSuffix
;
1813 static std::unique_ptr
<AArch64Operand
>
1814 CreateReg(unsigned RegNum
, RegKind Kind
, SMLoc S
, SMLoc E
, MCContext
&Ctx
,
1815 RegConstraintEqualityTy EqTy
= RegConstraintEqualityTy::EqualsReg
,
1816 AArch64_AM::ShiftExtendType ExtTy
= AArch64_AM::LSL
,
1817 unsigned ShiftAmount
= 0,
1818 unsigned HasExplicitAmount
= false) {
1819 auto Op
= std::make_unique
<AArch64Operand
>(k_Register
, Ctx
);
1820 Op
->Reg
.RegNum
= RegNum
;
1821 Op
->Reg
.Kind
= Kind
;
1822 Op
->Reg
.ElementWidth
= 0;
1823 Op
->Reg
.EqualityTy
= EqTy
;
1824 Op
->Reg
.ShiftExtend
.Type
= ExtTy
;
1825 Op
->Reg
.ShiftExtend
.Amount
= ShiftAmount
;
1826 Op
->Reg
.ShiftExtend
.HasExplicitAmount
= HasExplicitAmount
;
1832 static std::unique_ptr
<AArch64Operand
>
1833 CreateVectorReg(unsigned RegNum
, RegKind Kind
, unsigned ElementWidth
,
1834 SMLoc S
, SMLoc E
, MCContext
&Ctx
,
1835 AArch64_AM::ShiftExtendType ExtTy
= AArch64_AM::LSL
,
1836 unsigned ShiftAmount
= 0,
1837 unsigned HasExplicitAmount
= false) {
1838 assert((Kind
== RegKind::NeonVector
|| Kind
== RegKind::SVEDataVector
||
1839 Kind
== RegKind::SVEPredicateVector
) &&
1840 "Invalid vector kind");
1841 auto Op
= CreateReg(RegNum
, Kind
, S
, E
, Ctx
, EqualsReg
, ExtTy
, ShiftAmount
,
1843 Op
->Reg
.ElementWidth
= ElementWidth
;
1847 static std::unique_ptr
<AArch64Operand
>
1848 CreateVectorList(unsigned RegNum
, unsigned Count
, unsigned NumElements
,
1849 unsigned ElementWidth
, RegKind RegisterKind
, SMLoc S
, SMLoc E
,
1851 auto Op
= std::make_unique
<AArch64Operand
>(k_VectorList
, Ctx
);
1852 Op
->VectorList
.RegNum
= RegNum
;
1853 Op
->VectorList
.Count
= Count
;
1854 Op
->VectorList
.NumElements
= NumElements
;
1855 Op
->VectorList
.ElementWidth
= ElementWidth
;
1856 Op
->VectorList
.RegisterKind
= RegisterKind
;
1862 static std::unique_ptr
<AArch64Operand
>
1863 CreateVectorIndex(unsigned Idx
, SMLoc S
, SMLoc E
, MCContext
&Ctx
) {
1864 auto Op
= std::make_unique
<AArch64Operand
>(k_VectorIndex
, Ctx
);
1865 Op
->VectorIndex
.Val
= Idx
;
1871 static std::unique_ptr
<AArch64Operand
> CreateImm(const MCExpr
*Val
, SMLoc S
,
1872 SMLoc E
, MCContext
&Ctx
) {
1873 auto Op
= std::make_unique
<AArch64Operand
>(k_Immediate
, Ctx
);
1880 static std::unique_ptr
<AArch64Operand
> CreateShiftedImm(const MCExpr
*Val
,
1881 unsigned ShiftAmount
,
1884 auto Op
= std::make_unique
<AArch64Operand
>(k_ShiftedImm
, Ctx
);
1885 Op
->ShiftedImm
.Val
= Val
;
1886 Op
->ShiftedImm
.ShiftAmount
= ShiftAmount
;
1892 static std::unique_ptr
<AArch64Operand
>
1893 CreateCondCode(AArch64CC::CondCode Code
, SMLoc S
, SMLoc E
, MCContext
&Ctx
) {
1894 auto Op
= std::make_unique
<AArch64Operand
>(k_CondCode
, Ctx
);
1895 Op
->CondCode
.Code
= Code
;
1901 static std::unique_ptr
<AArch64Operand
>
1902 CreateFPImm(APFloat Val
, bool IsExact
, SMLoc S
, MCContext
&Ctx
) {
1903 auto Op
= std::make_unique
<AArch64Operand
>(k_FPImm
, Ctx
);
1904 Op
->FPImm
.Val
= Val
.bitcastToAPInt().getSExtValue();
1905 Op
->FPImm
.IsExact
= IsExact
;
1911 static std::unique_ptr
<AArch64Operand
> CreateBarrier(unsigned Val
,
1915 auto Op
= std::make_unique
<AArch64Operand
>(k_Barrier
, Ctx
);
1916 Op
->Barrier
.Val
= Val
;
1917 Op
->Barrier
.Data
= Str
.data();
1918 Op
->Barrier
.Length
= Str
.size();
1924 static std::unique_ptr
<AArch64Operand
> CreateSysReg(StringRef Str
, SMLoc S
,
1927 uint32_t PStateField
,
1929 auto Op
= std::make_unique
<AArch64Operand
>(k_SysReg
, Ctx
);
1930 Op
->SysReg
.Data
= Str
.data();
1931 Op
->SysReg
.Length
= Str
.size();
1932 Op
->SysReg
.MRSReg
= MRSReg
;
1933 Op
->SysReg
.MSRReg
= MSRReg
;
1934 Op
->SysReg
.PStateField
= PStateField
;
1940 static std::unique_ptr
<AArch64Operand
> CreateSysCR(unsigned Val
, SMLoc S
,
1941 SMLoc E
, MCContext
&Ctx
) {
1942 auto Op
= std::make_unique
<AArch64Operand
>(k_SysCR
, Ctx
);
1943 Op
->SysCRImm
.Val
= Val
;
1949 static std::unique_ptr
<AArch64Operand
> CreatePrefetch(unsigned Val
,
1953 auto Op
= std::make_unique
<AArch64Operand
>(k_Prefetch
, Ctx
);
1954 Op
->Prefetch
.Val
= Val
;
1955 Op
->Barrier
.Data
= Str
.data();
1956 Op
->Barrier
.Length
= Str
.size();
1962 static std::unique_ptr
<AArch64Operand
> CreatePSBHint(unsigned Val
,
1966 auto Op
= std::make_unique
<AArch64Operand
>(k_PSBHint
, Ctx
);
1967 Op
->PSBHint
.Val
= Val
;
1968 Op
->PSBHint
.Data
= Str
.data();
1969 Op
->PSBHint
.Length
= Str
.size();
1975 static std::unique_ptr
<AArch64Operand
> CreateBTIHint(unsigned Val
,
1979 auto Op
= std::make_unique
<AArch64Operand
>(k_BTIHint
, Ctx
);
1980 Op
->BTIHint
.Val
= Val
<< 1 | 32;
1981 Op
->BTIHint
.Data
= Str
.data();
1982 Op
->BTIHint
.Length
= Str
.size();
1988 static std::unique_ptr
<AArch64Operand
>
1989 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp
, unsigned Val
,
1990 bool HasExplicitAmount
, SMLoc S
, SMLoc E
, MCContext
&Ctx
) {
1991 auto Op
= std::make_unique
<AArch64Operand
>(k_ShiftExtend
, Ctx
);
1992 Op
->ShiftExtend
.Type
= ShOp
;
1993 Op
->ShiftExtend
.Amount
= Val
;
1994 Op
->ShiftExtend
.HasExplicitAmount
= HasExplicitAmount
;
2001 } // end anonymous namespace.
2003 void AArch64Operand::print(raw_ostream
&OS
) const {
2006 OS
<< "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2007 if (!getFPImmIsExact())
2012 StringRef Name
= getBarrierName();
2014 OS
<< "<barrier " << Name
<< ">";
2016 OS
<< "<barrier invalid #" << getBarrier() << ">";
2022 case k_ShiftedImm
: {
2023 unsigned Shift
= getShiftedImmShift();
2024 OS
<< "<shiftedimm ";
2025 OS
<< *getShiftedImmVal();
2026 OS
<< ", lsl #" << AArch64_AM::getShiftValue(Shift
) << ">";
2030 OS
<< "<condcode " << getCondCode() << ">";
2032 case k_VectorList
: {
2033 OS
<< "<vectorlist ";
2034 unsigned Reg
= getVectorListStart();
2035 for (unsigned i
= 0, e
= getVectorListCount(); i
!= e
; ++i
)
2036 OS
<< Reg
+ i
<< " ";
2041 OS
<< "<vectorindex " << getVectorIndex() << ">";
2044 OS
<< "<sysreg: " << getSysReg() << '>';
2047 OS
<< "'" << getToken() << "'";
2050 OS
<< "c" << getSysCR();
2053 StringRef Name
= getPrefetchName();
2055 OS
<< "<prfop " << Name
<< ">";
2057 OS
<< "<prfop invalid #" << getPrefetch() << ">";
2061 OS
<< getPSBHintName();
2064 OS
<< "<register " << getReg() << ">";
2065 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2069 OS
<< getBTIHintName();
2072 OS
<< "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2073 << getShiftExtendAmount();
2074 if (!hasShiftExtendAmount())
2081 /// @name Auto-generated Match Functions
2084 static unsigned MatchRegisterName(StringRef Name
);
2088 static unsigned MatchNeonVectorRegName(StringRef Name
) {
2089 return StringSwitch
<unsigned>(Name
.lower())
2090 .Case("v0", AArch64::Q0
)
2091 .Case("v1", AArch64::Q1
)
2092 .Case("v2", AArch64::Q2
)
2093 .Case("v3", AArch64::Q3
)
2094 .Case("v4", AArch64::Q4
)
2095 .Case("v5", AArch64::Q5
)
2096 .Case("v6", AArch64::Q6
)
2097 .Case("v7", AArch64::Q7
)
2098 .Case("v8", AArch64::Q8
)
2099 .Case("v9", AArch64::Q9
)
2100 .Case("v10", AArch64::Q10
)
2101 .Case("v11", AArch64::Q11
)
2102 .Case("v12", AArch64::Q12
)
2103 .Case("v13", AArch64::Q13
)
2104 .Case("v14", AArch64::Q14
)
2105 .Case("v15", AArch64::Q15
)
2106 .Case("v16", AArch64::Q16
)
2107 .Case("v17", AArch64::Q17
)
2108 .Case("v18", AArch64::Q18
)
2109 .Case("v19", AArch64::Q19
)
2110 .Case("v20", AArch64::Q20
)
2111 .Case("v21", AArch64::Q21
)
2112 .Case("v22", AArch64::Q22
)
2113 .Case("v23", AArch64::Q23
)
2114 .Case("v24", AArch64::Q24
)
2115 .Case("v25", AArch64::Q25
)
2116 .Case("v26", AArch64::Q26
)
2117 .Case("v27", AArch64::Q27
)
2118 .Case("v28", AArch64::Q28
)
2119 .Case("v29", AArch64::Q29
)
2120 .Case("v30", AArch64::Q30
)
2121 .Case("v31", AArch64::Q31
)
2125 /// Returns an optional pair of (#elements, element-width) if Suffix
2126 /// is a valid vector kind. Where the number of elements in a vector
2127 /// or the vector width is implicit or explicitly unknown (but still a
2128 /// valid suffix kind), 0 is used.
2129 static Optional
<std::pair
<int, int>> parseVectorKind(StringRef Suffix
,
2130 RegKind VectorKind
) {
2131 std::pair
<int, int> Res
= {-1, -1};
2133 switch (VectorKind
) {
2134 case RegKind::NeonVector
:
2136 StringSwitch
<std::pair
<int, int>>(Suffix
.lower())
2138 .Case(".1d", {1, 64})
2139 .Case(".1q", {1, 128})
2140 // '.2h' needed for fp16 scalar pairwise reductions
2141 .Case(".2h", {2, 16})
2142 .Case(".2s", {2, 32})
2143 .Case(".2d", {2, 64})
2144 // '.4b' is another special case for the ARMv8.2a dot product
2146 .Case(".4b", {4, 8})
2147 .Case(".4h", {4, 16})
2148 .Case(".4s", {4, 32})
2149 .Case(".8b", {8, 8})
2150 .Case(".8h", {8, 16})
2151 .Case(".16b", {16, 8})
2152 // Accept the width neutral ones, too, for verbose syntax. If those
2153 // aren't used in the right places, the token operand won't match so
2154 // all will work out.
2156 .Case(".h", {0, 16})
2157 .Case(".s", {0, 32})
2158 .Case(".d", {0, 64})
2161 case RegKind::SVEPredicateVector
:
2162 case RegKind::SVEDataVector
:
2163 Res
= StringSwitch
<std::pair
<int, int>>(Suffix
.lower())
2166 .Case(".h", {0, 16})
2167 .Case(".s", {0, 32})
2168 .Case(".d", {0, 64})
2169 .Case(".q", {0, 128})
2173 llvm_unreachable("Unsupported RegKind");
2176 if (Res
== std::make_pair(-1, -1))
2177 return Optional
<std::pair
<int, int>>();
2179 return Optional
<std::pair
<int, int>>(Res
);
2182 static bool isValidVectorKind(StringRef Suffix
, RegKind VectorKind
) {
2183 return parseVectorKind(Suffix
, VectorKind
).hasValue();
2186 static unsigned matchSVEDataVectorRegName(StringRef Name
) {
2187 return StringSwitch
<unsigned>(Name
.lower())
2188 .Case("z0", AArch64::Z0
)
2189 .Case("z1", AArch64::Z1
)
2190 .Case("z2", AArch64::Z2
)
2191 .Case("z3", AArch64::Z3
)
2192 .Case("z4", AArch64::Z4
)
2193 .Case("z5", AArch64::Z5
)
2194 .Case("z6", AArch64::Z6
)
2195 .Case("z7", AArch64::Z7
)
2196 .Case("z8", AArch64::Z8
)
2197 .Case("z9", AArch64::Z9
)
2198 .Case("z10", AArch64::Z10
)
2199 .Case("z11", AArch64::Z11
)
2200 .Case("z12", AArch64::Z12
)
2201 .Case("z13", AArch64::Z13
)
2202 .Case("z14", AArch64::Z14
)
2203 .Case("z15", AArch64::Z15
)
2204 .Case("z16", AArch64::Z16
)
2205 .Case("z17", AArch64::Z17
)
2206 .Case("z18", AArch64::Z18
)
2207 .Case("z19", AArch64::Z19
)
2208 .Case("z20", AArch64::Z20
)
2209 .Case("z21", AArch64::Z21
)
2210 .Case("z22", AArch64::Z22
)
2211 .Case("z23", AArch64::Z23
)
2212 .Case("z24", AArch64::Z24
)
2213 .Case("z25", AArch64::Z25
)
2214 .Case("z26", AArch64::Z26
)
2215 .Case("z27", AArch64::Z27
)
2216 .Case("z28", AArch64::Z28
)
2217 .Case("z29", AArch64::Z29
)
2218 .Case("z30", AArch64::Z30
)
2219 .Case("z31", AArch64::Z31
)
2223 static unsigned matchSVEPredicateVectorRegName(StringRef Name
) {
2224 return StringSwitch
<unsigned>(Name
.lower())
2225 .Case("p0", AArch64::P0
)
2226 .Case("p1", AArch64::P1
)
2227 .Case("p2", AArch64::P2
)
2228 .Case("p3", AArch64::P3
)
2229 .Case("p4", AArch64::P4
)
2230 .Case("p5", AArch64::P5
)
2231 .Case("p6", AArch64::P6
)
2232 .Case("p7", AArch64::P7
)
2233 .Case("p8", AArch64::P8
)
2234 .Case("p9", AArch64::P9
)
2235 .Case("p10", AArch64::P10
)
2236 .Case("p11", AArch64::P11
)
2237 .Case("p12", AArch64::P12
)
2238 .Case("p13", AArch64::P13
)
2239 .Case("p14", AArch64::P14
)
2240 .Case("p15", AArch64::P15
)
2244 bool AArch64AsmParser::ParseRegister(unsigned &RegNo
, SMLoc
&StartLoc
,
2246 StartLoc
= getLoc();
2247 auto Res
= tryParseScalarRegister(RegNo
);
2248 EndLoc
= SMLoc::getFromPointer(getLoc().getPointer() - 1);
2249 return Res
!= MatchOperand_Success
;
2252 // Matches a register name or register alias previously defined by '.req'
2253 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name
,
2255 unsigned RegNum
= 0;
2256 if ((RegNum
= matchSVEDataVectorRegName(Name
)))
2257 return Kind
== RegKind::SVEDataVector
? RegNum
: 0;
2259 if ((RegNum
= matchSVEPredicateVectorRegName(Name
)))
2260 return Kind
== RegKind::SVEPredicateVector
? RegNum
: 0;
2262 if ((RegNum
= MatchNeonVectorRegName(Name
)))
2263 return Kind
== RegKind::NeonVector
? RegNum
: 0;
2265 // The parsed register must be of RegKind Scalar
2266 if ((RegNum
= MatchRegisterName(Name
)))
2267 return Kind
== RegKind::Scalar
? RegNum
: 0;
2270 // Handle a few common aliases of registers.
2271 if (auto RegNum
= StringSwitch
<unsigned>(Name
.lower())
2272 .Case("fp", AArch64::FP
)
2273 .Case("lr", AArch64::LR
)
2274 .Case("x31", AArch64::XZR
)
2275 .Case("w31", AArch64::WZR
)
2277 return Kind
== RegKind::Scalar
? RegNum
: 0;
2279 // Check for aliases registered via .req. Canonicalize to lower case.
2280 // That's more consistent since register names are case insensitive, and
2281 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2282 auto Entry
= RegisterReqs
.find(Name
.lower());
2283 if (Entry
== RegisterReqs
.end())
2286 // set RegNum if the match is the right kind of register
2287 if (Kind
== Entry
->getValue().first
)
2288 RegNum
= Entry
->getValue().second
;
2293 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2294 /// Identifier when called, and if it is a register name the token is eaten and
2295 /// the register is added to the operand list.
2296 OperandMatchResultTy
2297 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum
) {
2298 MCAsmParser
&Parser
= getParser();
2299 const AsmToken
&Tok
= Parser
.getTok();
2300 if (Tok
.isNot(AsmToken::Identifier
))
2301 return MatchOperand_NoMatch
;
2303 std::string lowerCase
= Tok
.getString().lower();
2304 unsigned Reg
= matchRegisterNameAlias(lowerCase
, RegKind::Scalar
);
2306 return MatchOperand_NoMatch
;
2309 Parser
.Lex(); // Eat identifier token.
2310 return MatchOperand_Success
;
2313 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2314 OperandMatchResultTy
2315 AArch64AsmParser::tryParseSysCROperand(OperandVector
&Operands
) {
2316 MCAsmParser
&Parser
= getParser();
2319 if (Parser
.getTok().isNot(AsmToken::Identifier
)) {
2320 Error(S
, "Expected cN operand where 0 <= N <= 15");
2321 return MatchOperand_ParseFail
;
2324 StringRef Tok
= Parser
.getTok().getIdentifier();
2325 if (Tok
[0] != 'c' && Tok
[0] != 'C') {
2326 Error(S
, "Expected cN operand where 0 <= N <= 15");
2327 return MatchOperand_ParseFail
;
2331 bool BadNum
= Tok
.drop_front().getAsInteger(10, CRNum
);
2332 if (BadNum
|| CRNum
> 15) {
2333 Error(S
, "Expected cN operand where 0 <= N <= 15");
2334 return MatchOperand_ParseFail
;
2337 Parser
.Lex(); // Eat identifier token.
2339 AArch64Operand::CreateSysCR(CRNum
, S
, getLoc(), getContext()));
2340 return MatchOperand_Success
;
2343 /// tryParsePrefetch - Try to parse a prefetch operand.
2344 template <bool IsSVEPrefetch
>
2345 OperandMatchResultTy
2346 AArch64AsmParser::tryParsePrefetch(OperandVector
&Operands
) {
2347 MCAsmParser
&Parser
= getParser();
2349 const AsmToken
&Tok
= Parser
.getTok();
2351 auto LookupByName
= [](StringRef N
) {
2352 if (IsSVEPrefetch
) {
2353 if (auto Res
= AArch64SVEPRFM::lookupSVEPRFMByName(N
))
2354 return Optional
<unsigned>(Res
->Encoding
);
2355 } else if (auto Res
= AArch64PRFM::lookupPRFMByName(N
))
2356 return Optional
<unsigned>(Res
->Encoding
);
2357 return Optional
<unsigned>();
2360 auto LookupByEncoding
= [](unsigned E
) {
2361 if (IsSVEPrefetch
) {
2362 if (auto Res
= AArch64SVEPRFM::lookupSVEPRFMByEncoding(E
))
2363 return Optional
<StringRef
>(Res
->Name
);
2364 } else if (auto Res
= AArch64PRFM::lookupPRFMByEncoding(E
))
2365 return Optional
<StringRef
>(Res
->Name
);
2366 return Optional
<StringRef
>();
2368 unsigned MaxVal
= IsSVEPrefetch
? 15 : 31;
2370 // Either an identifier for named values or a 5-bit immediate.
2371 // Eat optional hash.
2372 if (parseOptionalToken(AsmToken::Hash
) ||
2373 Tok
.is(AsmToken::Integer
)) {
2374 const MCExpr
*ImmVal
;
2375 if (getParser().parseExpression(ImmVal
))
2376 return MatchOperand_ParseFail
;
2378 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(ImmVal
);
2380 TokError("immediate value expected for prefetch operand");
2381 return MatchOperand_ParseFail
;
2383 unsigned prfop
= MCE
->getValue();
2384 if (prfop
> MaxVal
) {
2385 TokError("prefetch operand out of range, [0," + utostr(MaxVal
) +
2387 return MatchOperand_ParseFail
;
2390 auto PRFM
= LookupByEncoding(MCE
->getValue());
2391 Operands
.push_back(AArch64Operand::CreatePrefetch(
2392 prfop
, PRFM
.getValueOr(""), S
, getContext()));
2393 return MatchOperand_Success
;
2396 if (Tok
.isNot(AsmToken::Identifier
)) {
2397 TokError("prefetch hint expected");
2398 return MatchOperand_ParseFail
;
2401 auto PRFM
= LookupByName(Tok
.getString());
2403 TokError("prefetch hint expected");
2404 return MatchOperand_ParseFail
;
2407 Parser
.Lex(); // Eat identifier token.
2408 Operands
.push_back(AArch64Operand::CreatePrefetch(
2409 *PRFM
, Tok
.getString(), S
, getContext()));
2410 return MatchOperand_Success
;
2413 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2414 OperandMatchResultTy
2415 AArch64AsmParser::tryParsePSBHint(OperandVector
&Operands
) {
2416 MCAsmParser
&Parser
= getParser();
2418 const AsmToken
&Tok
= Parser
.getTok();
2419 if (Tok
.isNot(AsmToken::Identifier
)) {
2420 TokError("invalid operand for instruction");
2421 return MatchOperand_ParseFail
;
2424 auto PSB
= AArch64PSBHint::lookupPSBByName(Tok
.getString());
2426 TokError("invalid operand for instruction");
2427 return MatchOperand_ParseFail
;
2430 Parser
.Lex(); // Eat identifier token.
2431 Operands
.push_back(AArch64Operand::CreatePSBHint(
2432 PSB
->Encoding
, Tok
.getString(), S
, getContext()));
2433 return MatchOperand_Success
;
2436 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2437 OperandMatchResultTy
2438 AArch64AsmParser::tryParseBTIHint(OperandVector
&Operands
) {
2439 MCAsmParser
&Parser
= getParser();
2441 const AsmToken
&Tok
= Parser
.getTok();
2442 if (Tok
.isNot(AsmToken::Identifier
)) {
2443 TokError("invalid operand for instruction");
2444 return MatchOperand_ParseFail
;
2447 auto BTI
= AArch64BTIHint::lookupBTIByName(Tok
.getString());
2449 TokError("invalid operand for instruction");
2450 return MatchOperand_ParseFail
;
2453 Parser
.Lex(); // Eat identifier token.
2454 Operands
.push_back(AArch64Operand::CreateBTIHint(
2455 BTI
->Encoding
, Tok
.getString(), S
, getContext()));
2456 return MatchOperand_Success
;
2459 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2461 OperandMatchResultTy
2462 AArch64AsmParser::tryParseAdrpLabel(OperandVector
&Operands
) {
2463 MCAsmParser
&Parser
= getParser();
2465 const MCExpr
*Expr
= nullptr;
2467 if (Parser
.getTok().is(AsmToken::Hash
)) {
2468 Parser
.Lex(); // Eat hash token.
2471 if (parseSymbolicImmVal(Expr
))
2472 return MatchOperand_ParseFail
;
2474 AArch64MCExpr::VariantKind ELFRefKind
;
2475 MCSymbolRefExpr::VariantKind DarwinRefKind
;
2477 if (classifySymbolRef(Expr
, ELFRefKind
, DarwinRefKind
, Addend
)) {
2478 if (DarwinRefKind
== MCSymbolRefExpr::VK_None
&&
2479 ELFRefKind
== AArch64MCExpr::VK_INVALID
) {
2480 // No modifier was specified at all; this is the syntax for an ELF basic
2481 // ADRP relocation (unfortunately).
2483 AArch64MCExpr::create(Expr
, AArch64MCExpr::VK_ABS_PAGE
, getContext());
2484 } else if ((DarwinRefKind
== MCSymbolRefExpr::VK_GOTPAGE
||
2485 DarwinRefKind
== MCSymbolRefExpr::VK_TLVPPAGE
) &&
2487 Error(S
, "gotpage label reference not allowed an addend");
2488 return MatchOperand_ParseFail
;
2489 } else if (DarwinRefKind
!= MCSymbolRefExpr::VK_PAGE
&&
2490 DarwinRefKind
!= MCSymbolRefExpr::VK_GOTPAGE
&&
2491 DarwinRefKind
!= MCSymbolRefExpr::VK_TLVPPAGE
&&
2492 ELFRefKind
!= AArch64MCExpr::VK_ABS_PAGE_NC
&&
2493 ELFRefKind
!= AArch64MCExpr::VK_GOT_PAGE
&&
2494 ELFRefKind
!= AArch64MCExpr::VK_GOTTPREL_PAGE
&&
2495 ELFRefKind
!= AArch64MCExpr::VK_TLSDESC_PAGE
) {
2496 // The operand must be an @page or @gotpage qualified symbolref.
2497 Error(S
, "page or gotpage label reference expected");
2498 return MatchOperand_ParseFail
;
2502 // We have either a label reference possibly with addend or an immediate. The
2503 // addend is a raw value here. The linker will adjust it to only reference the
2505 SMLoc E
= SMLoc::getFromPointer(getLoc().getPointer() - 1);
2506 Operands
.push_back(AArch64Operand::CreateImm(Expr
, S
, E
, getContext()));
2508 return MatchOperand_Success
;
2511 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2513 OperandMatchResultTy
2514 AArch64AsmParser::tryParseAdrLabel(OperandVector
&Operands
) {
2516 const MCExpr
*Expr
= nullptr;
2518 // Leave anything with a bracket to the default for SVE
2519 if (getParser().getTok().is(AsmToken::LBrac
))
2520 return MatchOperand_NoMatch
;
2522 if (getParser().getTok().is(AsmToken::Hash
))
2523 getParser().Lex(); // Eat hash token.
2525 if (parseSymbolicImmVal(Expr
))
2526 return MatchOperand_ParseFail
;
2528 AArch64MCExpr::VariantKind ELFRefKind
;
2529 MCSymbolRefExpr::VariantKind DarwinRefKind
;
2531 if (classifySymbolRef(Expr
, ELFRefKind
, DarwinRefKind
, Addend
)) {
2532 if (DarwinRefKind
== MCSymbolRefExpr::VK_None
&&
2533 ELFRefKind
== AArch64MCExpr::VK_INVALID
) {
2534 // No modifier was specified at all; this is the syntax for an ELF basic
2535 // ADR relocation (unfortunately).
2536 Expr
= AArch64MCExpr::create(Expr
, AArch64MCExpr::VK_ABS
, getContext());
2538 Error(S
, "unexpected adr label");
2539 return MatchOperand_ParseFail
;
2543 SMLoc E
= SMLoc::getFromPointer(getLoc().getPointer() - 1);
2544 Operands
.push_back(AArch64Operand::CreateImm(Expr
, S
, E
, getContext()));
2545 return MatchOperand_Success
;
2548 /// tryParseFPImm - A floating point immediate expression operand.
2549 template<bool AddFPZeroAsLiteral
>
2550 OperandMatchResultTy
2551 AArch64AsmParser::tryParseFPImm(OperandVector
&Operands
) {
2552 MCAsmParser
&Parser
= getParser();
2555 bool Hash
= parseOptionalToken(AsmToken::Hash
);
2557 // Handle negation, as that still comes through as a separate token.
2558 bool isNegative
= parseOptionalToken(AsmToken::Minus
);
2560 const AsmToken
&Tok
= Parser
.getTok();
2561 if (!Tok
.is(AsmToken::Real
) && !Tok
.is(AsmToken::Integer
)) {
2563 return MatchOperand_NoMatch
;
2564 TokError("invalid floating point immediate");
2565 return MatchOperand_ParseFail
;
2568 // Parse hexadecimal representation.
2569 if (Tok
.is(AsmToken::Integer
) && Tok
.getString().startswith("0x")) {
2570 if (Tok
.getIntVal() > 255 || isNegative
) {
2571 TokError("encoded floating point value out of range");
2572 return MatchOperand_ParseFail
;
2575 APFloat
F((double)AArch64_AM::getFPImmFloat(Tok
.getIntVal()));
2577 AArch64Operand::CreateFPImm(F
, true, S
, getContext()));
2579 // Parse FP representation.
2580 APFloat
RealVal(APFloat::IEEEdouble());
2582 RealVal
.convertFromString(Tok
.getString(), APFloat::rmTowardZero
);
2583 if (errorToBool(StatusOrErr
.takeError())) {
2584 TokError("invalid floating point representation");
2585 return MatchOperand_ParseFail
;
2589 RealVal
.changeSign();
2591 if (AddFPZeroAsLiteral
&& RealVal
.isPosZero()) {
2593 AArch64Operand::CreateToken("#0", false, S
, getContext()));
2595 AArch64Operand::CreateToken(".0", false, S
, getContext()));
2597 Operands
.push_back(AArch64Operand::CreateFPImm(
2598 RealVal
, *StatusOrErr
== APFloat::opOK
, S
, getContext()));
2601 Parser
.Lex(); // Eat the token.
2603 return MatchOperand_Success
;
2606 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2607 /// a shift suffix, for example '#1, lsl #12'.
2608 OperandMatchResultTy
2609 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector
&Operands
) {
2610 MCAsmParser
&Parser
= getParser();
2613 if (Parser
.getTok().is(AsmToken::Hash
))
2614 Parser
.Lex(); // Eat '#'
2615 else if (Parser
.getTok().isNot(AsmToken::Integer
))
2616 // Operand should start from # or should be integer, emit error otherwise.
2617 return MatchOperand_NoMatch
;
2619 const MCExpr
*Imm
= nullptr;
2620 if (parseSymbolicImmVal(Imm
))
2621 return MatchOperand_ParseFail
;
2622 else if (Parser
.getTok().isNot(AsmToken::Comma
)) {
2623 SMLoc E
= Parser
.getTok().getLoc();
2625 AArch64Operand::CreateImm(Imm
, S
, E
, getContext()));
2626 return MatchOperand_Success
;
2632 // The optional operand must be "lsl #N" where N is non-negative.
2633 if (!Parser
.getTok().is(AsmToken::Identifier
) ||
2634 !Parser
.getTok().getIdentifier().equals_lower("lsl")) {
2635 Error(Parser
.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2636 return MatchOperand_ParseFail
;
2642 parseOptionalToken(AsmToken::Hash
);
2644 if (Parser
.getTok().isNot(AsmToken::Integer
)) {
2645 Error(Parser
.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2646 return MatchOperand_ParseFail
;
2649 int64_t ShiftAmount
= Parser
.getTok().getIntVal();
2651 if (ShiftAmount
< 0) {
2652 Error(Parser
.getTok().getLoc(), "positive shift amount required");
2653 return MatchOperand_ParseFail
;
2655 Parser
.Lex(); // Eat the number
2657 // Just in case the optional lsl #0 is used for immediates other than zero.
2658 if (ShiftAmount
== 0 && Imm
!= nullptr) {
2659 SMLoc E
= Parser
.getTok().getLoc();
2660 Operands
.push_back(AArch64Operand::CreateImm(Imm
, S
, E
, getContext()));
2661 return MatchOperand_Success
;
2664 SMLoc E
= Parser
.getTok().getLoc();
2665 Operands
.push_back(AArch64Operand::CreateShiftedImm(Imm
, ShiftAmount
,
2666 S
, E
, getContext()));
2667 return MatchOperand_Success
;
2670 /// parseCondCodeString - Parse a Condition Code string.
2671 AArch64CC::CondCode
AArch64AsmParser::parseCondCodeString(StringRef Cond
) {
2672 AArch64CC::CondCode CC
= StringSwitch
<AArch64CC::CondCode
>(Cond
.lower())
2673 .Case("eq", AArch64CC::EQ
)
2674 .Case("ne", AArch64CC::NE
)
2675 .Case("cs", AArch64CC::HS
)
2676 .Case("hs", AArch64CC::HS
)
2677 .Case("cc", AArch64CC::LO
)
2678 .Case("lo", AArch64CC::LO
)
2679 .Case("mi", AArch64CC::MI
)
2680 .Case("pl", AArch64CC::PL
)
2681 .Case("vs", AArch64CC::VS
)
2682 .Case("vc", AArch64CC::VC
)
2683 .Case("hi", AArch64CC::HI
)
2684 .Case("ls", AArch64CC::LS
)
2685 .Case("ge", AArch64CC::GE
)
2686 .Case("lt", AArch64CC::LT
)
2687 .Case("gt", AArch64CC::GT
)
2688 .Case("le", AArch64CC::LE
)
2689 .Case("al", AArch64CC::AL
)
2690 .Case("nv", AArch64CC::NV
)
2691 .Default(AArch64CC::Invalid
);
2693 if (CC
== AArch64CC::Invalid
&&
2694 getSTI().getFeatureBits()[AArch64::FeatureSVE
])
2695 CC
= StringSwitch
<AArch64CC::CondCode
>(Cond
.lower())
2696 .Case("none", AArch64CC::EQ
)
2697 .Case("any", AArch64CC::NE
)
2698 .Case("nlast", AArch64CC::HS
)
2699 .Case("last", AArch64CC::LO
)
2700 .Case("first", AArch64CC::MI
)
2701 .Case("nfrst", AArch64CC::PL
)
2702 .Case("pmore", AArch64CC::HI
)
2703 .Case("plast", AArch64CC::LS
)
2704 .Case("tcont", AArch64CC::GE
)
2705 .Case("tstop", AArch64CC::LT
)
2706 .Default(AArch64CC::Invalid
);
2711 /// parseCondCode - Parse a Condition Code operand.
2712 bool AArch64AsmParser::parseCondCode(OperandVector
&Operands
,
2713 bool invertCondCode
) {
2714 MCAsmParser
&Parser
= getParser();
2716 const AsmToken
&Tok
= Parser
.getTok();
2717 assert(Tok
.is(AsmToken::Identifier
) && "Token is not an Identifier");
2719 StringRef Cond
= Tok
.getString();
2720 AArch64CC::CondCode CC
= parseCondCodeString(Cond
);
2721 if (CC
== AArch64CC::Invalid
)
2722 return TokError("invalid condition code");
2723 Parser
.Lex(); // Eat identifier token.
2725 if (invertCondCode
) {
2726 if (CC
== AArch64CC::AL
|| CC
== AArch64CC::NV
)
2727 return TokError("condition codes AL and NV are invalid for this instruction");
2728 CC
= AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC
));
2732 AArch64Operand::CreateCondCode(CC
, S
, getLoc(), getContext()));
2736 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2737 /// them if present.
2738 OperandMatchResultTy
2739 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector
&Operands
) {
2740 MCAsmParser
&Parser
= getParser();
2741 const AsmToken
&Tok
= Parser
.getTok();
2742 std::string LowerID
= Tok
.getString().lower();
2743 AArch64_AM::ShiftExtendType ShOp
=
2744 StringSwitch
<AArch64_AM::ShiftExtendType
>(LowerID
)
2745 .Case("lsl", AArch64_AM::LSL
)
2746 .Case("lsr", AArch64_AM::LSR
)
2747 .Case("asr", AArch64_AM::ASR
)
2748 .Case("ror", AArch64_AM::ROR
)
2749 .Case("msl", AArch64_AM::MSL
)
2750 .Case("uxtb", AArch64_AM::UXTB
)
2751 .Case("uxth", AArch64_AM::UXTH
)
2752 .Case("uxtw", AArch64_AM::UXTW
)
2753 .Case("uxtx", AArch64_AM::UXTX
)
2754 .Case("sxtb", AArch64_AM::SXTB
)
2755 .Case("sxth", AArch64_AM::SXTH
)
2756 .Case("sxtw", AArch64_AM::SXTW
)
2757 .Case("sxtx", AArch64_AM::SXTX
)
2758 .Default(AArch64_AM::InvalidShiftExtend
);
2760 if (ShOp
== AArch64_AM::InvalidShiftExtend
)
2761 return MatchOperand_NoMatch
;
2763 SMLoc S
= Tok
.getLoc();
2766 bool Hash
= parseOptionalToken(AsmToken::Hash
);
2768 if (!Hash
&& getLexer().isNot(AsmToken::Integer
)) {
2769 if (ShOp
== AArch64_AM::LSL
|| ShOp
== AArch64_AM::LSR
||
2770 ShOp
== AArch64_AM::ASR
|| ShOp
== AArch64_AM::ROR
||
2771 ShOp
== AArch64_AM::MSL
) {
2772 // We expect a number here.
2773 TokError("expected #imm after shift specifier");
2774 return MatchOperand_ParseFail
;
2777 // "extend" type operations don't need an immediate, #0 is implicit.
2778 SMLoc E
= SMLoc::getFromPointer(getLoc().getPointer() - 1);
2780 AArch64Operand::CreateShiftExtend(ShOp
, 0, false, S
, E
, getContext()));
2781 return MatchOperand_Success
;
2784 // Make sure we do actually have a number, identifier or a parenthesized
2786 SMLoc E
= Parser
.getTok().getLoc();
2787 if (!Parser
.getTok().is(AsmToken::Integer
) &&
2788 !Parser
.getTok().is(AsmToken::LParen
) &&
2789 !Parser
.getTok().is(AsmToken::Identifier
)) {
2790 Error(E
, "expected integer shift amount");
2791 return MatchOperand_ParseFail
;
2794 const MCExpr
*ImmVal
;
2795 if (getParser().parseExpression(ImmVal
))
2796 return MatchOperand_ParseFail
;
2798 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(ImmVal
);
2800 Error(E
, "expected constant '#imm' after shift specifier");
2801 return MatchOperand_ParseFail
;
2804 E
= SMLoc::getFromPointer(getLoc().getPointer() - 1);
2805 Operands
.push_back(AArch64Operand::CreateShiftExtend(
2806 ShOp
, MCE
->getValue(), true, S
, E
, getContext()));
2807 return MatchOperand_Success
;
2810 static const struct Extension
{
2812 const FeatureBitset Features
;
2813 } ExtensionMap
[] = {
2814 {"crc", {AArch64::FeatureCRC
}},
2815 {"sm4", {AArch64::FeatureSM4
}},
2816 {"sha3", {AArch64::FeatureSHA3
}},
2817 {"sha2", {AArch64::FeatureSHA2
}},
2818 {"aes", {AArch64::FeatureAES
}},
2819 {"crypto", {AArch64::FeatureCrypto
}},
2820 {"fp", {AArch64::FeatureFPARMv8
}},
2821 {"simd", {AArch64::FeatureNEON
}},
2822 {"ras", {AArch64::FeatureRAS
}},
2823 {"lse", {AArch64::FeatureLSE
}},
2824 {"predres", {AArch64::FeaturePredRes
}},
2825 {"ccdp", {AArch64::FeatureCacheDeepPersist
}},
2826 {"mte", {AArch64::FeatureMTE
}},
2827 {"tlb-rmi", {AArch64::FeatureTLB_RMI
}},
2828 {"pan-rwv", {AArch64::FeaturePAN_RWV
}},
2829 {"ccpp", {AArch64::FeatureCCPP
}},
2830 {"sve", {AArch64::FeatureSVE
}},
2831 {"sve2", {AArch64::FeatureSVE2
}},
2832 {"sve2-aes", {AArch64::FeatureSVE2AES
}},
2833 {"sve2-sm4", {AArch64::FeatureSVE2SM4
}},
2834 {"sve2-sha3", {AArch64::FeatureSVE2SHA3
}},
2835 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm
}},
2836 // FIXME: Unsupported extensions
2843 static void setRequiredFeatureString(FeatureBitset FBS
, std::string
&Str
) {
2844 if (FBS
[AArch64::HasV8_1aOps
])
2846 else if (FBS
[AArch64::HasV8_2aOps
])
2848 else if (FBS
[AArch64::HasV8_3aOps
])
2850 else if (FBS
[AArch64::HasV8_4aOps
])
2852 else if (FBS
[AArch64::HasV8_5aOps
])
2855 auto ext
= std::find_if(std::begin(ExtensionMap
),
2856 std::end(ExtensionMap
),
2857 [&](const Extension
& e
)
2858 // Use & in case multiple features are enabled
2859 { return (FBS
& e
.Features
) != FeatureBitset(); }
2862 Str
+= ext
!= std::end(ExtensionMap
) ? ext
->Name
: "(unknown)";
2866 void AArch64AsmParser::createSysAlias(uint16_t Encoding
, OperandVector
&Operands
,
2868 const uint16_t Op2
= Encoding
& 7;
2869 const uint16_t Cm
= (Encoding
& 0x78) >> 3;
2870 const uint16_t Cn
= (Encoding
& 0x780) >> 7;
2871 const uint16_t Op1
= (Encoding
& 0x3800) >> 11;
2873 const MCExpr
*Expr
= MCConstantExpr::create(Op1
, getContext());
2876 AArch64Operand::CreateImm(Expr
, S
, getLoc(), getContext()));
2878 AArch64Operand::CreateSysCR(Cn
, S
, getLoc(), getContext()));
2880 AArch64Operand::CreateSysCR(Cm
, S
, getLoc(), getContext()));
2881 Expr
= MCConstantExpr::create(Op2
, getContext());
2883 AArch64Operand::CreateImm(Expr
, S
, getLoc(), getContext()));
2886 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2887 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2888 bool AArch64AsmParser::parseSysAlias(StringRef Name
, SMLoc NameLoc
,
2889 OperandVector
&Operands
) {
2890 if (Name
.find('.') != StringRef::npos
)
2891 return TokError("invalid operand");
2895 AArch64Operand::CreateToken("sys", false, NameLoc
, getContext()));
2897 MCAsmParser
&Parser
= getParser();
2898 const AsmToken
&Tok
= Parser
.getTok();
2899 StringRef Op
= Tok
.getString();
2900 SMLoc S
= Tok
.getLoc();
2902 if (Mnemonic
== "ic") {
2903 const AArch64IC::IC
*IC
= AArch64IC::lookupICByName(Op
);
2905 return TokError("invalid operand for IC instruction");
2906 else if (!IC
->haveFeatures(getSTI().getFeatureBits())) {
2907 std::string
Str("IC " + std::string(IC
->Name
) + " requires ");
2908 setRequiredFeatureString(IC
->getRequiredFeatures(), Str
);
2909 return TokError(Str
.c_str());
2911 createSysAlias(IC
->Encoding
, Operands
, S
);
2912 } else if (Mnemonic
== "dc") {
2913 const AArch64DC::DC
*DC
= AArch64DC::lookupDCByName(Op
);
2915 return TokError("invalid operand for DC instruction");
2916 else if (!DC
->haveFeatures(getSTI().getFeatureBits())) {
2917 std::string
Str("DC " + std::string(DC
->Name
) + " requires ");
2918 setRequiredFeatureString(DC
->getRequiredFeatures(), Str
);
2919 return TokError(Str
.c_str());
2921 createSysAlias(DC
->Encoding
, Operands
, S
);
2922 } else if (Mnemonic
== "at") {
2923 const AArch64AT::AT
*AT
= AArch64AT::lookupATByName(Op
);
2925 return TokError("invalid operand for AT instruction");
2926 else if (!AT
->haveFeatures(getSTI().getFeatureBits())) {
2927 std::string
Str("AT " + std::string(AT
->Name
) + " requires ");
2928 setRequiredFeatureString(AT
->getRequiredFeatures(), Str
);
2929 return TokError(Str
.c_str());
2931 createSysAlias(AT
->Encoding
, Operands
, S
);
2932 } else if (Mnemonic
== "tlbi") {
2933 const AArch64TLBI::TLBI
*TLBI
= AArch64TLBI::lookupTLBIByName(Op
);
2935 return TokError("invalid operand for TLBI instruction");
2936 else if (!TLBI
->haveFeatures(getSTI().getFeatureBits())) {
2937 std::string
Str("TLBI " + std::string(TLBI
->Name
) + " requires ");
2938 setRequiredFeatureString(TLBI
->getRequiredFeatures(), Str
);
2939 return TokError(Str
.c_str());
2941 createSysAlias(TLBI
->Encoding
, Operands
, S
);
2942 } else if (Mnemonic
== "cfp" || Mnemonic
== "dvp" || Mnemonic
== "cpp") {
2943 const AArch64PRCTX::PRCTX
*PRCTX
= AArch64PRCTX::lookupPRCTXByName(Op
);
2945 return TokError("invalid operand for prediction restriction instruction");
2946 else if (!PRCTX
->haveFeatures(getSTI().getFeatureBits())) {
2948 Mnemonic
.upper() + std::string(PRCTX
->Name
) + " requires ");
2949 setRequiredFeatureString(PRCTX
->getRequiredFeatures(), Str
);
2950 return TokError(Str
.c_str());
2952 uint16_t PRCTX_Op2
=
2953 Mnemonic
== "cfp" ? 4 :
2954 Mnemonic
== "dvp" ? 5 :
2955 Mnemonic
== "cpp" ? 7 :
2957 assert(PRCTX_Op2
&& "Invalid mnemonic for prediction restriction instruction");
2958 createSysAlias(PRCTX
->Encoding
<< 3 | PRCTX_Op2
, Operands
, S
);
2961 Parser
.Lex(); // Eat operand.
2963 bool ExpectRegister
= (Op
.lower().find("all") == StringRef::npos
);
2964 bool HasRegister
= false;
2966 // Check for the optional register operand.
2967 if (parseOptionalToken(AsmToken::Comma
)) {
2968 if (Tok
.isNot(AsmToken::Identifier
) || parseRegister(Operands
))
2969 return TokError("expected register operand");
2973 if (ExpectRegister
&& !HasRegister
)
2974 return TokError("specified " + Mnemonic
+ " op requires a register");
2975 else if (!ExpectRegister
&& HasRegister
)
2976 return TokError("specified " + Mnemonic
+ " op does not use a register");
2978 if (parseToken(AsmToken::EndOfStatement
, "unexpected token in argument list"))
2984 OperandMatchResultTy
2985 AArch64AsmParser::tryParseBarrierOperand(OperandVector
&Operands
) {
2986 MCAsmParser
&Parser
= getParser();
2987 const AsmToken
&Tok
= Parser
.getTok();
2989 if (Mnemonic
== "tsb" && Tok
.isNot(AsmToken::Identifier
)) {
2990 TokError("'csync' operand expected");
2991 return MatchOperand_ParseFail
;
2992 // Can be either a #imm style literal or an option name
2993 } else if (parseOptionalToken(AsmToken::Hash
) || Tok
.is(AsmToken::Integer
)) {
2994 // Immediate operand.
2995 const MCExpr
*ImmVal
;
2996 SMLoc ExprLoc
= getLoc();
2997 if (getParser().parseExpression(ImmVal
))
2998 return MatchOperand_ParseFail
;
2999 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(ImmVal
);
3001 Error(ExprLoc
, "immediate value expected for barrier operand");
3002 return MatchOperand_ParseFail
;
3004 if (MCE
->getValue() < 0 || MCE
->getValue() > 15) {
3005 Error(ExprLoc
, "barrier operand out of range");
3006 return MatchOperand_ParseFail
;
3008 auto DB
= AArch64DB::lookupDBByEncoding(MCE
->getValue());
3009 Operands
.push_back(AArch64Operand::CreateBarrier(
3010 MCE
->getValue(), DB
? DB
->Name
: "", ExprLoc
, getContext()));
3011 return MatchOperand_Success
;
3014 if (Tok
.isNot(AsmToken::Identifier
)) {
3015 TokError("invalid operand for instruction");
3016 return MatchOperand_ParseFail
;
3019 auto TSB
= AArch64TSB::lookupTSBByName(Tok
.getString());
3020 // The only valid named option for ISB is 'sy'
3021 auto DB
= AArch64DB::lookupDBByName(Tok
.getString());
3022 if (Mnemonic
== "isb" && (!DB
|| DB
->Encoding
!= AArch64DB::sy
)) {
3023 TokError("'sy' or #imm operand expected");
3024 return MatchOperand_ParseFail
;
3025 // The only valid named option for TSB is 'csync'
3026 } else if (Mnemonic
== "tsb" && (!TSB
|| TSB
->Encoding
!= AArch64TSB::csync
)) {
3027 TokError("'csync' operand expected");
3028 return MatchOperand_ParseFail
;
3029 } else if (!DB
&& !TSB
) {
3030 TokError("invalid barrier option name");
3031 return MatchOperand_ParseFail
;
3034 Operands
.push_back(AArch64Operand::CreateBarrier(
3035 DB
? DB
->Encoding
: TSB
->Encoding
, Tok
.getString(), getLoc(), getContext()));
3036 Parser
.Lex(); // Consume the option
3038 return MatchOperand_Success
;
3041 OperandMatchResultTy
3042 AArch64AsmParser::tryParseSysReg(OperandVector
&Operands
) {
3043 MCAsmParser
&Parser
= getParser();
3044 const AsmToken
&Tok
= Parser
.getTok();
3046 if (Tok
.isNot(AsmToken::Identifier
))
3047 return MatchOperand_NoMatch
;
3050 auto SysReg
= AArch64SysReg::lookupSysRegByName(Tok
.getString());
3051 if (SysReg
&& SysReg
->haveFeatures(getSTI().getFeatureBits())) {
3052 MRSReg
= SysReg
->Readable
? SysReg
->Encoding
: -1;
3053 MSRReg
= SysReg
->Writeable
? SysReg
->Encoding
: -1;
3055 MRSReg
= MSRReg
= AArch64SysReg::parseGenericRegister(Tok
.getString());
3057 auto PState
= AArch64PState::lookupPStateByName(Tok
.getString());
3058 unsigned PStateImm
= -1;
3059 if (PState
&& PState
->haveFeatures(getSTI().getFeatureBits()))
3060 PStateImm
= PState
->Encoding
;
3063 AArch64Operand::CreateSysReg(Tok
.getString(), getLoc(), MRSReg
, MSRReg
,
3064 PStateImm
, getContext()));
3065 Parser
.Lex(); // Eat identifier
3067 return MatchOperand_Success
;
3070 /// tryParseNeonVectorRegister - Parse a vector register operand.
3071 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector
&Operands
) {
3072 MCAsmParser
&Parser
= getParser();
3073 if (Parser
.getTok().isNot(AsmToken::Identifier
))
3077 // Check for a vector register specifier first.
3080 OperandMatchResultTy Res
=
3081 tryParseVectorRegister(Reg
, Kind
, RegKind::NeonVector
);
3082 if (Res
!= MatchOperand_Success
)
3085 const auto &KindRes
= parseVectorKind(Kind
, RegKind::NeonVector
);
3089 unsigned ElementWidth
= KindRes
->second
;
3091 AArch64Operand::CreateVectorReg(Reg
, RegKind::NeonVector
, ElementWidth
,
3092 S
, getLoc(), getContext()));
3094 // If there was an explicit qualifier, that goes on as a literal text
3098 AArch64Operand::CreateToken(Kind
, false, S
, getContext()));
3100 return tryParseVectorIndex(Operands
) == MatchOperand_ParseFail
;
3103 OperandMatchResultTy
3104 AArch64AsmParser::tryParseVectorIndex(OperandVector
&Operands
) {
3105 SMLoc SIdx
= getLoc();
3106 if (parseOptionalToken(AsmToken::LBrac
)) {
3107 const MCExpr
*ImmVal
;
3108 if (getParser().parseExpression(ImmVal
))
3109 return MatchOperand_NoMatch
;
3110 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(ImmVal
);
3112 TokError("immediate value expected for vector index");
3113 return MatchOperand_ParseFail
;;
3118 if (parseToken(AsmToken::RBrac
, "']' expected"))
3119 return MatchOperand_ParseFail
;;
3121 Operands
.push_back(AArch64Operand::CreateVectorIndex(MCE
->getValue(), SIdx
,
3123 return MatchOperand_Success
;
3126 return MatchOperand_NoMatch
;
3129 // tryParseVectorRegister - Try to parse a vector register name with
3130 // optional kind specifier. If it is a register specifier, eat the token
3132 OperandMatchResultTy
3133 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg
, StringRef
&Kind
,
3134 RegKind MatchKind
) {
3135 MCAsmParser
&Parser
= getParser();
3136 const AsmToken
&Tok
= Parser
.getTok();
3138 if (Tok
.isNot(AsmToken::Identifier
))
3139 return MatchOperand_NoMatch
;
3141 StringRef Name
= Tok
.getString();
3142 // If there is a kind specifier, it's separated from the register name by
3144 size_t Start
= 0, Next
= Name
.find('.');
3145 StringRef Head
= Name
.slice(Start
, Next
);
3146 unsigned RegNum
= matchRegisterNameAlias(Head
, MatchKind
);
3149 if (Next
!= StringRef::npos
) {
3150 Kind
= Name
.slice(Next
, StringRef::npos
);
3151 if (!isValidVectorKind(Kind
, MatchKind
)) {
3152 TokError("invalid vector kind qualifier");
3153 return MatchOperand_ParseFail
;
3156 Parser
.Lex(); // Eat the register token.
3159 return MatchOperand_Success
;
3162 return MatchOperand_NoMatch
;
3165 /// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3166 OperandMatchResultTy
3167 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector
&Operands
) {
3168 // Check for a SVE predicate register specifier first.
3169 const SMLoc S
= getLoc();
3172 auto Res
= tryParseVectorRegister(RegNum
, Kind
, RegKind::SVEPredicateVector
);
3173 if (Res
!= MatchOperand_Success
)
3176 const auto &KindRes
= parseVectorKind(Kind
, RegKind::SVEPredicateVector
);
3178 return MatchOperand_NoMatch
;
3180 unsigned ElementWidth
= KindRes
->second
;
3181 Operands
.push_back(AArch64Operand::CreateVectorReg(
3182 RegNum
, RegKind::SVEPredicateVector
, ElementWidth
, S
,
3183 getLoc(), getContext()));
3185 // Not all predicates are followed by a '/m' or '/z'.
3186 MCAsmParser
&Parser
= getParser();
3187 if (Parser
.getTok().isNot(AsmToken::Slash
))
3188 return MatchOperand_Success
;
3190 // But when they do they shouldn't have an element type suffix.
3191 if (!Kind
.empty()) {
3192 Error(S
, "not expecting size suffix");
3193 return MatchOperand_ParseFail
;
3196 // Add a literal slash as operand
3198 AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));
3200 Parser
.Lex(); // Eat the slash.
3202 // Zeroing or merging?
3203 auto Pred
= Parser
.getTok().getString().lower();
3204 if (Pred
!= "z" && Pred
!= "m") {
3205 Error(getLoc(), "expecting 'm' or 'z' predication");
3206 return MatchOperand_ParseFail
;
3209 // Add zero/merge token.
3210 const char *ZM
= Pred
== "z" ? "z" : "m";
3212 AArch64Operand::CreateToken(ZM
, false, getLoc(), getContext()));
3214 Parser
.Lex(); // Eat zero/merge token.
3215 return MatchOperand_Success
;
3218 /// parseRegister - Parse a register operand.
3219 bool AArch64AsmParser::parseRegister(OperandVector
&Operands
) {
3220 // Try for a Neon vector register.
3221 if (!tryParseNeonVectorRegister(Operands
))
3224 // Otherwise try for a scalar register.
3225 if (tryParseGPROperand
<false>(Operands
) == MatchOperand_Success
)
3231 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr
*&ImmVal
) {
3232 MCAsmParser
&Parser
= getParser();
3233 bool HasELFModifier
= false;
3234 AArch64MCExpr::VariantKind RefKind
;
3236 if (parseOptionalToken(AsmToken::Colon
)) {
3237 HasELFModifier
= true;
3239 if (Parser
.getTok().isNot(AsmToken::Identifier
))
3240 return TokError("expect relocation specifier in operand after ':'");
3242 std::string LowerCase
= Parser
.getTok().getIdentifier().lower();
3243 RefKind
= StringSwitch
<AArch64MCExpr::VariantKind
>(LowerCase
)
3244 .Case("lo12", AArch64MCExpr::VK_LO12
)
3245 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3
)
3246 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2
)
3247 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S
)
3248 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC
)
3249 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1
)
3250 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S
)
3251 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC
)
3252 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0
)
3253 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S
)
3254 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC
)
3255 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3
)
3256 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2
)
3257 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC
)
3258 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1
)
3259 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC
)
3260 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0
)
3261 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC
)
3262 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2
)
3263 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1
)
3264 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC
)
3265 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0
)
3266 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC
)
3267 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12
)
3268 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12
)
3269 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC
)
3270 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC
)
3271 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2
)
3272 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1
)
3273 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC
)
3274 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0
)
3275 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC
)
3276 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12
)
3277 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12
)
3278 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC
)
3279 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12
)
3280 .Case("got", AArch64MCExpr::VK_GOT_PAGE
)
3281 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12
)
3282 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE
)
3283 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC
)
3284 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1
)
3285 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC
)
3286 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE
)
3287 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12
)
3288 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12
)
3289 .Default(AArch64MCExpr::VK_INVALID
);
3291 if (RefKind
== AArch64MCExpr::VK_INVALID
)
3292 return TokError("expect relocation specifier in operand after ':'");
3294 Parser
.Lex(); // Eat identifier
3296 if (parseToken(AsmToken::Colon
, "expect ':' after relocation specifier"))
3300 if (getParser().parseExpression(ImmVal
))
3304 ImmVal
= AArch64MCExpr::create(ImmVal
, RefKind
, getContext());
3309 template <RegKind VectorKind
>
3310 OperandMatchResultTy
3311 AArch64AsmParser::tryParseVectorList(OperandVector
&Operands
,
3313 MCAsmParser
&Parser
= getParser();
3314 if (!Parser
.getTok().is(AsmToken::LCurly
))
3315 return MatchOperand_NoMatch
;
3317 // Wrapper around parse function
3318 auto ParseVector
= [this, &Parser
](unsigned &Reg
, StringRef
&Kind
, SMLoc Loc
,
3319 bool NoMatchIsError
) {
3320 auto RegTok
= Parser
.getTok();
3321 auto ParseRes
= tryParseVectorRegister(Reg
, Kind
, VectorKind
);
3322 if (ParseRes
== MatchOperand_Success
) {
3323 if (parseVectorKind(Kind
, VectorKind
))
3325 llvm_unreachable("Expected a valid vector kind");
3328 if (RegTok
.isNot(AsmToken::Identifier
) ||
3329 ParseRes
== MatchOperand_ParseFail
||
3330 (ParseRes
== MatchOperand_NoMatch
&& NoMatchIsError
)) {
3331 Error(Loc
, "vector register expected");
3332 return MatchOperand_ParseFail
;
3335 return MatchOperand_NoMatch
;
3339 auto LCurly
= Parser
.getTok();
3340 Parser
.Lex(); // Eat left bracket token.
3344 auto ParseRes
= ParseVector(FirstReg
, Kind
, getLoc(), ExpectMatch
);
3346 // Put back the original left bracket if there was no match, so that
3347 // different types of list-operands can be matched (e.g. SVE, Neon).
3348 if (ParseRes
== MatchOperand_NoMatch
)
3349 Parser
.getLexer().UnLex(LCurly
);
3351 if (ParseRes
!= MatchOperand_Success
)
3354 int64_t PrevReg
= FirstReg
;
3357 if (parseOptionalToken(AsmToken::Minus
)) {
3358 SMLoc Loc
= getLoc();
3362 ParseRes
= ParseVector(Reg
, NextKind
, getLoc(), true);
3363 if (ParseRes
!= MatchOperand_Success
)
3366 // Any Kind suffices must match on all regs in the list.
3367 if (Kind
!= NextKind
) {
3368 Error(Loc
, "mismatched register size suffix");
3369 return MatchOperand_ParseFail
;
3372 unsigned Space
= (PrevReg
< Reg
) ? (Reg
- PrevReg
) : (Reg
+ 32 - PrevReg
);
3374 if (Space
== 0 || Space
> 3) {
3375 Error(Loc
, "invalid number of vectors");
3376 return MatchOperand_ParseFail
;
3382 while (parseOptionalToken(AsmToken::Comma
)) {
3383 SMLoc Loc
= getLoc();
3386 ParseRes
= ParseVector(Reg
, NextKind
, getLoc(), true);
3387 if (ParseRes
!= MatchOperand_Success
)
3390 // Any Kind suffices must match on all regs in the list.
3391 if (Kind
!= NextKind
) {
3392 Error(Loc
, "mismatched register size suffix");
3393 return MatchOperand_ParseFail
;
3396 // Registers must be incremental (with wraparound at 31)
3397 if (getContext().getRegisterInfo()->getEncodingValue(Reg
) !=
3398 (getContext().getRegisterInfo()->getEncodingValue(PrevReg
) + 1) % 32) {
3399 Error(Loc
, "registers must be sequential");
3400 return MatchOperand_ParseFail
;
3408 if (parseToken(AsmToken::RCurly
, "'}' expected"))
3409 return MatchOperand_ParseFail
;
3412 Error(S
, "invalid number of vectors");
3413 return MatchOperand_ParseFail
;
3416 unsigned NumElements
= 0;
3417 unsigned ElementWidth
= 0;
3418 if (!Kind
.empty()) {
3419 if (const auto &VK
= parseVectorKind(Kind
, VectorKind
))
3420 std::tie(NumElements
, ElementWidth
) = *VK
;
3423 Operands
.push_back(AArch64Operand::CreateVectorList(
3424 FirstReg
, Count
, NumElements
, ElementWidth
, VectorKind
, S
, getLoc(),
3427 return MatchOperand_Success
;
3430 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3431 bool AArch64AsmParser::parseNeonVectorList(OperandVector
&Operands
) {
3432 auto ParseRes
= tryParseVectorList
<RegKind::NeonVector
>(Operands
, true);
3433 if (ParseRes
!= MatchOperand_Success
)
3436 return tryParseVectorIndex(Operands
) == MatchOperand_ParseFail
;
3439 OperandMatchResultTy
3440 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector
&Operands
) {
3441 SMLoc StartLoc
= getLoc();
3444 OperandMatchResultTy Res
= tryParseScalarRegister(RegNum
);
3445 if (Res
!= MatchOperand_Success
)
3448 if (!parseOptionalToken(AsmToken::Comma
)) {
3449 Operands
.push_back(AArch64Operand::CreateReg(
3450 RegNum
, RegKind::Scalar
, StartLoc
, getLoc(), getContext()));
3451 return MatchOperand_Success
;
3454 parseOptionalToken(AsmToken::Hash
);
3456 if (getParser().getTok().isNot(AsmToken::Integer
)) {
3457 Error(getLoc(), "index must be absent or #0");
3458 return MatchOperand_ParseFail
;
3461 const MCExpr
*ImmVal
;
3462 if (getParser().parseExpression(ImmVal
) || !isa
<MCConstantExpr
>(ImmVal
) ||
3463 cast
<MCConstantExpr
>(ImmVal
)->getValue() != 0) {
3464 Error(getLoc(), "index must be absent or #0");
3465 return MatchOperand_ParseFail
;
3468 Operands
.push_back(AArch64Operand::CreateReg(
3469 RegNum
, RegKind::Scalar
, StartLoc
, getLoc(), getContext()));
3470 return MatchOperand_Success
;
3473 template <bool ParseShiftExtend
, RegConstraintEqualityTy EqTy
>
3474 OperandMatchResultTy
3475 AArch64AsmParser::tryParseGPROperand(OperandVector
&Operands
) {
3476 SMLoc StartLoc
= getLoc();
3479 OperandMatchResultTy Res
= tryParseScalarRegister(RegNum
);
3480 if (Res
!= MatchOperand_Success
)
3483 // No shift/extend is the default.
3484 if (!ParseShiftExtend
|| getParser().getTok().isNot(AsmToken::Comma
)) {
3485 Operands
.push_back(AArch64Operand::CreateReg(
3486 RegNum
, RegKind::Scalar
, StartLoc
, getLoc(), getContext(), EqTy
));
3487 return MatchOperand_Success
;
3494 SmallVector
<std::unique_ptr
<MCParsedAsmOperand
>, 1> ExtOpnd
;
3495 Res
= tryParseOptionalShiftExtend(ExtOpnd
);
3496 if (Res
!= MatchOperand_Success
)
3499 auto Ext
= static_cast<AArch64Operand
*>(ExtOpnd
.back().get());
3500 Operands
.push_back(AArch64Operand::CreateReg(
3501 RegNum
, RegKind::Scalar
, StartLoc
, Ext
->getEndLoc(), getContext(), EqTy
,
3502 Ext
->getShiftExtendType(), Ext
->getShiftExtendAmount(),
3503 Ext
->hasShiftExtendAmount()));
3505 return MatchOperand_Success
;
3508 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector
&Operands
) {
3509 MCAsmParser
&Parser
= getParser();
3511 // Some SVE instructions have a decoration after the immediate, i.e.
3512 // "mul vl". We parse them here and add tokens, which must be present in the
3513 // asm string in the tablegen instruction.
3514 bool NextIsVL
= Parser
.getLexer().peekTok().getString().equals_lower("vl");
3515 bool NextIsHash
= Parser
.getLexer().peekTok().is(AsmToken::Hash
);
3516 if (!Parser
.getTok().getString().equals_lower("mul") ||
3517 !(NextIsVL
|| NextIsHash
))
3521 AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3522 Parser
.Lex(); // Eat the "mul"
3526 AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3527 Parser
.Lex(); // Eat the "vl"
3532 Parser
.Lex(); // Eat the #
3535 // Parse immediate operand.
3536 const MCExpr
*ImmVal
;
3537 if (!Parser
.parseExpression(ImmVal
))
3538 if (const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(ImmVal
)) {
3539 Operands
.push_back(AArch64Operand::CreateImm(
3540 MCConstantExpr::create(MCE
->getValue(), getContext()), S
, getLoc(),
3542 return MatchOperand_Success
;
3546 return Error(getLoc(), "expected 'vl' or '#<imm>'");
3549 /// parseOperand - Parse a arm instruction operand. For now this parses the
3550 /// operand regardless of the mnemonic.
3551 bool AArch64AsmParser::parseOperand(OperandVector
&Operands
, bool isCondCode
,
3552 bool invertCondCode
) {
3553 MCAsmParser
&Parser
= getParser();
3555 OperandMatchResultTy ResTy
=
3556 MatchOperandParserImpl(Operands
, Mnemonic
, /*ParseForAllFeatures=*/ true);
3558 // Check if the current operand has a custom associated parser, if so, try to
3559 // custom parse the operand, or fallback to the general approach.
3560 if (ResTy
== MatchOperand_Success
)
3562 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3563 // there was a match, but an error occurred, in which case, just return that
3564 // the operand parsing failed.
3565 if (ResTy
== MatchOperand_ParseFail
)
3568 // Nothing custom, so do general case parsing.
3570 switch (getLexer().getKind()) {
3574 if (parseSymbolicImmVal(Expr
))
3575 return Error(S
, "invalid operand");
3577 SMLoc E
= SMLoc::getFromPointer(getLoc().getPointer() - 1);
3578 Operands
.push_back(AArch64Operand::CreateImm(Expr
, S
, E
, getContext()));
3581 case AsmToken::LBrac
: {
3582 SMLoc Loc
= Parser
.getTok().getLoc();
3583 Operands
.push_back(AArch64Operand::CreateToken("[", false, Loc
,
3585 Parser
.Lex(); // Eat '['
3587 // There's no comma after a '[', so we can parse the next operand
3589 return parseOperand(Operands
, false, false);
3591 case AsmToken::LCurly
:
3592 return parseNeonVectorList(Operands
);
3593 case AsmToken::Identifier
: {
3594 // If we're expecting a Condition Code operand, then just parse that.
3596 return parseCondCode(Operands
, invertCondCode
);
3598 // If it's a register name, parse it.
3599 if (!parseRegister(Operands
))
3602 // See if this is a "mul vl" decoration or "mul #<int>" operand used
3603 // by SVE instructions.
3604 if (!parseOptionalMulOperand(Operands
))
3607 // This could be an optional "shift" or "extend" operand.
3608 OperandMatchResultTy GotShift
= tryParseOptionalShiftExtend(Operands
);
3609 // We can only continue if no tokens were eaten.
3610 if (GotShift
!= MatchOperand_NoMatch
)
3613 // This was not a register so parse other operands that start with an
3614 // identifier (like labels) as expressions and create them as immediates.
3615 const MCExpr
*IdVal
;
3617 if (getParser().parseExpression(IdVal
))
3619 E
= SMLoc::getFromPointer(getLoc().getPointer() - 1);
3620 Operands
.push_back(AArch64Operand::CreateImm(IdVal
, S
, E
, getContext()));
3623 case AsmToken::Integer
:
3624 case AsmToken::Real
:
3625 case AsmToken::Hash
: {
3626 // #42 -> immediate.
3629 parseOptionalToken(AsmToken::Hash
);
3631 // Parse a negative sign
3632 bool isNegative
= false;
3633 if (Parser
.getTok().is(AsmToken::Minus
)) {
3635 // We need to consume this token only when we have a Real, otherwise
3636 // we let parseSymbolicImmVal take care of it
3637 if (Parser
.getLexer().peekTok().is(AsmToken::Real
))
3641 // The only Real that should come through here is a literal #0.0 for
3642 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3643 // so convert the value.
3644 const AsmToken
&Tok
= Parser
.getTok();
3645 if (Tok
.is(AsmToken::Real
)) {
3646 APFloat
RealVal(APFloat::IEEEdouble(), Tok
.getString());
3647 uint64_t IntVal
= RealVal
.bitcastToAPInt().getZExtValue();
3648 if (Mnemonic
!= "fcmp" && Mnemonic
!= "fcmpe" && Mnemonic
!= "fcmeq" &&
3649 Mnemonic
!= "fcmge" && Mnemonic
!= "fcmgt" && Mnemonic
!= "fcmle" &&
3650 Mnemonic
!= "fcmlt" && Mnemonic
!= "fcmne")
3651 return TokError("unexpected floating point literal");
3652 else if (IntVal
!= 0 || isNegative
)
3653 return TokError("expected floating-point constant #0.0");
3654 Parser
.Lex(); // Eat the token.
3657 AArch64Operand::CreateToken("#0", false, S
, getContext()));
3659 AArch64Operand::CreateToken(".0", false, S
, getContext()));
3663 const MCExpr
*ImmVal
;
3664 if (parseSymbolicImmVal(ImmVal
))
3667 E
= SMLoc::getFromPointer(getLoc().getPointer() - 1);
3668 Operands
.push_back(AArch64Operand::CreateImm(ImmVal
, S
, E
, getContext()));
3671 case AsmToken::Equal
: {
3672 SMLoc Loc
= getLoc();
3673 if (Mnemonic
!= "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3674 return TokError("unexpected token in operand");
3675 Parser
.Lex(); // Eat '='
3676 const MCExpr
*SubExprVal
;
3677 if (getParser().parseExpression(SubExprVal
))
3680 if (Operands
.size() < 2 ||
3681 !static_cast<AArch64Operand
&>(*Operands
[1]).isScalarReg())
3682 return Error(Loc
, "Only valid when first operand is register");
3685 AArch64MCRegisterClasses
[AArch64::GPR64allRegClassID
].contains(
3686 Operands
[1]->getReg());
3688 MCContext
& Ctx
= getContext();
3689 E
= SMLoc::getFromPointer(Loc
.getPointer() - 1);
3690 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3691 if (isa
<MCConstantExpr
>(SubExprVal
)) {
3692 uint64_t Imm
= (cast
<MCConstantExpr
>(SubExprVal
))->getValue();
3693 uint32_t ShiftAmt
= 0, MaxShiftAmt
= IsXReg
? 48 : 16;
3694 while(Imm
> 0xFFFF && countTrailingZeros(Imm
) >= 16) {
3698 if (ShiftAmt
<= MaxShiftAmt
&& Imm
<= 0xFFFF) {
3699 Operands
[0] = AArch64Operand::CreateToken("movz", false, Loc
, Ctx
);
3700 Operands
.push_back(AArch64Operand::CreateImm(
3701 MCConstantExpr::create(Imm
, Ctx
), S
, E
, Ctx
));
3703 Operands
.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL
,
3704 ShiftAmt
, true, S
, E
, Ctx
));
3707 APInt Simm
= APInt(64, Imm
<< ShiftAmt
);
3708 // check if the immediate is an unsigned or signed 32-bit int for W regs
3709 if (!IsXReg
&& !(Simm
.isIntN(32) || Simm
.isSignedIntN(32)))
3710 return Error(Loc
, "Immediate too large for register");
3712 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3713 const MCExpr
*CPLoc
=
3714 getTargetStreamer().addConstantPoolEntry(SubExprVal
, IsXReg
? 8 : 4, Loc
);
3715 Operands
.push_back(AArch64Operand::CreateImm(CPLoc
, S
, E
, Ctx
));
3721 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand
&Op1
,
3722 const MCParsedAsmOperand
&Op2
) const {
3723 auto &AOp1
= static_cast<const AArch64Operand
&>(Op1
);
3724 auto &AOp2
= static_cast<const AArch64Operand
&>(Op2
);
3725 if (AOp1
.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg
&&
3726 AOp2
.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg
)
3727 return MCTargetAsmParser::regsEqual(Op1
, Op2
);
3729 assert(AOp1
.isScalarReg() && AOp2
.isScalarReg() &&
3730 "Testing equality of non-scalar registers not supported");
3732 // Check if a registers match their sub/super register classes.
3733 if (AOp1
.getRegEqualityTy() == EqualsSuperReg
)
3734 return getXRegFromWReg(Op1
.getReg()) == Op2
.getReg();
3735 if (AOp1
.getRegEqualityTy() == EqualsSubReg
)
3736 return getWRegFromXReg(Op1
.getReg()) == Op2
.getReg();
3737 if (AOp2
.getRegEqualityTy() == EqualsSuperReg
)
3738 return getXRegFromWReg(Op2
.getReg()) == Op1
.getReg();
3739 if (AOp2
.getRegEqualityTy() == EqualsSubReg
)
3740 return getWRegFromXReg(Op2
.getReg()) == Op1
.getReg();
3745 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3747 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo
&Info
,
3748 StringRef Name
, SMLoc NameLoc
,
3749 OperandVector
&Operands
) {
3750 MCAsmParser
&Parser
= getParser();
3751 Name
= StringSwitch
<StringRef
>(Name
.lower())
3752 .Case("beq", "b.eq")
3753 .Case("bne", "b.ne")
3754 .Case("bhs", "b.hs")
3755 .Case("bcs", "b.cs")
3756 .Case("blo", "b.lo")
3757 .Case("bcc", "b.cc")
3758 .Case("bmi", "b.mi")
3759 .Case("bpl", "b.pl")
3760 .Case("bvs", "b.vs")
3761 .Case("bvc", "b.vc")
3762 .Case("bhi", "b.hi")
3763 .Case("bls", "b.ls")
3764 .Case("bge", "b.ge")
3765 .Case("blt", "b.lt")
3766 .Case("bgt", "b.gt")
3767 .Case("ble", "b.le")
3768 .Case("bal", "b.al")
3769 .Case("bnv", "b.nv")
3772 // First check for the AArch64-specific .req directive.
3773 if (Parser
.getTok().is(AsmToken::Identifier
) &&
3774 Parser
.getTok().getIdentifier().lower() == ".req") {
3775 parseDirectiveReq(Name
, NameLoc
);
3776 // We always return 'error' for this, as we're done with this
3777 // statement and don't need to match the 'instruction."
3781 // Create the leading tokens for the mnemonic, split by '.' characters.
3782 size_t Start
= 0, Next
= Name
.find('.');
3783 StringRef Head
= Name
.slice(Start
, Next
);
3785 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
3786 // the SYS instruction.
3787 if (Head
== "ic" || Head
== "dc" || Head
== "at" || Head
== "tlbi" ||
3788 Head
== "cfp" || Head
== "dvp" || Head
== "cpp")
3789 return parseSysAlias(Head
, NameLoc
, Operands
);
3792 AArch64Operand::CreateToken(Head
, false, NameLoc
, getContext()));
3795 // Handle condition codes for a branch mnemonic
3796 if (Head
== "b" && Next
!= StringRef::npos
) {
3798 Next
= Name
.find('.', Start
+ 1);
3799 Head
= Name
.slice(Start
+ 1, Next
);
3801 SMLoc SuffixLoc
= SMLoc::getFromPointer(NameLoc
.getPointer() +
3802 (Head
.data() - Name
.data()));
3803 AArch64CC::CondCode CC
= parseCondCodeString(Head
);
3804 if (CC
== AArch64CC::Invalid
)
3805 return Error(SuffixLoc
, "invalid condition code");
3807 AArch64Operand::CreateToken(".", true, SuffixLoc
, getContext()));
3809 AArch64Operand::CreateCondCode(CC
, NameLoc
, NameLoc
, getContext()));
3812 // Add the remaining tokens in the mnemonic.
3813 while (Next
!= StringRef::npos
) {
3815 Next
= Name
.find('.', Start
+ 1);
3816 Head
= Name
.slice(Start
, Next
);
3817 SMLoc SuffixLoc
= SMLoc::getFromPointer(NameLoc
.getPointer() +
3818 (Head
.data() - Name
.data()) + 1);
3820 AArch64Operand::CreateToken(Head
, true, SuffixLoc
, getContext()));
3823 // Conditional compare instructions have a Condition Code operand, which needs
3824 // to be parsed and an immediate operand created.
3825 bool condCodeFourthOperand
=
3826 (Head
== "ccmp" || Head
== "ccmn" || Head
== "fccmp" ||
3827 Head
== "fccmpe" || Head
== "fcsel" || Head
== "csel" ||
3828 Head
== "csinc" || Head
== "csinv" || Head
== "csneg");
3830 // These instructions are aliases to some of the conditional select
3831 // instructions. However, the condition code is inverted in the aliased
3834 // FIXME: Is this the correct way to handle these? Or should the parser
3835 // generate the aliased instructions directly?
3836 bool condCodeSecondOperand
= (Head
== "cset" || Head
== "csetm");
3837 bool condCodeThirdOperand
=
3838 (Head
== "cinc" || Head
== "cinv" || Head
== "cneg");
3840 // Read the remaining operands.
3841 if (getLexer().isNot(AsmToken::EndOfStatement
)) {
3845 // Parse and remember the operand.
3846 if (parseOperand(Operands
, (N
== 4 && condCodeFourthOperand
) ||
3847 (N
== 3 && condCodeThirdOperand
) ||
3848 (N
== 2 && condCodeSecondOperand
),
3849 condCodeSecondOperand
|| condCodeThirdOperand
)) {
3853 // After successfully parsing some operands there are two special cases to
3854 // consider (i.e. notional operands not separated by commas). Both are due
3855 // to memory specifiers:
3856 // + An RBrac will end an address for load/store/prefetch
3857 // + An '!' will indicate a pre-indexed operation.
3859 // It's someone else's responsibility to make sure these tokens are sane
3860 // in the given context!
3862 SMLoc RLoc
= Parser
.getTok().getLoc();
3863 if (parseOptionalToken(AsmToken::RBrac
))
3865 AArch64Operand::CreateToken("]", false, RLoc
, getContext()));
3866 SMLoc ELoc
= Parser
.getTok().getLoc();
3867 if (parseOptionalToken(AsmToken::Exclaim
))
3869 AArch64Operand::CreateToken("!", false, ELoc
, getContext()));
3872 } while (parseOptionalToken(AsmToken::Comma
));
3875 if (parseToken(AsmToken::EndOfStatement
, "unexpected token in argument list"))
3881 static inline bool isMatchingOrAlias(unsigned ZReg
, unsigned Reg
) {
3882 assert((ZReg
>= AArch64::Z0
) && (ZReg
<= AArch64::Z31
));
3883 return (ZReg
== ((Reg
- AArch64::B0
) + AArch64::Z0
)) ||
3884 (ZReg
== ((Reg
- AArch64::H0
) + AArch64::Z0
)) ||
3885 (ZReg
== ((Reg
- AArch64::S0
) + AArch64::Z0
)) ||
3886 (ZReg
== ((Reg
- AArch64::D0
) + AArch64::Z0
)) ||
3887 (ZReg
== ((Reg
- AArch64::Q0
) + AArch64::Z0
)) ||
3888 (ZReg
== ((Reg
- AArch64::Z0
) + AArch64::Z0
));
3891 // FIXME: This entire function is a giant hack to provide us with decent
3892 // operand range validation/diagnostics until TableGen/MC can be extended
3893 // to support autogeneration of this kind of validation.
3894 bool AArch64AsmParser::validateInstruction(MCInst
&Inst
, SMLoc
&IDLoc
,
3895 SmallVectorImpl
<SMLoc
> &Loc
) {
3896 const MCRegisterInfo
*RI
= getContext().getRegisterInfo();
3897 const MCInstrDesc
&MCID
= MII
.get(Inst
.getOpcode());
3899 // A prefix only applies to the instruction following it. Here we extract
3900 // prefix information for the next instruction before validating the current
3901 // one so that in the case of failure we don't erronously continue using the
3903 PrefixInfo Prefix
= NextPrefix
;
3904 NextPrefix
= PrefixInfo::CreateFromInst(Inst
, MCID
.TSFlags
);
3906 // Before validating the instruction in isolation we run through the rules
3907 // applicable when it follows a prefix instruction.
3908 // NOTE: brk & hlt can be prefixed but require no additional validation.
3909 if (Prefix
.isActive() &&
3910 (Inst
.getOpcode() != AArch64::BRK
) &&
3911 (Inst
.getOpcode() != AArch64::HLT
)) {
3913 // Prefixed intructions must have a destructive operand.
3914 if ((MCID
.TSFlags
& AArch64::DestructiveInstTypeMask
) ==
3915 AArch64::NotDestructive
)
3916 return Error(IDLoc
, "instruction is unpredictable when following a"
3917 " movprfx, suggest replacing movprfx with mov");
3919 // Destination operands must match.
3920 if (Inst
.getOperand(0).getReg() != Prefix
.getDstReg())
3921 return Error(Loc
[0], "instruction is unpredictable when following a"
3922 " movprfx writing to a different destination");
3924 // Destination operand must not be used in any other location.
3925 for (unsigned i
= 1; i
< Inst
.getNumOperands(); ++i
) {
3926 if (Inst
.getOperand(i
).isReg() &&
3927 (MCID
.getOperandConstraint(i
, MCOI::TIED_TO
) == -1) &&
3928 isMatchingOrAlias(Prefix
.getDstReg(), Inst
.getOperand(i
).getReg()))
3929 return Error(Loc
[0], "instruction is unpredictable when following a"
3930 " movprfx and destination also used as non-destructive"
3934 auto PPRRegClass
= AArch64MCRegisterClasses
[AArch64::PPRRegClassID
];
3935 if (Prefix
.isPredicated()) {
3938 // Find the instructions general predicate.
3939 for (unsigned i
= 1; i
< Inst
.getNumOperands(); ++i
)
3940 if (Inst
.getOperand(i
).isReg() &&
3941 PPRRegClass
.contains(Inst
.getOperand(i
).getReg())) {
3946 // Instruction must be predicated if the movprfx is predicated.
3948 (MCID
.TSFlags
& AArch64::ElementSizeMask
) == AArch64::ElementSizeNone
)
3949 return Error(IDLoc
, "instruction is unpredictable when following a"
3950 " predicated movprfx, suggest using unpredicated movprfx");
3952 // Instruction must use same general predicate as the movprfx.
3953 if (Inst
.getOperand(PgIdx
).getReg() != Prefix
.getPgReg())
3954 return Error(IDLoc
, "instruction is unpredictable when following a"
3955 " predicated movprfx using a different general predicate");
3957 // Instruction element type must match the movprfx.
3958 if ((MCID
.TSFlags
& AArch64::ElementSizeMask
) != Prefix
.getElementSize())
3959 return Error(IDLoc
, "instruction is unpredictable when following a"
3960 " predicated movprfx with a different element size");
3964 // Check for indexed addressing modes w/ the base register being the
3965 // same as a destination/source register or pair load where
3966 // the Rt == Rt2. All of those are undefined behaviour.
3967 switch (Inst
.getOpcode()) {
3968 case AArch64::LDPSWpre
:
3969 case AArch64::LDPWpost
:
3970 case AArch64::LDPWpre
:
3971 case AArch64::LDPXpost
:
3972 case AArch64::LDPXpre
: {
3973 unsigned Rt
= Inst
.getOperand(1).getReg();
3974 unsigned Rt2
= Inst
.getOperand(2).getReg();
3975 unsigned Rn
= Inst
.getOperand(3).getReg();
3976 if (RI
->isSubRegisterEq(Rn
, Rt
))
3977 return Error(Loc
[0], "unpredictable LDP instruction, writeback base "
3978 "is also a destination");
3979 if (RI
->isSubRegisterEq(Rn
, Rt2
))
3980 return Error(Loc
[1], "unpredictable LDP instruction, writeback base "
3981 "is also a destination");
3984 case AArch64::LDPDi
:
3985 case AArch64::LDPQi
:
3986 case AArch64::LDPSi
:
3987 case AArch64::LDPSWi
:
3988 case AArch64::LDPWi
:
3989 case AArch64::LDPXi
: {
3990 unsigned Rt
= Inst
.getOperand(0).getReg();
3991 unsigned Rt2
= Inst
.getOperand(1).getReg();
3993 return Error(Loc
[1], "unpredictable LDP instruction, Rt2==Rt");
3996 case AArch64::LDPDpost
:
3997 case AArch64::LDPDpre
:
3998 case AArch64::LDPQpost
:
3999 case AArch64::LDPQpre
:
4000 case AArch64::LDPSpost
:
4001 case AArch64::LDPSpre
:
4002 case AArch64::LDPSWpost
: {
4003 unsigned Rt
= Inst
.getOperand(1).getReg();
4004 unsigned Rt2
= Inst
.getOperand(2).getReg();
4006 return Error(Loc
[1], "unpredictable LDP instruction, Rt2==Rt");
4009 case AArch64::STPDpost
:
4010 case AArch64::STPDpre
:
4011 case AArch64::STPQpost
:
4012 case AArch64::STPQpre
:
4013 case AArch64::STPSpost
:
4014 case AArch64::STPSpre
:
4015 case AArch64::STPWpost
:
4016 case AArch64::STPWpre
:
4017 case AArch64::STPXpost
:
4018 case AArch64::STPXpre
: {
4019 unsigned Rt
= Inst
.getOperand(1).getReg();
4020 unsigned Rt2
= Inst
.getOperand(2).getReg();
4021 unsigned Rn
= Inst
.getOperand(3).getReg();
4022 if (RI
->isSubRegisterEq(Rn
, Rt
))
4023 return Error(Loc
[0], "unpredictable STP instruction, writeback base "
4024 "is also a source");
4025 if (RI
->isSubRegisterEq(Rn
, Rt2
))
4026 return Error(Loc
[1], "unpredictable STP instruction, writeback base "
4027 "is also a source");
4030 case AArch64::LDRBBpre
:
4031 case AArch64::LDRBpre
:
4032 case AArch64::LDRHHpre
:
4033 case AArch64::LDRHpre
:
4034 case AArch64::LDRSBWpre
:
4035 case AArch64::LDRSBXpre
:
4036 case AArch64::LDRSHWpre
:
4037 case AArch64::LDRSHXpre
:
4038 case AArch64::LDRSWpre
:
4039 case AArch64::LDRWpre
:
4040 case AArch64::LDRXpre
:
4041 case AArch64::LDRBBpost
:
4042 case AArch64::LDRBpost
:
4043 case AArch64::LDRHHpost
:
4044 case AArch64::LDRHpost
:
4045 case AArch64::LDRSBWpost
:
4046 case AArch64::LDRSBXpost
:
4047 case AArch64::LDRSHWpost
:
4048 case AArch64::LDRSHXpost
:
4049 case AArch64::LDRSWpost
:
4050 case AArch64::LDRWpost
:
4051 case AArch64::LDRXpost
: {
4052 unsigned Rt
= Inst
.getOperand(1).getReg();
4053 unsigned Rn
= Inst
.getOperand(2).getReg();
4054 if (RI
->isSubRegisterEq(Rn
, Rt
))
4055 return Error(Loc
[0], "unpredictable LDR instruction, writeback base "
4056 "is also a source");
4059 case AArch64::STRBBpost
:
4060 case AArch64::STRBpost
:
4061 case AArch64::STRHHpost
:
4062 case AArch64::STRHpost
:
4063 case AArch64::STRWpost
:
4064 case AArch64::STRXpost
:
4065 case AArch64::STRBBpre
:
4066 case AArch64::STRBpre
:
4067 case AArch64::STRHHpre
:
4068 case AArch64::STRHpre
:
4069 case AArch64::STRWpre
:
4070 case AArch64::STRXpre
: {
4071 unsigned Rt
= Inst
.getOperand(1).getReg();
4072 unsigned Rn
= Inst
.getOperand(2).getReg();
4073 if (RI
->isSubRegisterEq(Rn
, Rt
))
4074 return Error(Loc
[0], "unpredictable STR instruction, writeback base "
4075 "is also a source");
4078 case AArch64::STXRB
:
4079 case AArch64::STXRH
:
4080 case AArch64::STXRW
:
4081 case AArch64::STXRX
:
4082 case AArch64::STLXRB
:
4083 case AArch64::STLXRH
:
4084 case AArch64::STLXRW
:
4085 case AArch64::STLXRX
: {
4086 unsigned Rs
= Inst
.getOperand(0).getReg();
4087 unsigned Rt
= Inst
.getOperand(1).getReg();
4088 unsigned Rn
= Inst
.getOperand(2).getReg();
4089 if (RI
->isSubRegisterEq(Rt
, Rs
) ||
4090 (RI
->isSubRegisterEq(Rn
, Rs
) && Rn
!= AArch64::SP
))
4091 return Error(Loc
[0],
4092 "unpredictable STXR instruction, status is also a source");
4095 case AArch64::STXPW
:
4096 case AArch64::STXPX
:
4097 case AArch64::STLXPW
:
4098 case AArch64::STLXPX
: {
4099 unsigned Rs
= Inst
.getOperand(0).getReg();
4100 unsigned Rt1
= Inst
.getOperand(1).getReg();
4101 unsigned Rt2
= Inst
.getOperand(2).getReg();
4102 unsigned Rn
= Inst
.getOperand(3).getReg();
4103 if (RI
->isSubRegisterEq(Rt1
, Rs
) || RI
->isSubRegisterEq(Rt2
, Rs
) ||
4104 (RI
->isSubRegisterEq(Rn
, Rs
) && Rn
!= AArch64::SP
))
4105 return Error(Loc
[0],
4106 "unpredictable STXP instruction, status is also a source");
4112 // Now check immediate ranges. Separate from the above as there is overlap
4113 // in the instructions being checked and this keeps the nested conditionals
4115 switch (Inst
.getOpcode()) {
4116 case AArch64::ADDSWri
:
4117 case AArch64::ADDSXri
:
4118 case AArch64::ADDWri
:
4119 case AArch64::ADDXri
:
4120 case AArch64::SUBSWri
:
4121 case AArch64::SUBSXri
:
4122 case AArch64::SUBWri
:
4123 case AArch64::SUBXri
: {
4124 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4125 // some slight duplication here.
4126 if (Inst
.getOperand(2).isExpr()) {
4127 const MCExpr
*Expr
= Inst
.getOperand(2).getExpr();
4128 AArch64MCExpr::VariantKind ELFRefKind
;
4129 MCSymbolRefExpr::VariantKind DarwinRefKind
;
4131 if (classifySymbolRef(Expr
, ELFRefKind
, DarwinRefKind
, Addend
)) {
4133 // Only allow these with ADDXri.
4134 if ((DarwinRefKind
== MCSymbolRefExpr::VK_PAGEOFF
||
4135 DarwinRefKind
== MCSymbolRefExpr::VK_TLVPPAGEOFF
) &&
4136 Inst
.getOpcode() == AArch64::ADDXri
)
4139 // Only allow these with ADDXri/ADDWri
4140 if ((ELFRefKind
== AArch64MCExpr::VK_LO12
||
4141 ELFRefKind
== AArch64MCExpr::VK_DTPREL_HI12
||
4142 ELFRefKind
== AArch64MCExpr::VK_DTPREL_LO12
||
4143 ELFRefKind
== AArch64MCExpr::VK_DTPREL_LO12_NC
||
4144 ELFRefKind
== AArch64MCExpr::VK_TPREL_HI12
||
4145 ELFRefKind
== AArch64MCExpr::VK_TPREL_LO12
||
4146 ELFRefKind
== AArch64MCExpr::VK_TPREL_LO12_NC
||
4147 ELFRefKind
== AArch64MCExpr::VK_TLSDESC_LO12
||
4148 ELFRefKind
== AArch64MCExpr::VK_SECREL_LO12
||
4149 ELFRefKind
== AArch64MCExpr::VK_SECREL_HI12
) &&
4150 (Inst
.getOpcode() == AArch64::ADDXri
||
4151 Inst
.getOpcode() == AArch64::ADDWri
))
4154 // Don't allow symbol refs in the immediate field otherwise
4155 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4156 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4157 // 'cmp w0, 'borked')
4158 return Error(Loc
.back(), "invalid immediate expression");
4160 // We don't validate more complex expressions here
4169 static std::string
AArch64MnemonicSpellCheck(StringRef S
,
4170 const FeatureBitset
&FBS
,
4171 unsigned VariantID
= 0);
4173 bool AArch64AsmParser::showMatchError(SMLoc Loc
, unsigned ErrCode
,
4175 OperandVector
&Operands
) {
4177 case Match_InvalidTiedOperand
: {
4178 RegConstraintEqualityTy EqTy
=
4179 static_cast<const AArch64Operand
&>(*Operands
[ErrorInfo
])
4180 .getRegEqualityTy();
4182 case RegConstraintEqualityTy::EqualsSubReg
:
4183 return Error(Loc
, "operand must be 64-bit form of destination register");
4184 case RegConstraintEqualityTy::EqualsSuperReg
:
4185 return Error(Loc
, "operand must be 32-bit form of destination register");
4186 case RegConstraintEqualityTy::EqualsReg
:
4187 return Error(Loc
, "operand must match destination register");
4189 llvm_unreachable("Unknown RegConstraintEqualityTy");
4191 case Match_MissingFeature
:
4193 "instruction requires a CPU feature not currently enabled");
4194 case Match_InvalidOperand
:
4195 return Error(Loc
, "invalid operand for instruction");
4196 case Match_InvalidSuffix
:
4197 return Error(Loc
, "invalid type suffix for instruction");
4198 case Match_InvalidCondCode
:
4199 return Error(Loc
, "expected AArch64 condition code");
4200 case Match_AddSubRegExtendSmall
:
4202 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
4203 case Match_AddSubRegExtendLarge
:
4205 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4206 case Match_AddSubSecondSource
:
4208 "expected compatible register, symbol or integer in range [0, 4095]");
4209 case Match_LogicalSecondSource
:
4210 return Error(Loc
, "expected compatible register or logical immediate");
4211 case Match_InvalidMovImm32Shift
:
4212 return Error(Loc
, "expected 'lsl' with optional integer 0 or 16");
4213 case Match_InvalidMovImm64Shift
:
4214 return Error(Loc
, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4215 case Match_AddSubRegShift32
:
4217 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4218 case Match_AddSubRegShift64
:
4220 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4221 case Match_InvalidFPImm
:
4223 "expected compatible register or floating-point constant");
4224 case Match_InvalidMemoryIndexedSImm6
:
4225 return Error(Loc
, "index must be an integer in range [-32, 31].");
4226 case Match_InvalidMemoryIndexedSImm5
:
4227 return Error(Loc
, "index must be an integer in range [-16, 15].");
4228 case Match_InvalidMemoryIndexed1SImm4
:
4229 return Error(Loc
, "index must be an integer in range [-8, 7].");
4230 case Match_InvalidMemoryIndexed2SImm4
:
4231 return Error(Loc
, "index must be a multiple of 2 in range [-16, 14].");
4232 case Match_InvalidMemoryIndexed3SImm4
:
4233 return Error(Loc
, "index must be a multiple of 3 in range [-24, 21].");
4234 case Match_InvalidMemoryIndexed4SImm4
:
4235 return Error(Loc
, "index must be a multiple of 4 in range [-32, 28].");
4236 case Match_InvalidMemoryIndexed16SImm4
:
4237 return Error(Loc
, "index must be a multiple of 16 in range [-128, 112].");
4238 case Match_InvalidMemoryIndexed1SImm6
:
4239 return Error(Loc
, "index must be an integer in range [-32, 31].");
4240 case Match_InvalidMemoryIndexedSImm8
:
4241 return Error(Loc
, "index must be an integer in range [-128, 127].");
4242 case Match_InvalidMemoryIndexedSImm9
:
4243 return Error(Loc
, "index must be an integer in range [-256, 255].");
4244 case Match_InvalidMemoryIndexed16SImm9
:
4245 return Error(Loc
, "index must be a multiple of 16 in range [-4096, 4080].");
4246 case Match_InvalidMemoryIndexed8SImm10
:
4247 return Error(Loc
, "index must be a multiple of 8 in range [-4096, 4088].");
4248 case Match_InvalidMemoryIndexed4SImm7
:
4249 return Error(Loc
, "index must be a multiple of 4 in range [-256, 252].");
4250 case Match_InvalidMemoryIndexed8SImm7
:
4251 return Error(Loc
, "index must be a multiple of 8 in range [-512, 504].");
4252 case Match_InvalidMemoryIndexed16SImm7
:
4253 return Error(Loc
, "index must be a multiple of 16 in range [-1024, 1008].");
4254 case Match_InvalidMemoryIndexed8UImm5
:
4255 return Error(Loc
, "index must be a multiple of 8 in range [0, 248].");
4256 case Match_InvalidMemoryIndexed4UImm5
:
4257 return Error(Loc
, "index must be a multiple of 4 in range [0, 124].");
4258 case Match_InvalidMemoryIndexed2UImm5
:
4259 return Error(Loc
, "index must be a multiple of 2 in range [0, 62].");
4260 case Match_InvalidMemoryIndexed8UImm6
:
4261 return Error(Loc
, "index must be a multiple of 8 in range [0, 504].");
4262 case Match_InvalidMemoryIndexed16UImm6
:
4263 return Error(Loc
, "index must be a multiple of 16 in range [0, 1008].");
4264 case Match_InvalidMemoryIndexed4UImm6
:
4265 return Error(Loc
, "index must be a multiple of 4 in range [0, 252].");
4266 case Match_InvalidMemoryIndexed2UImm6
:
4267 return Error(Loc
, "index must be a multiple of 2 in range [0, 126].");
4268 case Match_InvalidMemoryIndexed1UImm6
:
4269 return Error(Loc
, "index must be in range [0, 63].");
4270 case Match_InvalidMemoryWExtend8
:
4272 "expected 'uxtw' or 'sxtw' with optional shift of #0");
4273 case Match_InvalidMemoryWExtend16
:
4275 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4276 case Match_InvalidMemoryWExtend32
:
4278 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4279 case Match_InvalidMemoryWExtend64
:
4281 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4282 case Match_InvalidMemoryWExtend128
:
4284 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4285 case Match_InvalidMemoryXExtend8
:
4287 "expected 'lsl' or 'sxtx' with optional shift of #0");
4288 case Match_InvalidMemoryXExtend16
:
4290 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4291 case Match_InvalidMemoryXExtend32
:
4293 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4294 case Match_InvalidMemoryXExtend64
:
4296 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4297 case Match_InvalidMemoryXExtend128
:
4299 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4300 case Match_InvalidMemoryIndexed1
:
4301 return Error(Loc
, "index must be an integer in range [0, 4095].");
4302 case Match_InvalidMemoryIndexed2
:
4303 return Error(Loc
, "index must be a multiple of 2 in range [0, 8190].");
4304 case Match_InvalidMemoryIndexed4
:
4305 return Error(Loc
, "index must be a multiple of 4 in range [0, 16380].");
4306 case Match_InvalidMemoryIndexed8
:
4307 return Error(Loc
, "index must be a multiple of 8 in range [0, 32760].");
4308 case Match_InvalidMemoryIndexed16
:
4309 return Error(Loc
, "index must be a multiple of 16 in range [0, 65520].");
4310 case Match_InvalidImm0_1
:
4311 return Error(Loc
, "immediate must be an integer in range [0, 1].");
4312 case Match_InvalidImm0_7
:
4313 return Error(Loc
, "immediate must be an integer in range [0, 7].");
4314 case Match_InvalidImm0_15
:
4315 return Error(Loc
, "immediate must be an integer in range [0, 15].");
4316 case Match_InvalidImm0_31
:
4317 return Error(Loc
, "immediate must be an integer in range [0, 31].");
4318 case Match_InvalidImm0_63
:
4319 return Error(Loc
, "immediate must be an integer in range [0, 63].");
4320 case Match_InvalidImm0_127
:
4321 return Error(Loc
, "immediate must be an integer in range [0, 127].");
4322 case Match_InvalidImm0_255
:
4323 return Error(Loc
, "immediate must be an integer in range [0, 255].");
4324 case Match_InvalidImm0_65535
:
4325 return Error(Loc
, "immediate must be an integer in range [0, 65535].");
4326 case Match_InvalidImm1_8
:
4327 return Error(Loc
, "immediate must be an integer in range [1, 8].");
4328 case Match_InvalidImm1_16
:
4329 return Error(Loc
, "immediate must be an integer in range [1, 16].");
4330 case Match_InvalidImm1_32
:
4331 return Error(Loc
, "immediate must be an integer in range [1, 32].");
4332 case Match_InvalidImm1_64
:
4333 return Error(Loc
, "immediate must be an integer in range [1, 64].");
4334 case Match_InvalidSVEAddSubImm8
:
4335 return Error(Loc
, "immediate must be an integer in range [0, 255]"
4336 " with a shift amount of 0");
4337 case Match_InvalidSVEAddSubImm16
:
4338 case Match_InvalidSVEAddSubImm32
:
4339 case Match_InvalidSVEAddSubImm64
:
4340 return Error(Loc
, "immediate must be an integer in range [0, 255] or a "
4341 "multiple of 256 in range [256, 65280]");
4342 case Match_InvalidSVECpyImm8
:
4343 return Error(Loc
, "immediate must be an integer in range [-128, 255]"
4344 " with a shift amount of 0");
4345 case Match_InvalidSVECpyImm16
:
4346 return Error(Loc
, "immediate must be an integer in range [-128, 127] or a "
4347 "multiple of 256 in range [-32768, 65280]");
4348 case Match_InvalidSVECpyImm32
:
4349 case Match_InvalidSVECpyImm64
:
4350 return Error(Loc
, "immediate must be an integer in range [-128, 127] or a "
4351 "multiple of 256 in range [-32768, 32512]");
4352 case Match_InvalidIndexRange1_1
:
4353 return Error(Loc
, "expected lane specifier '[1]'");
4354 case Match_InvalidIndexRange0_15
:
4355 return Error(Loc
, "vector lane must be an integer in range [0, 15].");
4356 case Match_InvalidIndexRange0_7
:
4357 return Error(Loc
, "vector lane must be an integer in range [0, 7].");
4358 case Match_InvalidIndexRange0_3
:
4359 return Error(Loc
, "vector lane must be an integer in range [0, 3].");
4360 case Match_InvalidIndexRange0_1
:
4361 return Error(Loc
, "vector lane must be an integer in range [0, 1].");
4362 case Match_InvalidSVEIndexRange0_63
:
4363 return Error(Loc
, "vector lane must be an integer in range [0, 63].");
4364 case Match_InvalidSVEIndexRange0_31
:
4365 return Error(Loc
, "vector lane must be an integer in range [0, 31].");
4366 case Match_InvalidSVEIndexRange0_15
:
4367 return Error(Loc
, "vector lane must be an integer in range [0, 15].");
4368 case Match_InvalidSVEIndexRange0_7
:
4369 return Error(Loc
, "vector lane must be an integer in range [0, 7].");
4370 case Match_InvalidSVEIndexRange0_3
:
4371 return Error(Loc
, "vector lane must be an integer in range [0, 3].");
4372 case Match_InvalidLabel
:
4373 return Error(Loc
, "expected label or encodable integer pc offset");
4375 return Error(Loc
, "expected readable system register");
4377 return Error(Loc
, "expected writable system register or pstate");
4378 case Match_InvalidComplexRotationEven
:
4379 return Error(Loc
, "complex rotation must be 0, 90, 180 or 270.");
4380 case Match_InvalidComplexRotationOdd
:
4381 return Error(Loc
, "complex rotation must be 90 or 270.");
4382 case Match_MnemonicFail
: {
4383 std::string Suggestion
= AArch64MnemonicSpellCheck(
4384 ((AArch64Operand
&)*Operands
[0]).getToken(),
4385 ComputeAvailableFeatures(STI
->getFeatureBits()));
4386 return Error(Loc
, "unrecognized instruction mnemonic" + Suggestion
);
4388 case Match_InvalidGPR64shifted8
:
4389 return Error(Loc
, "register must be x0..x30 or xzr, without shift");
4390 case Match_InvalidGPR64shifted16
:
4391 return Error(Loc
, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4392 case Match_InvalidGPR64shifted32
:
4393 return Error(Loc
, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4394 case Match_InvalidGPR64shifted64
:
4395 return Error(Loc
, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4396 case Match_InvalidGPR64NoXZRshifted8
:
4397 return Error(Loc
, "register must be x0..x30 without shift");
4398 case Match_InvalidGPR64NoXZRshifted16
:
4399 return Error(Loc
, "register must be x0..x30 with required shift 'lsl #1'");
4400 case Match_InvalidGPR64NoXZRshifted32
:
4401 return Error(Loc
, "register must be x0..x30 with required shift 'lsl #2'");
4402 case Match_InvalidGPR64NoXZRshifted64
:
4403 return Error(Loc
, "register must be x0..x30 with required shift 'lsl #3'");
4404 case Match_InvalidZPR32UXTW8
:
4405 case Match_InvalidZPR32SXTW8
:
4406 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4407 case Match_InvalidZPR32UXTW16
:
4408 case Match_InvalidZPR32SXTW16
:
4409 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4410 case Match_InvalidZPR32UXTW32
:
4411 case Match_InvalidZPR32SXTW32
:
4412 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4413 case Match_InvalidZPR32UXTW64
:
4414 case Match_InvalidZPR32SXTW64
:
4415 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4416 case Match_InvalidZPR64UXTW8
:
4417 case Match_InvalidZPR64SXTW8
:
4418 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4419 case Match_InvalidZPR64UXTW16
:
4420 case Match_InvalidZPR64SXTW16
:
4421 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4422 case Match_InvalidZPR64UXTW32
:
4423 case Match_InvalidZPR64SXTW32
:
4424 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4425 case Match_InvalidZPR64UXTW64
:
4426 case Match_InvalidZPR64SXTW64
:
4427 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4428 case Match_InvalidZPR32LSL8
:
4429 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].s'");
4430 case Match_InvalidZPR32LSL16
:
4431 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4432 case Match_InvalidZPR32LSL32
:
4433 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4434 case Match_InvalidZPR32LSL64
:
4435 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4436 case Match_InvalidZPR64LSL8
:
4437 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].d'");
4438 case Match_InvalidZPR64LSL16
:
4439 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4440 case Match_InvalidZPR64LSL32
:
4441 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4442 case Match_InvalidZPR64LSL64
:
4443 return Error(Loc
, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4444 case Match_InvalidZPR0
:
4445 return Error(Loc
, "expected register without element width suffix");
4446 case Match_InvalidZPR8
:
4447 case Match_InvalidZPR16
:
4448 case Match_InvalidZPR32
:
4449 case Match_InvalidZPR64
:
4450 case Match_InvalidZPR128
:
4451 return Error(Loc
, "invalid element width");
4452 case Match_InvalidZPR_3b8
:
4453 return Error(Loc
, "Invalid restricted vector register, expected z0.b..z7.b");
4454 case Match_InvalidZPR_3b16
:
4455 return Error(Loc
, "Invalid restricted vector register, expected z0.h..z7.h");
4456 case Match_InvalidZPR_3b32
:
4457 return Error(Loc
, "Invalid restricted vector register, expected z0.s..z7.s");
4458 case Match_InvalidZPR_4b16
:
4459 return Error(Loc
, "Invalid restricted vector register, expected z0.h..z15.h");
4460 case Match_InvalidZPR_4b32
:
4461 return Error(Loc
, "Invalid restricted vector register, expected z0.s..z15.s");
4462 case Match_InvalidZPR_4b64
:
4463 return Error(Loc
, "Invalid restricted vector register, expected z0.d..z15.d");
4464 case Match_InvalidSVEPattern
:
4465 return Error(Loc
, "invalid predicate pattern");
4466 case Match_InvalidSVEPredicateAnyReg
:
4467 case Match_InvalidSVEPredicateBReg
:
4468 case Match_InvalidSVEPredicateHReg
:
4469 case Match_InvalidSVEPredicateSReg
:
4470 case Match_InvalidSVEPredicateDReg
:
4471 return Error(Loc
, "invalid predicate register.");
4472 case Match_InvalidSVEPredicate3bAnyReg
:
4473 return Error(Loc
, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
4474 case Match_InvalidSVEPredicate3bBReg
:
4475 return Error(Loc
, "invalid restricted predicate register, expected p0.b..p7.b");
4476 case Match_InvalidSVEPredicate3bHReg
:
4477 return Error(Loc
, "invalid restricted predicate register, expected p0.h..p7.h");
4478 case Match_InvalidSVEPredicate3bSReg
:
4479 return Error(Loc
, "invalid restricted predicate register, expected p0.s..p7.s");
4480 case Match_InvalidSVEPredicate3bDReg
:
4481 return Error(Loc
, "invalid restricted predicate register, expected p0.d..p7.d");
4482 case Match_InvalidSVEExactFPImmOperandHalfOne
:
4483 return Error(Loc
, "Invalid floating point constant, expected 0.5 or 1.0.");
4484 case Match_InvalidSVEExactFPImmOperandHalfTwo
:
4485 return Error(Loc
, "Invalid floating point constant, expected 0.5 or 2.0.");
4486 case Match_InvalidSVEExactFPImmOperandZeroOne
:
4487 return Error(Loc
, "Invalid floating point constant, expected 0.0 or 1.0.");
4489 llvm_unreachable("unexpected error code!");
/// Returns the human-readable name of the subtarget feature whose bit index
/// is \p Val; used to assemble the "instruction requires:" diagnostic when a
/// match fails with Match_MissingFeature.  (Forward declaration.)
static const char *getSubtargetFeatureName(uint64_t Val);
4495 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc
, unsigned &Opcode
,
4496 OperandVector
&Operands
,
4498 uint64_t &ErrorInfo
,
4499 bool MatchingInlineAsm
) {
4500 assert(!Operands
.empty() && "Unexpect empty operand list!");
4501 AArch64Operand
&Op
= static_cast<AArch64Operand
&>(*Operands
[0]);
4502 assert(Op
.isToken() && "Leading operand should always be a mnemonic!");
4504 StringRef Tok
= Op
.getToken();
4505 unsigned NumOperands
= Operands
.size();
4507 if (NumOperands
== 4 && Tok
== "lsl") {
4508 AArch64Operand
&Op2
= static_cast<AArch64Operand
&>(*Operands
[2]);
4509 AArch64Operand
&Op3
= static_cast<AArch64Operand
&>(*Operands
[3]);
4510 if (Op2
.isScalarReg() && Op3
.isImm()) {
4511 const MCConstantExpr
*Op3CE
= dyn_cast
<MCConstantExpr
>(Op3
.getImm());
4513 uint64_t Op3Val
= Op3CE
->getValue();
4514 uint64_t NewOp3Val
= 0;
4515 uint64_t NewOp4Val
= 0;
4516 if (AArch64MCRegisterClasses
[AArch64::GPR32allRegClassID
].contains(
4518 NewOp3Val
= (32 - Op3Val
) & 0x1f;
4519 NewOp4Val
= 31 - Op3Val
;
4521 NewOp3Val
= (64 - Op3Val
) & 0x3f;
4522 NewOp4Val
= 63 - Op3Val
;
4525 const MCExpr
*NewOp3
= MCConstantExpr::create(NewOp3Val
, getContext());
4526 const MCExpr
*NewOp4
= MCConstantExpr::create(NewOp4Val
, getContext());
4528 Operands
[0] = AArch64Operand::CreateToken(
4529 "ubfm", false, Op
.getStartLoc(), getContext());
4530 Operands
.push_back(AArch64Operand::CreateImm(
4531 NewOp4
, Op3
.getStartLoc(), Op3
.getEndLoc(), getContext()));
4532 Operands
[3] = AArch64Operand::CreateImm(NewOp3
, Op3
.getStartLoc(),
4533 Op3
.getEndLoc(), getContext());
4536 } else if (NumOperands
== 4 && Tok
== "bfc") {
4537 // FIXME: Horrible hack to handle BFC->BFM alias.
4538 AArch64Operand
&Op1
= static_cast<AArch64Operand
&>(*Operands
[1]);
4539 AArch64Operand LSBOp
= static_cast<AArch64Operand
&>(*Operands
[2]);
4540 AArch64Operand WidthOp
= static_cast<AArch64Operand
&>(*Operands
[3]);
4542 if (Op1
.isScalarReg() && LSBOp
.isImm() && WidthOp
.isImm()) {
4543 const MCConstantExpr
*LSBCE
= dyn_cast
<MCConstantExpr
>(LSBOp
.getImm());
4544 const MCConstantExpr
*WidthCE
= dyn_cast
<MCConstantExpr
>(WidthOp
.getImm());
4546 if (LSBCE
&& WidthCE
) {
4547 uint64_t LSB
= LSBCE
->getValue();
4548 uint64_t Width
= WidthCE
->getValue();
4550 uint64_t RegWidth
= 0;
4551 if (AArch64MCRegisterClasses
[AArch64::GPR64allRegClassID
].contains(
4557 if (LSB
>= RegWidth
)
4558 return Error(LSBOp
.getStartLoc(),
4559 "expected integer in range [0, 31]");
4560 if (Width
< 1 || Width
> RegWidth
)
4561 return Error(WidthOp
.getStartLoc(),
4562 "expected integer in range [1, 32]");
4566 ImmR
= (32 - LSB
) & 0x1f;
4568 ImmR
= (64 - LSB
) & 0x3f;
4570 uint64_t ImmS
= Width
- 1;
4572 if (ImmR
!= 0 && ImmS
>= ImmR
)
4573 return Error(WidthOp
.getStartLoc(),
4574 "requested insert overflows register");
4576 const MCExpr
*ImmRExpr
= MCConstantExpr::create(ImmR
, getContext());
4577 const MCExpr
*ImmSExpr
= MCConstantExpr::create(ImmS
, getContext());
4578 Operands
[0] = AArch64Operand::CreateToken(
4579 "bfm", false, Op
.getStartLoc(), getContext());
4580 Operands
[2] = AArch64Operand::CreateReg(
4581 RegWidth
== 32 ? AArch64::WZR
: AArch64::XZR
, RegKind::Scalar
,
4582 SMLoc(), SMLoc(), getContext());
4583 Operands
[3] = AArch64Operand::CreateImm(
4584 ImmRExpr
, LSBOp
.getStartLoc(), LSBOp
.getEndLoc(), getContext());
4585 Operands
.emplace_back(
4586 AArch64Operand::CreateImm(ImmSExpr
, WidthOp
.getStartLoc(),
4587 WidthOp
.getEndLoc(), getContext()));
4590 } else if (NumOperands
== 5) {
4591 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4592 // UBFIZ -> UBFM aliases.
4593 if (Tok
== "bfi" || Tok
== "sbfiz" || Tok
== "ubfiz") {
4594 AArch64Operand
&Op1
= static_cast<AArch64Operand
&>(*Operands
[1]);
4595 AArch64Operand
&Op3
= static_cast<AArch64Operand
&>(*Operands
[3]);
4596 AArch64Operand
&Op4
= static_cast<AArch64Operand
&>(*Operands
[4]);
4598 if (Op1
.isScalarReg() && Op3
.isImm() && Op4
.isImm()) {
4599 const MCConstantExpr
*Op3CE
= dyn_cast
<MCConstantExpr
>(Op3
.getImm());
4600 const MCConstantExpr
*Op4CE
= dyn_cast
<MCConstantExpr
>(Op4
.getImm());
4602 if (Op3CE
&& Op4CE
) {
4603 uint64_t Op3Val
= Op3CE
->getValue();
4604 uint64_t Op4Val
= Op4CE
->getValue();
4606 uint64_t RegWidth
= 0;
4607 if (AArch64MCRegisterClasses
[AArch64::GPR64allRegClassID
].contains(
4613 if (Op3Val
>= RegWidth
)
4614 return Error(Op3
.getStartLoc(),
4615 "expected integer in range [0, 31]");
4616 if (Op4Val
< 1 || Op4Val
> RegWidth
)
4617 return Error(Op4
.getStartLoc(),
4618 "expected integer in range [1, 32]");
4620 uint64_t NewOp3Val
= 0;
4622 NewOp3Val
= (32 - Op3Val
) & 0x1f;
4624 NewOp3Val
= (64 - Op3Val
) & 0x3f;
4626 uint64_t NewOp4Val
= Op4Val
- 1;
4628 if (NewOp3Val
!= 0 && NewOp4Val
>= NewOp3Val
)
4629 return Error(Op4
.getStartLoc(),
4630 "requested insert overflows register");
4632 const MCExpr
*NewOp3
=
4633 MCConstantExpr::create(NewOp3Val
, getContext());
4634 const MCExpr
*NewOp4
=
4635 MCConstantExpr::create(NewOp4Val
, getContext());
4636 Operands
[3] = AArch64Operand::CreateImm(
4637 NewOp3
, Op3
.getStartLoc(), Op3
.getEndLoc(), getContext());
4638 Operands
[4] = AArch64Operand::CreateImm(
4639 NewOp4
, Op4
.getStartLoc(), Op4
.getEndLoc(), getContext());
4641 Operands
[0] = AArch64Operand::CreateToken(
4642 "bfm", false, Op
.getStartLoc(), getContext());
4643 else if (Tok
== "sbfiz")
4644 Operands
[0] = AArch64Operand::CreateToken(
4645 "sbfm", false, Op
.getStartLoc(), getContext());
4646 else if (Tok
== "ubfiz")
4647 Operands
[0] = AArch64Operand::CreateToken(
4648 "ubfm", false, Op
.getStartLoc(), getContext());
4650 llvm_unreachable("No valid mnemonic for alias?");
4654 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4655 // UBFX -> UBFM aliases.
4656 } else if (NumOperands
== 5 &&
4657 (Tok
== "bfxil" || Tok
== "sbfx" || Tok
== "ubfx")) {
4658 AArch64Operand
&Op1
= static_cast<AArch64Operand
&>(*Operands
[1]);
4659 AArch64Operand
&Op3
= static_cast<AArch64Operand
&>(*Operands
[3]);
4660 AArch64Operand
&Op4
= static_cast<AArch64Operand
&>(*Operands
[4]);
4662 if (Op1
.isScalarReg() && Op3
.isImm() && Op4
.isImm()) {
4663 const MCConstantExpr
*Op3CE
= dyn_cast
<MCConstantExpr
>(Op3
.getImm());
4664 const MCConstantExpr
*Op4CE
= dyn_cast
<MCConstantExpr
>(Op4
.getImm());
4666 if (Op3CE
&& Op4CE
) {
4667 uint64_t Op3Val
= Op3CE
->getValue();
4668 uint64_t Op4Val
= Op4CE
->getValue();
4670 uint64_t RegWidth
= 0;
4671 if (AArch64MCRegisterClasses
[AArch64::GPR64allRegClassID
].contains(
4677 if (Op3Val
>= RegWidth
)
4678 return Error(Op3
.getStartLoc(),
4679 "expected integer in range [0, 31]");
4680 if (Op4Val
< 1 || Op4Val
> RegWidth
)
4681 return Error(Op4
.getStartLoc(),
4682 "expected integer in range [1, 32]");
4684 uint64_t NewOp4Val
= Op3Val
+ Op4Val
- 1;
4686 if (NewOp4Val
>= RegWidth
|| NewOp4Val
< Op3Val
)
4687 return Error(Op4
.getStartLoc(),
4688 "requested extract overflows register");
4690 const MCExpr
*NewOp4
=
4691 MCConstantExpr::create(NewOp4Val
, getContext());
4692 Operands
[4] = AArch64Operand::CreateImm(
4693 NewOp4
, Op4
.getStartLoc(), Op4
.getEndLoc(), getContext());
4695 Operands
[0] = AArch64Operand::CreateToken(
4696 "bfm", false, Op
.getStartLoc(), getContext());
4697 else if (Tok
== "sbfx")
4698 Operands
[0] = AArch64Operand::CreateToken(
4699 "sbfm", false, Op
.getStartLoc(), getContext());
4700 else if (Tok
== "ubfx")
4701 Operands
[0] = AArch64Operand::CreateToken(
4702 "ubfm", false, Op
.getStartLoc(), getContext());
4704 llvm_unreachable("No valid mnemonic for alias?");
4710 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4711 // instruction for FP registers correctly in some rare circumstances. Convert
4712 // it to a safe instruction and warn (because silently changing someone's
4713 // assembly is rude).
4714 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround
] &&
4715 NumOperands
== 4 && Tok
== "movi") {
4716 AArch64Operand
&Op1
= static_cast<AArch64Operand
&>(*Operands
[1]);
4717 AArch64Operand
&Op2
= static_cast<AArch64Operand
&>(*Operands
[2]);
4718 AArch64Operand
&Op3
= static_cast<AArch64Operand
&>(*Operands
[3]);
4719 if ((Op1
.isToken() && Op2
.isNeonVectorReg() && Op3
.isImm()) ||
4720 (Op1
.isNeonVectorReg() && Op2
.isToken() && Op3
.isImm())) {
4721 StringRef Suffix
= Op1
.isToken() ? Op1
.getToken() : Op2
.getToken();
4722 if (Suffix
.lower() == ".2d" &&
4723 cast
<MCConstantExpr
>(Op3
.getImm())->getValue() == 0) {
4724 Warning(IDLoc
, "instruction movi.2d with immediate #0 may not function"
4725 " correctly on this CPU, converting to equivalent movi.16b");
4726 // Switch the suffix to .16b.
4727 unsigned Idx
= Op1
.isToken() ? 1 : 2;
4728 Operands
[Idx
] = AArch64Operand::CreateToken(".16b", false, IDLoc
,
4734 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4735 // InstAlias can't quite handle this since the reg classes aren't
4737 if (NumOperands
== 3 && (Tok
== "sxtw" || Tok
== "uxtw")) {
4738 // The source register can be Wn here, but the matcher expects a
4739 // GPR64. Twiddle it here if necessary.
4740 AArch64Operand
&Op
= static_cast<AArch64Operand
&>(*Operands
[2]);
4741 if (Op
.isScalarReg()) {
4742 unsigned Reg
= getXRegFromWReg(Op
.getReg());
4743 Operands
[2] = AArch64Operand::CreateReg(Reg
, RegKind::Scalar
,
4744 Op
.getStartLoc(), Op
.getEndLoc(),
4748 // FIXME: Likewise for sxt[bh] with a Xd dst operand
4749 else if (NumOperands
== 3 && (Tok
== "sxtb" || Tok
== "sxth")) {
4750 AArch64Operand
&Op
= static_cast<AArch64Operand
&>(*Operands
[1]);
4751 if (Op
.isScalarReg() &&
4752 AArch64MCRegisterClasses
[AArch64::GPR64allRegClassID
].contains(
4754 // The source register can be Wn here, but the matcher expects a
4755 // GPR64. Twiddle it here if necessary.
4756 AArch64Operand
&Op
= static_cast<AArch64Operand
&>(*Operands
[2]);
4757 if (Op
.isScalarReg()) {
4758 unsigned Reg
= getXRegFromWReg(Op
.getReg());
4759 Operands
[2] = AArch64Operand::CreateReg(Reg
, RegKind::Scalar
,
4761 Op
.getEndLoc(), getContext());
4765 // FIXME: Likewise for uxt[bh] with a Xd dst operand
4766 else if (NumOperands
== 3 && (Tok
== "uxtb" || Tok
== "uxth")) {
4767 AArch64Operand
&Op
= static_cast<AArch64Operand
&>(*Operands
[1]);
4768 if (Op
.isScalarReg() &&
4769 AArch64MCRegisterClasses
[AArch64::GPR64allRegClassID
].contains(
4771 // The source register can be Wn here, but the matcher expects a
4772 // GPR32. Twiddle it here if necessary.
4773 AArch64Operand
&Op
= static_cast<AArch64Operand
&>(*Operands
[1]);
4774 if (Op
.isScalarReg()) {
4775 unsigned Reg
= getWRegFromXReg(Op
.getReg());
4776 Operands
[1] = AArch64Operand::CreateReg(Reg
, RegKind::Scalar
,
4778 Op
.getEndLoc(), getContext());
4784 FeatureBitset MissingFeatures
;
4785 // First try to match against the secondary set of tables containing the
4786 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4787 unsigned MatchResult
=
4788 MatchInstructionImpl(Operands
, Inst
, ErrorInfo
, MissingFeatures
,
4789 MatchingInlineAsm
, 1);
4791 // If that fails, try against the alternate table containing long-form NEON:
4792 // "fadd v0.2s, v1.2s, v2.2s"
4793 if (MatchResult
!= Match_Success
) {
4794 // But first, save the short-form match result: we can use it in case the
4795 // long-form match also fails.
4796 auto ShortFormNEONErrorInfo
= ErrorInfo
;
4797 auto ShortFormNEONMatchResult
= MatchResult
;
4798 auto ShortFormNEONMissingFeatures
= MissingFeatures
;
4801 MatchInstructionImpl(Operands
, Inst
, ErrorInfo
, MissingFeatures
,
4802 MatchingInlineAsm
, 0);
4804 // Now, both matches failed, and the long-form match failed on the mnemonic
4805 // suffix token operand. The short-form match failure is probably more
4806 // relevant: use it instead.
4807 if (MatchResult
== Match_InvalidOperand
&& ErrorInfo
== 1 &&
4808 Operands
.size() > 1 && ((AArch64Operand
&)*Operands
[1]).isToken() &&
4809 ((AArch64Operand
&)*Operands
[1]).isTokenSuffix()) {
4810 MatchResult
= ShortFormNEONMatchResult
;
4811 ErrorInfo
= ShortFormNEONErrorInfo
;
4812 MissingFeatures
= ShortFormNEONMissingFeatures
;
4816 switch (MatchResult
) {
4817 case Match_Success
: {
4818 // Perform range checking and other semantic validations
4819 SmallVector
<SMLoc
, 8> OperandLocs
;
4820 NumOperands
= Operands
.size();
4821 for (unsigned i
= 1; i
< NumOperands
; ++i
)
4822 OperandLocs
.push_back(Operands
[i
]->getStartLoc());
4823 if (validateInstruction(Inst
, IDLoc
, OperandLocs
))
4827 Out
.EmitInstruction(Inst
, getSTI());
4830 case Match_MissingFeature
: {
4831 assert(MissingFeatures
.any() && "Unknown missing feature!");
4832 // Special case the error message for the very common case where only
4833 // a single subtarget feature is missing (neon, e.g.).
4834 std::string Msg
= "instruction requires:";
4835 for (unsigned i
= 0, e
= MissingFeatures
.size(); i
!= e
; ++i
) {
4836 if (MissingFeatures
[i
]) {
4838 Msg
+= getSubtargetFeatureName(i
);
4841 return Error(IDLoc
, Msg
);
4843 case Match_MnemonicFail
:
4844 return showMatchError(IDLoc
, MatchResult
, ErrorInfo
, Operands
);
4845 case Match_InvalidOperand
: {
4846 SMLoc ErrorLoc
= IDLoc
;
4848 if (ErrorInfo
!= ~0ULL) {
4849 if (ErrorInfo
>= Operands
.size())
4850 return Error(IDLoc
, "too few operands for instruction",
4851 SMRange(IDLoc
, getTok().getLoc()));
4853 ErrorLoc
= ((AArch64Operand
&)*Operands
[ErrorInfo
]).getStartLoc();
4854 if (ErrorLoc
== SMLoc())
4857 // If the match failed on a suffix token operand, tweak the diagnostic
4859 if (((AArch64Operand
&)*Operands
[ErrorInfo
]).isToken() &&
4860 ((AArch64Operand
&)*Operands
[ErrorInfo
]).isTokenSuffix())
4861 MatchResult
= Match_InvalidSuffix
;
4863 return showMatchError(ErrorLoc
, MatchResult
, ErrorInfo
, Operands
);
4865 case Match_InvalidTiedOperand
:
4866 case Match_InvalidMemoryIndexed1
:
4867 case Match_InvalidMemoryIndexed2
:
4868 case Match_InvalidMemoryIndexed4
:
4869 case Match_InvalidMemoryIndexed8
:
4870 case Match_InvalidMemoryIndexed16
:
4871 case Match_InvalidCondCode
:
4872 case Match_AddSubRegExtendSmall
:
4873 case Match_AddSubRegExtendLarge
:
4874 case Match_AddSubSecondSource
:
4875 case Match_LogicalSecondSource
:
4876 case Match_AddSubRegShift32
:
4877 case Match_AddSubRegShift64
:
4878 case Match_InvalidMovImm32Shift
:
4879 case Match_InvalidMovImm64Shift
:
4880 case Match_InvalidFPImm
:
4881 case Match_InvalidMemoryWExtend8
:
4882 case Match_InvalidMemoryWExtend16
:
4883 case Match_InvalidMemoryWExtend32
:
4884 case Match_InvalidMemoryWExtend64
:
4885 case Match_InvalidMemoryWExtend128
:
4886 case Match_InvalidMemoryXExtend8
:
4887 case Match_InvalidMemoryXExtend16
:
4888 case Match_InvalidMemoryXExtend32
:
4889 case Match_InvalidMemoryXExtend64
:
4890 case Match_InvalidMemoryXExtend128
:
4891 case Match_InvalidMemoryIndexed1SImm4
:
4892 case Match_InvalidMemoryIndexed2SImm4
:
4893 case Match_InvalidMemoryIndexed3SImm4
:
4894 case Match_InvalidMemoryIndexed4SImm4
:
4895 case Match_InvalidMemoryIndexed1SImm6
:
4896 case Match_InvalidMemoryIndexed16SImm4
:
4897 case Match_InvalidMemoryIndexed4SImm7
:
4898 case Match_InvalidMemoryIndexed8SImm7
:
4899 case Match_InvalidMemoryIndexed16SImm7
:
4900 case Match_InvalidMemoryIndexed8UImm5
:
4901 case Match_InvalidMemoryIndexed4UImm5
:
4902 case Match_InvalidMemoryIndexed2UImm5
:
4903 case Match_InvalidMemoryIndexed1UImm6
:
4904 case Match_InvalidMemoryIndexed2UImm6
:
4905 case Match_InvalidMemoryIndexed4UImm6
:
4906 case Match_InvalidMemoryIndexed8UImm6
:
4907 case Match_InvalidMemoryIndexed16UImm6
:
4908 case Match_InvalidMemoryIndexedSImm6
:
4909 case Match_InvalidMemoryIndexedSImm5
:
4910 case Match_InvalidMemoryIndexedSImm8
:
4911 case Match_InvalidMemoryIndexedSImm9
:
4912 case Match_InvalidMemoryIndexed16SImm9
:
4913 case Match_InvalidMemoryIndexed8SImm10
:
4914 case Match_InvalidImm0_1
:
4915 case Match_InvalidImm0_7
:
4916 case Match_InvalidImm0_15
:
4917 case Match_InvalidImm0_31
:
4918 case Match_InvalidImm0_63
:
4919 case Match_InvalidImm0_127
:
4920 case Match_InvalidImm0_255
:
4921 case Match_InvalidImm0_65535
:
4922 case Match_InvalidImm1_8
:
4923 case Match_InvalidImm1_16
:
4924 case Match_InvalidImm1_32
:
4925 case Match_InvalidImm1_64
:
4926 case Match_InvalidSVEAddSubImm8
:
4927 case Match_InvalidSVEAddSubImm16
:
4928 case Match_InvalidSVEAddSubImm32
:
4929 case Match_InvalidSVEAddSubImm64
:
4930 case Match_InvalidSVECpyImm8
:
4931 case Match_InvalidSVECpyImm16
:
4932 case Match_InvalidSVECpyImm32
:
4933 case Match_InvalidSVECpyImm64
:
4934 case Match_InvalidIndexRange1_1
:
4935 case Match_InvalidIndexRange0_15
:
4936 case Match_InvalidIndexRange0_7
:
4937 case Match_InvalidIndexRange0_3
:
4938 case Match_InvalidIndexRange0_1
:
4939 case Match_InvalidSVEIndexRange0_63
:
4940 case Match_InvalidSVEIndexRange0_31
:
4941 case Match_InvalidSVEIndexRange0_15
:
4942 case Match_InvalidSVEIndexRange0_7
:
4943 case Match_InvalidSVEIndexRange0_3
:
4944 case Match_InvalidLabel
:
4945 case Match_InvalidComplexRotationEven
:
4946 case Match_InvalidComplexRotationOdd
:
4947 case Match_InvalidGPR64shifted8
:
4948 case Match_InvalidGPR64shifted16
:
4949 case Match_InvalidGPR64shifted32
:
4950 case Match_InvalidGPR64shifted64
:
4951 case Match_InvalidGPR64NoXZRshifted8
:
4952 case Match_InvalidGPR64NoXZRshifted16
:
4953 case Match_InvalidGPR64NoXZRshifted32
:
4954 case Match_InvalidGPR64NoXZRshifted64
:
4955 case Match_InvalidZPR32UXTW8
:
4956 case Match_InvalidZPR32UXTW16
:
4957 case Match_InvalidZPR32UXTW32
:
4958 case Match_InvalidZPR32UXTW64
:
4959 case Match_InvalidZPR32SXTW8
:
4960 case Match_InvalidZPR32SXTW16
:
4961 case Match_InvalidZPR32SXTW32
:
4962 case Match_InvalidZPR32SXTW64
:
4963 case Match_InvalidZPR64UXTW8
:
4964 case Match_InvalidZPR64SXTW8
:
4965 case Match_InvalidZPR64UXTW16
:
4966 case Match_InvalidZPR64SXTW16
:
4967 case Match_InvalidZPR64UXTW32
:
4968 case Match_InvalidZPR64SXTW32
:
4969 case Match_InvalidZPR64UXTW64
:
4970 case Match_InvalidZPR64SXTW64
:
4971 case Match_InvalidZPR32LSL8
:
4972 case Match_InvalidZPR32LSL16
:
4973 case Match_InvalidZPR32LSL32
:
4974 case Match_InvalidZPR32LSL64
:
4975 case Match_InvalidZPR64LSL8
:
4976 case Match_InvalidZPR64LSL16
:
4977 case Match_InvalidZPR64LSL32
:
4978 case Match_InvalidZPR64LSL64
:
4979 case Match_InvalidZPR0
:
4980 case Match_InvalidZPR8
:
4981 case Match_InvalidZPR16
:
4982 case Match_InvalidZPR32
:
4983 case Match_InvalidZPR64
:
4984 case Match_InvalidZPR128
:
4985 case Match_InvalidZPR_3b8
:
4986 case Match_InvalidZPR_3b16
:
4987 case Match_InvalidZPR_3b32
:
4988 case Match_InvalidZPR_4b16
:
4989 case Match_InvalidZPR_4b32
:
4990 case Match_InvalidZPR_4b64
:
4991 case Match_InvalidSVEPredicateAnyReg
:
4992 case Match_InvalidSVEPattern
:
4993 case Match_InvalidSVEPredicateBReg
:
4994 case Match_InvalidSVEPredicateHReg
:
4995 case Match_InvalidSVEPredicateSReg
:
4996 case Match_InvalidSVEPredicateDReg
:
4997 case Match_InvalidSVEPredicate3bAnyReg
:
4998 case Match_InvalidSVEPredicate3bBReg
:
4999 case Match_InvalidSVEPredicate3bHReg
:
5000 case Match_InvalidSVEPredicate3bSReg
:
5001 case Match_InvalidSVEPredicate3bDReg
:
5002 case Match_InvalidSVEExactFPImmOperandHalfOne
:
5003 case Match_InvalidSVEExactFPImmOperandHalfTwo
:
5004 case Match_InvalidSVEExactFPImmOperandZeroOne
:
5007 if (ErrorInfo
>= Operands
.size())
5008 return Error(IDLoc
, "too few operands for instruction", SMRange(IDLoc
, (*Operands
.back()).getEndLoc()));
5009 // Any time we get here, there's nothing fancy to do. Just get the
5010 // operand SMLoc and display the diagnostic.
5011 SMLoc ErrorLoc
= ((AArch64Operand
&)*Operands
[ErrorInfo
]).getStartLoc();
5012 if (ErrorLoc
== SMLoc())
5014 return showMatchError(ErrorLoc
, MatchResult
, ErrorInfo
, Operands
);
5018 llvm_unreachable("Implement any new match types added!");
5021 /// ParseDirective parses the arm specific directives
5022 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID
) {
5023 const MCObjectFileInfo::Environment Format
=
5024 getContext().getObjectFileInfo()->getObjectFileType();
5025 bool IsMachO
= Format
== MCObjectFileInfo::IsMachO
;
5027 auto IDVal
= DirectiveID
.getIdentifier().lower();
5028 SMLoc Loc
= DirectiveID
.getLoc();
5029 if (IDVal
== ".arch")
5030 parseDirectiveArch(Loc
);
5031 else if (IDVal
== ".cpu")
5032 parseDirectiveCPU(Loc
);
5033 else if (IDVal
== ".tlsdesccall")
5034 parseDirectiveTLSDescCall(Loc
);
5035 else if (IDVal
== ".ltorg" || IDVal
== ".pool")
5036 parseDirectiveLtorg(Loc
);
5037 else if (IDVal
== ".unreq")
5038 parseDirectiveUnreq(Loc
);
5039 else if (IDVal
== ".inst")
5040 parseDirectiveInst(Loc
);
5041 else if (IDVal
== ".cfi_negate_ra_state")
5042 parseDirectiveCFINegateRAState();
5043 else if (IDVal
== ".cfi_b_key_frame")
5044 parseDirectiveCFIBKeyFrame();
5045 else if (IDVal
== ".arch_extension")
5046 parseDirectiveArchExtension(Loc
);
5048 if (IDVal
== MCLOHDirectiveName())
5049 parseDirectiveLOH(IDVal
, Loc
);
5057 static void ExpandCryptoAEK(AArch64::ArchKind ArchKind
,
5058 SmallVector
<StringRef
, 4> &RequestedExtensions
) {
5059 const bool NoCrypto
=
5060 (std::find(RequestedExtensions
.begin(), RequestedExtensions
.end(),
5061 "nocrypto") != std::end(RequestedExtensions
));
5063 (std::find(RequestedExtensions
.begin(), RequestedExtensions
.end(),
5064 "crypto") != std::end(RequestedExtensions
));
5066 if (!NoCrypto
&& Crypto
) {
5069 // Map 'generic' (and others) to sha2 and aes, because
5070 // that was the traditional meaning of crypto.
5071 case AArch64::ArchKind::ARMV8_1A
:
5072 case AArch64::ArchKind::ARMV8_2A
:
5073 case AArch64::ArchKind::ARMV8_3A
:
5074 RequestedExtensions
.push_back("sha2");
5075 RequestedExtensions
.push_back("aes");
5077 case AArch64::ArchKind::ARMV8_4A
:
5078 case AArch64::ArchKind::ARMV8_5A
:
5079 RequestedExtensions
.push_back("sm4");
5080 RequestedExtensions
.push_back("sha3");
5081 RequestedExtensions
.push_back("sha2");
5082 RequestedExtensions
.push_back("aes");
5085 } else if (NoCrypto
) {
5088 // Map 'generic' (and others) to sha2 and aes, because
5089 // that was the traditional meaning of crypto.
5090 case AArch64::ArchKind::ARMV8_1A
:
5091 case AArch64::ArchKind::ARMV8_2A
:
5092 case AArch64::ArchKind::ARMV8_3A
:
5093 RequestedExtensions
.push_back("nosha2");
5094 RequestedExtensions
.push_back("noaes");
5096 case AArch64::ArchKind::ARMV8_4A
:
5097 case AArch64::ArchKind::ARMV8_5A
:
5098 RequestedExtensions
.push_back("nosm4");
5099 RequestedExtensions
.push_back("nosha3");
5100 RequestedExtensions
.push_back("nosha2");
5101 RequestedExtensions
.push_back("noaes");
5107 /// parseDirectiveArch
5109 bool AArch64AsmParser::parseDirectiveArch(SMLoc L
) {
5110 SMLoc ArchLoc
= getLoc();
5112 StringRef Arch
, ExtensionString
;
5113 std::tie(Arch
, ExtensionString
) =
5114 getParser().parseStringToEndOfStatement().trim().split('+');
5116 AArch64::ArchKind ID
= AArch64::parseArch(Arch
);
5117 if (ID
== AArch64::ArchKind::INVALID
)
5118 return Error(ArchLoc
, "unknown arch name");
5120 if (parseToken(AsmToken::EndOfStatement
))
5123 // Get the architecture and extension features.
5124 std::vector
<StringRef
> AArch64Features
;
5125 AArch64::getArchFeatures(ID
, AArch64Features
);
5126 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID
),
5129 MCSubtargetInfo
&STI
= copySTI();
5130 std::vector
<std::string
> ArchFeatures(AArch64Features
.begin(), AArch64Features
.end());
5131 STI
.setDefaultFeatures("generic", join(ArchFeatures
.begin(), ArchFeatures
.end(), ","));
5133 SmallVector
<StringRef
, 4> RequestedExtensions
;
5134 if (!ExtensionString
.empty())
5135 ExtensionString
.split(RequestedExtensions
, '+');
5137 ExpandCryptoAEK(ID
, RequestedExtensions
);
5139 FeatureBitset Features
= STI
.getFeatureBits();
5140 for (auto Name
: RequestedExtensions
) {
5141 bool EnableFeature
= true;
5143 if (Name
.startswith_lower("no")) {
5144 EnableFeature
= false;
5145 Name
= Name
.substr(2);
5148 for (const auto &Extension
: ExtensionMap
) {
5149 if (Extension
.Name
!= Name
)
5152 if (Extension
.Features
.none())
5153 report_fatal_error("unsupported architectural extension: " + Name
);
5155 FeatureBitset ToggleFeatures
= EnableFeature
5156 ? (~Features
& Extension
.Features
)
5157 : ( Features
& Extension
.Features
);
5158 FeatureBitset Features
=
5159 ComputeAvailableFeatures(STI
.ToggleFeature(ToggleFeatures
));
5160 setAvailableFeatures(Features
);
5167 /// parseDirectiveArchExtension
5168 /// ::= .arch_extension [no]feature
5169 bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L
) {
5170 SMLoc ExtLoc
= getLoc();
5172 StringRef Name
= getParser().parseStringToEndOfStatement().trim();
5174 if (parseToken(AsmToken::EndOfStatement
,
5175 "unexpected token in '.arch_extension' directive"))
5178 bool EnableFeature
= true;
5179 if (Name
.startswith_lower("no")) {
5180 EnableFeature
= false;
5181 Name
= Name
.substr(2);
5184 MCSubtargetInfo
&STI
= copySTI();
5185 FeatureBitset Features
= STI
.getFeatureBits();
5186 for (const auto &Extension
: ExtensionMap
) {
5187 if (Extension
.Name
!= Name
)
5190 if (Extension
.Features
.none())
5191 return Error(ExtLoc
, "unsupported architectural extension: " + Name
);
5193 FeatureBitset ToggleFeatures
= EnableFeature
5194 ? (~Features
& Extension
.Features
)
5195 : (Features
& Extension
.Features
);
5196 FeatureBitset Features
=
5197 ComputeAvailableFeatures(STI
.ToggleFeature(ToggleFeatures
));
5198 setAvailableFeatures(Features
);
5202 return Error(ExtLoc
, "unknown architectural extension: " + Name
);
5205 static SMLoc
incrementLoc(SMLoc L
, int Offset
) {
5206 return SMLoc::getFromPointer(L
.getPointer() + Offset
);
5209 /// parseDirectiveCPU
5211 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L
) {
5212 SMLoc CurLoc
= getLoc();
5214 StringRef CPU
, ExtensionString
;
5215 std::tie(CPU
, ExtensionString
) =
5216 getParser().parseStringToEndOfStatement().trim().split('+');
5218 if (parseToken(AsmToken::EndOfStatement
))
5221 SmallVector
<StringRef
, 4> RequestedExtensions
;
5222 if (!ExtensionString
.empty())
5223 ExtensionString
.split(RequestedExtensions
, '+');
5225 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5226 // once that is tablegen'ed
5227 if (!getSTI().isCPUStringValid(CPU
)) {
5228 Error(CurLoc
, "unknown CPU name");
5232 MCSubtargetInfo
&STI
= copySTI();
5233 STI
.setDefaultFeatures(CPU
, "");
5234 CurLoc
= incrementLoc(CurLoc
, CPU
.size());
5236 ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU
), RequestedExtensions
);
5238 FeatureBitset Features
= STI
.getFeatureBits();
5239 for (auto Name
: RequestedExtensions
) {
5240 // Advance source location past '+'.
5241 CurLoc
= incrementLoc(CurLoc
, 1);
5243 bool EnableFeature
= true;
5245 if (Name
.startswith_lower("no")) {
5246 EnableFeature
= false;
5247 Name
= Name
.substr(2);
5250 bool FoundExtension
= false;
5251 for (const auto &Extension
: ExtensionMap
) {
5252 if (Extension
.Name
!= Name
)
5255 if (Extension
.Features
.none())
5256 report_fatal_error("unsupported architectural extension: " + Name
);
5258 FeatureBitset ToggleFeatures
= EnableFeature
5259 ? (~Features
& Extension
.Features
)
5260 : ( Features
& Extension
.Features
);
5261 FeatureBitset Features
=
5262 ComputeAvailableFeatures(STI
.ToggleFeature(ToggleFeatures
));
5263 setAvailableFeatures(Features
);
5264 FoundExtension
= true;
5269 if (!FoundExtension
)
5270 Error(CurLoc
, "unsupported architectural extension");
5272 CurLoc
= incrementLoc(CurLoc
, Name
.size());
5277 /// parseDirectiveInst
5278 /// ::= .inst opcode [, ...]
5279 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc
) {
5280 if (getLexer().is(AsmToken::EndOfStatement
))
5281 return Error(Loc
, "expected expression following '.inst' directive");
5283 auto parseOp
= [&]() -> bool {
5285 const MCExpr
*Expr
= nullptr;
5286 if (check(getParser().parseExpression(Expr
), L
, "expected expression"))
5288 const MCConstantExpr
*Value
= dyn_cast_or_null
<MCConstantExpr
>(Expr
);
5289 if (check(!Value
, L
, "expected constant expression"))
5291 getTargetStreamer().emitInst(Value
->getValue());
5295 if (parseMany(parseOp
))
5296 return addErrorSuffix(" in '.inst' directive");
5300 // parseDirectiveTLSDescCall:
5301 // ::= .tlsdesccall symbol
5302 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L
) {
5304 if (check(getParser().parseIdentifier(Name
), L
,
5305 "expected symbol after directive") ||
5306 parseToken(AsmToken::EndOfStatement
))
5309 MCSymbol
*Sym
= getContext().getOrCreateSymbol(Name
);
5310 const MCExpr
*Expr
= MCSymbolRefExpr::create(Sym
, getContext());
5311 Expr
= AArch64MCExpr::create(Expr
, AArch64MCExpr::VK_TLSDESC
, getContext());
5314 Inst
.setOpcode(AArch64::TLSDESCCALL
);
5315 Inst
.addOperand(MCOperand::createExpr(Expr
));
5317 getParser().getStreamer().EmitInstruction(Inst
, getSTI());
5321 /// ::= .loh <lohName | lohId> label1, ..., labelN
5322 /// The number of arguments depends on the loh identifier.
5323 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal
, SMLoc Loc
) {
5325 if (getParser().getTok().isNot(AsmToken::Identifier
)) {
5326 if (getParser().getTok().isNot(AsmToken::Integer
))
5327 return TokError("expected an identifier or a number in directive");
5328 // We successfully get a numeric value for the identifier.
5329 // Check if it is valid.
5330 int64_t Id
= getParser().getTok().getIntVal();
5331 if (Id
<= -1U && !isValidMCLOHType(Id
))
5332 return TokError("invalid numeric identifier in directive");
5333 Kind
= (MCLOHType
)Id
;
5335 StringRef Name
= getTok().getIdentifier();
5336 // We successfully parse an identifier.
5337 // Check if it is a recognized one.
5338 int Id
= MCLOHNameToId(Name
);
5341 return TokError("invalid identifier in directive");
5342 Kind
= (MCLOHType
)Id
;
5344 // Consume the identifier.
5346 // Get the number of arguments of this LOH.
5347 int NbArgs
= MCLOHIdToNbArgs(Kind
);
5349 assert(NbArgs
!= -1 && "Invalid number of arguments");
5351 SmallVector
<MCSymbol
*, 3> Args
;
5352 for (int Idx
= 0; Idx
< NbArgs
; ++Idx
) {
5354 if (getParser().parseIdentifier(Name
))
5355 return TokError("expected identifier in directive");
5356 Args
.push_back(getContext().getOrCreateSymbol(Name
));
5358 if (Idx
+ 1 == NbArgs
)
5360 if (parseToken(AsmToken::Comma
,
5361 "unexpected token in '" + Twine(IDVal
) + "' directive"))
5364 if (parseToken(AsmToken::EndOfStatement
,
5365 "unexpected token in '" + Twine(IDVal
) + "' directive"))
5368 getStreamer().EmitLOHDirective((MCLOHType
)Kind
, Args
);
5372 /// parseDirectiveLtorg
5373 /// ::= .ltorg | .pool
5374 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L
) {
5375 if (parseToken(AsmToken::EndOfStatement
, "unexpected token in directive"))
5377 getTargetStreamer().emitCurrentConstantPool();
5381 /// parseDirectiveReq
5382 /// ::= name .req registername
5383 bool AArch64AsmParser::parseDirectiveReq(StringRef Name
, SMLoc L
) {
5384 MCAsmParser
&Parser
= getParser();
5385 Parser
.Lex(); // Eat the '.req' token.
5386 SMLoc SRegLoc
= getLoc();
5387 RegKind RegisterKind
= RegKind::Scalar
;
5389 OperandMatchResultTy ParseRes
= tryParseScalarRegister(RegNum
);
5391 if (ParseRes
!= MatchOperand_Success
) {
5393 RegisterKind
= RegKind::NeonVector
;
5394 ParseRes
= tryParseVectorRegister(RegNum
, Kind
, RegKind::NeonVector
);
5396 if (ParseRes
== MatchOperand_ParseFail
)
5399 if (ParseRes
== MatchOperand_Success
&& !Kind
.empty())
5400 return Error(SRegLoc
, "vector register without type specifier expected");
5403 if (ParseRes
!= MatchOperand_Success
) {
5405 RegisterKind
= RegKind::SVEDataVector
;
5407 tryParseVectorRegister(RegNum
, Kind
, RegKind::SVEDataVector
);
5409 if (ParseRes
== MatchOperand_ParseFail
)
5412 if (ParseRes
== MatchOperand_Success
&& !Kind
.empty())
5413 return Error(SRegLoc
,
5414 "sve vector register without type specifier expected");
5417 if (ParseRes
!= MatchOperand_Success
) {
5419 RegisterKind
= RegKind::SVEPredicateVector
;
5420 ParseRes
= tryParseVectorRegister(RegNum
, Kind
, RegKind::SVEPredicateVector
);
5422 if (ParseRes
== MatchOperand_ParseFail
)
5425 if (ParseRes
== MatchOperand_Success
&& !Kind
.empty())
5426 return Error(SRegLoc
,
5427 "sve predicate register without type specifier expected");
5430 if (ParseRes
!= MatchOperand_Success
)
5431 return Error(SRegLoc
, "register name or alias expected");
5433 // Shouldn't be anything else.
5434 if (parseToken(AsmToken::EndOfStatement
,
5435 "unexpected input in .req directive"))
5438 auto pair
= std::make_pair(RegisterKind
, (unsigned) RegNum
);
5439 if (RegisterReqs
.insert(std::make_pair(Name
, pair
)).first
->second
!= pair
)
5440 Warning(L
, "ignoring redefinition of register alias '" + Name
+ "'");
5445 /// parseDirectiveUneq
5446 /// ::= .unreq registername
5447 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L
) {
5448 MCAsmParser
&Parser
= getParser();
5449 if (getTok().isNot(AsmToken::Identifier
))
5450 return TokError("unexpected input in .unreq directive.");
5451 RegisterReqs
.erase(Parser
.getTok().getIdentifier().lower());
5452 Parser
.Lex(); // Eat the identifier.
5453 if (parseToken(AsmToken::EndOfStatement
))
5454 return addErrorSuffix("in '.unreq' directive");
5458 bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
5459 if (parseToken(AsmToken::EndOfStatement
, "unexpected token in directive"))
5461 getStreamer().EmitCFINegateRAState();
5465 /// parseDirectiveCFIBKeyFrame
5467 bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
5468 if (parseToken(AsmToken::EndOfStatement
,
5469 "unexpected token in '.cfi_b_key_frame'"))
5471 getStreamer().EmitCFIBKeyFrame();
5476 AArch64AsmParser::classifySymbolRef(const MCExpr
*Expr
,
5477 AArch64MCExpr::VariantKind
&ELFRefKind
,
5478 MCSymbolRefExpr::VariantKind
&DarwinRefKind
,
5480 ELFRefKind
= AArch64MCExpr::VK_INVALID
;
5481 DarwinRefKind
= MCSymbolRefExpr::VK_None
;
5484 if (const AArch64MCExpr
*AE
= dyn_cast
<AArch64MCExpr
>(Expr
)) {
5485 ELFRefKind
= AE
->getKind();
5486 Expr
= AE
->getSubExpr();
5489 const MCSymbolRefExpr
*SE
= dyn_cast
<MCSymbolRefExpr
>(Expr
);
5491 // It's a simple symbol reference with no addend.
5492 DarwinRefKind
= SE
->getKind();
5496 // Check that it looks like a symbol + an addend
5498 bool Relocatable
= Expr
->evaluateAsRelocatable(Res
, nullptr, nullptr);
5499 if (!Relocatable
|| Res
.getSymB())
5502 // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
5503 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
5504 if (!Res
.getSymA() && ELFRefKind
== AArch64MCExpr::VK_INVALID
)
5508 DarwinRefKind
= Res
.getSymA()->getKind();
5509 Addend
= Res
.getConstant();
5511 // It's some symbol reference + a constant addend, but really
5512 // shouldn't use both Darwin and ELF syntax.
5513 return ELFRefKind
== AArch64MCExpr::VK_INVALID
||
5514 DarwinRefKind
== MCSymbolRefExpr::VK_None
;
5517 /// Force static initialization.
5518 extern "C" LLVM_EXTERNAL_VISIBILITY
void LLVMInitializeAArch64AsmParser() {
5519 RegisterMCAsmParser
<AArch64AsmParser
> X(getTheAArch64leTarget());
5520 RegisterMCAsmParser
<AArch64AsmParser
> Y(getTheAArch64beTarget());
5521 RegisterMCAsmParser
<AArch64AsmParser
> Z(getTheARM64Target());
5522 RegisterMCAsmParser
<AArch64AsmParser
> W(getTheARM64_32Target());
5523 RegisterMCAsmParser
<AArch64AsmParser
> V(getTheAArch64_32Target());
5526 #define GET_REGISTER_MATCHER
5527 #define GET_SUBTARGET_FEATURE_NAME
5528 #define GET_MATCHER_IMPLEMENTATION
5529 #define GET_MNEMONIC_SPELL_CHECKER
5530 #include "AArch64GenAsmMatcher.inc"
5532 // Define this matcher function after the auto-generated include so we
5533 // have the match class enum definitions.
// Hook called by the tablegen'ed matcher to validate an operand against a
// target-specific match class. It is used for InstAliases whose assembly
// syntax contains a fixed-value immediate token.
5534 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand
&AsmOp
,
5536 AArch64Operand
&Op
= static_cast<AArch64Operand
&>(AsmOp
);
5537 // If the kind is a token for a literal immediate, check if our asm
5538 // operand matches. This is for InstAliases which have a fixed-value
5539 // immediate in the syntax.
5540 int64_t ExpectedVal
;
// NOTE(review): the switch over `Kind` that assigns ExpectedVal (original
// lines 5541-5584, one MCK_* case per literal immediate) appears truncated
// in this extract; only its default case survives below. Recover the case
// labels from revision history before editing this function.
5543 return Match_InvalidOperand
;
// Presumably reached only when Op is not an immediate at all — the guard
// (original line ~5584, likely `if (!Op.isImm())`) is elided here; confirm.
5585 return Match_InvalidOperand
;
// Only a plain constant expression can be compared against ExpectedVal;
// symbolic immediates are rejected below.
5586 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(Op
.getImm());
5588 return Match_InvalidOperand
;
// Accept the operand only when the constant matches the literal the alias
// syntax demands.
5589 if (CE
->getValue() == ExpectedVal
)
5590 return Match_Success
;
5591 return Match_InvalidOperand
;
5594 OperandMatchResultTy
5595 AArch64AsmParser::tryParseGPRSeqPair(OperandVector
&Operands
) {
5599 if (getParser().getTok().isNot(AsmToken::Identifier
)) {
5600 Error(S
, "expected register");
5601 return MatchOperand_ParseFail
;
5605 OperandMatchResultTy Res
= tryParseScalarRegister(FirstReg
);
5606 if (Res
!= MatchOperand_Success
)
5607 return MatchOperand_ParseFail
;
5609 const MCRegisterClass
&WRegClass
=
5610 AArch64MCRegisterClasses
[AArch64::GPR32RegClassID
];
5611 const MCRegisterClass
&XRegClass
=
5612 AArch64MCRegisterClasses
[AArch64::GPR64RegClassID
];
5614 bool isXReg
= XRegClass
.contains(FirstReg
),
5615 isWReg
= WRegClass
.contains(FirstReg
);
5616 if (!isXReg
&& !isWReg
) {
5617 Error(S
, "expected first even register of a "
5618 "consecutive same-size even/odd register pair");
5619 return MatchOperand_ParseFail
;
5622 const MCRegisterInfo
*RI
= getContext().getRegisterInfo();
5623 unsigned FirstEncoding
= RI
->getEncodingValue(FirstReg
);
5625 if (FirstEncoding
& 0x1) {
5626 Error(S
, "expected first even register of a "
5627 "consecutive same-size even/odd register pair");
5628 return MatchOperand_ParseFail
;
5631 if (getParser().getTok().isNot(AsmToken::Comma
)) {
5632 Error(getLoc(), "expected comma");
5633 return MatchOperand_ParseFail
;
5640 Res
= tryParseScalarRegister(SecondReg
);
5641 if (Res
!= MatchOperand_Success
)
5642 return MatchOperand_ParseFail
;
5644 if (RI
->getEncodingValue(SecondReg
) != FirstEncoding
+ 1 ||
5645 (isXReg
&& !XRegClass
.contains(SecondReg
)) ||
5646 (isWReg
&& !WRegClass
.contains(SecondReg
))) {
5647 Error(E
,"expected second odd register of a "
5648 "consecutive same-size even/odd register pair");
5649 return MatchOperand_ParseFail
;
5654 Pair
= RI
->getMatchingSuperReg(FirstReg
, AArch64::sube64
,
5655 &AArch64MCRegisterClasses
[AArch64::XSeqPairsClassRegClassID
]);
5657 Pair
= RI
->getMatchingSuperReg(FirstReg
, AArch64::sube32
,
5658 &AArch64MCRegisterClasses
[AArch64::WSeqPairsClassRegClassID
]);
5661 Operands
.push_back(AArch64Operand::CreateReg(Pair
, RegKind::Scalar
, S
,
5662 getLoc(), getContext()));
5664 return MatchOperand_Success
;
5667 template <bool ParseShiftExtend
, bool ParseSuffix
>
5668 OperandMatchResultTy
5669 AArch64AsmParser::tryParseSVEDataVector(OperandVector
&Operands
) {
5670 const SMLoc S
= getLoc();
5671 // Check for a SVE vector register specifier first.
5675 OperandMatchResultTy Res
=
5676 tryParseVectorRegister(RegNum
, Kind
, RegKind::SVEDataVector
);
5678 if (Res
!= MatchOperand_Success
)
5681 if (ParseSuffix
&& Kind
.empty())
5682 return MatchOperand_NoMatch
;
5684 const auto &KindRes
= parseVectorKind(Kind
, RegKind::SVEDataVector
);
5686 return MatchOperand_NoMatch
;
5688 unsigned ElementWidth
= KindRes
->second
;
5690 // No shift/extend is the default.
5691 if (!ParseShiftExtend
|| getParser().getTok().isNot(AsmToken::Comma
)) {
5692 Operands
.push_back(AArch64Operand::CreateVectorReg(
5693 RegNum
, RegKind::SVEDataVector
, ElementWidth
, S
, S
, getContext()));
5695 OperandMatchResultTy Res
= tryParseVectorIndex(Operands
);
5696 if (Res
== MatchOperand_ParseFail
)
5697 return MatchOperand_ParseFail
;
5698 return MatchOperand_Success
;
5705 SmallVector
<std::unique_ptr
<MCParsedAsmOperand
>, 1> ExtOpnd
;
5706 Res
= tryParseOptionalShiftExtend(ExtOpnd
);
5707 if (Res
!= MatchOperand_Success
)
5710 auto Ext
= static_cast<AArch64Operand
*>(ExtOpnd
.back().get());
5711 Operands
.push_back(AArch64Operand::CreateVectorReg(
5712 RegNum
, RegKind::SVEDataVector
, ElementWidth
, S
, Ext
->getEndLoc(),
5713 getContext(), Ext
->getShiftExtendType(), Ext
->getShiftExtendAmount(),
5714 Ext
->hasShiftExtendAmount()));
5716 return MatchOperand_Success
;
5719 OperandMatchResultTy
5720 AArch64AsmParser::tryParseSVEPattern(OperandVector
&Operands
) {
5721 MCAsmParser
&Parser
= getParser();
5723 SMLoc SS
= getLoc();
5724 const AsmToken
&TokE
= Parser
.getTok();
5725 bool IsHash
= TokE
.is(AsmToken::Hash
);
5727 if (!IsHash
&& TokE
.isNot(AsmToken::Identifier
))
5728 return MatchOperand_NoMatch
;
5732 Parser
.Lex(); // Eat hash
5734 // Parse the immediate operand.
5735 const MCExpr
*ImmVal
;
5737 if (Parser
.parseExpression(ImmVal
))
5738 return MatchOperand_ParseFail
;
5740 auto *MCE
= dyn_cast
<MCConstantExpr
>(ImmVal
);
5742 return MatchOperand_ParseFail
;
5744 Pattern
= MCE
->getValue();
5746 // Parse the pattern
5747 auto Pat
= AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE
.getString());
5749 return MatchOperand_NoMatch
;
5752 Pattern
= Pat
->Encoding
;
5753 assert(Pattern
>= 0 && Pattern
< 32);
5757 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern
, getContext()),
5758 SS
, getLoc(), getContext()));
5760 return MatchOperand_Success
;