1 // Copyright 2015, ARM Limited
2 // All rights reserved.
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
7 // * Redistributions of source code must retain the above copyright notice,
8 // this list of conditions and the following disclaimer.
9 // * Redistributions in binary form must reproduce the above copyright notice,
10 // this list of conditions and the following disclaimer in the documentation
11 // and/or other materials provided with the distribution.
12 // * Neither the name of ARM Limited nor the names of its contributors may be
13 // used to endorse or promote products derived from this software without
14 // specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef VIXL_A64_INSTRUCTIONS_A64_H_
28 #define VIXL_A64_INSTRUCTIONS_A64_H_
30 #include "vixl/globals.h"
31 #include "vixl/utils.h"
32 #include "vixl/a64/constants-a64.h"
35 // ISA constants. --------------------------------------------------------------
37 typedef uint32_t Instr
;
38 const unsigned kInstructionSize
= 4;
39 const unsigned kInstructionSizeLog2
= 2;
40 const unsigned kLiteralEntrySize
= 4;
41 const unsigned kLiteralEntrySizeLog2
= 2;
42 const unsigned kMaxLoadLiteralRange
= 1 * MBytes
;
44 // This is the nominal page size (as used by the adrp instruction); the actual
45 // size of the memory pages allocated by the kernel is likely to differ.
46 const unsigned kPageSize
= 4 * KBytes
;
47 const unsigned kPageSizeLog2
= 12;
// Register sizes in bits, bytes, and their log2 forms, for each register
// class (B/H/W/X scalar, S/D/Q floating-point and vector).
const unsigned kBRegSize = 8;
const unsigned kBRegSizeLog2 = 3;
const unsigned kBRegSizeInBytes = kBRegSize / 8;
const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3;
const unsigned kHRegSize = 16;
const unsigned kHRegSizeLog2 = 4;
const unsigned kHRegSizeInBytes = kHRegSize / 8;
const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3;
const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize / 8;
const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
const unsigned kXRegSize = 64;
const unsigned kXRegSizeLog2 = 6;
const unsigned kXRegSizeInBytes = kXRegSize / 8;
const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
const unsigned kSRegSize = 32;
const unsigned kSRegSizeLog2 = 5;
const unsigned kSRegSizeInBytes = kSRegSize / 8;
const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
const unsigned kDRegSize = 64;
const unsigned kDRegSizeLog2 = 6;
const unsigned kDRegSizeInBytes = kDRegSize / 8;
const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
const unsigned kQRegSize = 128;
const unsigned kQRegSizeLog2 = 7;
const unsigned kQRegSizeInBytes = kQRegSize / 8;
const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3;
// Bit masks covering each register class, sign bits, and sub-word masks,
// plus the representable integer extremes for W and X registers.
const uint64_t kWRegMask = UINT64_C(0xffffffff);
const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSRegMask = UINT64_C(0xffffffff);
const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSSignMask = UINT64_C(0x80000000);
const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
const uint64_t kWSignMask = UINT64_C(0x80000000);
const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
const uint64_t kByteMask = UINT64_C(0xff);
const uint64_t kHalfWordMask = UINT64_C(0xffff);
const uint64_t kWordMask = UINT64_C(0xffffffff);
const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
const int64_t kXMinInt = INT64_C(0x8000000000000000);
const int32_t kWMaxInt = INT32_C(0x7fffffff);
const int32_t kWMinInt = INT32_C(0x80000000);
94 const unsigned kLinkRegCode
= 30;
95 const unsigned kZeroRegCode
= 31;
96 const unsigned kSPRegInternalCode
= 63;
97 const unsigned kRegCodeMask
= 0x1f;
99 const unsigned kAddressTagOffset
= 56;
100 const unsigned kAddressTagWidth
= 8;
101 const uint64_t kAddressTagMask
=
102 ((UINT64_C(1) << kAddressTagWidth
) - 1) << kAddressTagOffset
;
103 VIXL_STATIC_ASSERT(kAddressTagMask
== UINT64_C(0xff00000000000000));
// AArch64 floating-point specifics. These match IEEE-754.
const unsigned kDoubleMantissaBits = 52;
const unsigned kDoubleExponentBits = 11;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;
const unsigned kFloat16MantissaBits = 10;
const unsigned kFloat16ExponentBits = 5;
113 // Floating-point infinity values.
114 extern const float16 kFP16PositiveInfinity
;
115 extern const float16 kFP16NegativeInfinity
;
116 extern const float kFP32PositiveInfinity
;
117 extern const float kFP32NegativeInfinity
;
118 extern const double kFP64PositiveInfinity
;
119 extern const double kFP64NegativeInfinity
;
121 // The default NaN values (for FPCR.DN=1).
122 extern const float16 kFP16DefaultNaN
;
123 extern const float kFP32DefaultNaN
;
124 extern const double kFP64DefaultNaN
;
126 unsigned CalcLSDataSize(LoadStoreOp op
);
127 unsigned CalcLSPairDataSize(LoadStorePairOp op
);
// NOTE(review): the two spans below are fragments of enum definitions; the
// enum openers (and some enumerators, e.g. the value 1 between 0 and 2) are
// not visible in this extract -- confirm against the full header.
// First fragment: branch-kind enumerators used by BranchType() below.
130 UnknownBranchType
 = 0,
132 UncondBranchType
 = 2,
133 CompareBranchType
 = 3,
// Second fragment: FP rounding-mode enumerators.
144 // The first four values are encodable directly by FPCR<RMode>.
146 FPPositiveInfinity
 = 0x1,
147 FPNegativeInfinity
 = 0x2,
150 // The final rounding modes are only available when explicitly specified by
151 // the instruction (such as with fcvta). It cannot be set in FPCR.
161 // Instructions. ---------------------------------------------------------------
165 Instr
InstructionBits() const {
166 return *(reinterpret_cast<const Instr
*>(this));
169 void SetInstructionBits(Instr new_instr
) {
170 *(reinterpret_cast<Instr
*>(this)) = new_instr
;
173 int Bit(int pos
) const {
174 return (InstructionBits() >> pos
) & 1;
177 uint32_t Bits(int msb
, int lsb
) const {
178 return unsigned_bitextract_32(msb
, lsb
, InstructionBits());
181 int32_t SignedBits(int msb
, int lsb
) const {
182 int32_t bits
= *(reinterpret_cast<const int32_t*>(this));
183 return signed_bitextract_32(msb
, lsb
, bits
);
186 Instr
Mask(uint32_t mask
) const {
187 return InstructionBits() & mask
;
190 #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
191 int32_t Name() const { return Func(HighBit, LowBit); }
192 INSTRUCTION_FIELDS_LIST(DEFINE_GETTER
)
195 // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
196 // formed from ImmPCRelLo and ImmPCRelHi.
197 int ImmPCRel() const {
199 static_cast<int>((ImmPCRelHi() << ImmPCRelLo_width
) | ImmPCRelLo());
200 int width
= ImmPCRelLo_width
+ ImmPCRelHi_width
;
201 return signed_bitextract_32(width
- 1, 0, offset
);
204 uint64_t ImmLogical() const;
205 unsigned ImmNEONabcdefgh() const;
206 float ImmFP32() const;
207 double ImmFP64() const;
208 float ImmNEONFP32() const;
209 double ImmNEONFP64() const;
211 unsigned SizeLS() const {
212 return CalcLSDataSize(static_cast<LoadStoreOp
>(Mask(LoadStoreMask
)));
215 unsigned SizeLSPair() const {
216 return CalcLSPairDataSize(
217 static_cast<LoadStorePairOp
>(Mask(LoadStorePairMask
)));
220 int NEONLSIndex(int access_size_shift
) const {
223 int64_t size
= NEONLSSize();
224 int64_t index
= (q
<< 3) | (s
<< 2) | size
;
225 return static_cast<int>(index
>> access_size_shift
);
229 bool IsCondBranchImm() const {
230 return Mask(ConditionalBranchFMask
) == ConditionalBranchFixed
;
233 bool IsUncondBranchImm() const {
234 return Mask(UnconditionalBranchFMask
) == UnconditionalBranchFixed
;
237 bool IsCompareBranch() const {
238 return Mask(CompareBranchFMask
) == CompareBranchFixed
;
241 bool IsTestBranch() const {
242 return Mask(TestBranchFMask
) == TestBranchFixed
;
245 bool IsImmBranch() const {
246 return BranchType() != UnknownBranchType
;
249 bool IsPCRelAddressing() const {
250 return Mask(PCRelAddressingFMask
) == PCRelAddressingFixed
;
253 bool IsLogicalImmediate() const {
254 return Mask(LogicalImmediateFMask
) == LogicalImmediateFixed
;
257 bool IsAddSubImmediate() const {
258 return Mask(AddSubImmediateFMask
) == AddSubImmediateFixed
;
261 bool IsAddSubExtended() const {
262 return Mask(AddSubExtendedFMask
) == AddSubExtendedFixed
;
265 bool IsLoadOrStore() const {
266 return Mask(LoadStoreAnyFMask
) == LoadStoreAnyFixed
;
270 bool IsStore() const;
272 bool IsLoadLiteral() const {
273 // This includes PRFM_lit.
274 return Mask(LoadLiteralFMask
) == LoadLiteralFixed
;
277 bool IsMovn() const {
278 return (Mask(MoveWideImmediateMask
) == MOVN_x
) ||
279 (Mask(MoveWideImmediateMask
) == MOVN_w
);
282 static int ImmBranchRangeBitwidth(ImmBranchType branch_type
);
283 static int32_t ImmBranchForwardRange(ImmBranchType branch_type
);
284 static bool IsValidImmPCOffset(ImmBranchType branch_type
, int64_t offset
);
286 // Indicate whether Rd can be the stack pointer or the zero register. This
287 // does not check that the instruction actually has an Rd field.
288 Reg31Mode
RdMode() const {
289 // The following instructions use sp or wsp as Rd:
290 // Add/sub (immediate) when not setting the flags.
291 // Add/sub (extended) when not setting the flags.
292 // Logical (immediate) when not setting the flags.
293 // Otherwise, r31 is the zero register.
294 if (IsAddSubImmediate() || IsAddSubExtended()) {
295 if (Mask(AddSubSetFlagsBit
)) {
296 return Reg31IsZeroRegister
;
298 return Reg31IsStackPointer
;
301 if (IsLogicalImmediate()) {
302 // Of the logical (immediate) instructions, only ANDS (and its aliases)
303 // can set the flags. The others can all write into sp.
304 // Note that some logical operations are not available to
305 // immediate-operand instructions, so we have to combine two masks here.
306 if (Mask(LogicalImmediateMask
& LogicalOpMask
) == ANDS
) {
307 return Reg31IsZeroRegister
;
309 return Reg31IsStackPointer
;
312 return Reg31IsZeroRegister
;
315 // Indicate whether Rn can be the stack pointer or the zero register. This
316 // does not check that the instruction actually has an Rn field.
317 Reg31Mode
RnMode() const {
318 // The following instructions use sp or wsp as Rn:
319 // All loads and stores.
320 // Add/sub (immediate).
321 // Add/sub (extended).
322 // Otherwise, r31 is the zero register.
323 if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
324 return Reg31IsStackPointer
;
326 return Reg31IsZeroRegister
;
329 ImmBranchType
BranchType() const {
330 if (IsCondBranchImm()) {
331 return CondBranchType
;
332 } else if (IsUncondBranchImm()) {
333 return UncondBranchType
;
334 } else if (IsCompareBranch()) {
335 return CompareBranchType
;
336 } else if (IsTestBranch()) {
337 return TestBranchType
;
339 return UnknownBranchType
;
343 // Find the target of this instruction. 'this' may be a branch or a
344 // PC-relative addressing instruction.
345 const Instruction
* ImmPCOffsetTarget() const;
347 // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
348 // a PC-relative addressing instruction.
349 void SetImmPCOffsetTarget(const Instruction
* target
);
350 // Patch a literal load instruction to load from 'source'.
351 void SetImmLLiteral(const Instruction
* source
);
353 // The range of a load literal instruction, expressed as 'instr +- range'.
354 // The range is actually the 'positive' range; the branch instruction can
355 // target [instr - range - kInstructionSize, instr + range].
356 static const int kLoadLiteralImmBitwidth
= 19;
357 static const int kLoadLiteralRange
=
358 (1 << kLoadLiteralImmBitwidth
) / 2 - kInstructionSize
;
360 // Calculate the address of a literal referred to by a load-literal
361 // instruction, and return it as the specified type.
363 // The literal itself is safely mutable only if the backing buffer is safely
365 template <typename T
>
366 T
LiteralAddress() const {
367 uint64_t base_raw
= reinterpret_cast<uint64_t>(this);
368 int64_t offset
= ImmLLiteral() << kLiteralEntrySizeLog2
;
369 uint64_t address_raw
= base_raw
+ offset
;
371 // Cast the address using a C-style cast. A reinterpret_cast would be
372 // appropriate, but it can't cast one integral type to another.
373 T address
= (T
)(address_raw
);
375 // Assert that the address can be represented by the specified type.
376 VIXL_ASSERT((uint64_t)(address
) == address_raw
);
381 uint32_t Literal32() const {
383 memcpy(&literal
, LiteralAddress
<const void*>(), sizeof(literal
));
387 uint64_t Literal64() const {
389 memcpy(&literal
, LiteralAddress
<const void*>(), sizeof(literal
));
393 float LiteralFP32() const {
394 return rawbits_to_float(Literal32());
397 double LiteralFP64() const {
398 return rawbits_to_double(Literal64());
401 const Instruction
* NextInstruction() const {
402 return this + kInstructionSize
;
405 const Instruction
* InstructionAtOffset(int64_t offset
) const {
406 VIXL_ASSERT(IsWordAligned(this + offset
));
407 return this + offset
;
410 template<typename T
> static Instruction
* Cast(T src
) {
411 return reinterpret_cast<Instruction
*>(src
);
414 template<typename T
> static const Instruction
* CastConst(T src
) {
415 return reinterpret_cast<const Instruction
*>(src
);
419 int ImmBranch() const;
421 static float Imm8ToFP32(uint32_t imm8
);
422 static double Imm8ToFP64(uint32_t imm8
);
424 void SetPCRelImmTarget(const Instruction
* target
);
425 void SetBranchImmTarget(const Instruction
* target
);
429 // Functions for handling NEON vector format information.
// NOTE(review): the lines below are a fragment of the VectorFormat enum; the
// enum opener and most of the vector-format enumerators (8B, 4H, 8H, 2S, 4S,
// 1D, 2D, ...) are not visible in this extract -- confirm against the full
// header before editing.
431 kFormatUndefined
 = 0xffffffff,
433 kFormat16B
 = NEON_16B
,
441 // Scalar formats. We add the scalar bit to distinguish between scalar and
442 // vector enumerations; the bit is always set in the encoding of scalar ops
443 // and always clear for vector ops. Although kFormatD and kFormat1D appear
444 // to be the same, their meaning is subtly different. The first is a scalar
445 // operation, the second a vector operation that only affects one lane.
446 kFormatB
 = NEON_B
 | NEONScalar
,
447 kFormatH
 = NEON_H
 | NEONScalar
,
448 kFormatS
 = NEON_S
 | NEONScalar
,
449 kFormatD
 = NEON_D
 | NEONScalar
VectorFormatHalfWidth(const VectorFormat vform
);
453 VectorFormat
VectorFormatDoubleWidth(const VectorFormat vform
);
454 VectorFormat
VectorFormatDoubleLanes(const VectorFormat vform
);
455 VectorFormat
VectorFormatHalfLanes(const VectorFormat vform
);
456 VectorFormat
ScalarFormatFromLaneSize(int lanesize
);
457 VectorFormat
VectorFormatHalfWidthDoubleLanes(const VectorFormat vform
);
458 VectorFormat
VectorFormatFillQ(const VectorFormat vform
);
459 unsigned RegisterSizeInBitsFromFormat(VectorFormat vform
);
460 unsigned RegisterSizeInBytesFromFormat(VectorFormat vform
);
461 // TODO: Make the return types of these functions consistent.
462 unsigned LaneSizeInBitsFromFormat(VectorFormat vform
);
463 int LaneSizeInBytesFromFormat(VectorFormat vform
);
464 int LaneSizeInBytesLog2FromFormat(VectorFormat vform
);
465 int LaneCountFromFormat(VectorFormat vform
);
466 int MaxLaneCountFromFormat(VectorFormat vform
);
467 bool IsVectorFormat(VectorFormat vform
);
468 int64_t MaxIntFromFormat(VectorFormat vform
);
469 int64_t MinIntFromFormat(VectorFormat vform
);
470 uint64_t MaxUintFromFormat(VectorFormat vform
);
489 static const unsigned kNEONFormatMaxBits
= 6;
491 struct NEONFormatMap
{
492 // The bit positions in the instruction to consider.
493 uint8_t bits
[kNEONFormatMaxBits
];
495 // Mapping from concatenated bits to format.
496 NEONFormat map
[1 << kNEONFormatMaxBits
];
// Decodes NEON vector formats from instruction bits and substitutes them
// into disassembly strings.
499 class NEONFormatDecoder
 {
// NOTE(review): the enumerators of SubstitutionMode (kPlaceholder and kFormat
// are referenced later in this class) are not visible in this extract --
// confirm against the full header.
501 enum SubstitutionMode
 {
506 // Construct a format decoder with increasingly specific format maps for each
507 // subsitution. If no format map is specified, the default is the integer
509 explicit NEONFormatDecoder(const Instruction
* instr
) {
510 instrbits_
= instr
->InstructionBits();
511 SetFormatMaps(IntegerFormatMap());
513 NEONFormatDecoder(const Instruction
* instr
,
514 const NEONFormatMap
* format
) {
515 instrbits_
= instr
->InstructionBits();
516 SetFormatMaps(format
);
518 NEONFormatDecoder(const Instruction
* instr
,
519 const NEONFormatMap
* format0
,
520 const NEONFormatMap
* format1
) {
521 instrbits_
= instr
->InstructionBits();
522 SetFormatMaps(format0
, format1
);
524 NEONFormatDecoder(const Instruction
* instr
,
525 const NEONFormatMap
* format0
,
526 const NEONFormatMap
* format1
,
527 const NEONFormatMap
* format2
) {
528 instrbits_
= instr
->InstructionBits();
529 SetFormatMaps(format0
, format1
, format2
);
532 // Set the format mapping for all or individual substitutions.
533 void SetFormatMaps(const NEONFormatMap
* format0
,
534 const NEONFormatMap
* format1
= NULL
,
535 const NEONFormatMap
* format2
= NULL
) {
536 VIXL_ASSERT(format0
!= NULL
);
537 formats_
[0] = format0
;
538 formats_
[1] = (format1
== NULL
) ? formats_
[0] : format1
;
539 formats_
[2] = (format2
== NULL
) ? formats_
[1] : format2
;
541 void SetFormatMap(unsigned index
, const NEONFormatMap
* format
) {
542 VIXL_ASSERT(index
<= (sizeof(formats_
) / sizeof(formats_
[0])));
543 VIXL_ASSERT(format
!= NULL
);
544 formats_
[index
] = format
;
547 // Substitute %s in the input string with the placeholder string for each
548 // register, ie. "'B", "'H", etc.
549 const char* SubstitutePlaceholders(const char* string
) {
550 return Substitute(string
, kPlaceholder
, kPlaceholder
, kPlaceholder
);
553 // Substitute %s in the input string with a new string based on the
554 // substitution mode.
555 const char* Substitute(const char* string
,
556 SubstitutionMode mode0
= kFormat
,
557 SubstitutionMode mode1
= kFormat
,
558 SubstitutionMode mode2
= kFormat
) {
559 snprintf(form_buffer_
, sizeof(form_buffer_
), string
,
560 GetSubstitute(0, mode0
),
561 GetSubstitute(1, mode1
),
562 GetSubstitute(2, mode2
));
566 // Append a "2" to a mnemonic string based of the state of the Q bit.
567 const char* Mnemonic(const char* mnemonic
) {
568 if ((instrbits_
& NEON_Q
) != 0) {
569 snprintf(mne_buffer_
, sizeof(mne_buffer_
), "%s2", mnemonic
);
575 VectorFormat
GetVectorFormat(int format_index
= 0) {
576 return GetVectorFormat(formats_
[format_index
]);
579 VectorFormat
GetVectorFormat(const NEONFormatMap
* format_map
) {
580 static const VectorFormat vform
[] = {
582 kFormat8B
, kFormat16B
, kFormat4H
, kFormat8H
,
583 kFormat2S
, kFormat4S
, kFormat1D
, kFormat2D
,
584 kFormatB
, kFormatH
, kFormatS
, kFormatD
586 VIXL_ASSERT(GetNEONFormat(format_map
) < (sizeof(vform
) / sizeof(vform
[0])));
587 return vform
[GetNEONFormat(format_map
)];
590 // Built in mappings for common cases.
592 // The integer format map uses three bits (Q, size<1:0>) to encode the
593 // "standard" set of NEON integer vector formats.
594 static const NEONFormatMap
* IntegerFormatMap() {
595 static const NEONFormatMap map
= {
597 {NF_8B
, NF_16B
, NF_4H
, NF_8H
, NF_2S
, NF_4S
, NF_UNDEF
, NF_2D
}
602 // The long integer format map uses two bits (size<1:0>) to encode the
603 // long set of NEON integer vector formats. These are used in narrow, wide
604 // and long operations.
605 static const NEONFormatMap
* LongIntegerFormatMap() {
606 static const NEONFormatMap map
= {
607 {23, 22}, {NF_8H
, NF_4S
, NF_2D
}
612 // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
613 // formats: NF_2S, NF_4S, NF_2D.
614 static const NEONFormatMap
* FPFormatMap() {
615 // The FP format map assumes two bits (Q, size<0>) are used to encode the
616 // NEON FP vector formats: NF_2S, NF_4S, NF_2D.
617 static const NEONFormatMap map
= {
618 {22, 30}, {NF_2S
, NF_4S
, NF_UNDEF
, NF_2D
}
623 // The load/store format map uses three bits (Q, 11, 10) to encode the
624 // set of NEON vector formats.
625 static const NEONFormatMap
* LoadStoreFormatMap() {
626 static const NEONFormatMap map
= {
628 {NF_8B
, NF_16B
, NF_4H
, NF_8H
, NF_2S
, NF_4S
, NF_1D
, NF_2D
}
633 // The logical format map uses one bit (Q) to encode the NEON vector format:
635 static const NEONFormatMap
* LogicalFormatMap() {
636 static const NEONFormatMap map
= {
637 {30}, {NF_8B
, NF_16B
}
642 // The triangular format map uses between two and five bits to encode the NEON
644 // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
645 // x1000->2S, x1001->4S, 10001->2D, all others undefined.
646 static const NEONFormatMap
* TriangularFormatMap() {
647 static const NEONFormatMap map
= {
648 {19, 18, 17, 16, 30},
649 {NF_UNDEF
, NF_UNDEF
, NF_8B
, NF_16B
, NF_4H
, NF_8H
, NF_8B
, NF_16B
, NF_2S
,
650 NF_4S
, NF_8B
, NF_16B
, NF_4H
, NF_8H
, NF_8B
, NF_16B
, NF_UNDEF
, NF_2D
,
651 NF_8B
, NF_16B
, NF_4H
, NF_8H
, NF_8B
, NF_16B
, NF_2S
, NF_4S
, NF_8B
, NF_16B
,
652 NF_4H
, NF_8H
, NF_8B
, NF_16B
}
657 // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
658 // formats: NF_B, NF_H, NF_S, NF_D.
659 static const NEONFormatMap
* ScalarFormatMap() {
660 static const NEONFormatMap map
= {
661 {23, 22}, {NF_B
, NF_H
, NF_S
, NF_D
}
666 // The long scalar format map uses two bits (size<1:0>) to encode the longer
667 // NEON scalar formats: NF_H, NF_S, NF_D.
668 static const NEONFormatMap
* LongScalarFormatMap() {
669 static const NEONFormatMap map
= {
670 {23, 22}, {NF_H
, NF_S
, NF_D
}
675 // The FP scalar format map assumes one bit (size<0>) is used to encode the
676 // NEON FP scalar formats: NF_S, NF_D.
677 static const NEONFormatMap
* FPScalarFormatMap() {
678 static const NEONFormatMap map
= {
684 // The triangular scalar format map uses between one and four bits to encode
685 // the NEON FP scalar formats:
686 // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
687 static const NEONFormatMap
* TriangularScalarFormatMap() {
688 static const NEONFormatMap map
= {
690 {NF_UNDEF
, NF_B
, NF_H
, NF_B
, NF_S
, NF_B
, NF_H
, NF_B
,
691 NF_D
, NF_B
, NF_H
, NF_B
, NF_S
, NF_B
, NF_H
, NF_B
}
697 // Get a pointer to a string that represents the format or placeholder for
698 // the specified substitution index, based on the format map and instruction.
699 const char* GetSubstitute(int index
, SubstitutionMode mode
) {
700 if (mode
== kFormat
) {
701 return NEONFormatAsString(GetNEONFormat(formats_
[index
]));
703 VIXL_ASSERT(mode
== kPlaceholder
);
704 return NEONFormatAsPlaceholder(GetNEONFormat(formats_
[index
]));
707 // Get the NEONFormat enumerated value for bits obtained from the
708 // instruction based on the specified format mapping.
709 NEONFormat
GetNEONFormat(const NEONFormatMap
* format_map
) {
710 return format_map
->map
[PickBits(format_map
->bits
)];
713 // Convert a NEONFormat into a string.
714 static const char* NEONFormatAsString(NEONFormat format
) {
715 static const char* formats
[] = {
717 "8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
720 VIXL_ASSERT(format
< (sizeof(formats
) / sizeof(formats
[0])));
721 return formats
[format
];
724 // Convert a NEONFormat into a register placeholder string.
725 static const char* NEONFormatAsPlaceholder(NEONFormat format
) {
726 VIXL_ASSERT((format
== NF_B
) || (format
== NF_H
) ||
727 (format
== NF_S
) || (format
== NF_D
) ||
728 (format
== NF_UNDEF
));
729 static const char* formats
[] = {
731 "undefined", "undefined", "undefined", "undefined",
732 "undefined", "undefined", "undefined", "undefined",
733 "'B", "'H", "'S", "'D"
735 return formats
[format
];
738 // Select bits from instrbits_ defined by the bits array, concatenate them,
739 // and return the value.
740 uint8_t PickBits(const uint8_t bits
[]) {
742 for (unsigned b
= 0; b
< kNEONFormatMaxBits
; b
++) {
743 if (bits
[b
] == 0) break;
745 result
|= ((instrbits_
& (1 << bits
[b
])) == 0) ? 0 : 1;
// NOTE(review): a member holding the instruction bits (read as instrbits_ by
// the methods above) is not visible in this extract -- confirm against the
// full header.
// Format maps for the three substitution slots.
751 const NEONFormatMap
* formats_
[3];
// Scratch buffer for Substitute() output.
752 char form_buffer_
[64];
// Scratch buffer for Mnemonic() output.
753 char mne_buffer_
[16];
757 #endif // VIXL_A64_INSTRUCTIONS_A64_H_