// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "vixl/a64/instructions-a64.h"
28 #include "vixl/a64/assembler-a64.h"

// Floating-point infinity values.
const float16 kFP16PositiveInfinity = 0x7c00;
const float16 kFP16NegativeInfinity = 0xfc00;
const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
const double kFP64PositiveInfinity =
    rawbits_to_double(UINT64_C(0x7ff0000000000000));
const double kFP64NegativeInfinity =
    rawbits_to_double(UINT64_C(0xfff0000000000000));


// The default NaN values (for FPCR.DN=1).
const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);
const float16 kFP16DefaultNaN = 0x7e00;


static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  VIXL_ASSERT(width <= 64);
  rotate &= 63;
  return ((value & ((UINT64_C(1) << rotate) - 1)) <<
          (width - rotate)) | (value >> rotate);
}
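
// Worked example (illustrative, not from the original source): rotating the
// four-bit value 0b0011 right by one moves the low bit to the top of the
// field:
//   RotateRight(0x3, 1, 4) == ((0x3 & 0x1) << 3) | (0x3 >> 1) == 0b1001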


static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
              (width == 32));
  VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  uint64_t result = value & ((UINT64_C(1) << width) - 1);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}
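
// Worked example (illustrative): a four-bit pattern doubles in place until it
// fills the register, so
//   RepeatBitsAcrossReg(kWRegSize, 0x1, 4)
// expands as 0x1 -> 0x11 -> 0x1111 -> 0x11111111.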


bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_b:
      case LDR_h:
      case LDR_s:
      case LDR_d:
      case LDR_q: return true;
      default: return false;
    }
  }
}


bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_b:
      case STR_h:
      case STR_s:
      case STR_d:
      case STR_q: return true;
      default: return false;
    }
  }
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() const {
  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
  int32_t n = BitN();
  int32_t imm_s = ImmSetBits();
  int32_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //    N   imms    immr    size        S             R
  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.

  if (n == 1) {
    if (imm_s == 0x3f) {
      return 0;
    }
    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1f) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  VIXL_UNREACHABLE();
  return 0;
}
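
// Worked decode example (illustrative): N = 0, imms = 0b111100, immr = 0
// matches the two-bit row of the table above (S = 0, R = 0). The pattern is a
// single set bit per two-bit chunk, so an X-sized destination decodes to
// 0x5555555555555555.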


uint32_t Instruction::ImmNEONabcdefgh() const {
  return ImmNEONabc() << 5 | ImmNEONdefgh();
}


float Instruction::Imm8ToFP32(uint32_t imm8) {
  // Imm8: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
  uint32_t bits = imm8;
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}
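
// Worked example (illustrative): imm8 = 0x70 gives bit7 = 0, bit6 = 1 and
// bit5_to_0 = 0x30, so result = (31 << 25) | (0x30 << 19) = 0x3f800000, which
// is 1.0f - the immediate used by "fmov s0, #1.0".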


float Instruction::ImmFP32() const {
  return Imm8ToFP32(ImmFP());
}


double Instruction::Imm8ToFP64(uint32_t imm8) {
  // Imm8: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
  uint32_t bits = imm8;
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}


double Instruction::ImmFP64() const {
  return Imm8ToFP64(ImmFP());
}


float Instruction::ImmNEONFP32() const {
  return Imm8ToFP32(ImmNEONabcdefgh());
}


double Instruction::ImmNEONFP64() const {
  return Imm8ToFP64(ImmNEONabcdefgh());
}


unsigned CalcLSDataSize(LoadStoreOp op) {
  VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
  unsigned size = static_cast<Instr>(op) >> LSSize_offset;
  if ((op & LSVector_mask) != 0) {
    // Vector register memory operations encode the access size in the "size"
    // and "opc" fields.
    if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
      size = kQRegSizeInBytesLog2;
    }
  }
  return size;
}
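
// For example (illustrative): LDR_x has a "size" field of 0b11, so the
// returned access size is log2(8 bytes) = 3. Q-register accesses reuse
// size 0b00 with opc >= 2, which is why they are special-cased above to
// kQRegSizeInBytesLog2 (log2(16 bytes) = 4).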


unsigned CalcLSPairDataSize(LoadStorePairOp op) {
  VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
  VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
  switch (op) {
    case STP_q:
    case LDP_q: return kQRegSizeInBytesLog2;
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return kXRegSizeInBytesLog2;
    default: return kWRegSizeInBytesLog2;
  }
}


int Instruction::ImmBranchRangeBitwidth(ImmBranchType branch_type) {
  switch (branch_type) {
    case UncondBranchType:
      return ImmUncondBranch_width;
    case CondBranchType:
      return ImmCondBranch_width;
    case CompareBranchType:
      return ImmCmpBranch_width;
    case TestBranchType:
      return ImmTestBranch_width;
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}


int32_t Instruction::ImmBranchForwardRange(ImmBranchType branch_type) {
  int32_t encoded_max = 1 << (ImmBranchRangeBitwidth(branch_type) - 1);
  return encoded_max * kInstructionSize;
}
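
// Worked example (illustrative, using the A64 widths): an unconditional
// branch immediate is 26 bits, so encoded_max is 1 << 25 instructions and the
// forward range is (1 << 25) * 4 bytes = 128MB. The 19-bit conditional and
// compare-and-branch immediates give a 1MB forward range.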


bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                     int64_t offset) {
  return is_intn(ImmBranchRangeBitwidth(branch_type), offset);
}


const Instruction* Instruction::ImmPCOffsetTarget() const {
  const Instruction * base = this;
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // ADR and ADRP.
    offset = ImmPCRel();
    if (Mask(PCRelAddressingMask) == ADRP) {
      base = AlignDown(base, kPageSize);
      offset *= kPageSize;
    } else {
      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
    }
  } else {
    // All PC-relative branches.
    VIXL_ASSERT(BranchType() != UnknownBranchType);
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  }
  return base + offset;
}


int Instruction::ImmBranch() const {
  switch (BranchType()) {
    case CondBranchType: return ImmCondBranch();
    case UncondBranchType: return ImmUncondBranch();
    case CompareBranchType: return ImmCmpBranch();
    case TestBranchType: return ImmTestBranch();
    default: VIXL_UNREACHABLE();
  }
  return 0;
}


void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else {
    SetBranchImmTarget(target);
  }
}


void Instruction::SetPCRelImmTarget(const Instruction* target) {
  ptrdiff_t imm21;
  if ((Mask(PCRelAddressingMask) == ADR)) {
    imm21 = target - this;
  } else {
    VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
    uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
    uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
    imm21 = target_page - this_page;
  }
  Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21));

  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}
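
// Worked example (illustrative, assuming 4KB pages): for an ADRP at
// 0x10000040 targeting 0x10003000, this_page = 0x10000 and
// target_page = 0x10003, so imm21 = 3; the processor rebuilds the target page
// base from that delta at run time.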


void Instruction::SetBranchImmTarget(const Instruction* target) {
  VIXL_ASSERT(((target - this) & 3) == 0);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  int offset = static_cast<int>((target - this) >> kInstructionSizeLog2);
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: VIXL_UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}


void Instruction::SetImmLLiteral(const Instruction* source) {
  VIXL_ASSERT(IsWordAligned(source));
  ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
  Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset));
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}
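
// Note (illustrative, assuming word-sized literal entries, i.e.
// kLiteralEntrySizeLog2 == 2): the stored offset is scaled, so a literal
// placed 16 bytes after the load encodes as 16 >> 2 = 4 in the imm19 field.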


VectorFormat VectorFormatHalfWidth(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
              vform == kFormatH || vform == kFormatS || vform == kFormatD);
  switch (vform) {
    case kFormat8H: return kFormat8B;
    case kFormat4S: return kFormat4H;
    case kFormat2D: return kFormat2S;
    case kFormatH: return kFormatB;
    case kFormatS: return kFormatH;
    case kFormatD: return kFormatS;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatDoubleWidth(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
              vform == kFormatB || vform == kFormatH || vform == kFormatS);
  switch (vform) {
    case kFormat8B: return kFormat8H;
    case kFormat4H: return kFormat4S;
    case kFormat2S: return kFormat2D;
    case kFormatB: return kFormatH;
    case kFormatH: return kFormatS;
    case kFormatS: return kFormatD;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatFillQ(const VectorFormat vform) {
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return kFormat16B;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return kFormat8H;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return kFormat4S;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return kFormat2D;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform) {
  switch (vform) {
    case kFormat4H: return kFormat8B;
    case kFormat8H: return kFormat16B;
    case kFormat2S: return kFormat4H;
    case kFormat4S: return kFormat8H;
    case kFormat1D: return kFormat2S;
    case kFormat2D: return kFormat4S;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}
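
// For example (illustrative; the xtn2 pairing is this note's assumption, not
// taken from this file): kFormat8H maps to kFormat16B - half the lane width,
// twice the lane count, the same total register width - matching a narrowing
// destination such as "xtn2 v0.16b, v1.8h".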


VectorFormat VectorFormatDoubleLanes(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
  switch (vform) {
    case kFormat8B: return kFormat16B;
    case kFormat4H: return kFormat8H;
    case kFormat2S: return kFormat4S;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatHalfLanes(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
  switch (vform) {
    case kFormat16B: return kFormat8B;
    case kFormat8H: return kFormat4H;
    case kFormat4S: return kFormat2S;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat ScalarFormatFromLaneSize(int laneSize) {
  switch (laneSize) {
    case 8: return kFormatB;
    case 16: return kFormatH;
    case 32: return kFormatS;
    case 64: return kFormatD;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB: return kBRegSize;
    case kFormatH: return kHRegSize;
    case kFormatS: return kSRegSize;
    case kFormatD: return kDRegSize;
    case kFormat8B:
    case kFormat4H:
    case kFormat2S:
    case kFormat1D: return kDRegSize;
    default: return kQRegSize;
  }
}


unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
  return RegisterSizeInBitsFromFormat(vform) / 8;
}


unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return 8;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return 16;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return 32;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return 64;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


int LaneSizeInBytesFromFormat(VectorFormat vform) {
  return LaneSizeInBitsFromFormat(vform) / 8;
}


int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return 0;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return 1;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return 2;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return 3;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


int LaneCountFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormat16B: return 16;
    case kFormat8B:
    case kFormat8H: return 8;
    case kFormat4H:
    case kFormat4S: return 4;
    case kFormat2S:
    case kFormat2D: return 2;
    case kFormat1D:
    case kFormatB:
    case kFormatH:
    case kFormatS:
    case kFormatD: return 1;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


int MaxLaneCountFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return 16;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return 8;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return 4;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return 2;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


// Does 'vform' indicate a vector format or a scalar format?
bool IsVectorFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormatH:
    case kFormatS:
    case kFormatD: return false;
    default: return true;
  }
}


int64_t MaxIntFromFormat(VectorFormat vform) {
  return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}


int64_t MinIntFromFormat(VectorFormat vform) {
  return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
}
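
// Worked example (illustrative): for 16-bit lanes (e.g. kFormat8H),
// MaxIntFromFormat returns INT64_MAX >> 48 = 0x7fff and MinIntFromFormat
// returns INT64_MIN >> 48 = -0x8000, the bounds of a signed 16-bit lane.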


uint64_t MaxUintFromFormat(VectorFormat vform) {
  return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
}  // namespace vixl