//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"
//===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
//===--------------------------------------------------------------------===//

namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool ForCodeSize;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr),
        ForCodeSize(false) {}

  StringRef getPassName() const override {
    return "AArch64 Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    ForCodeSize = MF.getFunction().optForSize();
    Subtarget = &MF.getSubtarget<AArch64Subtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  void Select(SDNode *Node) override;
  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  bool tryMLAV64LaneV128(SDNode *N);
  bool tryMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }
  template<int Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  template<int Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }
  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that element
  /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
                      const unsigned SubRegs[]);

  void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
  bool tryIndexedLoad(SDNode *N);

  void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                  unsigned SubRegIdx);
  void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                      unsigned SubRegIdx);
  void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  bool tryBitfieldExtractOp(SDNode *N);
  bool tryBitfieldExtractOpFromSExt(SDNode *N);
  bool tryBitfieldInsertOp(SDNode *N);
  bool tryBitfieldInsertInZeroOp(SDNode *N);
  bool tryShiftAmountMod(SDNode *N);

  bool tryReadRegister(SDNode *N);
  bool tryWriteRegister(SDNode *N);
  // Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
                               SDValue &OffImm);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
                         SDValue &Offset, SDValue &SignExtend);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);

  bool SelectCMP_SWAP(SDNode *N);
};
} // end anonymous namespace
/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the 32-bit value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the value is a constant
// operand. If so, Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the 32-bit value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}
bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch(ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_Q:
    // We need to make sure that this one operand does not end up in XZR, thus
    // require the address to be in a PointerRegClass register.
    const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
    const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF);
    SDLoc dl(Op);
    SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i64);
    SDValue NewOp =
        SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                       dl, Op.getValueType(),
                                       Op, RC), 0);
    OutOps.push_back(NewOp);
    return false;
  }
  return true;
}
/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                           SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  SDLoc dl(N);
  Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
  return true;
}
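// For example, the constant 0x456000 fits this form: it is selected as the
// shifted immediate #0x456 with shifter "LSL #12", as in
// "add x0, x1, #0x456, lsl #12".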
/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                              SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
                          Shift);
}
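// For example, an operand of -5 is negated to 5 here, so a comparison against
// the negative immediate can be selected as "cmn w0, #5" instead of
// materializing -5 in a register for "cmp".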
/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return AArch64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return AArch64_AM::LSL;
  case ISD::SRL:
    return AArch64_AM::LSR;
  case ISD::SRA:
    return AArch64_AM::ASR;
  case ISD::ROTR:
    return AArch64_AM::ROR;
  }
}
/// Determine whether it is worth it to fold SHL into the addressing
/// mode.
static bool isWorthFoldingSHL(SDValue V) {
  assert(V.getOpcode() == ISD::SHL && "invalid opcode");
  // It is worth folding logical shift of up to three places.
  auto *CSD = dyn_cast<ConstantSDNode>(V.getOperand(1));
  if (!CSD)
    return false;
  unsigned ShiftVal = CSD->getZExtValue();
  if (ShiftVal > 3)
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation.  If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = V.getNode();
  for (SDNode *UI : Node->uses())
    if (!isa<MemSDNode>(*UI))
      for (SDNode *UII : UI->uses())
        if (!isa<MemSDNode>(*UII))
          return false;
  return true;
}
/// Determine whether it is worth folding V into an extended register.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // Trivial if we are optimizing for code size or if there is only
  // one use of the value.
  if (ForCodeSize || V.hasOneUse())
    return true;
  // If a subtarget has a fastpath LSL we can fold a logical shift into
  // the addressing mode and save a cycle.
  if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::SHL &&
      isWorthFoldingSHL(V))
    return true;
  if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::ADD) {
    const SDValue LHS = V.getOperand(0);
    const SDValue RHS = V.getOperand(1);
    if (LHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(LHS))
      return true;
    if (RHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(RHS))
      return true;
  }

  // It hurts otherwise, since the value will be reused.
  return false;
}
/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to default of "LSL 0". The logical
/// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not. The AllowROR parameter specifies whether ROR is
/// allowed.
bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                                SDValue &Reg, SDValue &Shift) {
  AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
  if (ShType == AArch64_AM::InvalidShiftExtend)
    return false;
  if (!AllowROR && ShType == AArch64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
    return isWorthFolding(N);
  }

  return false;
}
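// For example, (or x0, (shl x1, 3)) can be selected as
// "orr x0, x0, x1, lsl #3", folding the shift into the shifted-register
// operand of the logical instruction.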
/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::SXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::UXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return AArch64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return AArch64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return AArch64_AM::UXTW;
    }
  }

  return AArch64_AM::InvalidShiftExtend;
}
// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
      DL->getOpcode() != AArch64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
  LaneOp = EV.getOperand(0);

  return true;
}
// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is
// a high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {
  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}
/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
bool AArch64DAGToDAGISel::tryMLAV64LaneV128(SDNode *N) {
  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return false;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = AArch64::MLAv4i16_indexed;
    break;
  case MVT::v8i16:
    MLAOpc = AArch64::MLAv8i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = AArch64::MLAv2i32_indexed;
    break;
  case MVT::v4i32:
    MLAOpc = AArch64::MLAv4i32_indexed;
    break;
  }

  ReplaceNode(N, CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops));
  return true;
}
bool AArch64DAGToDAGISel::tryMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDLoc dl(N);
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return false;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::aarch64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::aarch64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  ReplaceNode(N, CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops));
  return true;
}
/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32)
    return N;

  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                               dl, MVT::i32, N, SubReg);
  return SDValue(Node, 0);
}
/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                      SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);

    // Don't match if free 32-bit -> 64-bit zext can be used instead.
    if (Ext == AArch64_AM::UXTW &&
        Reg->getValueType(0).getSizeInBits() == 32 && isDef32(*Reg.getNode()))
      return false;
  }

  // AArch64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
  Reg = narrowIfNeeded(CurDAG, Reg);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
                                    MVT::i32);
  return isWorthFolding(N);
}
/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point in
/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
/// leads to duplicated ADRP instructions.
static bool isWorthFoldingADDlow(SDValue N) {
  for (auto Use : N->uses()) {
    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
        Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;

    // ldar and stlr have much more restrictive addressing modes (just a
    // register).
    if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getOrdering()))
      return false;
  }

  return true;
}
/// SelectAddrModeIndexed7S - Select a "register plus scaled signed 7-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed7S(SDValue N, unsigned Size,
                                                  SDValue &Base,
                                                  SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  // As opposed to the (12-bit) Indexed addressing mode below, the 7-bit signed
  // mode selected here doesn't support labels/immediates, only base+offset.

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = RHS->getSExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= -(0x40 << Scale) &&
          RHSC < (0x40 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    stp x1, x2, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}
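// For example, a load-pair such as "ldp x0, x1, [x2, #-48]" uses this form:
// the signed 7-bit field is scaled by the access size (here 8 bytes), giving
// a byte-offset range of -512..504 for 64-bit accesses.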
/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                                SDValue &Base,
                                                SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    if (GAN->getOffset() % Size == 0) {
      const GlobalValue *GV = GAN->getGlobal();
      unsigned Alignment = GV->getAlignment();
      Type *Ty = GV->getValueType();
      if (Alignment == 0 && Ty->isSized())
        Alignment = DL.getABITypeAlignment(Ty);

      if (Alignment >= Size)
        return true;
    }
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}
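// For example, "ldr w0, [x1, #2044]" is accepted here because 2044 is a
// multiple of the 4-byte access size and 2044/4 = 511 fits in the unsigned
// 12-bit scaled-offset field.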
/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                                 SDValue &Base,
                                                 SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
      return true;
    }
  }
  return false;
}
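// For example, "ldur w0, [x1, #-3]" uses this unscaled form: -3 is neither a
// multiple of the access size nor non-negative, so the scaled mode cannot
// encode it, but it fits the signed 9-bit range [-256, 255].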
static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}
/// Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                            bool WantExtend, SDValue &Offset,
                                            SDValue &SignExtend) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
    return false;

  SDLoc dl(N);
  if (WantExtend) {
    AArch64_AM::ShiftExtendType Ext =
        getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
  } else {
    Offset = N.getOperand(0);
    SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
  }

  unsigned LegalShiftVal = Log2_32(Size);
  unsigned ShiftVal = CSD->getZExtValue();

  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
    return false;

  return isWorthFolding(N);
}
bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc dl(N);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation.  If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // There was no shift, whatever else we find.
  DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);

  AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  return false;
}
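// For example, an 8-byte load from (add x1, (shl (sext w2), 3)) can be
// selected as "ldr x0, [x1, w2, sxtw #3]", folding both the extend and the
// shift into the W-register-offset addressing mode.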
// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and can not be
// encoded by one MOVZ, return true.
static bool isPreferredADD(int64_t ImmOff) {
  // Constant in [0x0, 0xfff] can be encoded in ADD.
  if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
    return true;
  // Check if it can be encoded in an "ADD LSL #12".
  if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
    // As a single MOVZ is faster than an "ADD of LSL #12", ignore such constant.
    return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
           (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
  return false;
}
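// For example, 0x123000 is preferred: it can be encoded as
// "add x0, x1, #0x123, lsl #12" but cannot be materialized by a single MOVZ.
// 0x7000 is not preferred, since "movz w0, #0x7000" produces it in one
// instruction.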
bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc DL(N);

  // Check if this particular node is reused in any non-memory related
  // operation.  If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Watch out if RHS is a wide immediate, it can not be selected into
  // [BaseReg+Imm] addressing mode. Also it may not be able to be encoded into
  // ADD/SUB. Instead it will use [BaseReg + 0] address mode and generate
  // instructions like:
  //     MOV  X0, WideImmediate
  //     ADD  X1, BaseReg, X0
  //     LDR  X2, [X1, 0]
  // For such situation, using [BaseReg, XReg] addressing mode can save one
  // ADD/SUB:
  //     MOV  X0, WideImmediate
  //     LDR  X2, [BaseReg, X0]
  if (isa<ConstantSDNode>(RHS)) {
    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
    unsigned Scale = Log2_32(Size);
    // Skip immediates that can be selected in the load/store addressing
    // mode.  Also skip immediates that can be encoded by a single ADD
    // (SUB is also checked by using -ImmOff).
    if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
        isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
      return false;

    SDValue Ops[] = { RHS };
    SDNode *MOVI =
        CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    SDValue MOVIV = SDValue(MOVI, 0);
    // This ADD of two X register will be selected into [Reg+Reg] mode.
    N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
  DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}
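// For example, with a base in x1 and a wide offset such as 0x123456 that
// neither the immediate addressing modes nor a single ADD can encode, the
// offset is materialized once and the access becomes "ldr x2, [x1, x0]".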
SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
  static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
                                     AArch64::dsub2, AArch64::dsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
  static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
                                     AArch64::qsub2, AArch64::qsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         const unsigned RegClassIDs[],
                                         const unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0]);

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}
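// For example, a three-register Q tuple {q0, q1, q2} becomes a single
// REG_SEQUENCE node in the QQQ register class, so the register allocator keeps
// the vectors consecutive as required by instructions such as TBL and LD3.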
void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
                                      bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
}
bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return false;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;

  // We're not doing validity checking here. That was done when checking
  // if we should mark the load as indexed or not. We're just selecting
  // the right instruction.
  unsigned Opcode = 0;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  bool InsertTo64 = false;
  if (VT == MVT::i64)
    Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
  else if (VT == MVT::i32) {
    if (ExtType == ISD::NON_EXTLOAD)
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
    else if (ExtType == ISD::SEXTLOAD)
      Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
    else {
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
      InsertTo64 = true;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i16) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
      else
        Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i8) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
      else
        Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::f16) {
    Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
  } else if (VT == MVT::f32) {
    Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
  } else if (VT == MVT::f64 || VT.is64BitVector()) {
    Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
  } else if (VT.is128BitVector()) {
    Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
  } else
    return false;
  SDValue Chain = LD->getChain();
  SDValue Base = LD->getBasePtr();
  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
  int OffsetVal = (int)OffsetOp->getZExtValue();
  SDLoc dl(N);
  SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
  SDValue Ops[] = { Base, Offset, Chain };
  SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
                                       MVT::Other, Ops);
  // Either way, we're replacing the node, so tell the caller that.
  SDValue LoadedVal = SDValue(Res, 1);
  if (InsertTo64) {
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    LoadedVal =
        SDValue(CurDAG->getMachineNode(
                    AArch64::SUBREG_TO_REG, dl, MVT::i64,
                    CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
                    SubReg),
                0);
  }

  ReplaceUses(SDValue(N, 0), LoadedVal);
  ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
  ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
  CurDAG->RemoveDeadNode(N);
  return true;
}
void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                                     unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(2), // Mem operand;
                   Chain};

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i),
                CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);

  CurDAG->RemoveDeadNode(N);
}
void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                                         unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(1), // Mem operand
                   N->getOperand(2), // Incremental
                   Chain};

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1)
    ReplaceUses(SDValue(N, 0), SuperReg);
  else
    for (unsigned i = 0; i < NumVecs; ++i)
      ReplaceUses(SDValue(N, i),
                  CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT,
                                                 SuperReg));

  // Update the chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  CurDAG->RemoveDeadNode(N);
}
void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                      unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  ReplaceNode(N, St);
}
void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  const EVT ResTys[] = {MVT::i64,    // Type of the write back register
                        MVT::Other}; // Type for the Chain

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq,
                   N->getOperand(NumVecs + 1), // base register
                   N->getOperand(NumVecs + 2), // Incremental
                   N->getOperand(0)};          // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  ReplaceNode(N, St);
}
namespace {
/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
class WidenVector {
  SelectionDAG &DAG;

public:
  WidenVector(SelectionDAG &DAG) : DAG(DAG) {}

  SDValue operator()(SDValue V64Reg) {
    EVT VT = V64Reg.getValueType();
    unsigned NarrowSize = VT.getVectorNumElements();
    MVT EltTy = VT.getVectorElementType().getSimpleVT();
    MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
    SDLoc DL(V64Reg);

    SDValue Undef =
        SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
    return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
  }
};
} // namespace
/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
  EVT VT = V128Reg.getValueType();
  unsigned WideSize = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType().getSimpleVT();
  MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);

  return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
                                    V128Reg);
}
void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
                                         unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(),
              WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);

  EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
  static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
                                    AArch64::qsub2, AArch64::qsub3 };
  for (unsigned i = 0; i < NumVecs; ++i) {
    SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
    if (Narrow)
      NV = NarrowVector(NV, *CurDAG);
    ReplaceUses(SDValue(N, i), NV);
  }

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
  CurDAG->RemoveDeadNode(N);
}
void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(),
              WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        RegSeq->getValueType(0), MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq,
                   CurDAG->getTargetConstant(LaneNo, dl,
                                             MVT::i64), // Lane Number
                   N->getOperand(NumVecs + 2),          // Base register
                   N->getOperand(NumVecs + 3),          // Incremental
                   N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of the write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of the vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1) {
    ReplaceUses(SDValue(N, 0),
                Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
  } else {
    EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
    static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
                                      AArch64::qsub2, AArch64::qsub3 };
    for (unsigned i = 0; i < NumVecs; ++i) {
      SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
                                                  SuperReg);
      if (Narrow)
        NV = NarrowVector(NV, *CurDAG);
      ReplaceUses(SDValue(N, i), NV);
    }
  }

  // Update the Chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  CurDAG->RemoveDeadNode(N);
}
void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(),
              WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  ReplaceNode(N, St);
}
void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
                                              unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(),
              WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 2), // Base Register
                   N->getOperand(NumVecs + 3), // Incremental
                   N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  ReplaceNode(N, St);
}
static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
                                       unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       unsigned NumberOfIgnoredLowBits,
                                       bool BiggerPattern) {
  assert(N->getOpcode() == ISD::AND &&
         "N must be a AND operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // FIXME: simplify-demanded-bits in DAGCombine will probably have
  // changed the AND node to a 32-bit mask operation. We'll have to
  // undo that as part of the transform here if we want to catch all
  // the opportunities.
  // Currently the NumberOfIgnoredLowBits argument helps to recover
  // from these situations when matching bigger pattern (bitfield insert).

  // For unsigned extracts, check for a shift right and mask
  uint64_t AndImm = 0;
  if (!isOpcWithIntImmediate(N, ISD::AND, AndImm))
    return false;

  const SDNode *Op0 = N->getOperand(0).getNode();

  // Because of simplify-demanded-bits in DAGCombine, the mask may have been
  // simplified. Try to undo that
  AndImm |= maskTrailingOnes<uint64_t>(NumberOfIgnoredLowBits);

  // The immediate is a mask of the low bits iff imm & (imm+1) == 0
  if (AndImm & (AndImm + 1))
    return false;

  bool ClampMSB = false;
  uint64_t SrlImm = 0;
  // Handle the SRL + ANY_EXTEND case.
  if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
      isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, SrlImm)) {
    // Extend the incoming operand of the SRL to 64-bit.
    Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
    // Make sure to clamp the MSB so that we preserve the semantics of the
    // original operations.
    ClampMSB = true;
  } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
             isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
                                   SrlImm)) {
    // If the shift result was truncated, we can still combine them.
    Opd0 = Op0->getOperand(0).getOperand(0);

    // Use the type of SRL node.
    VT = Opd0->getValueType(0);
  } else if (isOpcWithIntImmediate(Op0, ISD::SRL, SrlImm)) {
    Opd0 = Op0->getOperand(0);
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift right has been performed.
    // The resulting code will be at least as good as the original one
    // plus it may expose more opportunities for bitfield insert pattern.
    // FIXME: Currently we limit this to the bigger pattern, because
    // some optimizations expect AND and not UBFM.
    Opd0 = N->getOperand(0);
  } else
    return false;

  // Bail out on large immediates. This happens when no proper
  // combining/constant folding was performed.
  if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) {
    LLVM_DEBUG((dbgs() << N
                       << ": Found large shift immediate, this should not happen\n"));
    return false;
  }

  LSB = SrlImm;
  MSB = SrlImm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(AndImm)
                                 : countTrailingOnes<uint64_t>(AndImm)) -
        1;
  if (ClampMSB)
    // Since we're moving the extend before the right shift operation, we need
    // to clamp the MSB to make sure we don't shift in undefined bits instead of
    // the zeros which would get shifted in with the original right shift
    // operation.
    MSB = MSB > 31 ? 31 : MSB;

  Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
  return true;
}
static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
                                             SDValue &Opd0, unsigned &Immr,
                                             unsigned &Imms) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);

  EVT VT = N->getValueType(0);
  unsigned BitWidth = VT.getSizeInBits();
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  SDValue Op = N->getOperand(0);
  if (Op->getOpcode() == ISD::TRUNCATE) {
    Op = Op->getOperand(0);
    VT = Op->getValueType(0);
    BitWidth = VT.getSizeInBits();
  }

  uint64_t ShiftImm;
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRL, ShiftImm) &&
      !isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
    return false;

  unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
  if (ShiftImm + Width > BitWidth)
    return false;

  Opc = (VT == MVT::i32) ? AArch64::SBFMWri : AArch64::SBFMXri;
  Opd0 = Op.getOperand(0);
  Immr = ShiftImm;
  Imms = ShiftImm + Width - 1;
  return true;
}
static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
                                          SDValue &Opd0, unsigned &LSB,
                                          unsigned &MSB) {
  // We are looking for the following pattern which basically extracts several
  // continuous bits from the source value and places it from the LSB of the
  // destination value, all other bits of the destination value are set to zero:
  //
  // Value2 = AND Value, MaskImm
  // SRL Value2, ShiftImm
  //
  // with MaskImm >> ShiftImm to search for the bit width.
  //
  // This gets selected into a single UBFM:
  //
  // UBFM Value, ShiftImm, BitWide + SrlImm - 1
  //

  if (N->getOpcode() != ISD::SRL)
    return false;

  uint64_t AndMask = 0;
  if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, AndMask))
    return false;

  Opd0 = N->getOperand(0).getOperand(0);

  uint64_t SrlImm = 0;
  if (!isIntImmediate(N->getOperand(1), SrlImm))
    return false;

  // Check whether we really have several bits extract here.
  unsigned BitWide = 64 - countLeadingOnes(~(AndMask >> SrlImm));
  if (BitWide && isMask_64(AndMask >> SrlImm)) {
    if (N->getValueType(0) == MVT::i32)
      Opc = AArch64::UBFMWri;
    else
      Opc = AArch64::UBFMXri;

    LSB = SrlImm;
    MSB = BitWide + SrlImm - 1;
    return true;
  }

  return false;
}
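// For example, (srl (and x, 0xff0), 4) extracts bits [11:4]: it is selected
// as a single "ubfx w0, w1, #4, #8" (a UBFM with immr=4, imms=11) instead of
// an AND followed by a shift.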
static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                       unsigned &Immr, unsigned &Imms,
                                       bool BiggerPattern) {
  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
         "N must be a SHR/SRA operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // Check for AND + SRL doing several bits extract.
  if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
    return true;

  // We're looking for a shift of a shift.
  uint64_t ShlImm = 0;
  uint64_t TruncBits = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, ShlImm)) {
    Opd0 = N->getOperand(0).getOperand(0);
  } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
             N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
    // We are looking for a shift of truncate. Truncate from i64 to i32 could
    // be considered as setting high 32 bits as zero. Our strategy here is to
    // always generate 64bit UBFM. This consistency will help the CSE pass
    // later find more redundancy.
    Opd0 = N->getOperand(0).getOperand(0);
    TruncBits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
    VT = Opd0.getValueType();
    assert(VT == MVT::i64 && "the promoted type should be i64");
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift left has been performed.
    // FIXME: Currently we limit this to the bigger pattern case,
    // because some optimizations expect AND and not UBFM
    Opd0 = N->getOperand(0);
  } else
    return false;

  // Missing combines/constant folding may have left us with strange
  // constants.
  if (ShlImm >= VT.getSizeInBits()) {
    LLVM_DEBUG((dbgs() << N
                       << ": Found large shift immediate, this should not happen\n"));
    return false;
  }

  uint64_t SrlImm = 0;
  if (!isIntImmediate(N->getOperand(1), SrlImm))
    return false;

  assert(SrlImm > 0 && SrlImm < VT.getSizeInBits() &&
         "bad amount in shift node!");
  int immr = SrlImm - ShlImm;
  Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
  Imms = VT.getSizeInBits() - ShlImm - TruncBits - 1;
  // SRA requires a signed extraction
  if (VT == MVT::i32)
    Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
  else
    Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
  return true;
}

bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND);

  EVT VT = N->getValueType(0);
  EVT NarrowVT = N->getOperand(0)->getValueType(0);
  if (VT != MVT::i64 || NarrowVT != MVT::i32)

  SDValue Op = N->getOperand(0);
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))

  // Extend the incoming operand of the shift to 64 bits.
  SDValue Opd0 = Widen(CurDAG, Op.getOperand(0));
  unsigned Immr = ShiftImm;
  unsigned Imms = NarrowVT.getSizeInBits() - 1;
  SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
                   CurDAG->getTargetConstant(Imms, dl, VT)};
  CurDAG->SelectNodeTo(N, AArch64::SBFMXri, VT, Ops);

static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
                                SDValue &Opd0, unsigned &Immr, unsigned &Imms,
                                unsigned NumberOfIgnoredLowBits = 0,
                                bool BiggerPattern = false) {
  if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)

  switch (N->getOpcode()) {
    if (!N->isMachineOpcode())

    return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
                                      NumberOfIgnoredLowBits, BiggerPattern);

    return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);

  case ISD::SIGN_EXTEND_INREG:
    return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms);

  unsigned NOpc = N->getMachineOpcode();

  case AArch64::SBFMWri:
  case AArch64::UBFMWri:
  case AArch64::SBFMXri:
  case AArch64::UBFMXri:
    Opd0 = N->getOperand(0);
    Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
    Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();

bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {
  unsigned Opc, Immr, Imms;
  if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))

  EVT VT = N->getValueType(0);

  // If the bit extract operation is 64-bit but the original type is 32-bit, we
  // need to add one EXTRACT_SUBREG.
  if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
    SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
                       CurDAG->getTargetConstant(Imms, dl, MVT::i64)};

    SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
                                          MVT::i32, SDValue(BFM, 0), SubReg));

  SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
                   CurDAG->getTargetConstant(Imms, dl, VT)};
  CurDAG->SelectNodeTo(N, Opc, VT, Ops);

/// Does DstMask form a complementary pair with the mask provided by
/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
/// this asks whether DstMask zeroes precisely those bits that will be set by
/// the other half of the pair.
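/// For example, for i32, DstMask = 0xffff0000 and BitsToBeInserted = 0x0000ffff
/// form such a pair.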
static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
                              unsigned NumberOfIgnoredHighBits, EVT VT) {
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "i32 or i64 mask type expected!");
  unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;

  APInt SignificantDstMask = APInt(BitWidth, DstMask);
  APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);

  return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
         (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();

// Look for bits that will be useful for later uses.
// A bit is considered useless as soon as it is dropped and never used
// again before it has been dropped.
// E.g., looking for the useful bits of x:
// 1. y = x & 0x7
// 2. z = y >> 2
// After #1, the useful bits of x are 0x7, and the useful bits of x live
// through y.
// After #2, the useful bits of x are 0x4.
// However, if x is used by an unpredictable instruction, then all its bits
// are useful.
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);

static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
  UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
  getUsefulBits(Op, UsefulBits, Depth + 1);

static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
                                             uint64_t Imm, uint64_t MSB,
  // Inherit the bitwidth value.
  APInt OpUsefulBits(UsefulBits);

    OpUsefulBits <<= MSB - Imm + 1;
    // The interesting part will be in the lower part of the result.
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was starting at Imm in the argument.
    OpUsefulBits <<= Imm;

    OpUsefulBits <<= MSB + 1;
    // The interesting part will be shifted in the result.
    OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was at zero in the argument.
    OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);

  UsefulBits &= OpUsefulBits;

static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();

  getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);

static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
  uint64_t ShiftTypeAndValue =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  APInt Mask(UsefulBits);
  Mask.clearAllBits();

  if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
    uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask.lshrInPlace(ShiftAmt);
  } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
    // We do not handle AArch64_AM::ASR, because the sign will change the
    // number of useful bits.
    uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
    Mask.lshrInPlace(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);

static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
      cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();

  APInt OpUsefulBits(UsefulBits);

  APInt ResultUsefulBits(UsefulBits.getBitWidth(), 0);
  ResultUsefulBits.flipAllBits();
  APInt Mask(UsefulBits.getBitWidth(), 0);

  getUsefulBits(Op, ResultUsefulBits, Depth + 1);

    // The instruction is a BFXIL.
    uint64_t Width = MSB - Imm + 1;

    OpUsefulBits <<= Width;

    if (Op.getOperand(1) == Orig) {
      // Copy the low bits from the result to bits starting from LSB.
      Mask = ResultUsefulBits & OpUsefulBits;

    if (Op.getOperand(0) == Orig)
      // Bits starting from LSB in the input contribute to the result.
      Mask |= (ResultUsefulBits & ~OpUsefulBits);

    // The instruction is a BFI.
    uint64_t Width = MSB + 1;
    uint64_t LSB = UsefulBits.getBitWidth() - Imm;

    OpUsefulBits <<= Width;
    OpUsefulBits <<= LSB;

    if (Op.getOperand(1) == Orig) {
      // Copy the bits from the result to the zero bits.
      Mask = ResultUsefulBits & OpUsefulBits;
      Mask.lshrInPlace(LSB);

    if (Op.getOperand(0) == Orig)
      Mask |= (ResultUsefulBits & ~OpUsefulBits);

static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
                                SDValue Orig, unsigned Depth) {

  // Users of this node should have already been instruction selected.
  // FIXME: Can we turn that into an assert?
  if (!UserNode->isMachineOpcode())

  switch (UserNode->getMachineOpcode()) {

  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
  case AArch64::ANDWri:
  case AArch64::ANDXri:
    // We increment Depth only when we call getUsefulBits.
    return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
  case AArch64::UBFMWri:
  case AArch64::UBFMXri:
    return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);

  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
    if (UserNode->getOperand(1) != Orig)
    return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
  case AArch64::BFMWri:
  case AArch64::BFMXri:
    return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);

  case AArch64::STRBBui:
  case AArch64::STURBBi:
    if (UserNode->getOperand(0) != Orig)
    UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);

  case AArch64::STRHHui:
  case AArch64::STURHHi:
    if (UserNode->getOperand(0) != Orig)
    UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xffff);

static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {

  // Initialize UsefulBits.
    unsigned Bitwidth = Op.getScalarValueSizeInBits();
    // At the beginning, assume every produced bit is useful.
    UsefulBits = APInt(Bitwidth, 0);
    UsefulBits.flipAllBits();
  APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);

  for (SDNode *Node : Op.getNode()->uses()) {
    // A use cannot produce useful bits.
    APInt UsefulBitsForUse = APInt(UsefulBits);
    getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
    UsersUsefulBits |= UsefulBitsForUse;

  // UsefulBits contains the produced bits that are meaningful for the
  // current definition, thus a user cannot make a bit meaningful at
  // this point.
  UsefulBits &= UsersUsefulBits;

/// Create a machine node performing a notional SHL of Op by ShlAmount. If
/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
/// 0, return Op unchanged.
static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {

  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;

  if (ShlAmount > 0) {
    // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
    ShiftNode = CurDAG->getMachineNode(
        UBFMOpc, dl, VT, Op,
        CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
        CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
    // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
    assert(ShlAmount < 0 && "expected right shift");
    int ShrAmount = -ShlAmount;
    ShiftNode = CurDAG->getMachineNode(
        UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
        CurDAG->getTargetConstant(BitWidth - 1, dl, VT));

  return SDValue(ShiftNode, 0);

/// Does this tree qualify as an attempt to move a bitfield into position,
/// essentially "(and (shl VAL, N), Mask)".
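/// For example, (and (shl x, 3), 0x78) positions a 4-bit field of x at
/// bit 3 (ShiftAmount == 3, MaskWidth == 4).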
static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
                                    SDValue &Src, int &ShiftAmount,
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();

  assert(BitWidth == 32 || BitWidth == 64);

  CurDAG->computeKnownBits(Op, Known);

  // Non-zero in the sense that they're not provably zero, which is the key
  // point if we want to use this value.
  uint64_t NonZeroBits = (~Known.Zero).getZExtValue();

  // Discard a constant AND mask if present. It's safe because the node will
  // already have been factored into the computeKnownBits calculation above.
  if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
    assert((~APInt(BitWidth, AndImm) & ~Known.Zero) == 0);
    Op = Op.getOperand(0);

  // Don't match if the SHL has more than one use, since then we'll end up
  // generating SHL+UBFIZ instead of just keeping SHL+AND.
  if (!BiggerPattern && !Op.hasOneUse())

  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
  Op = Op.getOperand(0);

  if (!isShiftedMask_64(NonZeroBits))

  ShiftAmount = countTrailingZeros(NonZeroBits);
  MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);

  // BFI encompasses sufficiently many nodes that it's worth inserting an extra
  // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
  // amount. BiggerPattern is true when this pattern is being matched for BFI,
  // BiggerPattern is false when this pattern is being matched for UBFIZ, in
  // which case it is not profitable to insert an extra shift.
  if (ShlImm - ShiftAmount != 0 && !BiggerPattern)
  Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);

static bool isShiftedMask(uint64_t Mask, EVT VT) {
  assert(VT == MVT::i32 || VT == MVT::i64);
    return isShiftedMask_32(Mask);
  return isShiftedMask_64(Mask);

// Generate a BFI/BFXIL from 'or (and X, MaskImm), OrImm' iff the value being
// inserted only sets known zero bits.
static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expect an OR operation");

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)

  unsigned BitWidth = VT.getSizeInBits();

  if (!isOpcWithIntImmediate(N, ISD::OR, OrImm))

  // Skip this transformation if the ORR immediate can be encoded directly in
  // the ORR. Otherwise, we'll trade an AND+ORR for ORR+BFI/BFXIL, which is
  // most likely performance neutral.
  if (AArch64_AM::isLogicalImmediate(OrImm, BitWidth))

  SDValue And = N->getOperand(0);
  // Must be a single use AND with an immediate operand.
  if (!And.hasOneUse() ||
      !isOpcWithIntImmediate(And.getNode(), ISD::AND, MaskImm))

  // Compute the known zero bits for the AND, as this allows us to catch more
  // general cases than just looking for an AND with an immediate.
  CurDAG->computeKnownBits(And, Known);

  // Non-zero in the sense that they're not provably zero, which is the key
  // point if we want to use this value.
  uint64_t NotKnownZero = (~Known.Zero).getZExtValue();

  // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
  if (!isShiftedMask(Known.Zero.getZExtValue(), VT))

  // The bits being inserted must only set those bits that are known to be zero.
  if ((OrImm & NotKnownZero) != 0) {
    // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
    // currently handle this case.

  // BFI/BFXIL dst, src, #lsb, #width.
  int LSB = countTrailingOnes(NotKnownZero);
  int Width = BitWidth - APInt(BitWidth, NotKnownZero).countPopulation();

  // BFI/BFXIL is an alias of BFM, so translate to BFM operands.
  unsigned ImmR = (BitWidth - LSB) % BitWidth;
  unsigned ImmS = Width - 1;
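  // For example, inserting an 8-bit field at bit 16 of a 32-bit value gives
  // ImmR == 16 and ImmS == 7.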

  // If we're creating a BFI instruction, avoid cases where we need more
  // instructions to materialize the BFI constant as compared to the original
  // ORR. A BFXIL will use the same constant as the original ORR, so the code
  // should be no worse in this case.
  bool IsBFI = LSB != 0;
  uint64_t BFIImm = OrImm >> LSB;
  if (IsBFI && !AArch64_AM::isLogicalImmediate(BFIImm, BitWidth)) {
    // We have a BFI instruction and we know the constant can't be materialized
    // with an ORR-immediate with the zero register.
    unsigned OrChunks = 0, BFIChunks = 0;
    for (unsigned Shift = 0; Shift < BitWidth; Shift += 16) {
      if (((OrImm >> Shift) & 0xFFFF) != 0)
      if (((BFIImm >> Shift) & 0xFFFF) != 0)
    if (BFIChunks > OrChunks)

  // Materialize the constant to be inserted.
  unsigned MOVIOpc = VT == MVT::i32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
  SDNode *MOVI = CurDAG->getMachineNode(
      MOVIOpc, DL, VT, CurDAG->getTargetConstant(BFIImm, DL, VT));

  // Create the BFI/BFXIL instruction.
  SDValue Ops[] = {And.getOperand(0), SDValue(MOVI, 0),
                   CurDAG->getTargetConstant(ImmR, DL, VT),
                   CurDAG->getTargetConstant(ImmS, DL, VT)};
  unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
  CurDAG->SelectNodeTo(N, Opc, VT, Ops);

static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
                                      SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expect an OR operation");

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)

  unsigned BitWidth = VT.getSizeInBits();

  // Because of simplify-demanded-bits in DAGCombine, involved masks may not
  // have the expected shape. Try to undo that.

  unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
  unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();

  // Given an OR operation, check if we have the following pattern:
  //   ubfm c, b, imm, imm2 (or something that does the same job, see
  //                         isBitfieldExtractOp)
  //   d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
  //                   countTrailingZeros(mask2) == imm2 - imm + 1
  // If yes, replace the OR instruction with:
  //   f = BFM Opd0, Opd1, LSB, MSB ; where LSB = imm, and MSB = imm2
  //
  // OR is commutative, so check all combinations of operand order and values
  // of BiggerPattern, i.e.
  //     Opd0, Opd1, BiggerPattern=false
  //     Opd1, Opd0, BiggerPattern=false
  //     Opd0, Opd1, BiggerPattern=true
  //     Opd1, Opd0, BiggerPattern=true
  // Several of these combinations may match, so check with BiggerPattern=false
  // first since that will produce better results by matching more instructions
  // and/or inserting fewer extra instructions.
  for (int I = 0; I < 4; ++I) {
    unsigned ImmR, ImmS;
    bool BiggerPattern = I / 2;
    SDValue OrOpd0Val = N->getOperand(I % 2);
    SDNode *OrOpd0 = OrOpd0Val.getNode();
    SDValue OrOpd1Val = N->getOperand((I + 1) % 2);
    SDNode *OrOpd1 = OrOpd1Val.getNode();

    if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
                            NumberOfIgnoredLowBits, BiggerPattern)) {
      // Check that the returned opcode is compatible with the pattern,
      // i.e., same type and zero extended (U and not S).
      if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
          (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))

      // Compute the width of the bitfield insertion.
      Width = ImmS - ImmR + 1;
      // FIXME: This constraint is to catch bitfield insertion; we may
      // want to widen the pattern if we want to grab general bitfield
      // insertion.

    // If the mask on the insertee is correct, we have a BFXIL operation. We
    // can share the ImmR and ImmS values from the already-computed UBFM.
    } else if (isBitfieldPositioningOp(CurDAG, OrOpd0Val,
                                       Src, DstLSB, Width)) {
      ImmR = (BitWidth - DstLSB) % BitWidth;

    // Check the second part of the pattern.
    EVT VT = OrOpd1Val.getValueType();
    assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");

    // Compute the known zero bits for the candidate of the first operand.
    // This allows us to catch more general cases than just looking for an
    // AND with an immediate. Indeed, simplify-demanded-bits may have removed
    // the AND instruction because it proved it was useless.
    CurDAG->computeKnownBits(OrOpd1Val, Known);

    // Check if there is enough room for the second operand to appear in the
    // first one.
    APInt BitsToBeInserted =
        APInt::getBitsSet(Known.getBitWidth(), DstLSB, DstLSB + Width);

    if ((BitsToBeInserted & ~Known.Zero) != 0)

    // Set the first operand.
    if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
        isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
      // In that case, we can eliminate the AND.
      Dst = OrOpd1->getOperand(0);
      // Maybe the AND has been removed by simplify-demanded-bits
      // or is useful because it discards more bits.

    SDValue Ops[] = {Dst, Src, CurDAG->getTargetConstant(ImmR, DL, VT),
                     CurDAG->getTargetConstant(ImmS, DL, VT)};
    unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
    CurDAG->SelectNodeTo(N, Opc, VT, Ops);

  // Generate a BFXIL from 'or (and X, Mask0Imm), (and Y, Mask1Imm)' iff
  // Mask0Imm and ~Mask1Imm are equivalent and one of the MaskImms is a shifted
  // mask (e.g., 0x000ffff0).
  uint64_t Mask0Imm, Mask1Imm;
  SDValue And0 = N->getOperand(0);
  SDValue And1 = N->getOperand(1);
  if (And0.hasOneUse() && And1.hasOneUse() &&
      isOpcWithIntImmediate(And0.getNode(), ISD::AND, Mask0Imm) &&
      isOpcWithIntImmediate(And1.getNode(), ISD::AND, Mask1Imm) &&
      APInt(BitWidth, Mask0Imm) == ~APInt(BitWidth, Mask1Imm) &&
      (isShiftedMask(Mask0Imm, VT) || isShiftedMask(Mask1Imm, VT))) {

    // ORR is commutative, so canonicalize to the form 'or (and X, Mask0Imm),
    // (and Y, Mask1Imm)' where Mask1Imm is the shifted mask masking off the
    // bits to be inserted.
    if (isShiftedMask(Mask0Imm, VT)) {
      std::swap(And0, And1);
      std::swap(Mask0Imm, Mask1Imm);

    SDValue Src = And1->getOperand(0);
    SDValue Dst = And0->getOperand(0);
    unsigned LSB = countTrailingZeros(Mask1Imm);
    int Width = BitWidth - APInt(BitWidth, Mask0Imm).countPopulation();

    // The BFXIL inserts the low-order bits from a source register, so right
    // shift the needed bits into place.
    unsigned ShiftOpc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
    SDNode *LSR = CurDAG->getMachineNode(
        ShiftOpc, DL, VT, Src, CurDAG->getTargetConstant(LSB, DL, VT),
        CurDAG->getTargetConstant(BitWidth - 1, DL, VT));

    // BFXIL is an alias of BFM, so translate to BFM operands.
    unsigned ImmR = (BitWidth - LSB) % BitWidth;
    unsigned ImmS = Width - 1;

    // Create the BFXIL instruction.
    SDValue Ops[] = {Dst, SDValue(LSR, 0),
                     CurDAG->getTargetConstant(ImmR, DL, VT),
                     CurDAG->getTargetConstant(ImmS, DL, VT)};
    unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
    CurDAG->SelectNodeTo(N, Opc, VT, Ops);

bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
  if (N->getOpcode() != ISD::OR)

  getUsefulBits(SDValue(N, 0), NUsefulBits);

  // If none of the bits are useful, just return UNDEF.
    CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));

  if (tryBitfieldInsertOpFromOr(N, NUsefulBits, CurDAG))

  return tryBitfieldInsertOpFromOrAndImm(N, CurDAG);

/// tryBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
/// equivalent of a left shift by a constant amount followed by an AND masking
/// out a contiguous set of bits.
bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
  if (N->getOpcode() != ISD::AND)

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)

  if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
                               Op0, DstLSB, Width))

  // ImmR is the rotate right amount.
  unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
  // ImmS is the most significant bit of the source to be moved.
  unsigned ImmS = Width - 1;

  SDValue Ops[] = {Op0, CurDAG->getTargetConstant(ImmR, DL, VT),
                   CurDAG->getTargetConstant(ImmS, DL, VT)};
  unsigned Opc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
  CurDAG->SelectNodeTo(N, Opc, VT, Ops);

/// tryShiftAmountMod - Take advantage of built-in mod of shift amount in
/// variable shift/rotate instructions.
bool AArch64DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
  EVT VT = N->getValueType(0);

  switch (N->getOpcode()) {
    Opc = (VT == MVT::i32) ? AArch64::RORVWr : AArch64::RORVXr;
    Opc = (VT == MVT::i32) ? AArch64::LSLVWr : AArch64::LSLVXr;
    Opc = (VT == MVT::i32) ? AArch64::LSRVWr : AArch64::LSRVXr;
    Opc = (VT == MVT::i32) ? AArch64::ASRVWr : AArch64::ASRVXr;

  if (VT == MVT::i32) {
  } else if (VT == MVT::i64) {

  SDValue ShiftAmt = N->getOperand(1);
  SDValue NewShiftAmt;

  // Skip over an extend of the shift amount.
  if (ShiftAmt->getOpcode() == ISD::ZERO_EXTEND ||
      ShiftAmt->getOpcode() == ISD::ANY_EXTEND)
    ShiftAmt = ShiftAmt->getOperand(0);

  if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
    SDValue Add0 = ShiftAmt->getOperand(0);
    SDValue Add1 = ShiftAmt->getOperand(1);

    // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
    // to avoid the ADD/SUB.
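    // For example, (srl x, (add y, 64)) with a 64-bit shift can just use y,
    // since the LSRV instruction only consumes the shift amount modulo 64.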
    if (isIntImmediate(Add1, Add1Imm) && (Add1Imm % Size == 0))
    // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
    // to generate a NEG instead of a SUB of a constant.
    else if (ShiftAmt->getOpcode() == ISD::SUB &&
             isIntImmediate(Add0, Add0Imm) && Add0Imm != 0 &&
             (Add0Imm % Size == 0)) {
      EVT SubVT = ShiftAmt->getValueType(0);
      if (SubVT == MVT::i32) {
        NegOpc = AArch64::SUBWrr;
        ZeroReg = AArch64::WZR;
        assert(SubVT == MVT::i64);
        NegOpc = AArch64::SUBXrr;
        ZeroReg = AArch64::XZR;
          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, ZeroReg, SubVT);
      MachineSDNode *Neg =
          CurDAG->getMachineNode(NegOpc, DL, SubVT, Zero, Add1);
      NewShiftAmt = SDValue(Neg, 0);

    // If the shift amount is masked with an AND, check that the mask covers
    // the bits that are implicitly ANDed off by the above opcodes and, if so,
    // skip the AND.
    if (!isOpcWithIntImmediate(ShiftAmt.getNode(), ISD::AND, MaskImm))

    if (countTrailingOnes(MaskImm) < Bits)

    NewShiftAmt = ShiftAmt->getOperand(0);

  // Narrow/widen the shift amount to match the size of the shift operation.
    NewShiftAmt = narrowIfNeeded(CurDAG, NewShiftAmt);
  else if (VT == MVT::i64 && NewShiftAmt->getValueType(0) == MVT::i32) {
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, DL, MVT::i32);
    MachineSDNode *Ext = CurDAG->getMachineNode(
        AArch64::SUBREG_TO_REG, DL, VT,
        CurDAG->getTargetConstant(0, DL, MVT::i64), NewShiftAmt, SubReg);
    NewShiftAmt = SDValue(Ext, 0);

  SDValue Ops[] = {N->getOperand(0), NewShiftAmt};
  CurDAG->SelectNodeTo(N, Opc, VT, Ops);

AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                              unsigned RegWidth) {
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    FVal = CN->getValueAPF();
  else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
    // Some otherwise illegal constants are allowed in this case.
    if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
        !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))

    ConstantPoolSDNode *CN =
        dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
    FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();

  // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
  // is between 1 and 32 for a destination w-register, or 1 and 64 for an
  // x-register.
  //
  // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
  // want THIS_NODE to be 2^fbits. This is much easier to deal with using
  // integers.

  // fbits is between 1 and 64 in the worst case, which means the fmul
  // could have 2^64 as an actual operand. Need 65 bits of precision.
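  // For example, (fp_to_sint (fmul x, 256.0)) targeting a w-register yields
  // FBits == 8, i.e. a fixed-point conversion with 8 fractional bits.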
  APSInt IntVal(65, true);
  FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);

  // N.b. isPowerOf2 also checks for > 0.
  if (!IsExact || !IntVal.isPowerOf2()) return false;
  unsigned FBits = IntVal.logBase2();

  // Checks above should have guaranteed that we haven't lost information in
  // finding FBits, but it must still be in range.
  if (FBits == 0 || FBits > RegWidth) return false;

  FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);

// Inspects a register string of the form o0:op1:CRn:CRm:op2, gets the fields
// of the string, obtains the integer values from them, and combines these
// into a single value to be used in the MRS/MSR instruction.
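// For example, the string "3:3:13:0:2" (TPIDR_EL0) combines to
// (3 << 14) | (3 << 11) | (13 << 7) | (0 << 3) | 2 == 0xde82.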
static int getIntOperandFromRegisterString(StringRef RegString) {
  SmallVector<StringRef, 5> Fields;
  RegString.split(Fields, ':');

  if (Fields.size() == 1)

  assert(Fields.size() == 5
         && "Invalid number of fields in read register string");

  SmallVector<int, 5> Ops;
  bool AllIntFields = true;

  for (StringRef Field : Fields) {
    AllIntFields &= !Field.getAsInteger(10, IntField);
    Ops.push_back(IntField);

  assert(AllIntFields &&
         "Unexpected non-integer value in special register string.");

  // Need to combine the integer fields of the string into a single value
  // based on the bit encoding of MRS/MSR instruction.
  return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
         (Ops[3] << 3) | (Ops[4]);

// Lower the read_register intrinsic to an MRS instruction node if the special
// register string argument is either of the form detailed in the ACLE (the
// form described in getIntOperandFromRegisterString) or is a named register
// known by the MRS SysReg mapper.
bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
  const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
  const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));

  int Reg = getIntOperandFromRegisterString(RegString->getString());
    ReplaceNode(N, CurDAG->getMachineNode(
                       AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
                       CurDAG->getTargetConstant(Reg, DL, MVT::i32),

  // Use the sysreg mapper to map the remaining possible strings to the
  // value for the register to be used for the instruction operand.
  auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
  if (TheReg && TheReg->Readable &&
      TheReg->haveFeatures(Subtarget->getFeatureBits()))
    Reg = TheReg->Encoding;
    Reg = AArch64SysReg::parseGenericRegister(RegString->getString());

  ReplaceNode(N, CurDAG->getMachineNode(
                     AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
                     CurDAG->getTargetConstant(Reg, DL, MVT::i32),

// Lower the write_register intrinsic to an MSR instruction node if the special
// register string argument is either of the form detailed in the ACLE (the
// form described in getIntOperandFromRegisterString) or is a named register
// known by the MSR SysReg mapper.
bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
  const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
  const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));

  int Reg = getIntOperandFromRegisterString(RegString->getString());
        N, CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
                                  CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                                  N->getOperand(2), N->getOperand(0)));

  // Check if the register was one of those allowed as the pstatefield value in
  // the MSR (immediate) instruction. To accept the values allowed in the
  // pstatefield for the MSR (immediate) instruction, we also require that an
  // immediate value has been provided as an argument; we know that this is
  // the case, as it has been ensured by semantic checking.
  auto PMapper = AArch64PState::lookupPStateByName(RegString->getString());
    assert(isa<ConstantSDNode>(N->getOperand(2)) &&
           "Expected a constant integer expression.");
    unsigned Reg = PMapper->Encoding;
    uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();

    if (Reg == AArch64PState::PAN || Reg == AArch64PState::UAO) {
      assert(Immed < 2 && "Bad imm");
      State = AArch64::MSRpstateImm1;
      assert(Immed < 16 && "Bad imm");
      State = AArch64::MSRpstateImm4;

    ReplaceNode(N, CurDAG->getMachineNode(
                       State, DL, MVT::Other,
                       CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                       CurDAG->getTargetConstant(Immed, DL, MVT::i16),

  // Use the sysreg mapper to attempt to map the remaining possible strings
  // to the value for the register to be used for the MSR (register)
  // instruction operand.
  auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
  if (TheReg && TheReg->Writeable &&
      TheReg->haveFeatures(Subtarget->getFeatureBits()))
    Reg = TheReg->Encoding;
    Reg = AArch64SysReg::parseGenericRegister(RegString->getString());

  ReplaceNode(N, CurDAG->getMachineNode(
                     AArch64::MSR, DL, MVT::Other,
                     CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                     N->getOperand(2), N->getOperand(0)));

/// We've got special pseudo-instructions for these
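/// compare-and-swap operations; when LSE atomics are unavailable, the pseudo
/// is typically expanded later into an exclusive load/store (LDXR/STXR) loop.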
bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
  EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();

  // Leave IR for LSE if subtarget supports it.
  if (Subtarget->hasLSE()) return false;

  if (MemTy == MVT::i8)
    Opcode = AArch64::CMP_SWAP_8;
  else if (MemTy == MVT::i16)
    Opcode = AArch64::CMP_SWAP_16;
  else if (MemTy == MVT::i32)
    Opcode = AArch64::CMP_SWAP_32;
  else if (MemTy == MVT::i64)
    Opcode = AArch64::CMP_SWAP_64;
    llvm_unreachable("Unknown AtomicCmpSwap type");

  MVT RegTy = MemTy == MVT::i64 ? MVT::i64 : MVT::i32;
  SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
  SDNode *CmpSwap = CurDAG->getMachineNode(
      CurDAG->getVTList(RegTy, MVT::i32, MVT::Other), Ops);

  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);

  ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
  ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
  CurDAG->RemoveDeadNode(N);

void AArch64DAGToDAGISel::Select(SDNode *Node) {
  // If we have a custom node, we already have selected!
  if (Node->isMachineOpcode()) {
    LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
    Node->setNodeId(-1);

  // A few custom selection cases.
  EVT VT = Node->getValueType(0);

  switch (Node->getOpcode()) {

  case ISD::ATOMIC_CMP_SWAP:
    if (SelectCMP_SWAP(Node))

  case ISD::READ_REGISTER:
    if (tryReadRegister(Node))

  case ISD::WRITE_REGISTER:
    if (tryWriteRegister(Node))

    if (tryMLAV64LaneV128(Node))

    // Try to select as an indexed load. Fall through to normal processing
    // if we can't.
    if (tryIndexedLoad(Node))

  case ISD::SIGN_EXTEND_INREG:
    if (tryBitfieldExtractOp(Node))
    if (tryBitfieldInsertInZeroOp(Node))

    if (tryShiftAmountMod(Node))

  case ISD::SIGN_EXTEND:
    if (tryBitfieldExtractOpFromSExt(Node))

    if (tryBitfieldInsertOp(Node))

  case ISD::EXTRACT_VECTOR_ELT: {
    // Extracting lane zero is a special case where we can just use a plain
    // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
    // the rest of the compiler, especially the register allocator and copy
    // propagation, to reason about, so is preferred when it's possible to
    // do so.
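    // For example, extracting lane 0 of a v2f64 as an f64 becomes a dsub
    // EXTRACT_SUBREG, i.e. a simple register-class copy.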
    ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
    // Bail and use the default Select() for non-zero lanes.
    if (LaneNode->getZExtValue() != 0)
    // If the element type is not the same as the result type, likewise
    // bail and use the default Select(), as there's more to do than just
    // a cross-class COPY. This catches extracts of i8 and i16 elements
    // since they will need an explicit zext.
    if (VT != Node->getOperand(0).getValueType().getVectorElementType())

    switch (Node->getOperand(0)
                .getVectorElementType()
      llvm_unreachable("Unexpected vector element type!");
      SubReg = AArch64::dsub;
      SubReg = AArch64::ssub;
      SubReg = AArch64::hsub;
      llvm_unreachable("unexpected zext-requiring extract element!");
    SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
                                                     Node->getOperand(0));
    LLVM_DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
    LLVM_DEBUG(Extract->dumpr(CurDAG));
    LLVM_DEBUG(dbgs() << "\n");
    ReplaceNode(Node, Extract.getNode());
  case ISD::Constant: {
    // Materialize zero constants as copies from WZR/XZR. This allows
    // the coalescer to propagate these into other instructions.
    ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isNullValue()) {
      if (VT == MVT::i32) {
        SDValue New = CurDAG->getCopyFromReg(
            CurDAG->getEntryNode(), SDLoc(Node), AArch64::WZR, MVT::i32);
        ReplaceNode(Node, New.getNode());
      } else if (VT == MVT::i64) {
        SDValue New = CurDAG->getCopyFromReg(
            CurDAG->getEntryNode(), SDLoc(Node), AArch64::XZR, MVT::i64);
        ReplaceNode(Node, New.getNode());
  case ISD::FrameIndex: {
    // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
    const TargetLowering *TLI = getTargetLowering();
    SDValue TFI = CurDAG->getTargetFrameIndex(
        FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
                      CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
    CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();

    case Intrinsic::aarch64_ldaxp:
    case Intrinsic::aarch64_ldxp: {
          IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
      SDValue MemAddr = Node->getOperand(2);
      SDValue Chain = Node->getOperand(0);

      SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
                                          MVT::Other, MemAddr, Chain);

      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
      ReplaceNode(Node, Ld);

    case Intrinsic::aarch64_stlxp:
    case Intrinsic::aarch64_stxp: {
          IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
      SDValue Chain = Node->getOperand(0);
      SDValue ValLo = Node->getOperand(2);
      SDValue ValHi = Node->getOperand(3);
      SDValue MemAddr = Node->getOperand(4);

      // Place arguments in the right order.
      SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};

      SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

      ReplaceNode(Node, St);
    case Intrinsic::aarch64_neon_ld1x2:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
    case Intrinsic::aarch64_neon_ld1x3:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
    case Intrinsic::aarch64_neon_ld1x4:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
    case Intrinsic::aarch64_neon_ld2:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
    case Intrinsic::aarch64_neon_ld3:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
    case Intrinsic::aarch64_neon_ld4:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
    case Intrinsic::aarch64_neon_ld2r:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
    case Intrinsic::aarch64_neon_ld3r:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
    case Intrinsic::aarch64_neon_ld4r:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
    case Intrinsic::aarch64_neon_ld2lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectLoadLane(Node, 2, AArch64::LD2i8);
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
        SelectLoadLane(Node, 2, AArch64::LD2i16);
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
        SelectLoadLane(Node, 2, AArch64::LD2i32);
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
        SelectLoadLane(Node, 2, AArch64::LD2i64);
    case Intrinsic::aarch64_neon_ld3lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectLoadLane(Node, 3, AArch64::LD3i8);
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
        SelectLoadLane(Node, 3, AArch64::LD3i16);
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
        SelectLoadLane(Node, 3, AArch64::LD3i32);
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
        SelectLoadLane(Node, 3, AArch64::LD3i64);
    case Intrinsic::aarch64_neon_ld4lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectLoadLane(Node, 4, AArch64::LD4i8);
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
        SelectLoadLane(Node, 4, AArch64::LD4i16);
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
        SelectLoadLane(Node, 4, AArch64::LD4i32);
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
        SelectLoadLane(Node, 4, AArch64::LD4i64);
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();

    case Intrinsic::aarch64_neon_tbl2:
      SelectTable(Node, 2,
                  VT == MVT::v8i8 ? AArch64::TBLv8i8Two : AArch64::TBLv16i8Two,
    case Intrinsic::aarch64_neon_tbl3:
      SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
                                           : AArch64::TBLv16i8Three,
    case Intrinsic::aarch64_neon_tbl4:
      SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
                                           : AArch64::TBLv16i8Four,
    case Intrinsic::aarch64_neon_tbx2:
      SelectTable(Node, 2,
                  VT == MVT::v8i8 ? AArch64::TBXv8i8Two : AArch64::TBXv16i8Two,
    case Intrinsic::aarch64_neon_tbx3:
      SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
                                           : AArch64::TBXv16i8Three,
    case Intrinsic::aarch64_neon_tbx4:
      SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
                                           : AArch64::TBXv16i8Four,
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_umull:
      if (tryMULLV64LaneV128(IntNo, Node))
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    if (Node->getNumOperands() >= 3)
      VT = Node->getOperand(2)->getValueType(0);

    case Intrinsic::aarch64_neon_st1x2: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 2, AArch64::ST1Twov8b);
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 2, AArch64::ST1Twov16b);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 2, AArch64::ST1Twov4h);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 2, AArch64::ST1Twov8h);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 2, AArch64::ST1Twov2s);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 2, AArch64::ST1Twov4s);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 2, AArch64::ST1Twov2d);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 2, AArch64::ST1Twov1d);
    case Intrinsic::aarch64_neon_st1x3: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 3, AArch64::ST1Threev8b);
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 3, AArch64::ST1Threev16b);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 3, AArch64::ST1Threev4h);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 3, AArch64::ST1Threev8h);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 3, AArch64::ST1Threev2s);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 3, AArch64::ST1Threev4s);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 3, AArch64::ST1Threev2d);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 3, AArch64::ST1Threev1d);
    case Intrinsic::aarch64_neon_st1x4: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 4, AArch64::ST1Fourv8b);
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 4, AArch64::ST1Fourv16b);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 4, AArch64::ST1Fourv4h);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 4, AArch64::ST1Fourv8h);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 4, AArch64::ST1Fourv2s);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 4, AArch64::ST1Fourv4s);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 4, AArch64::ST1Fourv2d);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 4, AArch64::ST1Fourv1d);
    case Intrinsic::aarch64_neon_st2: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 2, AArch64::ST2Twov8b);
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 2, AArch64::ST2Twov16b);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 2, AArch64::ST2Twov4h);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 2, AArch64::ST2Twov8h);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 2, AArch64::ST2Twov2s);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 2, AArch64::ST2Twov4s);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 2, AArch64::ST2Twov2d);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 2, AArch64::ST1Twov1d);
    case Intrinsic::aarch64_neon_st3: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 3, AArch64::ST3Threev8b);
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 3, AArch64::ST3Threev16b);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 3, AArch64::ST3Threev4h);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 3, AArch64::ST3Threev8h);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 3, AArch64::ST3Threev2s);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 3, AArch64::ST3Threev4s);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 3, AArch64::ST3Threev2d);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 3, AArch64::ST1Threev1d);
    case Intrinsic::aarch64_neon_st4: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 4, AArch64::ST4Fourv8b);
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 4, AArch64::ST4Fourv16b);
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 4, AArch64::ST4Fourv4h);
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 4, AArch64::ST4Fourv8h);
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 4, AArch64::ST4Fourv2s);
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 4, AArch64::ST4Fourv4s);
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 4, AArch64::ST4Fourv2d);
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 4, AArch64::ST1Fourv1d);
  case Intrinsic::aarch64_neon_st2lane: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectStoreLane(Node, 2, AArch64::ST2i8);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectStoreLane(Node, 2, AArch64::ST2i16);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectStoreLane(Node, 2, AArch64::ST2i32);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectStoreLane(Node, 2, AArch64::ST2i64);
      return;
    }
    break;
  }
  case Intrinsic::aarch64_neon_st3lane: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectStoreLane(Node, 3, AArch64::ST3i8);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectStoreLane(Node, 3, AArch64::ST3i16);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectStoreLane(Node, 3, AArch64::ST3i32);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectStoreLane(Node, 3, AArch64::ST3i64);
      return;
    }
    break;
  }
  case Intrinsic::aarch64_neon_st4lane: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectStoreLane(Node, 4, AArch64::ST4i8);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectStoreLane(Node, 4, AArch64::ST4i16);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectStoreLane(Node, 4, AArch64::ST4i32);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectStoreLane(Node, 4, AArch64::ST4i64);
      return;
    }
    break;
  }
  case AArch64ISD::LD2post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD3post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD4post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD1x2post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD1x3post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD1x4post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD1DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD2DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD3DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD4DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD1LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD2LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD3LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD4LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
      return;
    }
    break;
  }
  }

  // Select the default instruction
  SelectCode(Node);
}

/// createAArch64ISelDag - This pass converts a legalized DAG into a
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}
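// Usage note (a minimal sketch, not part of this file): the factory above is
// typically invoked from the target's pass configuration when the instruction
// selector is added to the codegen pipeline, roughly:
//
//   bool AArch64PassConfig::addInstSelector() {
//     addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));
//     return false;
//   }
//
// The surrounding method shown here is illustrative; see
// AArch64TargetMachine.cpp for the actual wiring.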