//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"
//===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr) {}

  StringRef getPassName() const override {
    return "AArch64 Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    Subtarget = &MF.getSubtarget<AArch64Subtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  void Select(SDNode *Node) override;
  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  template <signed Low, signed High, signed Scale>
  bool SelectRDVLImm(SDValue N, SDValue &Imm);

  bool tryMLAV64LaneV128(SDNode *N);
  bool tryMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexedS9S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexedBitWidth(N, true, 9, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexedU6S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexedBitWidth(N, false, 6, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }
  template <unsigned Size, unsigned Max>
  bool SelectAddrModeIndexedUImm(SDValue N, SDValue &Base, SDValue &OffImm) {
    // Test if there is an appropriate addressing mode and check if the
    // immediate fits.
    bool Found = SelectAddrModeIndexed(N, Size, Base, OffImm);
    if (Found) {
      if (auto *CI = dyn_cast<ConstantSDNode>(OffImm)) {
        int64_t C = CI->getSExtValue();
        if (C <= Max)
          return true;
      }
    }

    // Otherwise, base only, materialize address in register.
    Base = N;
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
    return true;
  }
  template <unsigned Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  template <unsigned Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }
  bool SelectDupZeroOrUndef(SDValue N) {
    switch(N->getOpcode()) {
    case ISD::UNDEF:
      return true;
    case AArch64ISD::DUP:
    case ISD::SPLAT_VECTOR: {
      auto Opnd0 = N->getOperand(0);
      if (auto CN = dyn_cast<ConstantSDNode>(Opnd0))
        if (CN->isZero())
          return true;
      if (auto CN = dyn_cast<ConstantFPSDNode>(Opnd0))
        if (CN->isZero())
          return true;
      break;
    }
    default:
      break;
    }

    return false;
  }

  bool SelectDupZero(SDValue N) {
    switch(N->getOpcode()) {
    case AArch64ISD::DUP:
    case ISD::SPLAT_VECTOR: {
      auto Opnd0 = N->getOperand(0);
      if (auto CN = dyn_cast<ConstantSDNode>(Opnd0))
        if (CN->isZero())
          return true;
      if (auto CN = dyn_cast<ConstantFPSDNode>(Opnd0))
        if (CN->isZero())
          return true;
      break;
    }
    default:
      break;
    }

    return false;
  }
  template<MVT::SimpleValueType VT>
  bool SelectSVEAddSubImm(SDValue N, SDValue &Imm, SDValue &Shift) {
    return SelectSVEAddSubImm(N, VT, Imm, Shift);
  }

  template <MVT::SimpleValueType VT>
  bool SelectSVECpyDupImm(SDValue N, SDValue &Imm, SDValue &Shift) {
    return SelectSVECpyDupImm(N, VT, Imm, Shift);
  }

  template <MVT::SimpleValueType VT, bool Invert = false>
  bool SelectSVELogicalImm(SDValue N, SDValue &Imm) {
    return SelectSVELogicalImm(N, VT, Imm, Invert);
  }

  template <MVT::SimpleValueType VT>
  bool SelectSVEArithImm(SDValue N, SDValue &Imm) {
    return SelectSVEArithImm(N, VT, Imm);
  }

  template <unsigned Low, unsigned High, bool AllowSaturation = false>
  bool SelectSVEShiftImm(SDValue N, SDValue &Imm) {
    return SelectSVEShiftImm(N, Low, High, AllowSaturation, Imm);
  }
  // Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
  template<signed Min, signed Max, signed Scale, bool Shift>
  bool SelectCntImm(SDValue N, SDValue &Imm) {
    if (!isa<ConstantSDNode>(N))
      return false;

    int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
    if (Shift)
      MulImm = 1LL << MulImm;

    if ((MulImm % std::abs(Scale)) != 0)
      return false;

    MulImm /= Scale;
    if ((MulImm >= Min) && (MulImm <= Max)) {
      Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
      return true;
    }

    return false;
  }

  template <signed Max, signed Scale>
  bool SelectEXTImm(SDValue N, SDValue &Imm) {
    if (!isa<ConstantSDNode>(N))
      return false;

    int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();

    if (MulImm >= 0 && MulImm <= Max) {
      MulImm *= Scale;
      Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
      return true;
    }

    return false;
  }
  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element that is returned
  /// unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);
  // Form a sequence of SVE registers for instructions using list of vectors,
  // e.g. structured loads and stores (ldN, stN).
  SDValue createZTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
                      const unsigned SubRegs[]);
  void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);

  bool tryIndexedLoad(SDNode *N);

  bool trySelectStackSlotTagP(SDNode *N);
  void SelectTagP(SDNode *N);

  void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                  unsigned SubRegIdx);
  void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                      unsigned SubRegIdx);
  void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPredicatedLoad(SDNode *N, unsigned NumVecs, unsigned Scale,
                            unsigned Opc_rr, unsigned Opc_ri,
                            bool IsIntr = false);
  bool SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base, SDValue &OffImm);
  /// SVE Reg+Imm addressing mode.
  template <int64_t Min, int64_t Max>
  bool SelectAddrModeIndexedSVE(SDNode *Root, SDValue N, SDValue &Base,
                                SDValue &OffImm);
  /// SVE Reg+Reg address mode.
  template <unsigned Scale>
  bool SelectSVERegRegAddrMode(SDValue N, SDValue &Base, SDValue &Offset) {
    return SelectSVERegRegAddrMode(N, Scale, Base, Offset);
  }
  void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPredicatedStore(SDNode *N, unsigned NumVecs, unsigned Scale,
                             unsigned Opc_rr, unsigned Opc_ri);
  std::tuple<unsigned, SDValue, SDValue>
  findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr, unsigned Opc_ri,
                           const SDValue &OldBase, const SDValue &OldOffset,
                           unsigned Scale);
  bool tryBitfieldExtractOp(SDNode *N);
  bool tryBitfieldExtractOpFromSExt(SDNode *N);
  bool tryBitfieldInsertOp(SDNode *N);
  bool tryBitfieldInsertInZeroOp(SDNode *N);
  bool tryShiftAmountMod(SDNode *N);
  bool tryHighFPExt(SDNode *N);

  bool tryReadRegister(SDNode *N);
  bool tryWriteRegister(SDNode *N);
// Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
                               SDValue &OffImm) {
    return SelectAddrModeIndexedBitWidth(N, true, 7, Size, Base, OffImm);
  }
  bool SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm, unsigned BW,
                                     unsigned Size, SDValue &Base,
                                     SDValue &OffImm);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
                         SDValue &Offset, SDValue &SignExtend);
  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);

  bool SelectCMP_SWAP(SDNode *N);
  bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
  bool SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
  bool SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm, bool Invert);

  bool SelectSVESignedArithImm(SDValue N, SDValue &Imm);
  bool SelectSVEShiftImm(SDValue N, uint64_t Low, uint64_t High,
                         bool AllowSaturation, SDValue &Imm);

  bool SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm);
  bool SelectSVERegRegAddrMode(SDValue N, unsigned Scale, SDValue &Base,
                               SDValue &Offset);

  bool SelectAllActivePredicate(SDValue N);
};

} // end anonymous namespace
/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the 32-bit value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the operand is a constant.
// If so, Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the 32-bit value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}
bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch(ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_o:
  case InlineAsm::Constraint_Q:
    // We need to make sure that this one operand does not end up in XZR, thus
    // require the address to be in a PointerRegClass register.
    const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
    const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF);
    SDLoc dl(Op);
    SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i64);
    SDValue NewOp =
        SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                       dl, Op.getValueType(),
                                       Op, RC), 0);
    OutOps.push_back(NewOp);
    return false;
  }
  return true;
}
/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                           SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  SDLoc dl(N);
  Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
  return true;
}
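// Worked example (illustrative): 4095 (0xfff) is selected with shift 0;
// 0x456000 is selected as the 12-bit value 0x456 with "LSL #12" because its
// low 12 bits are clear and it fits in 24 bits; 0x1001 is rejected because
// it needs more than 12 significant bits at either shift amount.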
/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                              SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
                          Shift);
}
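// Illustrative use: an (add x, #-24) node negates -24 to 24, which fits the
// 12-bit field, so the operation can be selected as a SUB with immediate 24
// instead of materializing -24 in a register.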
/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return AArch64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return AArch64_AM::LSL;
  case ISD::SRL:
    return AArch64_AM::LSR;
  case ISD::SRA:
    return AArch64_AM::ASR;
  case ISD::ROTR:
    return AArch64_AM::ROR;
  }
}

/// Determine whether it is worth it to fold SHL into the addressing
/// mode.
static bool isWorthFoldingSHL(SDValue V) {
  assert(V.getOpcode() == ISD::SHL && "invalid opcode");
  // It is worth folding logical shift of up to three places.
  auto *CSD = dyn_cast<ConstantSDNode>(V.getOperand(1));
  if (!CSD)
    return false;
  unsigned ShiftVal = CSD->getZExtValue();
  if (ShiftVal > 3)
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = V.getNode();
  for (SDNode *UI : Node->uses())
    if (!isa<MemSDNode>(*UI))
      for (SDNode *UII : UI->uses())
        if (!isa<MemSDNode>(*UII))
          return false;
  return true;
}
/// Determine whether it is worth to fold V into an extended register.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // Trivial if we are optimizing for code size or if there is only
  // one use of the value.
  if (CurDAG->shouldOptForSize() || V.hasOneUse())
    return true;
  // If a subtarget has a fastpath LSL we can fold a logical shift into
  // the addressing mode and save a cycle.
  if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::SHL &&
      isWorthFoldingSHL(V))
    return true;
  if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::ADD) {
    const SDValue LHS = V.getOperand(0);
    const SDValue RHS = V.getOperand(1);
    if (LHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(LHS))
      return true;
    if (RHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(RHS))
      return true;
  }

  // It hurts otherwise, since the value will be reused.
  return false;
}
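// Illustrative fold: when the shift amount is small and every user is a
// memory access, (load (add x1, (shl x2, 3))) can be selected as
//   ldr x0, [x1, x2, lsl #3]
// instead of computing the scaled address in a separate register.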
/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to default of "LSL 0". The logical
/// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not. The AllowROR parameter specifies whether ROR is
/// allowed.
bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                                SDValue &Reg, SDValue &Shift) {
  AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
  if (ShType == AArch64_AM::InvalidShiftExtend)
    return false;
  if (!AllowROR && ShType == AArch64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
    return isWorthFolding(N);
  }

  return false;
}
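// Illustrative result: logical instructions may take a rotated operand,
// e.g. "and w0, w1, w2, ror #8", while arithmetic instructions only accept
// LSL/LSR/ASR, so an ADD fed by a rotate is not matched here.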
/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::SXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::UXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return AArch64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return AArch64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return AArch64_AM::UXTW;
    }
  }

  return AArch64_AM::InvalidShiftExtend;
}
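// Illustrative mappings: (and x, 0xff) -> UXTB, (and x, 0xffff) -> UXTH,
// (and x, 0xffffffff) -> UXTW; a (sext_inreg x, i8) maps to SXTB. For
// load/store addressing only the 32-bit forms (UXTW/SXTW) are usable.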
// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
      DL->getOpcode() != AArch64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
  LaneOp = EV.getOperand(0);

  return true;
}

// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is a
// high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {
  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}
/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
bool AArch64DAGToDAGISel::tryMLAV64LaneV128(SDNode *N) {
  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return false;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = AArch64::MLAv4i16_indexed;
    break;
  case MVT::v8i16:
    MLAOpc = AArch64::MLAv8i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = AArch64::MLAv2i32_indexed;
    break;
  case MVT::v4i32:
    MLAOpc = AArch64::MLAv4i32_indexed;
    break;
  }

  ReplaceNode(N, CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops));
  return true;
}

bool AArch64DAGToDAGISel::tryMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDLoc dl(N);
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return false;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::aarch64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::aarch64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  ReplaceNode(N, CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops));
  return true;
}
/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32)
    return N;

  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                               dl, MVT::i32, N, SubReg);
  return SDValue(Node, 0);
}

// Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
template<signed Low, signed High, signed Scale>
bool AArch64DAGToDAGISel::SelectRDVLImm(SDValue N, SDValue &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
  if ((MulImm % std::abs(Scale)) == 0) {
    int64_t RDVLImm = MulImm / Scale;
    if ((RDVLImm >= Low) && (RDVLImm <= High)) {
      Imm = CurDAG->getTargetConstant(RDVLImm, SDLoc(N), MVT::i32);
      return true;
    }
  }

  return false;
}
/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                      SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);

    // Don't match if free 32-bit -> 64-bit zext can be used instead.
    if (Ext == AArch64_AM::UXTW &&
        Reg->getValueType(0).getSizeInBits() == 32 && isDef32(*Reg.getNode()))
      return false;
  }

  // AArch64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
  Reg = narrowIfNeeded(CurDAG, Reg);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
                                    MVT::i32);
  return isWorthFolding(N);
}
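// Illustrative fold: (add x1, (shl (sext w2), #2)) becomes
//   add x0, x1, w2, sxtw #2
// with the extend and the left shift folded into the second operand.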
/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point in
/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
/// leads to duplicated ADRP instructions.
static bool isWorthFoldingADDlow(SDValue N) {
  for (auto Use : N->uses()) {
    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
        Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;

    // ldar and stlr have much more restrictive addressing modes (just a
    // register).
    if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getSuccessOrdering()))
      return false;
  }

  return true;
}
/// SelectAddrModeIndexedBitWidth - Select a "register plus scaled (un)signed BW-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm,
                                                        unsigned BW, unsigned Size,
                                                        SDValue &Base,
                                                        SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  // As opposed to the (12-bit) Indexed addressing mode below, the 7/9-bit signed
  // selected here doesn't support labels/immediates, only base+offset.
  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      if (IsSignedImm) {
        int64_t RHSC = RHS->getSExtValue();
        unsigned Scale = Log2_32(Size);
        int64_t Range = 0x1LL << (BW - 1);

        if ((RHSC & (Size - 1)) == 0 && RHSC >= -(Range << Scale) &&
            RHSC < (Range << Scale)) {
          Base = N.getOperand(0);
          if (Base.getOpcode() == ISD::FrameIndex) {
            int FI = cast<FrameIndexSDNode>(Base)->getIndex();
            Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
          }
          OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
          return true;
        }
      } else {
        // unsigned Immediate
        uint64_t RHSC = RHS->getZExtValue();
        unsigned Scale = Log2_32(Size);
        uint64_t Range = 0x1ULL << BW;

        if ((RHSC & (Size - 1)) == 0 && RHSC < (Range << Scale)) {
          Base = N.getOperand(0);
          if (Base.getOpcode() == ISD::FrameIndex) {
            int FI = cast<FrameIndexSDNode>(Base)->getIndex();
            Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
          }
          OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
          return true;
        }
      }
    }
  }
  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}
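// Worked range (illustrative): with BW = 7 and Size = 8 (64-bit LDP/STP),
// the byte offset must be a multiple of 8 in [-512, 504], i.e. the signed
// 7-bit field [-64, 63] scaled by the access size.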
/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                                SDValue &Base, SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    if (GAN->getOffset() % Size == 0 &&
        GAN->getGlobal()->getPointerAlignment(DL) >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}
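// Illustrative bounds: for an 8-byte access the unsigned scaled form covers
// offsets 0..32760 in steps of 8 (4095 * 8), e.g. "ldr x0, [x1, #32760]".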
/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                                 SDValue &Base,
                                                 SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
      return true;
    }
  }
  return false;
}
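// Illustrative case: an offset such as -17 is not a multiple of the access
// size, so it is matched here and emitted as "ldur x0, [x1, #-17]", using
// the signed 9-bit unscaled range [-256, 255].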
static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}
/// Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                            bool WantExtend, SDValue &Offset,
                                            SDValue &SignExtend) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
    return false;

  SDLoc dl(N);
  if (WantExtend) {
    AArch64_AM::ShiftExtendType Ext =
        getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
  } else {
    Offset = N.getOperand(0);
    SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
  }

  unsigned LegalShiftVal = Log2_32(Size);
  unsigned ShiftVal = CSD->getZExtValue();

  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
    return false;

  return isWorthFolding(N);
}
bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc dl(N);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // There was no shift, whatever else we find.
  DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);

  AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  return false;
}
// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and can not be
// encoded by one MOVZ, return true.
static bool isPreferredADD(int64_t ImmOff) {
  // Constant in [0x0, 0xfff] can be encoded in ADD.
  if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
    return true;
  // Check if it can be encoded in an "ADD LSL #12".
  if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
    // As a single MOVZ is faster than an "ADD of LSL #12", ignore such constant.
    return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
           (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
  return false;
}
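// Illustrative values: 0xfff is encodable directly in ADD; 0x456000 fits
// "ADD ... LSL #12" and cannot be built with a single MOVZ, so ADD is
// preferred; 0x450000 equals 0x45 << 16, which one MOVZ can produce, so it
// is not.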
bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc DL(N);

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Watch out if RHS is a wide immediate: it can not be selected into the
  // [BaseReg+Imm] addressing mode, and it may not be encodable in an
  // ADD/SUB. In that case the [BaseReg + 0] address mode is used and code
  // like this is generated:
  //     MOV  X0, WideImmediate
  //     ADD  X1, BaseReg, X0
  //     LDR  X2, [X1, 0]
  // For such a situation, using the [BaseReg, XReg] addressing mode saves one
  // ADD/SUB:
  //     MOV  X0, WideImmediate
  //     LDR  X2, [BaseReg, X0]
  if (isa<ConstantSDNode>(RHS)) {
    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
    unsigned Scale = Log2_32(Size);
    // Skip immediates that can be selected in the load/store addressing
    // mode, and also those that can be encoded by a single ADD (SUB is also
    // checked by using -ImmOff).
    if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
        isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
      return false;

    SDValue Ops[] = { RHS };
    SDNode *MOVI =
        CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    SDValue MOVIV = SDValue(MOVI, 0);
    // This ADD of two X registers will be selected into [Reg+Reg] mode.
    N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
  DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}
AArch64DAGToDAGISel::createDTuple(ArrayRef
<SDValue
> Regs
) {
1229 static const unsigned RegClassIDs
[] = {
1230 AArch64::DDRegClassID
, AArch64::DDDRegClassID
, AArch64::DDDDRegClassID
};
1231 static const unsigned SubRegs
[] = {AArch64::dsub0
, AArch64::dsub1
,
1232 AArch64::dsub2
, AArch64::dsub3
};
1234 return createTuple(Regs
, RegClassIDs
, SubRegs
);
1237 SDValue
AArch64DAGToDAGISel::createQTuple(ArrayRef
<SDValue
> Regs
) {
1238 static const unsigned RegClassIDs
[] = {
1239 AArch64::QQRegClassID
, AArch64::QQQRegClassID
, AArch64::QQQQRegClassID
};
1240 static const unsigned SubRegs
[] = {AArch64::qsub0
, AArch64::qsub1
,
1241 AArch64::qsub2
, AArch64::qsub3
};
1243 return createTuple(Regs
, RegClassIDs
, SubRegs
);
1246 SDValue
AArch64DAGToDAGISel::createZTuple(ArrayRef
<SDValue
> Regs
) {
1247 static const unsigned RegClassIDs
[] = {AArch64::ZPR2RegClassID
,
1248 AArch64::ZPR3RegClassID
,
1249 AArch64::ZPR4RegClassID
};
1250 static const unsigned SubRegs
[] = {AArch64::zsub0
, AArch64::zsub1
,
1251 AArch64::zsub2
, AArch64::zsub3
};
1253 return createTuple(Regs
, RegClassIDs
, SubRegs
);
SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         const unsigned RegClassIDs[],
                                         const unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0]);

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}
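// Illustrative shape: for three D registers the node produced is
//   REG_SEQUENCE DDDRegClassID, v0, dsub0, v1, dsub1, v2, dsub2
// i.e. the register-class ID followed by (value, sub-register index) pairs.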
void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
                                      bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
}
bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return false;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;

  // We're not doing validity checking here. That was done when checking
  // if we should mark the load as indexed or not. We're just selecting
  // the right instruction.
  unsigned Opcode = 0;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  bool InsertTo64 = false;
  if (VT == MVT::i64)
    Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
  else if (VT == MVT::i32) {
    if (ExtType == ISD::NON_EXTLOAD)
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
    else if (ExtType == ISD::SEXTLOAD)
      Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
    else {
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
      InsertTo64 = true;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i16) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
      else
        Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i8) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
      else
        Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::f16) {
    Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
  } else if (VT == MVT::bf16) {
    Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
  } else if (VT == MVT::f32) {
    Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
  } else if (VT == MVT::f64 || VT.is64BitVector()) {
    Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
  } else if (VT.is128BitVector()) {
    Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
  } else
    return false;
  SDValue Chain = LD->getChain();
  SDValue Base = LD->getBasePtr();
  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
  int OffsetVal = (int)OffsetOp->getZExtValue();
  SDLoc dl(N);
  SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
  SDValue Ops[] = { Base, Offset, Chain };
  SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
                                       MVT::Other, Ops);

  // Transfer memoperands.
  MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Res), {MemOp});

  // Either way, we're replacing the node, so tell the caller that.
  SDValue LoadedVal = SDValue(Res, 1);
  if (InsertTo64) {
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    LoadedVal =
        SDValue(CurDAG->getMachineNode(
                    AArch64::SUBREG_TO_REG, dl, MVT::i64,
                    CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
                    SubReg),
                0);
  }

  ReplaceUses(SDValue(N, 0), LoadedVal);
  ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
  ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
  CurDAG->RemoveDeadNode(N);
  return true;
}
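// Illustrative selection: a post-indexed i64 load becomes
//   ldr x0, [x1], #8   (LDRXpost: written-back base, loaded value, chain)
// and the pre-indexed form "ldr x0, [x1, #8]!" maps to LDRXpre.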
void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                                     unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(2), // Mem operand;
                   Chain};

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i),
                CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));

  // Transfer memoperands. In the case of AArch64::LD64B, there won't be one,
  // because it's too simple to have needed special treatment during lowering.
  if (auto *MemIntr = dyn_cast<MemIntrinsicSDNode>(N)) {
    MachineMemOperand *MemOp = MemIntr->getMemOperand();
    CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});
  }

  CurDAG->RemoveDeadNode(N);
}
void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                                         unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(1), // Mem operand
                   N->getOperand(2), // Incremental
                   Chain};

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1)
    ReplaceUses(SDValue(N, 0), SuperReg);
  else
    for (unsigned i = 0; i < NumVecs; ++i)
      ReplaceUses(SDValue(N, i),
                  CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT,
                                                 SuperReg));

  // Update the chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  CurDAG->RemoveDeadNode(N);
}
/// Optimize \param OldBase and \param OldOffset selecting the best addressing
/// mode. Returns a tuple consisting of an Opcode, an SDValue representing the
/// new Base and an SDValue representing the new offset.
std::tuple<unsigned, SDValue, SDValue>
AArch64DAGToDAGISel::findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr,
                                              unsigned Opc_ri,
                                              const SDValue &OldBase,
                                              const SDValue &OldOffset,
                                              unsigned Scale) {
  SDValue NewBase = OldBase;
  SDValue NewOffset = OldOffset;
  // Detect a possible Reg+Imm addressing mode.
  const bool IsRegImm = SelectAddrModeIndexedSVE</*Min=*/-8, /*Max=*/7>(
      N, OldBase, NewBase, NewOffset);

  // Detect a possible reg+reg addressing mode, but only if we haven't already
  // detected a Reg+Imm one.
  const bool IsRegReg =
      !IsRegImm && SelectSVERegRegAddrMode(OldBase, Scale, NewBase, NewOffset);

  // Select the instruction.
  return std::make_tuple(IsRegReg ? Opc_rr : Opc_ri, NewBase, NewOffset);
}
void AArch64DAGToDAGISel::SelectPredicatedLoad(SDNode *N, unsigned NumVecs,
                                               unsigned Scale, unsigned Opc_ri,
                                               unsigned Opc_rr, bool IsIntr) {
  assert(Scale < 4 && "Invalid scaling value.");
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  // Optimize addressing mode.
  SDValue Base, Offset;
  unsigned Opc;
  std::tie(Opc, Base, Offset) = findAddrModeSVELoadStore(
      N, Opc_rr, Opc_ri, N->getOperand(IsIntr ? 3 : 2),
      CurDAG->getTargetConstant(0, DL, MVT::i64), Scale);

  SDValue Ops[] = {N->getOperand(IsIntr ? 2 : 1), // Predicate
                   Base,                          // Memory operand
                   Offset, Chain};

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  SDNode *Load = CurDAG->getMachineNode(Opc, DL, ResTys, Ops);
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
                                   AArch64::zsub0 + i, DL, VT, SuperReg));

  // Copy chain
  unsigned ChainIdx = NumVecs;
  ReplaceUses(SDValue(N, ChainIdx), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(N);
}
void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                      unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  // Transfer memoperands.
  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});

  ReplaceNode(N, St);
}
*N
, unsigned NumVecs
,
1546 unsigned Scale
, unsigned Opc_rr
,
1550 // Form a REG_SEQUENCE to force register allocation.
1551 SmallVector
<SDValue
, 4> Regs(N
->op_begin() + 2, N
->op_begin() + 2 + NumVecs
);
1552 SDValue RegSeq
= createZTuple(Regs
);
1554 // Optimize addressing mode.
1556 SDValue Offset
, Base
;
1557 std::tie(Opc
, Base
, Offset
) = findAddrModeSVELoadStore(
1558 N
, Opc_rr
, Opc_ri
, N
->getOperand(NumVecs
+ 3),
1559 CurDAG
->getTargetConstant(0, dl
, MVT::i64
), Scale
);
1561 SDValue Ops
[] = {RegSeq
, N
->getOperand(NumVecs
+ 2), // predicate
1564 N
->getOperand(0)}; // chain
1565 SDNode
*St
= CurDAG
->getMachineNode(Opc
, dl
, N
->getValueType(0), Ops
);
bool AArch64DAGToDAGISel::SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base,
                                                      SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();

  // Try to match it for the frame address
  if (auto FINode = dyn_cast<FrameIndexSDNode>(N)) {
    int FI = FINode->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  return false;
}
*N
, unsigned NumVecs
,
1590 EVT VT
= N
->getOperand(2)->getValueType(0);
1591 const EVT ResTys
[] = {MVT::i64
, // Type of the write back register
1592 MVT::Other
}; // Type for the Chain
1594 // Form a REG_SEQUENCE to force register allocation.
1595 bool Is128Bit
= VT
.getSizeInBits() == 128;
1596 SmallVector
<SDValue
, 4> Regs(N
->op_begin() + 1, N
->op_begin() + 1 + NumVecs
);
1597 SDValue RegSeq
= Is128Bit
? createQTuple(Regs
) : createDTuple(Regs
);
1599 SDValue Ops
[] = {RegSeq
,
1600 N
->getOperand(NumVecs
+ 1), // base register
1601 N
->getOperand(NumVecs
+ 2), // Incremental
1602 N
->getOperand(0)}; // Chain
1603 SDNode
*St
= CurDAG
->getMachineNode(Opc
, dl
, ResTys
, Ops
);
namespace {
/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
class WidenVector {
  SelectionDAG &DAG;

public:
  WidenVector(SelectionDAG &DAG) : DAG(DAG) {}

  SDValue operator()(SDValue V64Reg) {
    EVT VT = V64Reg.getValueType();
    unsigned NarrowSize = VT.getVectorNumElements();
    MVT EltTy = VT.getVectorElementType().getSimpleVT();
    MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
    SDLoc DL(V64Reg);

    SDValue Undef =
        SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
    return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
  }
};
} // namespace
/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
  EVT VT = V128Reg.getValueType();
  unsigned WideSize = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType().getSimpleVT();
  MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);

  return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
                                    V128Reg);
}
void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
                                         unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(),
              WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);

  EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
  static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
                                    AArch64::qsub2, AArch64::qsub3 };
  for (unsigned i = 0; i < NumVecs; ++i) {
    SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
    if (Narrow)
      NV = NarrowVector(NV, *CurDAG);
    ReplaceUses(SDValue(N, i), NV);
  }

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
  CurDAG->RemoveDeadNode(N);
}
void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(),
              WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        RegSeq->getValueType(0), MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq,
                   CurDAG->getTargetConstant(LaneNo, dl,
                                             MVT::i64),  // Lane Number
                   N->getOperand(NumVecs + 2),           // Base register
                   N->getOperand(NumVecs + 3),           // Incremental
                   N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of the write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of the vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1) {
    ReplaceUses(SDValue(N, 0),
                Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
  } else {
    EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
    static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
                                      AArch64::qsub2, AArch64::qsub3 };
    for (unsigned i = 0; i < NumVecs; ++i) {
      SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
                                                  SuperReg);
      if (Narrow)
        NV = NarrowVector(NV, *CurDAG);
      ReplaceUses(SDValue(N, i), NV);
    }
  }

  // Update the Chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  CurDAG->RemoveDeadNode(N);
}
void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(),
              WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);

  // Transfer memoperands.
  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});

  ReplaceNode(N, St);
}
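// Post-increment variant of the lane store above; the result additionally
// produces the updated base register.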
void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
                                              unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    transform(Regs, Regs.begin(),
              WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 2), // Base Register
                   N->getOperand(NumVecs + 3), // Incremental
                   N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Transfer memoperands.
  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});

  ReplaceNode(N, St);
}
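// Try to match an AND (optionally of a right-shifted value) as an unsigned
// bitfield extract, returning the UBFM opcode and the LSB/MSB immediates.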
static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
                                       unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       unsigned NumberOfIgnoredLowBits,
                                       bool BiggerPattern) {
  assert(N->getOpcode() == ISD::AND &&
         "N must be a AND operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // FIXME: simplify-demanded-bits in DAGCombine will probably have
  // changed the AND node to a 32-bit mask operation. We'll have to
  // undo that as part of the transform here if we want to catch all
  // the opportunities.
  // Currently the NumberOfIgnoredLowBits argument helps to recover
  // from these situations when matching bigger pattern (bitfield insert).

  // For unsigned extracts, check for a shift right and mask
  uint64_t AndImm = 0;
  if (!isOpcWithIntImmediate(N, ISD::AND, AndImm))
    return false;

  const SDNode *Op0 = N->getOperand(0).getNode();

  // Because of simplify-demanded-bits in DAGCombine, the mask may have been
  // simplified. Try to undo that
  AndImm |= maskTrailingOnes<uint64_t>(NumberOfIgnoredLowBits);

  // The immediate is a mask of the low bits iff imm & (imm+1) == 0
  if (AndImm & (AndImm + 1))
    return false;

  bool ClampMSB = false;
  uint64_t SrlImm = 0;
  // Handle the SRL + ANY_EXTEND case.
  if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
      isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, SrlImm)) {
    // Extend the incoming operand of the SRL to 64-bit.
    Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
    // Make sure to clamp the MSB so that we preserve the semantics of the
    // original operations.
    ClampMSB = true;
  } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
             isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
                                   SrlImm)) {
    // If the shift result was truncated, we can still combine them.
    Opd0 = Op0->getOperand(0).getOperand(0);

    // Use the type of SRL node.
    VT = Opd0->getValueType(0);
  } else if (isOpcWithIntImmediate(Op0, ISD::SRL, SrlImm)) {
    Opd0 = Op0->getOperand(0);
    ClampMSB = (VT == MVT::i32);
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift right has been performed.
    // The resulting code will be at least as good as the original one
    // plus it may expose more opportunities for bitfield insert pattern.
    // FIXME: Currently we limit this to the bigger pattern, because
    // some optimizations expect AND and not UBFM.
    Opd0 = N->getOperand(0);
  } else
    return false;

  // Bail out on large immediates. This happens when no proper
  // combining/constant folding was performed.
  if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) {
    LLVM_DEBUG(
        (dbgs() << N
                << ": Found large shift immediate, this should not happen\n"));
    return false;
  }

  LSB = SrlImm;
  MSB = SrlImm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(AndImm)
                                 : countTrailingOnes<uint64_t>(AndImm)) -
        1;
  if (ClampMSB)
    // Since we're moving the extend before the right shift operation, we need
    // to clamp the MSB to make sure we don't shift in undefined bits instead of
    // the zeros which would get shifted in with the original right shift
    // operation.
    MSB = MSB > 31 ? 31 : MSB;

  Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
  return true;
}
static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
                                             SDValue &Opd0, unsigned &Immr,
                                             unsigned &Imms) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);

  EVT VT = N->getValueType(0);
  unsigned BitWidth = VT.getSizeInBits();
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  SDValue Op = N->getOperand(0);
  if (Op->getOpcode() == ISD::TRUNCATE) {
    Op = Op->getOperand(0);
    VT = Op->getValueType(0);
    BitWidth = VT.getSizeInBits();
  }

  uint64_t ShiftImm;
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRL, ShiftImm) &&
      !isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
    return false;

  unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
  if (ShiftImm + Width > BitWidth)
    return false;

  Opc = (VT == MVT::i32) ? AArch64::SBFMWri : AArch64::SBFMXri;
  Opd0 = Op.getOperand(0);
  Immr = ShiftImm;
  Imms = ShiftImm + Width - 1;
  return true;
}
static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
                                          SDValue &Opd0, unsigned &LSB,
                                          unsigned &MSB) {
  // We are looking for the following pattern which basically extracts several
  // continuous bits from the source value and places it from the LSB of the
  // destination value, all other bits of the destination value are set to zero:
  //
  // Value2 = AND Value, MaskImm
  // SRL Value2, ShiftImm
  //
  // with MaskImm >> ShiftImm to search for the bit width.
  //
  // This gets selected into a single UBFM:
  //
  // UBFM Value, ShiftImm, BitWide + SrlImm - 1

  if (N->getOpcode() != ISD::SRL)
    return false;

  uint64_t AndMask = 0;
  if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, AndMask))
    return false;

  Opd0 = N->getOperand(0).getOperand(0);

  uint64_t SrlImm = 0;
  if (!isIntImmediate(N->getOperand(1), SrlImm))
    return false;

  // Check whether we really have several bits extract here.
  unsigned BitWide = 64 - countLeadingOnes(~(AndMask >> SrlImm));
  if (BitWide && isMask_64(AndMask >> SrlImm)) {
    if (N->getValueType(0) == MVT::i32)
      Opc = AArch64::UBFMWri;
    else
      Opc = AArch64::UBFMXri;

    LSB = SrlImm;
    MSB = BitWide + SrlImm - 1;
    return true;
  }

  return false;
}
static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                       unsigned &Immr, unsigned &Imms,
                                       bool BiggerPattern) {
  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
         "N must be a SHR/SRA operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // Check for AND + SRL doing several bits extract.
  if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
    return true;

  // We're looking for a shift of a shift.
  uint64_t ShlImm = 0;
  uint64_t TruncBits = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, ShlImm)) {
    Opd0 = N->getOperand(0).getOperand(0);
  } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
             N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
    // We are looking for a shift of truncate. Truncate from i64 to i32 could
    // be considered as setting high 32 bits as zero. Our strategy here is to
    // always generate 64bit UBFM. This consistency will help the CSE pass
    // later find more redundancy.
    Opd0 = N->getOperand(0).getOperand(0);
    TruncBits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
    VT = Opd0.getValueType();
    assert(VT == MVT::i64 && "the promoted type should be i64");
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift left has been performed.
    // FIXME: Currently we limit this to the bigger pattern case,
    // because some optimizations expect AND and not UBFM
    Opd0 = N->getOperand(0);
  } else
    return false;

  // Missing combines/constant folding may have left us with strange
  // constants.
  if (ShlImm >= VT.getSizeInBits()) {
    LLVM_DEBUG(
        (dbgs() << N
                << ": Found large shift immediate, this should not happen\n"));
    return false;
  }

  uint64_t SrlImm = 0;
  if (!isIntImmediate(N->getOperand(1), SrlImm))
    return false;

  assert(SrlImm > 0 && SrlImm < VT.getSizeInBits() &&
         "bad amount in shift node!");
  int immr = SrlImm - ShlImm;
  Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
  Imms = VT.getSizeInBits() - ShlImm - TruncBits - 1;
  // SRA requires a signed extraction
  if (VT == MVT::i32)
    Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
  else
    Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
  return true;
}
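// Match a sign extension of an i32 arithmetic shift right to i64 and select
// it directly as a 64-bit SBFM on the widened shift operand.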
bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND);

  EVT VT = N->getValueType(0);
  EVT NarrowVT = N->getOperand(0)->getValueType(0);
  if (VT != MVT::i64 || NarrowVT != MVT::i32)
    return false;

  uint64_t ShiftImm;
  SDValue Op = N->getOperand(0);
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
    return false;

  SDLoc dl(N);
  // Extend the incoming operand of the shift to 64-bits.
  SDValue Opd0 = Widen(CurDAG, Op.getOperand(0));
  unsigned Immr = ShiftImm;
  unsigned Imms = NarrowVT.getSizeInBits() - 1;
  SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
                   CurDAG->getTargetConstant(Imms, dl, VT)};
  CurDAG->SelectNodeTo(N, AArch64::SBFMXri, VT, Ops);
  return true;
}
/// Try to form fcvtl2 instructions from a floating-point extend of a high-half
/// extract of a subvector.
bool AArch64DAGToDAGISel::tryHighFPExt(SDNode *N) {
  assert(N->getOpcode() == ISD::FP_EXTEND);

  // There are 2 forms of fcvtl2 - extend to double or extend to float.
  SDValue Extract = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT NarrowVT = Extract.getValueType();
  if ((VT != MVT::v2f64 || NarrowVT != MVT::v2f32) &&
      (VT != MVT::v4f32 || NarrowVT != MVT::v4f16))
    return false;

  // Optionally look past a bitcast.
  Extract = peekThroughBitcasts(Extract);
  if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  // Match extract from start of high half index.
  // Example: v8i16 -> v4i16 means the extract must begin at index 4.
  unsigned ExtractIndex = Extract.getConstantOperandVal(1);
  if (ExtractIndex != Extract.getValueType().getVectorNumElements())
    return false;

  auto Opcode = VT == MVT::v2f64 ? AArch64::FCVTLv4i32 : AArch64::FCVTLv8i16;
  CurDAG->SelectNodeTo(N, Opcode, VT, Extract.getOperand(0));
  return true;
}
static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
                                SDValue &Opd0, unsigned &Immr, unsigned &Imms,
                                unsigned NumberOfIgnoredLowBits = 0,
                                bool BiggerPattern = false) {
  if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
    return false;

  switch (N->getOpcode()) {
  default:
    if (!N->isMachineOpcode())
      return false;
    break;
  case ISD::AND:
    return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
                                      NumberOfIgnoredLowBits, BiggerPattern);
  case ISD::SRL:
  case ISD::SRA:
    return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);

  case ISD::SIGN_EXTEND_INREG:
    return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms);
  }

  unsigned NOpc = N->getMachineOpcode();
  switch (NOpc) {
  default:
    return false;
  case AArch64::SBFMWri:
  case AArch64::UBFMWri:
  case AArch64::SBFMXri:
  case AArch64::UBFMXri:
    Opc = NOpc;
    Opd0 = N->getOperand(0);
    Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
    Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
    return true;
  }
  return false;
}
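// Select a node matched by isBitfieldExtractOp as a UBFM/SBFM, inserting an
// EXTRACT_SUBREG when a 64-bit extract produces a 32-bit result.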
bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {
  unsigned Opc, Immr, Imms;
  SDValue Opd0;
  if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
    return false;

  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // If the bit extract operation is 64bit but the original type is 32bit, we
  // need to add one EXTRACT_SUBREG.
  if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
    SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
                       CurDAG->getTargetConstant(Imms, dl, MVT::i64)};

    SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
                                          MVT::i32, SDValue(BFM, 0), SubReg));
    return true;
  }

  SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
                   CurDAG->getTargetConstant(Imms, dl, VT)};
  CurDAG->SelectNodeTo(N, Opc, VT, Ops);
  return true;
}
/// Does DstMask form a complementary pair with the mask provided by
/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
/// this asks whether DstMask zeroes precisely those bits that will be set by
/// the other half.
static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
                              unsigned NumberOfIgnoredHighBits, EVT VT) {
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "i32 or i64 mask type expected!");
  unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;

  APInt SignificantDstMask = APInt(BitWidth, DstMask);
  APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);

  return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
         (SignificantDstMask | SignificantBitsToBeInserted).isAllOnes();
}
// Look for bits that will be useful for later uses.
// A bit is considered useless as soon as it is dropped and never used
// again before it has been dropped.
// E.g., looking for the useful bits of x:
// 1. y = x & 0x7
// 2. z = y >> 2
// After #1, the useful bits of x are 0x7, and they live through y.
// After #2, the useful bits of x are 0x4.
// However, if x is used by an unpredictable instruction, then all its bits
// are useful.
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
  UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
  getUsefulBits(Op, UsefulBits, Depth + 1);
}
static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
                                             uint64_t Imm, uint64_t MSB,
                                             unsigned Depth) {
  // inherit the bitwidth value
  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  if (MSB >= Imm) {
    OpUsefulBits <<= MSB - Imm + 1;
    --OpUsefulBits;
    // The interesting part will be in the lower part of the result
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was starting at Imm in the argument
    OpUsefulBits <<= Imm;
  } else {
    OpUsefulBits <<= MSB + 1;
    --OpUsefulBits;
    // The interesting part will be shifted in the result
    OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was at zero in the argument
    OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
  }

  UsefulBits &= OpUsefulBits;
}
static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
                                  unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();

  getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
}
static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t ShiftTypeAndValue =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  APInt Mask(UsefulBits);
  Mask.clearAllBits();
  Mask.flipAllBits();

  if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
    // Shift Left
    uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
    Mask <<= ShiftAmt;
    getUsefulBits(Op, Mask, Depth + 1);
    Mask.lshrInPlace(ShiftAmt);
  } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
    // Shift Right
    // We do not handle AArch64_AM::ASR, because the sign will change the
    // number of useful bits
    uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
    Mask.lshrInPlace(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask <<= ShiftAmt;
  } else
    return;

  UsefulBits &= Mask;
}
static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
                                 unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();

  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  APInt ResultUsefulBits(UsefulBits.getBitWidth(), 0);
  ResultUsefulBits.flipAllBits();
  APInt Mask(UsefulBits.getBitWidth(), 0);

  getUsefulBits(Op, ResultUsefulBits, Depth + 1);

  if (MSB >= Imm) {
    // The instruction is a BFXIL.
    uint64_t Width = MSB - Imm + 1;
    uint64_t LSB = Imm;

    OpUsefulBits <<= Width;
    --OpUsefulBits;

    if (Op.getOperand(1) == Orig) {
      // Copy the low bits from the result to bits starting from LSB.
      Mask = ResultUsefulBits & OpUsefulBits;
      Mask <<= LSB;
    }

    if (Op.getOperand(0) == Orig)
      // Bits starting from LSB in the input contribute to the result.
      Mask |= (ResultUsefulBits & ~OpUsefulBits);
  } else {
    // The instruction is a BFI.
    uint64_t Width = MSB + 1;
    uint64_t LSB = UsefulBits.getBitWidth() - Imm;

    OpUsefulBits <<= Width;
    --OpUsefulBits;
    OpUsefulBits <<= LSB;

    if (Op.getOperand(1) == Orig) {
      // Copy the bits from the result to the zero bits.
      Mask = ResultUsefulBits & OpUsefulBits;
      Mask.lshrInPlace(LSB);
    }

    if (Op.getOperand(0) == Orig)
      Mask |= (ResultUsefulBits & ~OpUsefulBits);
  }

  UsefulBits &= Mask;
}
static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
                                SDValue Orig, unsigned Depth) {

  // Users of this node should have already been instruction selected
  // FIXME: Can we turn that into an assert?
  if (!UserNode->isMachineOpcode())
    return;

  switch (UserNode->getMachineOpcode()) {
  default:
    return;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
  case AArch64::ANDWri:
  case AArch64::ANDXri:
    // We increment Depth only when we call the getUsefulBits
    return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case AArch64::UBFMWri:
  case AArch64::UBFMXri:
    return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);

  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
    if (UserNode->getOperand(0) != Orig && UserNode->getOperand(1) == Orig)
      getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
                                        Depth);
    return;
  case AArch64::BFMWri:
  case AArch64::BFMXri:
    return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);

  case AArch64::STRBBui:
  case AArch64::STURBBi:
    if (UserNode->getOperand(0) != Orig)
      return;
    UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);
    return;

  case AArch64::STRHHui:
  case AArch64::STURHHi:
    if (UserNode->getOperand(0) != Orig)
      return;
    UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xffff);
    return;
  }
}
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return;
  // Initialize UsefulBits
  if (!Depth) {
    unsigned Bitwidth = Op.getScalarValueSizeInBits();
    // At the beginning, assume every produced bits is useful
    UsefulBits = APInt(Bitwidth, 0);
    UsefulBits.flipAllBits();
  }
  APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);

  for (SDNode *Node : Op.getNode()->uses()) {
    // A use cannot produce useful bits
    APInt UsefulBitsForUse = APInt(UsefulBits);
    getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
    UsersUsefulBits |= UsefulBitsForUse;
  }
  // UsefulBits contains the produced bits that are meaningful for the
  // current definition, thus a user cannot make a bit meaningful at
  // this point
  UsefulBits &= UsersUsefulBits;
}
/// Create a machine node performing a notional SHL of Op by ShlAmount. If
/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
/// 0, return Op unchanged.
static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
  if (ShlAmount == 0)
    return Op;

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned BitWidth = VT.getSizeInBits();
  unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;

  SDNode *ShiftNode;
  if (ShlAmount > 0) {
    // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
    ShiftNode = CurDAG->getMachineNode(
        UBFMOpc, dl, VT, Op,
        CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
        CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
  } else {
    // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
    assert(ShlAmount < 0 && "expected right shift");
    int ShrAmount = -ShlAmount;
    ShiftNode = CurDAG->getMachineNode(
        UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
        CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
  }

  return SDValue(ShiftNode, 0);
}
/// Does this tree qualify as an attempt to move a bitfield into position,
/// essentially "(and (shl VAL, N), Mask)".
static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
                                    bool BiggerPattern,
                                    SDValue &Src, int &ShiftAmount,
                                    int &MaskWidth) {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  (void)BitWidth;
  assert(BitWidth == 32 || BitWidth == 64);

  KnownBits Known = CurDAG->computeKnownBits(Op);

  // Non-zero in the sense that they're not provably zero, which is the key
  // point if we want to use this value
  uint64_t NonZeroBits = (~Known.Zero).getZExtValue();

  // Discard a constant AND mask if present. It's safe because the node will
  // already have been factored into the computeKnownBits calculation above.
  uint64_t AndImm;
  if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
    assert((~APInt(BitWidth, AndImm) & ~Known.Zero) == 0);
    Op = Op.getOperand(0);
  }

  // Don't match if the SHL has more than one use, since then we'll end up
  // generating SHL+UBFIZ instead of just keeping SHL+AND.
  if (!BiggerPattern && !Op.hasOneUse())
    return false;

  uint64_t ShlImm;
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
    return false;
  Op = Op.getOperand(0);

  if (!isShiftedMask_64(NonZeroBits))
    return false;

  ShiftAmount = countTrailingZeros(NonZeroBits);
  MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);

  // BFI encompasses sufficiently many nodes that it's worth inserting an extra
  // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
  // amount. BiggerPattern is true when this pattern is being matched for BFI,
  // BiggerPattern is false when this pattern is being matched for UBFIZ, in
  // which case it is not profitable to insert an extra shift.
  if (ShlImm - ShiftAmount != 0 && !BiggerPattern)
    return false;
  Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);

  return true;
}
static bool isShiftedMask(uint64_t Mask, EVT VT) {
  assert(VT == MVT::i32 || VT == MVT::i64);
  if (VT == MVT::i32)
    return isShiftedMask_32(Mask);
  return isShiftedMask_64(Mask);
}
// Generate a BFI/BFXIL from 'or (and X, MaskImm), OrImm' iff the value being
// inserted only sets known zero bits.
static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expect a OR operation");

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)
    return false;

  unsigned BitWidth = VT.getSizeInBits();

  uint64_t OrImm;
  if (!isOpcWithIntImmediate(N, ISD::OR, OrImm))
    return false;

  // Skip this transformation if the ORR immediate can be encoded in the ORR.
  // Otherwise, we'll trade an AND+ORR for ORR+BFI/BFXIL, which is most likely
  // performance neutral.
  if (AArch64_AM::isLogicalImmediate(OrImm, BitWidth))
    return false;

  uint64_t MaskImm;
  SDValue And = N->getOperand(0);
  // Must be a single use AND with an immediate operand.
  if (!And.hasOneUse() ||
      !isOpcWithIntImmediate(And.getNode(), ISD::AND, MaskImm))
    return false;

  // Compute the Known Zero for the AND as this allows us to catch more general
  // cases than just looking for AND with imm.
  KnownBits Known = CurDAG->computeKnownBits(And);

  // Non-zero in the sense that they're not provably zero, which is the key
  // point if we want to use this value.
  uint64_t NotKnownZero = (~Known.Zero).getZExtValue();

  // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
  if (!isShiftedMask(Known.Zero.getZExtValue(), VT))
    return false;

  // The bits being inserted must only set those bits that are known to be zero.
  if ((OrImm & NotKnownZero) != 0) {
    // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
    // currently handle this case.
    return false;
  }

  // BFI/BFXIL dst, src, #lsb, #width.
  int LSB = countTrailingOnes(NotKnownZero);
  int Width = BitWidth - APInt(BitWidth, NotKnownZero).countPopulation();

  // BFI/BFXIL is an alias of BFM, so translate to BFM operands.
  unsigned ImmR = (BitWidth - LSB) % BitWidth;
  unsigned ImmS = Width - 1;

  // If we're creating a BFI instruction avoid cases where we need more
  // instructions to materialize the BFI constant as compared to the original
  // ORR. A BFXIL will use the same constant as the original ORR, so the code
  // should be no worse in this case.
  bool IsBFI = LSB != 0;
  uint64_t BFIImm = OrImm >> LSB;
  if (IsBFI && !AArch64_AM::isLogicalImmediate(BFIImm, BitWidth)) {
    // We have a BFI instruction and we know the constant can't be materialized
    // with a ORR-immediate with the zero register.
    unsigned OrChunks = 0, BFIChunks = 0;
    for (unsigned Shift = 0; Shift < BitWidth; Shift += 16) {
      if (((OrImm >> Shift) & 0xFFFF) != 0)
        ++OrChunks;
      if (((BFIImm >> Shift) & 0xFFFF) != 0)
        ++BFIChunks;
    }
    if (BFIChunks > OrChunks)
      return false;
  }

  // Materialize the constant to be inserted.
  SDLoc DL(N);
  unsigned MOVIOpc = VT == MVT::i32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
  SDNode *MOVI = CurDAG->getMachineNode(
      MOVIOpc, DL, VT, CurDAG->getTargetConstant(BFIImm, DL, VT));

  // Create the BFI/BFXIL instruction.
  SDValue Ops[] = {And.getOperand(0), SDValue(MOVI, 0),
                   CurDAG->getTargetConstant(ImmR, DL, VT),
                   CurDAG->getTargetConstant(ImmS, DL, VT)};
  unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
  CurDAG->SelectNodeTo(N, Opc, VT, Ops);
  return true;
}
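// Try to turn an OR of two suitably masked or shifted values into a single
// BFM (BFI/BFXIL), checking both operand orders and both pattern sizes.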
static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
                                      SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expect a OR operation");

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)
    return false;

  unsigned BitWidth = VT.getSizeInBits();

  // Because of simplify-demanded-bits in DAGCombine, involved masks may not
  // have the expected shape. Try to undo that.

  unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
  unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();

  // Given a OR operation, check if we have the following pattern
  // ubfm c, b, imm, imm2 (or something that does the same jobs, see
  //                       isBitfieldExtractOp)
  // d = e & mask2 ; where mask is a binary sequence of 1..10..0 and
  //                 countTrailingZeros(mask2) == imm2 - imm + 1
  // f = d | c
  // if yes, replace the OR instruction with:
  // f = BFM Opd0, Opd1, LSB, MSB ; where LSB = imm, and MSB = imm2
  //
  // OR is commutative, check all combinations of operand order and values of
  // BiggerPattern, i.e.
  //     Opd0, Opd1, BiggerPattern=false
  //     Opd1, Opd0, BiggerPattern=false
  //     Opd0, Opd1, BiggerPattern=true
  //     Opd1, Opd0, BiggerPattern=true
  // Several of these combinations may match, so check with BiggerPattern=false
  // first since that will produce better results by matching more instructions
  // and/or inserting fewer extra instructions.
  for (int I = 0; I < 4; ++I) {

    SDValue Dst, Src;
    unsigned ImmR, ImmS;
    bool BiggerPattern = I / 2;
    SDValue OrOpd0Val = N->getOperand(I % 2);
    SDNode *OrOpd0 = OrOpd0Val.getNode();
    SDValue OrOpd1Val = N->getOperand((I + 1) % 2);
    SDNode *OrOpd1 = OrOpd1Val.getNode();

    unsigned BFXOpc;
    int DstLSB, Width;
    if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
                            NumberOfIgnoredLowBits, BiggerPattern)) {
      // Check that the returned opcode is compatible with the pattern,
      // i.e., same type and zero extended (U and not S)
      if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
          (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
        continue;

      // Compute the width of the bitfield insertion
      DstLSB = 0;
      Width = ImmS - ImmR + 1;
      // FIXME: This constraint is to catch bitfield insertion we may
      // want to widen the pattern if we want to grab general bitfield
      // insertion.
      if (Width <= 0)
        continue;

      // If the mask on the insertee is correct, we have a BFXIL operation. We
      // can share the ImmR and ImmS values from the already-computed UBFM.
    } else if (isBitfieldPositioningOp(CurDAG, OrOpd0Val,
                                       BiggerPattern,
                                       Src, DstLSB, Width)) {
      ImmR = (BitWidth - DstLSB) % BitWidth;
      ImmS = Width - 1;
    } else
      continue;

    // Check the second part of the pattern
    EVT VT = OrOpd1Val.getValueType();
    assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");

    // Compute the Known Zero for the candidate of the first operand.
    // This allows to catch more general case than just looking for
    // AND with imm. Indeed, simplify-demanded-bits may have removed
    // the AND instruction because it proves it was useless.
    KnownBits Known = CurDAG->computeKnownBits(OrOpd1Val);

    // Check if there is enough room for the second operand to appear
    // in the first one
    APInt BitsToBeInserted =
        APInt::getBitsSet(Known.getBitWidth(), DstLSB, DstLSB + Width);

    if ((BitsToBeInserted & ~Known.Zero) != 0)
      continue;

    // Set the first operand
    uint64_t Imm;
    if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
        isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
      // In that case, we can eliminate the AND
      Dst = OrOpd1->getOperand(0);
    else
      // Maybe the AND has been removed by simplify-demanded-bits
      // or is useful because it discards more bits
      Dst = OrOpd1Val;

    // both parts match
    SDLoc DL(N);
    SDValue Ops[] = {Dst, Src, CurDAG->getTargetConstant(ImmR, DL, VT),
                     CurDAG->getTargetConstant(ImmS, DL, VT)};
    unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
    CurDAG->SelectNodeTo(N, Opc, VT, Ops);
    return true;
  }

  // Generate a BFXIL from 'or (and X, Mask0Imm), (and Y, Mask1Imm)' iff
  // Mask0Imm and ~Mask1Imm are equivalent and one of the MaskImms is a shifted
  // mask (e.g., 0x000ffff0).
  uint64_t Mask0Imm, Mask1Imm;
  SDValue And0 = N->getOperand(0);
  SDValue And1 = N->getOperand(1);
  if (And0.hasOneUse() && And1.hasOneUse() &&
      isOpcWithIntImmediate(And0.getNode(), ISD::AND, Mask0Imm) &&
      isOpcWithIntImmediate(And1.getNode(), ISD::AND, Mask1Imm) &&
      APInt(BitWidth, Mask0Imm) == ~APInt(BitWidth, Mask1Imm) &&
      (isShiftedMask(Mask0Imm, VT) || isShiftedMask(Mask1Imm, VT))) {

    // ORR is commutative, so canonicalize to the form 'or (and X, Mask0Imm),
    // (and Y, Mask1Imm)' where Mask1Imm is the shifted mask masking off the
    // bits to be inserted.
    if (isShiftedMask(Mask0Imm, VT)) {
      std::swap(And0, And1);
      std::swap(Mask0Imm, Mask1Imm);
    }

    SDValue Src = And1->getOperand(0);
    SDValue Dst = And0->getOperand(0);
    unsigned LSB = countTrailingZeros(Mask1Imm);
    int Width = BitWidth - APInt(BitWidth, Mask0Imm).countPopulation();

    // The BFXIL inserts the low-order bits from a source register, so right
    // shift the needed bits into place.
    SDLoc DL(N);
    unsigned ShiftOpc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
    SDNode *LSR = CurDAG->getMachineNode(
        ShiftOpc, DL, VT, Src, CurDAG->getTargetConstant(LSB, DL, VT),
        CurDAG->getTargetConstant(BitWidth - 1, DL, VT));

    // BFXIL is an alias of BFM, so translate to BFM operands.
    unsigned ImmR = (BitWidth - LSB) % BitWidth;
    unsigned ImmS = Width - 1;

    // Create the BFXIL instruction.
    SDValue Ops[] = {Dst, SDValue(LSR, 0),
                     CurDAG->getTargetConstant(ImmR, DL, VT),
                     CurDAG->getTargetConstant(ImmS, DL, VT)};
    unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
    CurDAG->SelectNodeTo(N, Opc, VT, Ops);
    return true;
  }

  return false;
}
bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
  if (N->getOpcode() != ISD::OR)
    return false;

  APInt NUsefulBits;
  getUsefulBits(SDValue(N, 0), NUsefulBits);

  // If all bits are not useful, just return UNDEF.
  if (!NUsefulBits) {
    CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
    return true;
  }

  if (tryBitfieldInsertOpFromOr(N, NUsefulBits, CurDAG))
    return true;

  return tryBitfieldInsertOpFromOrAndImm(N, CurDAG);
}
/// SelectBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
/// equivalent of a left shift by a constant amount followed by an and masking
/// out a contiguous set of bits.
bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
  if (N->getOpcode() != ISD::AND)
    return false;

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)
    return false;

  SDValue Op0;
  int DstLSB, Width;
  if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
                               Op0, DstLSB, Width))
    return false;

  // ImmR is the rotate right amount.
  unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
  // ImmS is the most significant bit of the source to be moved.
  unsigned ImmS = Width - 1;

  SDLoc DL(N);
  SDValue Ops[] = {Op0, CurDAG->getTargetConstant(ImmR, DL, VT),
                   CurDAG->getTargetConstant(ImmS, DL, VT)};
  unsigned Opc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
  CurDAG->SelectNodeTo(N, Opc, VT, Ops);
  return true;
}
/// tryShiftAmountMod - Take advantage of built-in mod of shift amount in
/// variable shift/rotate instructions.
bool AArch64DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
  EVT VT = N->getValueType(0);

  unsigned Opc;
  switch (N->getOpcode()) {
  case ISD::ROTR:
    Opc = (VT == MVT::i32) ? AArch64::RORVWr : AArch64::RORVXr;
    break;
  case ISD::SHL:
    Opc = (VT == MVT::i32) ? AArch64::LSLVWr : AArch64::LSLVXr;
    break;
  case ISD::SRL:
    Opc = (VT == MVT::i32) ? AArch64::LSRVWr : AArch64::LSRVXr;
    break;
  case ISD::SRA:
    Opc = (VT == MVT::i32) ? AArch64::ASRVWr : AArch64::ASRVXr;
    break;
  default:
    return false;
  }

  uint64_t Size;
  uint64_t Bits;
  if (VT == MVT::i32) {
    Bits = 5;
    Size = 32;
  } else if (VT == MVT::i64) {
    Bits = 6;
    Size = 64;
  } else
    return false;

  SDValue ShiftAmt = N->getOperand(1);
  SDLoc DL(N);
  SDValue NewShiftAmt;

  // Skip over an extend of the shift amount.
  if (ShiftAmt->getOpcode() == ISD::ZERO_EXTEND ||
      ShiftAmt->getOpcode() == ISD::ANY_EXTEND)
    ShiftAmt = ShiftAmt->getOperand(0);

  if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
    SDValue Add0 = ShiftAmt->getOperand(0);
    SDValue Add1 = ShiftAmt->getOperand(1);
    uint64_t Add0Imm;
    uint64_t Add1Imm;
    if (isIntImmediate(Add1, Add1Imm) && (Add1Imm % Size == 0)) {
      // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
      // to avoid the ADD/SUB.
      NewShiftAmt = Add0;
    } else if (ShiftAmt->getOpcode() == ISD::SUB &&
               isIntImmediate(Add0, Add0Imm) && Add0Imm != 0 &&
               (Add0Imm % Size == 0)) {
      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
      // to generate a NEG instead of a SUB from a constant.
      unsigned NegOpc;
      unsigned ZeroReg;
      EVT SubVT = ShiftAmt->getValueType(0);
      if (SubVT == MVT::i32) {
        NegOpc = AArch64::SUBWrr;
        ZeroReg = AArch64::WZR;
      } else {
        assert(SubVT == MVT::i64);
        NegOpc = AArch64::SUBXrr;
        ZeroReg = AArch64::XZR;
      }
      SDValue Zero =
          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, ZeroReg, SubVT);
      MachineSDNode *Neg =
          CurDAG->getMachineNode(NegOpc, DL, SubVT, Zero, Add1);
      NewShiftAmt = SDValue(Neg, 0);
    } else if (ShiftAmt->getOpcode() == ISD::SUB &&
               isIntImmediate(Add0, Add0Imm) && (Add0Imm % Size == Size - 1)) {
      // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
      // to generate a NOT instead of a SUB from a constant.
      unsigned NotOpc;
      unsigned ZeroReg;
      EVT SubVT = ShiftAmt->getValueType(0);
      if (SubVT == MVT::i32) {
        NotOpc = AArch64::ORNWrr;
        ZeroReg = AArch64::WZR;
      } else {
        assert(SubVT == MVT::i64);
        NotOpc = AArch64::ORNXrr;
        ZeroReg = AArch64::XZR;
      }
      SDValue Zero =
          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, ZeroReg, SubVT);
      MachineSDNode *Not =
          CurDAG->getMachineNode(NotOpc, DL, SubVT, Zero, Add1);
      NewShiftAmt = SDValue(Not, 0);
    } else
      return false;
  } else {
    // If the shift amount is masked with an AND, check that the mask covers the
    // bits that are implicitly ANDed off by the above opcodes and if so, skip
    // the AND.
    uint64_t MaskImm;
    if (!isOpcWithIntImmediate(ShiftAmt.getNode(), ISD::AND, MaskImm) &&
        !isOpcWithIntImmediate(ShiftAmt.getNode(), AArch64ISD::ANDS, MaskImm))
      return false;

    if (countTrailingOnes(MaskImm) < Bits)
      return false;

    NewShiftAmt = ShiftAmt->getOperand(0);
  }

  // Narrow/widen the shift amount to match the size of the shift operation.
  if (VT == MVT::i32)
    NewShiftAmt = narrowIfNeeded(CurDAG, NewShiftAmt);
  else if (VT == MVT::i64 && NewShiftAmt->getValueType(0) == MVT::i32) {
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, DL, MVT::i32);
    MachineSDNode *Ext = CurDAG->getMachineNode(
        AArch64::SUBREG_TO_REG, DL, VT,
        CurDAG->getTargetConstant(0, DL, MVT::i64), NewShiftAmt, SubReg);
    NewShiftAmt = SDValue(Ext, 0);
  }

  SDValue Ops[] = {N->getOperand(0), NewShiftAmt};
  CurDAG->SelectNodeTo(N, Opc, VT, Ops);
  return true;
}
bool
AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                              unsigned RegWidth) {
  APFloat FVal(0.0);
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    FVal = CN->getValueAPF();
  else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
    // Some otherwise illegal constants are allowed in this case.
    if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
        !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
      return false;

    ConstantPoolSDNode *CN =
        dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
    FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
  } else
    return false;

  // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
  // is between 1 and 32 for a destination w-register, or 1 and 64 for an
  // x-register.
  //
  // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
  // want THIS_NODE to be 2^fbits. This is much easier to deal with using
  // integers.
  bool IsExact;

  // fbits is between 1 and 64 in the worst-case, which means the fmul
  // could have 2^64 as an actual operand. Need 65 bits of precision.
  APSInt IntVal(65, true);
  FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);

  // N.b. isPowerOf2 also checks for > 0.
  if (!IsExact || !IntVal.isPowerOf2()) return false;
  unsigned FBits = IntVal.logBase2();

  // Checks above should have guaranteed that we haven't lost information in
  // finding FBits, but it must still be in range.
  if (FBits == 0 || FBits > RegWidth) return false;

  FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
  return true;
}
// Inspects a register string of the form o0:op1:CRn:CRm:op2 gets the fields
// of the string and obtains the integer values from them and combines these
// into a single value to be used in the MRS/MSR instruction.
static int getIntOperandFromRegisterString(StringRef RegString) {
  SmallVector<StringRef, 5> Fields;
  RegString.split(Fields, ':');

  if (Fields.size() == 1)
    return -1;

  assert(Fields.size() == 5
            && "Invalid number of fields in read register string");

  SmallVector<int, 5> Ops;
  bool AllIntFields = true;

  for (StringRef Field : Fields) {
    unsigned IntField;
    AllIntFields &= !Field.getAsInteger(10, IntField);
    Ops.push_back(IntField);
  }

  assert(AllIntFields &&
          "Unexpected non-integer value in special register string.");
  (void)AllIntFields;

  // Need to combine the integer fields of the string into a single value
  // based on the bit encoding of MRS/MSR instruction.
  return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
         (Ops[3] << 3) | (Ops[4]);
}
// Lower the read_register intrinsic to an MRS instruction node if the special
// register string argument is either of the form detailed in the ALCE (the
// form described in getIntOperandFromRegisterString) or is a named register
// known by the MRS SysReg mapper.
bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
  const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
  const auto *RegString = cast<MDString>(MD->getMD()->getOperand(0));
  SDLoc DL(N);

  int Reg = getIntOperandFromRegisterString(RegString->getString());
  if (Reg != -1) {
    ReplaceNode(N, CurDAG->getMachineNode(
                       AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
                       CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                       N->getOperand(0)));
    return true;
  }

  // Use the sysreg mapper to map the remaining possible strings to the
  // value for the register to be used for the instruction operand.
  auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
  if (TheReg && TheReg->Readable &&
      TheReg->haveFeatures(Subtarget->getFeatureBits()))
    Reg = TheReg->Encoding;
  else
    Reg = AArch64SysReg::parseGenericRegister(RegString->getString());

  if (Reg != -1) {
    ReplaceNode(N, CurDAG->getMachineNode(
                       AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
                       CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                       N->getOperand(0)));
    return true;
  }

  if (RegString->getString() == "pc") {
    ReplaceNode(N, CurDAG->getMachineNode(
                       AArch64::ADR, DL, N->getSimpleValueType(0), MVT::Other,
                       CurDAG->getTargetConstant(0, DL, MVT::i32),
                       N->getOperand(0)));
    return true;
  }

  return false;
}
// Lower the write_register intrinsic to an MSR instruction node if the special
// register string argument is either of the form detailed in the ALCE (the
// form described in getIntOperandFromRegisterString) or is a named register
// known by the MSR SysReg mapper.
bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
  const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
  const auto *RegString = cast<MDString>(MD->getMD()->getOperand(0));
  SDLoc DL(N);

  int Reg = getIntOperandFromRegisterString(RegString->getString());
  if (Reg != -1) {
    ReplaceNode(
        N, CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
                                  CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                                  N->getOperand(2), N->getOperand(0)));
    return true;
  }

  // Check if the register was one of those allowed as the pstatefield value in
  // the MSR (immediate) instruction. To accept the values allowed in the
  // pstatefield for the MSR (immediate) instruction, we also require that an
  // immediate value has been provided as an argument, we know that this is
  // the case as it has been ensured by semantic checking.
  auto PMapper = AArch64PState::lookupPStateByName(RegString->getString());
  if (PMapper) {
    assert (isa<ConstantSDNode>(N->getOperand(2))
            && "Expected a constant integer expression.");
    unsigned Reg = PMapper->Encoding;
    uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
    unsigned State;
    if (Reg == AArch64PState::PAN || Reg == AArch64PState::UAO || Reg == AArch64PState::SSBS) {
      assert(Immed < 2 && "Bad imm");
      State = AArch64::MSRpstateImm1;
    } else {
      assert(Immed < 16 && "Bad imm");
      State = AArch64::MSRpstateImm4;
    }
    ReplaceNode(N, CurDAG->getMachineNode(
                       State, DL, MVT::Other,
                       CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                       CurDAG->getTargetConstant(Immed, DL, MVT::i16),
                       N->getOperand(0)));
    return true;
  }

  // Use the sysreg mapper to attempt to map the remaining possible strings
  // to the value for the register to be used for the MSR (register)
  // instruction operand.
  auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
  if (TheReg && TheReg->Writeable &&
      TheReg->haveFeatures(Subtarget->getFeatureBits()))
    Reg = TheReg->Encoding;
  else
    Reg = AArch64SysReg::parseGenericRegister(RegString->getString());

  if (Reg != -1) {
    ReplaceNode(N, CurDAG->getMachineNode(
                       AArch64::MSR, DL, MVT::Other,
                       CurDAG->getTargetConstant(Reg, DL, MVT::i32),
                       N->getOperand(2), N->getOperand(0)));
    return true;
  }

  return false;
}
/// We've got special pseudo-instructions for these
bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
  unsigned Opcode;
  EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();

  // Leave IR for LSE if subtarget supports it.
  if (Subtarget->hasLSE()) return false;

  if (MemTy == MVT::i8)
    Opcode = AArch64::CMP_SWAP_8;
  else if (MemTy == MVT::i16)
    Opcode = AArch64::CMP_SWAP_16;
  else if (MemTy == MVT::i32)
    Opcode = AArch64::CMP_SWAP_32;
  else if (MemTy == MVT::i64)
    Opcode = AArch64::CMP_SWAP_64;
  else
    llvm_unreachable("Unknown AtomicCmpSwap type");

  MVT RegTy = MemTy == MVT::i64 ? MVT::i64 : MVT::i32;
  SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
                   N->getOperand(0)};
  SDNode *CmpSwap = CurDAG->getMachineNode(
      Opcode, SDLoc(N),
      CurDAG->getVTList(RegTy, MVT::i32, MVT::Other), Ops);

  MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});

  ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
  ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
  CurDAG->RemoveDeadNode(N);

  return true;
}
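// Match an SVE ADD/SUB immediate: an 8-bit unsigned value, optionally
// shifted left by 8 for the wider element types.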
bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm,
                                             SDValue &Shift) {
  if (!isa<ConstantSDNode>(N))
    return false;

  SDLoc DL(N);
  uint64_t Val = cast<ConstantSDNode>(N)
                     ->getAPIntValue()
                     .truncOrSelf(VT.getFixedSizeInBits())
                     .getZExtValue();

  switch (VT.SimpleTy) {
  case MVT::i8:
    // All immediates are supported.
    Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
    Imm = CurDAG->getTargetConstant(Val, DL, MVT::i32);
    return true;
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
    // Support 8bit unsigned immediates.
    if (Val <= 255) {
      Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
      Imm = CurDAG->getTargetConstant(Val, DL, MVT::i32);
      return true;
    }
    // Support 16bit unsigned immediates that are a multiple of 256.
    if (Val <= 65280 && Val % 256 == 0) {
      Shift = CurDAG->getTargetConstant(8, DL, MVT::i32);
      Imm = CurDAG->getTargetConstant(Val >> 8, DL, MVT::i32);
      return true;
    }
    break;
  default:
    break;
  }

  return false;
}
bool AArch64DAGToDAGISel::SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm,
                                             SDValue &Shift) {
  if (!isa<ConstantSDNode>(N))
    return false;

  SDLoc DL(N);
  int64_t Val = cast<ConstantSDNode>(N)
                    ->getAPIntValue()
                    .truncOrSelf(VT.getFixedSizeInBits())
                    .getSExtValue();

  switch (VT.SimpleTy) {
  case MVT::i8:
    // All immediates are supported.
    Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
    Imm = CurDAG->getTargetConstant(Val & 0xFF, DL, MVT::i32);
    return true;
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
    // Support 8bit signed immediates.
    if (Val >= -128 && Val <= 127) {
      Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
      Imm = CurDAG->getTargetConstant(Val & 0xFF, DL, MVT::i32);
      return true;
    }
    // Support 16bit signed immediates that are a multiple of 256.
    if (Val >= -32768 && Val <= 32512 && Val % 256 == 0) {
      Shift = CurDAG->getTargetConstant(8, DL, MVT::i32);
      Imm = CurDAG->getTargetConstant((Val >> 8) & 0xFF, DL, MVT::i32);
      return true;
    }
    break;
  default:
    break;
  }

  return false;
}
bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDValue N, SDValue &Imm) {
  if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
    int64_t ImmVal = CNode->getSExtValue();
    SDLoc DL(N);
    if (ImmVal >= -128 && ImmVal < 128) {
      Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
      return true;
    }
  }
  return false;
}
bool AArch64DAGToDAGISel::SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm) {
  if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
    uint64_t ImmVal = CNode->getZExtValue();

    switch (VT.SimpleTy) {
    case MVT::i8:
      ImmVal &= 0xFF;
      break;
    case MVT::i16:
      ImmVal &= 0xFFFF;
      break;
    case MVT::i32:
      ImmVal &= 0xFFFFFFFF;
      break;
    case MVT::i64:
      break;
    default:
      llvm_unreachable("Unexpected type");
    }

    if (ImmVal < 256) {
      Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i32);
      return true;
    }
  }
  return false;
}
bool AArch64DAGToDAGISel::SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm,
                                              bool Invert) {
  if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
    uint64_t ImmVal = CNode->getZExtValue();
    SDLoc DL(N);

    if (Invert)
      ImmVal = ~ImmVal;

    // Shift mask depending on type size.
    switch (VT.SimpleTy) {
    case MVT::i8:
      ImmVal &= 0xFF;
      ImmVal |= ImmVal << 8;
      ImmVal |= ImmVal << 16;
      ImmVal |= ImmVal << 32;
      break;
    case MVT::i16:
      ImmVal &= 0xFFFF;
      ImmVal |= ImmVal << 16;
      ImmVal |= ImmVal << 32;
      break;
    case MVT::i32:
      ImmVal &= 0xFFFFFFFF;
      ImmVal |= ImmVal << 32;
      break;
    case MVT::i64:
      break;
    default:
      llvm_unreachable("Unexpected type");
    }

    uint64_t encoding;
    if (AArch64_AM::processLogicalImmediate(ImmVal, 64, encoding)) {
      Imm = CurDAG->getTargetConstant(encoding, DL, MVT::i64);
      return true;
    }
  }
  return false;
}
// SVE shift intrinsics allow shift amounts larger than the element's bitwidth.
// Rather than attempt to normalise everything we can sometimes saturate the
// shift amount during selection. This function also allows for consistent
// isel patterns by ensuring the resulting "Imm" node is of the i32 type
// required by the instructions.
bool AArch64DAGToDAGISel::SelectSVEShiftImm(SDValue N, uint64_t Low,
                                            uint64_t High, bool AllowSaturation,
                                            SDValue &Imm) {
  if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
    uint64_t ImmVal = CN->getZExtValue();

    // Reject shift amounts that are too small.
    if (ImmVal < Low)
      return false;

    // Reject or saturate shift amounts that are too big.
    if (ImmVal > High) {
      if (!AllowSaturation)
        return false;
      ImmVal = High;
    }

    Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i32);
    return true;
  }

  return false;
}
bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) {
  // tagp(FrameIndex, IRGstack, tag_offset):
  // since the offset between FrameIndex and IRGstack is a compile-time
  // constant, this can be lowered to a single ADDG instruction.
  if (!(isa<FrameIndexSDNode>(N->getOperand(1)))) {
    return false;
  }

  SDValue IRG_SP = N->getOperand(2);
  if (IRG_SP->getOpcode() != ISD::INTRINSIC_W_CHAIN ||
      cast<ConstantSDNode>(IRG_SP->getOperand(1))->getZExtValue() !=
          Intrinsic::aarch64_irg_sp) {
    return false;
  }

  const TargetLowering *TLI = getTargetLowering();
  SDLoc DL(N);
  int FI = cast<FrameIndexSDNode>(N->getOperand(1))->getIndex();
  SDValue FiOp = CurDAG->getTargetFrameIndex(
      FI, TLI->getPointerTy(CurDAG->getDataLayout()));
  int TagOffset = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();

  SDNode *Out = CurDAG->getMachineNode(
      AArch64::TAGPstack, DL, MVT::i64,
      {FiOp, CurDAG->getTargetConstant(0, DL, MVT::i64), N->getOperand(2),
       CurDAG->getTargetConstant(TagOffset, DL, MVT::i64)});
  ReplaceNode(N, Out);
  return true;
}
void AArch64DAGToDAGISel::SelectTagP(SDNode *N) {
  assert(isa<ConstantSDNode>(N->getOperand(3)) &&
         "llvm.aarch64.tagp third argument must be an immediate");
  if (trySelectStackSlotTagP(N))
    return;
  // FIXME: above applies in any case when offset between Op1 and Op2 is a
  // compile-time constant, not just for stack allocations.

  // General case for unrelated pointers in Op1 and Op2.
  SDLoc DL(N);
  int TagOffset = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
  SDNode *N1 = CurDAG->getMachineNode(AArch64::SUBP, DL, MVT::i64,
                                      {N->getOperand(1), N->getOperand(2)});
  SDNode *N2 = CurDAG->getMachineNode(AArch64::ADDXrr, DL, MVT::i64,
                                      {SDValue(N1, 0), N->getOperand(2)});
  SDNode *N3 = CurDAG->getMachineNode(
      AArch64::ADDG, DL, MVT::i64,
      {SDValue(N2, 0), CurDAG->getTargetConstant(0, DL, MVT::i64),
       CurDAG->getTargetConstant(TagOffset, DL, MVT::i64)});
  ReplaceNode(N, N3);
}
// NOTE: We cannot use EXTRACT_SUBREG in all cases because the fixed length
// vector types larger than NEON don't have a matching SubRegIndex.
static SDNode *extractSubReg(SelectionDAG *DAG, EVT VT, SDValue V) {
  assert(V.getValueType().isScalableVector() &&
         V.getValueType().getSizeInBits().getKnownMinSize() ==
             AArch64::SVEBitsPerBlock &&
         "Expected to extract from a packed scalable vector!");
  assert(VT.isFixedLengthVector() &&
         "Expected to extract a fixed length vector!");

  SDLoc DL(V);
  switch (VT.getSizeInBits()) {
  case 64: {
    auto SubReg = DAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
    return DAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, VT, V, SubReg);
  }
  case 128: {
    auto SubReg = DAG->getTargetConstant(AArch64::zsub, DL, MVT::i32);
    return DAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, VT, V, SubReg);
  }
  default: {
    auto RC = DAG->getTargetConstant(AArch64::ZPRRegClassID, DL, MVT::i64);
    return DAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
  }
  }
}
// NOTE: We cannot use INSERT_SUBREG in all cases because the fixed length
// vector types larger than NEON don't have a matching SubRegIndex.
static SDNode *insertSubReg(SelectionDAG *DAG, EVT VT, SDValue V) {
  assert(VT.isScalableVector() &&
         VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock &&
         "Expected to insert into a packed scalable vector!");
  assert(V.getValueType().isFixedLengthVector() &&
         "Expected to insert a fixed length vector!");

  SDLoc DL(V);
  switch (V.getValueType().getSizeInBits()) {
  case 64: {
    auto SubReg = DAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
    auto Container = DAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
    return DAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, VT,
                               SDValue(Container, 0), V, SubReg);
  }
  case 128: {
    auto SubReg = DAG->getTargetConstant(AArch64::zsub, DL, MVT::i32);
    auto Container = DAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
    return DAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, VT,
                               SDValue(Container, 0), V, SubReg);
  }
  default: {
    auto RC = DAG->getTargetConstant(AArch64::ZPRRegClassID, DL, MVT::i64);
    return DAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
  }
  }
}
3434 void AArch64DAGToDAGISel::Select(SDNode
*Node
) {
3435 // If we have a custom node, we already have selected!
3436 if (Node
->isMachineOpcode()) {
3437 LLVM_DEBUG(errs() << "== "; Node
->dump(CurDAG
); errs() << "\n");
3438 Node
->setNodeId(-1);
3442 // Few custom selection stuff.
3443 EVT VT
= Node
->getValueType(0);
3445 switch (Node
->getOpcode()) {
3449 case ISD::ATOMIC_CMP_SWAP
:
3450 if (SelectCMP_SWAP(Node
))
3454 case ISD::READ_REGISTER
:
3455 if (tryReadRegister(Node
))
3459 case ISD::WRITE_REGISTER
:
3460 if (tryWriteRegister(Node
))
3465 if (tryMLAV64LaneV128(Node
))
3470 // Try to select as an indexed load. Fall through to normal processing
3472 if (tryIndexedLoad(Node
))
3480 case ISD::SIGN_EXTEND_INREG
:
3481 if (tryBitfieldExtractOp(Node
))
3483 if (tryBitfieldInsertInZeroOp(Node
))
3488 if (tryShiftAmountMod(Node
))
3492 case ISD::SIGN_EXTEND
:
3493 if (tryBitfieldExtractOpFromSExt(Node
))
3497 case ISD::FP_EXTEND
:
3498 if (tryHighFPExt(Node
))
3503 if (tryBitfieldInsertOp(Node
))
  case ISD::EXTRACT_SUBVECTOR: {
    // Bail when not a "cast" like extract_subvector.
    if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue() != 0)
      break;

    // Bail when normal isel can do the job.
    EVT InVT = Node->getOperand(0).getValueType();
    if (VT.isScalableVector() || InVT.isFixedLengthVector())
      break;

    // NOTE: We can only get here when doing fixed length SVE code generation.
    // We do manual selection because the types involved are not linked to real
    // registers (despite being legal) and must be coerced into SVE registers.
    //
    // NOTE: If the above changes, be aware that selection will still not work
    // because the td definition of extract_vector does not support extracting
    // a fixed length vector from a scalable vector.
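    //
    // (Illustrative example: with 128-bit fixed length SVE vectors, a node
    // such as "v4i32 = extract_subvector nxv4i32 t0, 0" reaches this point
    // and is selected via extractSubReg above.)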
    ReplaceNode(Node, extractSubReg(CurDAG, VT, Node->getOperand(0)));
    return;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Bail when not a "cast" like insert_subvector.
    if (cast<ConstantSDNode>(Node->getOperand(2))->getZExtValue() != 0)
      break;
    if (!Node->getOperand(0).isUndef())
      break;

    // Bail when normal isel should do the job.
    EVT InVT = Node->getOperand(1).getValueType();
    if (VT.isFixedLengthVector() || InVT.isScalableVector())
      break;

    // NOTE: We can only get here when doing fixed length SVE code generation.
    // We do manual selection because the types involved are not linked to real
    // registers (despite being legal) and must be coerced into SVE registers.
    //
    // NOTE: If the above changes, be aware that selection will still not work
    // because the td definition of insert_vector does not support inserting a
    // fixed length vector into a scalable vector.

    ReplaceNode(Node, insertSubReg(CurDAG, VT, Node->getOperand(1)));
    return;
  }
  case ISD::Constant: {
    // Materialize zero constants as copies from WZR/XZR. This allows
    // the coalescer to propagate these into other instructions.
    ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isZero()) {
      if (VT == MVT::i32) {
        SDValue New = CurDAG->getCopyFromReg(
            CurDAG->getEntryNode(), SDLoc(Node), AArch64::WZR, MVT::i32);
        ReplaceNode(Node, New.getNode());
        return;
      } else if (VT == MVT::i64) {
        SDValue New = CurDAG->getCopyFromReg(
            CurDAG->getEntryNode(), SDLoc(Node), AArch64::XZR, MVT::i64);
        ReplaceNode(Node, New.getNode());
        return;
      }
    }
    break;
  }
  case ISD::FrameIndex: {
    // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
    const TargetLowering *TLI = getTargetLowering();
    SDValue TFI = CurDAG->getTargetFrameIndex(
        FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    SDLoc DL(Node);
    SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
                      CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
    CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
    return;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_ldaxp:
    case Intrinsic::aarch64_ldxp: {
      unsigned Op =
          IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
      SDValue MemAddr = Node->getOperand(2);
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);

      SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
                                          MVT::Other, MemAddr, Chain);

      // Transfer memoperands.
      MachineMemOperand *MemOp =
          cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});
      ReplaceNode(Node, Ld);
      return;
    }
    case Intrinsic::aarch64_stlxp:
    case Intrinsic::aarch64_stxp: {
      unsigned Op =
          IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);
      SDValue ValLo = Node->getOperand(2);
      SDValue ValHi = Node->getOperand(3);
      SDValue MemAddr = Node->getOperand(4);

      // Place arguments in the right order.
      SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};

      SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
      // Transfer memoperands.
      MachineMemOperand *MemOp =
          cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});

      ReplaceNode(Node, St);
      return;
    }
    case Intrinsic::aarch64_neon_ld1x2:
      if (VT == MVT::v8i8) {
        SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
        return;
      } else if (VT == MVT::v16i8) {
        SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
        SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
        SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
        return;
      }
      break;
3658 case Intrinsic::aarch64_neon_ld1x3
:
3659 if (VT
== MVT::v8i8
) {
3660 SelectLoad(Node
, 3, AArch64::LD1Threev8b
, AArch64::dsub0
);
3662 } else if (VT
== MVT::v16i8
) {
3663 SelectLoad(Node
, 3, AArch64::LD1Threev16b
, AArch64::qsub0
);
3665 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
3666 SelectLoad(Node
, 3, AArch64::LD1Threev4h
, AArch64::dsub0
);
3668 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
3669 SelectLoad(Node
, 3, AArch64::LD1Threev8h
, AArch64::qsub0
);
3671 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
3672 SelectLoad(Node
, 3, AArch64::LD1Threev2s
, AArch64::dsub0
);
3674 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
3675 SelectLoad(Node
, 3, AArch64::LD1Threev4s
, AArch64::qsub0
);
3677 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
3678 SelectLoad(Node
, 3, AArch64::LD1Threev1d
, AArch64::dsub0
);
3680 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
3681 SelectLoad(Node
, 3, AArch64::LD1Threev2d
, AArch64::qsub0
);
3685 case Intrinsic::aarch64_neon_ld1x4
:
3686 if (VT
== MVT::v8i8
) {
3687 SelectLoad(Node
, 4, AArch64::LD1Fourv8b
, AArch64::dsub0
);
3689 } else if (VT
== MVT::v16i8
) {
3690 SelectLoad(Node
, 4, AArch64::LD1Fourv16b
, AArch64::qsub0
);
3692 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
3693 SelectLoad(Node
, 4, AArch64::LD1Fourv4h
, AArch64::dsub0
);
3695 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
3696 SelectLoad(Node
, 4, AArch64::LD1Fourv8h
, AArch64::qsub0
);
3698 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
3699 SelectLoad(Node
, 4, AArch64::LD1Fourv2s
, AArch64::dsub0
);
3701 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
3702 SelectLoad(Node
, 4, AArch64::LD1Fourv4s
, AArch64::qsub0
);
3704 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
3705 SelectLoad(Node
, 4, AArch64::LD1Fourv1d
, AArch64::dsub0
);
3707 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
3708 SelectLoad(Node
, 4, AArch64::LD1Fourv2d
, AArch64::qsub0
);
3712 case Intrinsic::aarch64_neon_ld2
:
3713 if (VT
== MVT::v8i8
) {
3714 SelectLoad(Node
, 2, AArch64::LD2Twov8b
, AArch64::dsub0
);
3716 } else if (VT
== MVT::v16i8
) {
3717 SelectLoad(Node
, 2, AArch64::LD2Twov16b
, AArch64::qsub0
);
3719 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
3720 SelectLoad(Node
, 2, AArch64::LD2Twov4h
, AArch64::dsub0
);
3722 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
3723 SelectLoad(Node
, 2, AArch64::LD2Twov8h
, AArch64::qsub0
);
3725 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
3726 SelectLoad(Node
, 2, AArch64::LD2Twov2s
, AArch64::dsub0
);
3728 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
3729 SelectLoad(Node
, 2, AArch64::LD2Twov4s
, AArch64::qsub0
);
3731 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
3732 SelectLoad(Node
, 2, AArch64::LD1Twov1d
, AArch64::dsub0
);
3734 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
3735 SelectLoad(Node
, 2, AArch64::LD2Twov2d
, AArch64::qsub0
);
3739 case Intrinsic::aarch64_neon_ld3
:
3740 if (VT
== MVT::v8i8
) {
3741 SelectLoad(Node
, 3, AArch64::LD3Threev8b
, AArch64::dsub0
);
3743 } else if (VT
== MVT::v16i8
) {
3744 SelectLoad(Node
, 3, AArch64::LD3Threev16b
, AArch64::qsub0
);
3746 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
3747 SelectLoad(Node
, 3, AArch64::LD3Threev4h
, AArch64::dsub0
);
3749 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
3750 SelectLoad(Node
, 3, AArch64::LD3Threev8h
, AArch64::qsub0
);
3752 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
3753 SelectLoad(Node
, 3, AArch64::LD3Threev2s
, AArch64::dsub0
);
3755 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
3756 SelectLoad(Node
, 3, AArch64::LD3Threev4s
, AArch64::qsub0
);
3758 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
3759 SelectLoad(Node
, 3, AArch64::LD1Threev1d
, AArch64::dsub0
);
3761 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
3762 SelectLoad(Node
, 3, AArch64::LD3Threev2d
, AArch64::qsub0
);
3766 case Intrinsic::aarch64_neon_ld4
:
3767 if (VT
== MVT::v8i8
) {
3768 SelectLoad(Node
, 4, AArch64::LD4Fourv8b
, AArch64::dsub0
);
3770 } else if (VT
== MVT::v16i8
) {
3771 SelectLoad(Node
, 4, AArch64::LD4Fourv16b
, AArch64::qsub0
);
3773 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
3774 SelectLoad(Node
, 4, AArch64::LD4Fourv4h
, AArch64::dsub0
);
3776 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
3777 SelectLoad(Node
, 4, AArch64::LD4Fourv8h
, AArch64::qsub0
);
3779 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
3780 SelectLoad(Node
, 4, AArch64::LD4Fourv2s
, AArch64::dsub0
);
3782 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
3783 SelectLoad(Node
, 4, AArch64::LD4Fourv4s
, AArch64::qsub0
);
3785 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
3786 SelectLoad(Node
, 4, AArch64::LD1Fourv1d
, AArch64::dsub0
);
3788 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
3789 SelectLoad(Node
, 4, AArch64::LD4Fourv2d
, AArch64::qsub0
);
3793 case Intrinsic::aarch64_neon_ld2r
:
3794 if (VT
== MVT::v8i8
) {
3795 SelectLoad(Node
, 2, AArch64::LD2Rv8b
, AArch64::dsub0
);
3797 } else if (VT
== MVT::v16i8
) {
3798 SelectLoad(Node
, 2, AArch64::LD2Rv16b
, AArch64::qsub0
);
3800 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
3801 SelectLoad(Node
, 2, AArch64::LD2Rv4h
, AArch64::dsub0
);
3803 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
3804 SelectLoad(Node
, 2, AArch64::LD2Rv8h
, AArch64::qsub0
);
3806 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
3807 SelectLoad(Node
, 2, AArch64::LD2Rv2s
, AArch64::dsub0
);
3809 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
3810 SelectLoad(Node
, 2, AArch64::LD2Rv4s
, AArch64::qsub0
);
3812 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
3813 SelectLoad(Node
, 2, AArch64::LD2Rv1d
, AArch64::dsub0
);
3815 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
3816 SelectLoad(Node
, 2, AArch64::LD2Rv2d
, AArch64::qsub0
);
3820 case Intrinsic::aarch64_neon_ld3r
:
3821 if (VT
== MVT::v8i8
) {
3822 SelectLoad(Node
, 3, AArch64::LD3Rv8b
, AArch64::dsub0
);
3824 } else if (VT
== MVT::v16i8
) {
3825 SelectLoad(Node
, 3, AArch64::LD3Rv16b
, AArch64::qsub0
);
3827 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
3828 SelectLoad(Node
, 3, AArch64::LD3Rv4h
, AArch64::dsub0
);
3830 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
3831 SelectLoad(Node
, 3, AArch64::LD3Rv8h
, AArch64::qsub0
);
3833 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
3834 SelectLoad(Node
, 3, AArch64::LD3Rv2s
, AArch64::dsub0
);
3836 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
3837 SelectLoad(Node
, 3, AArch64::LD3Rv4s
, AArch64::qsub0
);
3839 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
3840 SelectLoad(Node
, 3, AArch64::LD3Rv1d
, AArch64::dsub0
);
3842 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
3843 SelectLoad(Node
, 3, AArch64::LD3Rv2d
, AArch64::qsub0
);
3847 case Intrinsic::aarch64_neon_ld4r
:
3848 if (VT
== MVT::v8i8
) {
3849 SelectLoad(Node
, 4, AArch64::LD4Rv8b
, AArch64::dsub0
);
3851 } else if (VT
== MVT::v16i8
) {
3852 SelectLoad(Node
, 4, AArch64::LD4Rv16b
, AArch64::qsub0
);
3854 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
3855 SelectLoad(Node
, 4, AArch64::LD4Rv4h
, AArch64::dsub0
);
3857 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
3858 SelectLoad(Node
, 4, AArch64::LD4Rv8h
, AArch64::qsub0
);
3860 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
3861 SelectLoad(Node
, 4, AArch64::LD4Rv2s
, AArch64::dsub0
);
3863 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
3864 SelectLoad(Node
, 4, AArch64::LD4Rv4s
, AArch64::qsub0
);
3866 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
3867 SelectLoad(Node
, 4, AArch64::LD4Rv1d
, AArch64::dsub0
);
3869 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
3870 SelectLoad(Node
, 4, AArch64::LD4Rv2d
, AArch64::qsub0
);
3874 case Intrinsic::aarch64_neon_ld2lane
:
3875 if (VT
== MVT::v16i8
|| VT
== MVT::v8i8
) {
3876 SelectLoadLane(Node
, 2, AArch64::LD2i8
);
3878 } else if (VT
== MVT::v8i16
|| VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
3879 VT
== MVT::v8f16
|| VT
== MVT::v4bf16
|| VT
== MVT::v8bf16
) {
3880 SelectLoadLane(Node
, 2, AArch64::LD2i16
);
3882 } else if (VT
== MVT::v4i32
|| VT
== MVT::v2i32
|| VT
== MVT::v4f32
||
3884 SelectLoadLane(Node
, 2, AArch64::LD2i32
);
3886 } else if (VT
== MVT::v2i64
|| VT
== MVT::v1i64
|| VT
== MVT::v2f64
||
3888 SelectLoadLane(Node
, 2, AArch64::LD2i64
);
3892 case Intrinsic::aarch64_neon_ld3lane
:
3893 if (VT
== MVT::v16i8
|| VT
== MVT::v8i8
) {
3894 SelectLoadLane(Node
, 3, AArch64::LD3i8
);
3896 } else if (VT
== MVT::v8i16
|| VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
3897 VT
== MVT::v8f16
|| VT
== MVT::v4bf16
|| VT
== MVT::v8bf16
) {
3898 SelectLoadLane(Node
, 3, AArch64::LD3i16
);
3900 } else if (VT
== MVT::v4i32
|| VT
== MVT::v2i32
|| VT
== MVT::v4f32
||
3902 SelectLoadLane(Node
, 3, AArch64::LD3i32
);
3904 } else if (VT
== MVT::v2i64
|| VT
== MVT::v1i64
|| VT
== MVT::v2f64
||
3906 SelectLoadLane(Node
, 3, AArch64::LD3i64
);
3910 case Intrinsic::aarch64_neon_ld4lane
:
3911 if (VT
== MVT::v16i8
|| VT
== MVT::v8i8
) {
3912 SelectLoadLane(Node
, 4, AArch64::LD4i8
);
3914 } else if (VT
== MVT::v8i16
|| VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
3915 VT
== MVT::v8f16
|| VT
== MVT::v4bf16
|| VT
== MVT::v8bf16
) {
3916 SelectLoadLane(Node
, 4, AArch64::LD4i16
);
3918 } else if (VT
== MVT::v4i32
|| VT
== MVT::v2i32
|| VT
== MVT::v4f32
||
3920 SelectLoadLane(Node
, 4, AArch64::LD4i32
);
3922 } else if (VT
== MVT::v2i64
|| VT
== MVT::v1i64
|| VT
== MVT::v2f64
||
3924 SelectLoadLane(Node
, 4, AArch64::LD4i64
);
    case Intrinsic::aarch64_ld64b:
      SelectLoad(Node, 8, AArch64::LD64B, AArch64::x8sub_0);
      return;
    case Intrinsic::aarch64_sve_ld2_sret: {
      if (VT == MVT::nxv16i8) {
        SelectPredicatedLoad(Node, 2, 0, AArch64::LD2B_IMM, AArch64::LD2B,
                             true);
        return;
      } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
                 VT == MVT::nxv8bf16) {
        SelectPredicatedLoad(Node, 2, 1, AArch64::LD2H_IMM, AArch64::LD2H,
                             true);
        return;
      } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
        SelectPredicatedLoad(Node, 2, 2, AArch64::LD2W_IMM, AArch64::LD2W,
                             true);
        return;
      } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
        SelectPredicatedLoad(Node, 2, 3, AArch64::LD2D_IMM, AArch64::LD2D,
                             true);
        return;
      }
      break;
    }
3952 case Intrinsic::aarch64_sve_ld3_sret
: {
3953 if (VT
== MVT::nxv16i8
) {
3954 SelectPredicatedLoad(Node
, 3, 0, AArch64::LD3B_IMM
, AArch64::LD3B
,
3957 } else if (VT
== MVT::nxv8i16
|| VT
== MVT::nxv8f16
||
3958 VT
== MVT::nxv8bf16
) {
3959 SelectPredicatedLoad(Node
, 3, 1, AArch64::LD3H_IMM
, AArch64::LD3H
,
3962 } else if (VT
== MVT::nxv4i32
|| VT
== MVT::nxv4f32
) {
3963 SelectPredicatedLoad(Node
, 3, 2, AArch64::LD3W_IMM
, AArch64::LD3W
,
3966 } else if (VT
== MVT::nxv2i64
|| VT
== MVT::nxv2f64
) {
3967 SelectPredicatedLoad(Node
, 3, 3, AArch64::LD3D_IMM
, AArch64::LD3D
,
3973 case Intrinsic::aarch64_sve_ld4_sret
: {
3974 if (VT
== MVT::nxv16i8
) {
3975 SelectPredicatedLoad(Node
, 4, 0, AArch64::LD4B_IMM
, AArch64::LD4B
,
3978 } else if (VT
== MVT::nxv8i16
|| VT
== MVT::nxv8f16
||
3979 VT
== MVT::nxv8bf16
) {
3980 SelectPredicatedLoad(Node
, 4, 1, AArch64::LD4H_IMM
, AArch64::LD4H
,
3983 } else if (VT
== MVT::nxv4i32
|| VT
== MVT::nxv4f32
) {
3984 SelectPredicatedLoad(Node
, 4, 2, AArch64::LD4W_IMM
, AArch64::LD4W
,
3987 } else if (VT
== MVT::nxv2i64
|| VT
== MVT::nxv2f64
) {
3988 SelectPredicatedLoad(Node
, 4, 3, AArch64::LD4D_IMM
, AArch64::LD4D
,
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_tagp:
      SelectTagP(Node);
      return;
    case Intrinsic::aarch64_neon_tbl2:
      SelectTable(Node, 2,
                  VT == MVT::v8i8 ? AArch64::TBLv8i8Two : AArch64::TBLv16i8Two,
                  false);
      return;
    case Intrinsic::aarch64_neon_tbl3:
      SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
                                           : AArch64::TBLv16i8Three,
                  false);
      return;
    case Intrinsic::aarch64_neon_tbl4:
      SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
                                           : AArch64::TBLv16i8Four,
                  false);
      return;
    case Intrinsic::aarch64_neon_tbx2:
      SelectTable(Node, 2,
                  VT == MVT::v8i8 ? AArch64::TBXv8i8Two : AArch64::TBXv16i8Two,
                  true);
      return;
    case Intrinsic::aarch64_neon_tbx3:
      SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
                                           : AArch64::TBXv16i8Three,
                  true);
      return;
    case Intrinsic::aarch64_neon_tbx4:
      SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
                                           : AArch64::TBXv16i8Four,
                  true);
      return;
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_umull:
      if (tryMULLV64LaneV128(IntNo, Node))
        return;
      break;
    case Intrinsic::swift_async_context_addr: {
      SDLoc DL(Node);
      CurDAG->SelectNodeTo(Node, AArch64::SUBXri, MVT::i64,
                           CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
                                                  AArch64::FP, MVT::i64),
                           CurDAG->getTargetConstant(8, DL, MVT::i32),
                           CurDAG->getTargetConstant(0, DL, MVT::i32));
      auto &MF = CurDAG->getMachineFunction();
      MF.getFrameInfo().setFrameAddressIsTaken(true);
      MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    if (Node->getNumOperands() >= 3)
      VT = Node->getOperand(2)->getValueType(0);
    switch (IntNo) {
    default:
      break;
4061 case Intrinsic::aarch64_neon_st1x2
: {
4062 if (VT
== MVT::v8i8
) {
4063 SelectStore(Node
, 2, AArch64::ST1Twov8b
);
4065 } else if (VT
== MVT::v16i8
) {
4066 SelectStore(Node
, 2, AArch64::ST1Twov16b
);
4068 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4069 VT
== MVT::v4bf16
) {
4070 SelectStore(Node
, 2, AArch64::ST1Twov4h
);
4072 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
||
4073 VT
== MVT::v8bf16
) {
4074 SelectStore(Node
, 2, AArch64::ST1Twov8h
);
4076 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4077 SelectStore(Node
, 2, AArch64::ST1Twov2s
);
4079 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4080 SelectStore(Node
, 2, AArch64::ST1Twov4s
);
4082 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4083 SelectStore(Node
, 2, AArch64::ST1Twov2d
);
4085 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4086 SelectStore(Node
, 2, AArch64::ST1Twov1d
);
4091 case Intrinsic::aarch64_neon_st1x3
: {
4092 if (VT
== MVT::v8i8
) {
4093 SelectStore(Node
, 3, AArch64::ST1Threev8b
);
4095 } else if (VT
== MVT::v16i8
) {
4096 SelectStore(Node
, 3, AArch64::ST1Threev16b
);
4098 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4099 VT
== MVT::v4bf16
) {
4100 SelectStore(Node
, 3, AArch64::ST1Threev4h
);
4102 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
||
4103 VT
== MVT::v8bf16
) {
4104 SelectStore(Node
, 3, AArch64::ST1Threev8h
);
4106 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4107 SelectStore(Node
, 3, AArch64::ST1Threev2s
);
4109 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4110 SelectStore(Node
, 3, AArch64::ST1Threev4s
);
4112 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4113 SelectStore(Node
, 3, AArch64::ST1Threev2d
);
4115 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4116 SelectStore(Node
, 3, AArch64::ST1Threev1d
);
4121 case Intrinsic::aarch64_neon_st1x4
: {
4122 if (VT
== MVT::v8i8
) {
4123 SelectStore(Node
, 4, AArch64::ST1Fourv8b
);
4125 } else if (VT
== MVT::v16i8
) {
4126 SelectStore(Node
, 4, AArch64::ST1Fourv16b
);
4128 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4129 VT
== MVT::v4bf16
) {
4130 SelectStore(Node
, 4, AArch64::ST1Fourv4h
);
4132 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
||
4133 VT
== MVT::v8bf16
) {
4134 SelectStore(Node
, 4, AArch64::ST1Fourv8h
);
4136 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4137 SelectStore(Node
, 4, AArch64::ST1Fourv2s
);
4139 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4140 SelectStore(Node
, 4, AArch64::ST1Fourv4s
);
4142 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4143 SelectStore(Node
, 4, AArch64::ST1Fourv2d
);
4145 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4146 SelectStore(Node
, 4, AArch64::ST1Fourv1d
);
4151 case Intrinsic::aarch64_neon_st2
: {
4152 if (VT
== MVT::v8i8
) {
4153 SelectStore(Node
, 2, AArch64::ST2Twov8b
);
4155 } else if (VT
== MVT::v16i8
) {
4156 SelectStore(Node
, 2, AArch64::ST2Twov16b
);
4158 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4159 VT
== MVT::v4bf16
) {
4160 SelectStore(Node
, 2, AArch64::ST2Twov4h
);
4162 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
||
4163 VT
== MVT::v8bf16
) {
4164 SelectStore(Node
, 2, AArch64::ST2Twov8h
);
4166 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4167 SelectStore(Node
, 2, AArch64::ST2Twov2s
);
4169 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4170 SelectStore(Node
, 2, AArch64::ST2Twov4s
);
4172 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4173 SelectStore(Node
, 2, AArch64::ST2Twov2d
);
4175 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4176 SelectStore(Node
, 2, AArch64::ST1Twov1d
);
4181 case Intrinsic::aarch64_neon_st3
: {
4182 if (VT
== MVT::v8i8
) {
4183 SelectStore(Node
, 3, AArch64::ST3Threev8b
);
4185 } else if (VT
== MVT::v16i8
) {
4186 SelectStore(Node
, 3, AArch64::ST3Threev16b
);
4188 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4189 VT
== MVT::v4bf16
) {
4190 SelectStore(Node
, 3, AArch64::ST3Threev4h
);
4192 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
||
4193 VT
== MVT::v8bf16
) {
4194 SelectStore(Node
, 3, AArch64::ST3Threev8h
);
4196 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4197 SelectStore(Node
, 3, AArch64::ST3Threev2s
);
4199 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4200 SelectStore(Node
, 3, AArch64::ST3Threev4s
);
4202 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4203 SelectStore(Node
, 3, AArch64::ST3Threev2d
);
4205 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4206 SelectStore(Node
, 3, AArch64::ST1Threev1d
);
4211 case Intrinsic::aarch64_neon_st4
: {
4212 if (VT
== MVT::v8i8
) {
4213 SelectStore(Node
, 4, AArch64::ST4Fourv8b
);
4215 } else if (VT
== MVT::v16i8
) {
4216 SelectStore(Node
, 4, AArch64::ST4Fourv16b
);
4218 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4219 VT
== MVT::v4bf16
) {
4220 SelectStore(Node
, 4, AArch64::ST4Fourv4h
);
4222 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
||
4223 VT
== MVT::v8bf16
) {
4224 SelectStore(Node
, 4, AArch64::ST4Fourv8h
);
4226 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4227 SelectStore(Node
, 4, AArch64::ST4Fourv2s
);
4229 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4230 SelectStore(Node
, 4, AArch64::ST4Fourv4s
);
4232 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4233 SelectStore(Node
, 4, AArch64::ST4Fourv2d
);
4235 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4236 SelectStore(Node
, 4, AArch64::ST1Fourv1d
);
4241 case Intrinsic::aarch64_neon_st2lane
: {
4242 if (VT
== MVT::v16i8
|| VT
== MVT::v8i8
) {
4243 SelectStoreLane(Node
, 2, AArch64::ST2i8
);
4245 } else if (VT
== MVT::v8i16
|| VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4246 VT
== MVT::v8f16
|| VT
== MVT::v4bf16
|| VT
== MVT::v8bf16
) {
4247 SelectStoreLane(Node
, 2, AArch64::ST2i16
);
4249 } else if (VT
== MVT::v4i32
|| VT
== MVT::v2i32
|| VT
== MVT::v4f32
||
4251 SelectStoreLane(Node
, 2, AArch64::ST2i32
);
4253 } else if (VT
== MVT::v2i64
|| VT
== MVT::v1i64
|| VT
== MVT::v2f64
||
4255 SelectStoreLane(Node
, 2, AArch64::ST2i64
);
4260 case Intrinsic::aarch64_neon_st3lane
: {
4261 if (VT
== MVT::v16i8
|| VT
== MVT::v8i8
) {
4262 SelectStoreLane(Node
, 3, AArch64::ST3i8
);
4264 } else if (VT
== MVT::v8i16
|| VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4265 VT
== MVT::v8f16
|| VT
== MVT::v4bf16
|| VT
== MVT::v8bf16
) {
4266 SelectStoreLane(Node
, 3, AArch64::ST3i16
);
4268 } else if (VT
== MVT::v4i32
|| VT
== MVT::v2i32
|| VT
== MVT::v4f32
||
4270 SelectStoreLane(Node
, 3, AArch64::ST3i32
);
4272 } else if (VT
== MVT::v2i64
|| VT
== MVT::v1i64
|| VT
== MVT::v2f64
||
4274 SelectStoreLane(Node
, 3, AArch64::ST3i64
);
4279 case Intrinsic::aarch64_neon_st4lane
: {
4280 if (VT
== MVT::v16i8
|| VT
== MVT::v8i8
) {
4281 SelectStoreLane(Node
, 4, AArch64::ST4i8
);
4283 } else if (VT
== MVT::v8i16
|| VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4284 VT
== MVT::v8f16
|| VT
== MVT::v4bf16
|| VT
== MVT::v8bf16
) {
4285 SelectStoreLane(Node
, 4, AArch64::ST4i16
);
4287 } else if (VT
== MVT::v4i32
|| VT
== MVT::v2i32
|| VT
== MVT::v4f32
||
4289 SelectStoreLane(Node
, 4, AArch64::ST4i32
);
4291 } else if (VT
== MVT::v2i64
|| VT
== MVT::v1i64
|| VT
== MVT::v2f64
||
4293 SelectStoreLane(Node
, 4, AArch64::ST4i64
);
4298 case Intrinsic::aarch64_sve_st2
: {
4299 if (VT
== MVT::nxv16i8
) {
4300 SelectPredicatedStore(Node
, 2, 0, AArch64::ST2B
, AArch64::ST2B_IMM
);
4302 } else if (VT
== MVT::nxv8i16
|| VT
== MVT::nxv8f16
||
4303 VT
== MVT::nxv8bf16
) {
4304 SelectPredicatedStore(Node
, 2, 1, AArch64::ST2H
, AArch64::ST2H_IMM
);
4306 } else if (VT
== MVT::nxv4i32
|| VT
== MVT::nxv4f32
) {
4307 SelectPredicatedStore(Node
, 2, 2, AArch64::ST2W
, AArch64::ST2W_IMM
);
4309 } else if (VT
== MVT::nxv2i64
|| VT
== MVT::nxv2f64
) {
4310 SelectPredicatedStore(Node
, 2, 3, AArch64::ST2D
, AArch64::ST2D_IMM
);
4315 case Intrinsic::aarch64_sve_st3
: {
4316 if (VT
== MVT::nxv16i8
) {
4317 SelectPredicatedStore(Node
, 3, 0, AArch64::ST3B
, AArch64::ST3B_IMM
);
4319 } else if (VT
== MVT::nxv8i16
|| VT
== MVT::nxv8f16
||
4320 VT
== MVT::nxv8bf16
) {
4321 SelectPredicatedStore(Node
, 3, 1, AArch64::ST3H
, AArch64::ST3H_IMM
);
4323 } else if (VT
== MVT::nxv4i32
|| VT
== MVT::nxv4f32
) {
4324 SelectPredicatedStore(Node
, 3, 2, AArch64::ST3W
, AArch64::ST3W_IMM
);
4326 } else if (VT
== MVT::nxv2i64
|| VT
== MVT::nxv2f64
) {
4327 SelectPredicatedStore(Node
, 3, 3, AArch64::ST3D
, AArch64::ST3D_IMM
);
4332 case Intrinsic::aarch64_sve_st4
: {
4333 if (VT
== MVT::nxv16i8
) {
4334 SelectPredicatedStore(Node
, 4, 0, AArch64::ST4B
, AArch64::ST4B_IMM
);
4336 } else if (VT
== MVT::nxv8i16
|| VT
== MVT::nxv8f16
||
4337 VT
== MVT::nxv8bf16
) {
4338 SelectPredicatedStore(Node
, 4, 1, AArch64::ST4H
, AArch64::ST4H_IMM
);
4340 } else if (VT
== MVT::nxv4i32
|| VT
== MVT::nxv4f32
) {
4341 SelectPredicatedStore(Node
, 4, 2, AArch64::ST4W
, AArch64::ST4W_IMM
);
4343 } else if (VT
== MVT::nxv2i64
|| VT
== MVT::nxv2f64
) {
4344 SelectPredicatedStore(Node
, 4, 3, AArch64::ST4D
, AArch64::ST4D_IMM
);
4352 case AArch64ISD::LD2post
: {
4353 if (VT
== MVT::v8i8
) {
4354 SelectPostLoad(Node
, 2, AArch64::LD2Twov8b_POST
, AArch64::dsub0
);
4356 } else if (VT
== MVT::v16i8
) {
4357 SelectPostLoad(Node
, 2, AArch64::LD2Twov16b_POST
, AArch64::qsub0
);
4359 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4360 SelectPostLoad(Node
, 2, AArch64::LD2Twov4h_POST
, AArch64::dsub0
);
4362 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4363 SelectPostLoad(Node
, 2, AArch64::LD2Twov8h_POST
, AArch64::qsub0
);
4365 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4366 SelectPostLoad(Node
, 2, AArch64::LD2Twov2s_POST
, AArch64::dsub0
);
4368 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4369 SelectPostLoad(Node
, 2, AArch64::LD2Twov4s_POST
, AArch64::qsub0
);
4371 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4372 SelectPostLoad(Node
, 2, AArch64::LD1Twov1d_POST
, AArch64::dsub0
);
4374 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4375 SelectPostLoad(Node
, 2, AArch64::LD2Twov2d_POST
, AArch64::qsub0
);
4380 case AArch64ISD::LD3post
: {
4381 if (VT
== MVT::v8i8
) {
4382 SelectPostLoad(Node
, 3, AArch64::LD3Threev8b_POST
, AArch64::dsub0
);
4384 } else if (VT
== MVT::v16i8
) {
4385 SelectPostLoad(Node
, 3, AArch64::LD3Threev16b_POST
, AArch64::qsub0
);
4387 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4388 SelectPostLoad(Node
, 3, AArch64::LD3Threev4h_POST
, AArch64::dsub0
);
4390 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4391 SelectPostLoad(Node
, 3, AArch64::LD3Threev8h_POST
, AArch64::qsub0
);
4393 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4394 SelectPostLoad(Node
, 3, AArch64::LD3Threev2s_POST
, AArch64::dsub0
);
4396 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4397 SelectPostLoad(Node
, 3, AArch64::LD3Threev4s_POST
, AArch64::qsub0
);
4399 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4400 SelectPostLoad(Node
, 3, AArch64::LD1Threev1d_POST
, AArch64::dsub0
);
4402 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4403 SelectPostLoad(Node
, 3, AArch64::LD3Threev2d_POST
, AArch64::qsub0
);
4408 case AArch64ISD::LD4post
: {
4409 if (VT
== MVT::v8i8
) {
4410 SelectPostLoad(Node
, 4, AArch64::LD4Fourv8b_POST
, AArch64::dsub0
);
4412 } else if (VT
== MVT::v16i8
) {
4413 SelectPostLoad(Node
, 4, AArch64::LD4Fourv16b_POST
, AArch64::qsub0
);
4415 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4416 SelectPostLoad(Node
, 4, AArch64::LD4Fourv4h_POST
, AArch64::dsub0
);
4418 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4419 SelectPostLoad(Node
, 4, AArch64::LD4Fourv8h_POST
, AArch64::qsub0
);
4421 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4422 SelectPostLoad(Node
, 4, AArch64::LD4Fourv2s_POST
, AArch64::dsub0
);
4424 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4425 SelectPostLoad(Node
, 4, AArch64::LD4Fourv4s_POST
, AArch64::qsub0
);
4427 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4428 SelectPostLoad(Node
, 4, AArch64::LD1Fourv1d_POST
, AArch64::dsub0
);
4430 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4431 SelectPostLoad(Node
, 4, AArch64::LD4Fourv2d_POST
, AArch64::qsub0
);
4436 case AArch64ISD::LD1x2post
: {
4437 if (VT
== MVT::v8i8
) {
4438 SelectPostLoad(Node
, 2, AArch64::LD1Twov8b_POST
, AArch64::dsub0
);
4440 } else if (VT
== MVT::v16i8
) {
4441 SelectPostLoad(Node
, 2, AArch64::LD1Twov16b_POST
, AArch64::qsub0
);
4443 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4444 SelectPostLoad(Node
, 2, AArch64::LD1Twov4h_POST
, AArch64::dsub0
);
4446 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4447 SelectPostLoad(Node
, 2, AArch64::LD1Twov8h_POST
, AArch64::qsub0
);
4449 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4450 SelectPostLoad(Node
, 2, AArch64::LD1Twov2s_POST
, AArch64::dsub0
);
4452 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4453 SelectPostLoad(Node
, 2, AArch64::LD1Twov4s_POST
, AArch64::qsub0
);
4455 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4456 SelectPostLoad(Node
, 2, AArch64::LD1Twov1d_POST
, AArch64::dsub0
);
4458 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4459 SelectPostLoad(Node
, 2, AArch64::LD1Twov2d_POST
, AArch64::qsub0
);
4464 case AArch64ISD::LD1x3post
: {
4465 if (VT
== MVT::v8i8
) {
4466 SelectPostLoad(Node
, 3, AArch64::LD1Threev8b_POST
, AArch64::dsub0
);
4468 } else if (VT
== MVT::v16i8
) {
4469 SelectPostLoad(Node
, 3, AArch64::LD1Threev16b_POST
, AArch64::qsub0
);
4471 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4472 SelectPostLoad(Node
, 3, AArch64::LD1Threev4h_POST
, AArch64::dsub0
);
4474 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4475 SelectPostLoad(Node
, 3, AArch64::LD1Threev8h_POST
, AArch64::qsub0
);
4477 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4478 SelectPostLoad(Node
, 3, AArch64::LD1Threev2s_POST
, AArch64::dsub0
);
4480 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4481 SelectPostLoad(Node
, 3, AArch64::LD1Threev4s_POST
, AArch64::qsub0
);
4483 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4484 SelectPostLoad(Node
, 3, AArch64::LD1Threev1d_POST
, AArch64::dsub0
);
4486 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4487 SelectPostLoad(Node
, 3, AArch64::LD1Threev2d_POST
, AArch64::qsub0
);
4492 case AArch64ISD::LD1x4post
: {
4493 if (VT
== MVT::v8i8
) {
4494 SelectPostLoad(Node
, 4, AArch64::LD1Fourv8b_POST
, AArch64::dsub0
);
4496 } else if (VT
== MVT::v16i8
) {
4497 SelectPostLoad(Node
, 4, AArch64::LD1Fourv16b_POST
, AArch64::qsub0
);
4499 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4500 SelectPostLoad(Node
, 4, AArch64::LD1Fourv4h_POST
, AArch64::dsub0
);
4502 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4503 SelectPostLoad(Node
, 4, AArch64::LD1Fourv8h_POST
, AArch64::qsub0
);
4505 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4506 SelectPostLoad(Node
, 4, AArch64::LD1Fourv2s_POST
, AArch64::dsub0
);
4508 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4509 SelectPostLoad(Node
, 4, AArch64::LD1Fourv4s_POST
, AArch64::qsub0
);
4511 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4512 SelectPostLoad(Node
, 4, AArch64::LD1Fourv1d_POST
, AArch64::dsub0
);
4514 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4515 SelectPostLoad(Node
, 4, AArch64::LD1Fourv2d_POST
, AArch64::qsub0
);
4520 case AArch64ISD::LD1DUPpost
: {
4521 if (VT
== MVT::v8i8
) {
4522 SelectPostLoad(Node
, 1, AArch64::LD1Rv8b_POST
, AArch64::dsub0
);
4524 } else if (VT
== MVT::v16i8
) {
4525 SelectPostLoad(Node
, 1, AArch64::LD1Rv16b_POST
, AArch64::qsub0
);
4527 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4528 SelectPostLoad(Node
, 1, AArch64::LD1Rv4h_POST
, AArch64::dsub0
);
4530 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4531 SelectPostLoad(Node
, 1, AArch64::LD1Rv8h_POST
, AArch64::qsub0
);
4533 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4534 SelectPostLoad(Node
, 1, AArch64::LD1Rv2s_POST
, AArch64::dsub0
);
4536 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4537 SelectPostLoad(Node
, 1, AArch64::LD1Rv4s_POST
, AArch64::qsub0
);
4539 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4540 SelectPostLoad(Node
, 1, AArch64::LD1Rv1d_POST
, AArch64::dsub0
);
4542 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4543 SelectPostLoad(Node
, 1, AArch64::LD1Rv2d_POST
, AArch64::qsub0
);
4548 case AArch64ISD::LD2DUPpost
: {
4549 if (VT
== MVT::v8i8
) {
4550 SelectPostLoad(Node
, 2, AArch64::LD2Rv8b_POST
, AArch64::dsub0
);
4552 } else if (VT
== MVT::v16i8
) {
4553 SelectPostLoad(Node
, 2, AArch64::LD2Rv16b_POST
, AArch64::qsub0
);
4555 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4556 SelectPostLoad(Node
, 2, AArch64::LD2Rv4h_POST
, AArch64::dsub0
);
4558 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4559 SelectPostLoad(Node
, 2, AArch64::LD2Rv8h_POST
, AArch64::qsub0
);
4561 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4562 SelectPostLoad(Node
, 2, AArch64::LD2Rv2s_POST
, AArch64::dsub0
);
4564 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4565 SelectPostLoad(Node
, 2, AArch64::LD2Rv4s_POST
, AArch64::qsub0
);
4567 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4568 SelectPostLoad(Node
, 2, AArch64::LD2Rv1d_POST
, AArch64::dsub0
);
4570 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4571 SelectPostLoad(Node
, 2, AArch64::LD2Rv2d_POST
, AArch64::qsub0
);
4576 case AArch64ISD::LD3DUPpost
: {
4577 if (VT
== MVT::v8i8
) {
4578 SelectPostLoad(Node
, 3, AArch64::LD3Rv8b_POST
, AArch64::dsub0
);
4580 } else if (VT
== MVT::v16i8
) {
4581 SelectPostLoad(Node
, 3, AArch64::LD3Rv16b_POST
, AArch64::qsub0
);
4583 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4584 SelectPostLoad(Node
, 3, AArch64::LD3Rv4h_POST
, AArch64::dsub0
);
4586 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4587 SelectPostLoad(Node
, 3, AArch64::LD3Rv8h_POST
, AArch64::qsub0
);
4589 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4590 SelectPostLoad(Node
, 3, AArch64::LD3Rv2s_POST
, AArch64::dsub0
);
4592 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4593 SelectPostLoad(Node
, 3, AArch64::LD3Rv4s_POST
, AArch64::qsub0
);
4595 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4596 SelectPostLoad(Node
, 3, AArch64::LD3Rv1d_POST
, AArch64::dsub0
);
4598 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4599 SelectPostLoad(Node
, 3, AArch64::LD3Rv2d_POST
, AArch64::qsub0
);
4604 case AArch64ISD::LD4DUPpost
: {
4605 if (VT
== MVT::v8i8
) {
4606 SelectPostLoad(Node
, 4, AArch64::LD4Rv8b_POST
, AArch64::dsub0
);
4608 } else if (VT
== MVT::v16i8
) {
4609 SelectPostLoad(Node
, 4, AArch64::LD4Rv16b_POST
, AArch64::qsub0
);
4611 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4612 SelectPostLoad(Node
, 4, AArch64::LD4Rv4h_POST
, AArch64::dsub0
);
4614 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4615 SelectPostLoad(Node
, 4, AArch64::LD4Rv8h_POST
, AArch64::qsub0
);
4617 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4618 SelectPostLoad(Node
, 4, AArch64::LD4Rv2s_POST
, AArch64::dsub0
);
4620 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4621 SelectPostLoad(Node
, 4, AArch64::LD4Rv4s_POST
, AArch64::qsub0
);
4623 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4624 SelectPostLoad(Node
, 4, AArch64::LD4Rv1d_POST
, AArch64::dsub0
);
4626 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4627 SelectPostLoad(Node
, 4, AArch64::LD4Rv2d_POST
, AArch64::qsub0
);
4632 case AArch64ISD::LD1LANEpost
: {
4633 if (VT
== MVT::v16i8
|| VT
== MVT::v8i8
) {
4634 SelectPostLoadLane(Node
, 1, AArch64::LD1i8_POST
);
4636 } else if (VT
== MVT::v8i16
|| VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4637 VT
== MVT::v8f16
|| VT
== MVT::v4bf16
|| VT
== MVT::v8bf16
) {
4638 SelectPostLoadLane(Node
, 1, AArch64::LD1i16_POST
);
4640 } else if (VT
== MVT::v4i32
|| VT
== MVT::v2i32
|| VT
== MVT::v4f32
||
4642 SelectPostLoadLane(Node
, 1, AArch64::LD1i32_POST
);
4644 } else if (VT
== MVT::v2i64
|| VT
== MVT::v1i64
|| VT
== MVT::v2f64
||
4646 SelectPostLoadLane(Node
, 1, AArch64::LD1i64_POST
);
4651 case AArch64ISD::LD2LANEpost
: {
4652 if (VT
== MVT::v16i8
|| VT
== MVT::v8i8
) {
4653 SelectPostLoadLane(Node
, 2, AArch64::LD2i8_POST
);
4655 } else if (VT
== MVT::v8i16
|| VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4656 VT
== MVT::v8f16
|| VT
== MVT::v4bf16
|| VT
== MVT::v8bf16
) {
4657 SelectPostLoadLane(Node
, 2, AArch64::LD2i16_POST
);
4659 } else if (VT
== MVT::v4i32
|| VT
== MVT::v2i32
|| VT
== MVT::v4f32
||
4661 SelectPostLoadLane(Node
, 2, AArch64::LD2i32_POST
);
4663 } else if (VT
== MVT::v2i64
|| VT
== MVT::v1i64
|| VT
== MVT::v2f64
||
4665 SelectPostLoadLane(Node
, 2, AArch64::LD2i64_POST
);
4670 case AArch64ISD::LD3LANEpost
: {
4671 if (VT
== MVT::v16i8
|| VT
== MVT::v8i8
) {
4672 SelectPostLoadLane(Node
, 3, AArch64::LD3i8_POST
);
4674 } else if (VT
== MVT::v8i16
|| VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4675 VT
== MVT::v8f16
|| VT
== MVT::v4bf16
|| VT
== MVT::v8bf16
) {
4676 SelectPostLoadLane(Node
, 3, AArch64::LD3i16_POST
);
4678 } else if (VT
== MVT::v4i32
|| VT
== MVT::v2i32
|| VT
== MVT::v4f32
||
4680 SelectPostLoadLane(Node
, 3, AArch64::LD3i32_POST
);
4682 } else if (VT
== MVT::v2i64
|| VT
== MVT::v1i64
|| VT
== MVT::v2f64
||
4684 SelectPostLoadLane(Node
, 3, AArch64::LD3i64_POST
);
4689 case AArch64ISD::LD4LANEpost
: {
4690 if (VT
== MVT::v16i8
|| VT
== MVT::v8i8
) {
4691 SelectPostLoadLane(Node
, 4, AArch64::LD4i8_POST
);
4693 } else if (VT
== MVT::v8i16
|| VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4694 VT
== MVT::v8f16
|| VT
== MVT::v4bf16
|| VT
== MVT::v8bf16
) {
4695 SelectPostLoadLane(Node
, 4, AArch64::LD4i16_POST
);
4697 } else if (VT
== MVT::v4i32
|| VT
== MVT::v2i32
|| VT
== MVT::v4f32
||
4699 SelectPostLoadLane(Node
, 4, AArch64::LD4i32_POST
);
4701 } else if (VT
== MVT::v2i64
|| VT
== MVT::v1i64
|| VT
== MVT::v2f64
||
4703 SelectPostLoadLane(Node
, 4, AArch64::LD4i64_POST
);
4708 case AArch64ISD::ST2post
: {
4709 VT
= Node
->getOperand(1).getValueType();
4710 if (VT
== MVT::v8i8
) {
4711 SelectPostStore(Node
, 2, AArch64::ST2Twov8b_POST
);
4713 } else if (VT
== MVT::v16i8
) {
4714 SelectPostStore(Node
, 2, AArch64::ST2Twov16b_POST
);
4716 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4717 SelectPostStore(Node
, 2, AArch64::ST2Twov4h_POST
);
4719 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4720 SelectPostStore(Node
, 2, AArch64::ST2Twov8h_POST
);
4722 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4723 SelectPostStore(Node
, 2, AArch64::ST2Twov2s_POST
);
4725 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4726 SelectPostStore(Node
, 2, AArch64::ST2Twov4s_POST
);
4728 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4729 SelectPostStore(Node
, 2, AArch64::ST2Twov2d_POST
);
4731 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4732 SelectPostStore(Node
, 2, AArch64::ST1Twov1d_POST
);
4737 case AArch64ISD::ST3post
: {
4738 VT
= Node
->getOperand(1).getValueType();
4739 if (VT
== MVT::v8i8
) {
4740 SelectPostStore(Node
, 3, AArch64::ST3Threev8b_POST
);
4742 } else if (VT
== MVT::v16i8
) {
4743 SelectPostStore(Node
, 3, AArch64::ST3Threev16b_POST
);
4745 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4746 SelectPostStore(Node
, 3, AArch64::ST3Threev4h_POST
);
4748 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4749 SelectPostStore(Node
, 3, AArch64::ST3Threev8h_POST
);
4751 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4752 SelectPostStore(Node
, 3, AArch64::ST3Threev2s_POST
);
4754 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4755 SelectPostStore(Node
, 3, AArch64::ST3Threev4s_POST
);
4757 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4758 SelectPostStore(Node
, 3, AArch64::ST3Threev2d_POST
);
4760 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4761 SelectPostStore(Node
, 3, AArch64::ST1Threev1d_POST
);
4766 case AArch64ISD::ST4post
: {
4767 VT
= Node
->getOperand(1).getValueType();
4768 if (VT
== MVT::v8i8
) {
4769 SelectPostStore(Node
, 4, AArch64::ST4Fourv8b_POST
);
4771 } else if (VT
== MVT::v16i8
) {
4772 SelectPostStore(Node
, 4, AArch64::ST4Fourv16b_POST
);
4774 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4775 SelectPostStore(Node
, 4, AArch64::ST4Fourv4h_POST
);
4777 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4778 SelectPostStore(Node
, 4, AArch64::ST4Fourv8h_POST
);
4780 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4781 SelectPostStore(Node
, 4, AArch64::ST4Fourv2s_POST
);
4783 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4784 SelectPostStore(Node
, 4, AArch64::ST4Fourv4s_POST
);
4786 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4787 SelectPostStore(Node
, 4, AArch64::ST4Fourv2d_POST
);
4789 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4790 SelectPostStore(Node
, 4, AArch64::ST1Fourv1d_POST
);
4795 case AArch64ISD::ST1x2post
: {
4796 VT
= Node
->getOperand(1).getValueType();
4797 if (VT
== MVT::v8i8
) {
4798 SelectPostStore(Node
, 2, AArch64::ST1Twov8b_POST
);
4800 } else if (VT
== MVT::v16i8
) {
4801 SelectPostStore(Node
, 2, AArch64::ST1Twov16b_POST
);
4803 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4804 SelectPostStore(Node
, 2, AArch64::ST1Twov4h_POST
);
4806 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4807 SelectPostStore(Node
, 2, AArch64::ST1Twov8h_POST
);
4809 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4810 SelectPostStore(Node
, 2, AArch64::ST1Twov2s_POST
);
4812 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4813 SelectPostStore(Node
, 2, AArch64::ST1Twov4s_POST
);
4815 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4816 SelectPostStore(Node
, 2, AArch64::ST1Twov1d_POST
);
4818 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4819 SelectPostStore(Node
, 2, AArch64::ST1Twov2d_POST
);
4824 case AArch64ISD::ST1x3post
: {
4825 VT
= Node
->getOperand(1).getValueType();
4826 if (VT
== MVT::v8i8
) {
4827 SelectPostStore(Node
, 3, AArch64::ST1Threev8b_POST
);
4829 } else if (VT
== MVT::v16i8
) {
4830 SelectPostStore(Node
, 3, AArch64::ST1Threev16b_POST
);
4832 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4833 SelectPostStore(Node
, 3, AArch64::ST1Threev4h_POST
);
4835 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4836 SelectPostStore(Node
, 3, AArch64::ST1Threev8h_POST
);
4838 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4839 SelectPostStore(Node
, 3, AArch64::ST1Threev2s_POST
);
4841 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4842 SelectPostStore(Node
, 3, AArch64::ST1Threev4s_POST
);
4844 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4845 SelectPostStore(Node
, 3, AArch64::ST1Threev1d_POST
);
4847 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4848 SelectPostStore(Node
, 3, AArch64::ST1Threev2d_POST
);
4853 case AArch64ISD::ST1x4post
: {
4854 VT
= Node
->getOperand(1).getValueType();
4855 if (VT
== MVT::v8i8
) {
4856 SelectPostStore(Node
, 4, AArch64::ST1Fourv8b_POST
);
4858 } else if (VT
== MVT::v16i8
) {
4859 SelectPostStore(Node
, 4, AArch64::ST1Fourv16b_POST
);
4861 } else if (VT
== MVT::v4i16
|| VT
== MVT::v4f16
|| VT
== MVT::v4bf16
) {
4862 SelectPostStore(Node
, 4, AArch64::ST1Fourv4h_POST
);
4864 } else if (VT
== MVT::v8i16
|| VT
== MVT::v8f16
|| VT
== MVT::v8bf16
) {
4865 SelectPostStore(Node
, 4, AArch64::ST1Fourv8h_POST
);
4867 } else if (VT
== MVT::v2i32
|| VT
== MVT::v2f32
) {
4868 SelectPostStore(Node
, 4, AArch64::ST1Fourv2s_POST
);
4870 } else if (VT
== MVT::v4i32
|| VT
== MVT::v4f32
) {
4871 SelectPostStore(Node
, 4, AArch64::ST1Fourv4s_POST
);
4873 } else if (VT
== MVT::v1i64
|| VT
== MVT::v1f64
) {
4874 SelectPostStore(Node
, 4, AArch64::ST1Fourv1d_POST
);
4876 } else if (VT
== MVT::v2i64
|| VT
== MVT::v2f64
) {
4877 SelectPostStore(Node
, 4, AArch64::ST1Fourv2d_POST
);
4882 case AArch64ISD::ST2LANEpost
: {
4883 VT
= Node
->getOperand(1).getValueType();
4884 if (VT
== MVT::v16i8
|| VT
== MVT::v8i8
) {
4885 SelectPostStoreLane(Node
, 2, AArch64::ST2i8_POST
);
4887 } else if (VT
== MVT::v8i16
|| VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4888 VT
== MVT::v8f16
|| VT
== MVT::v4bf16
|| VT
== MVT::v8bf16
) {
4889 SelectPostStoreLane(Node
, 2, AArch64::ST2i16_POST
);
4891 } else if (VT
== MVT::v4i32
|| VT
== MVT::v2i32
|| VT
== MVT::v4f32
||
4893 SelectPostStoreLane(Node
, 2, AArch64::ST2i32_POST
);
4895 } else if (VT
== MVT::v2i64
|| VT
== MVT::v1i64
|| VT
== MVT::v2f64
||
4897 SelectPostStoreLane(Node
, 2, AArch64::ST2i64_POST
);
4902 case AArch64ISD::ST3LANEpost
: {
4903 VT
= Node
->getOperand(1).getValueType();
4904 if (VT
== MVT::v16i8
|| VT
== MVT::v8i8
) {
4905 SelectPostStoreLane(Node
, 3, AArch64::ST3i8_POST
);
4907 } else if (VT
== MVT::v8i16
|| VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4908 VT
== MVT::v8f16
|| VT
== MVT::v4bf16
|| VT
== MVT::v8bf16
) {
4909 SelectPostStoreLane(Node
, 3, AArch64::ST3i16_POST
);
4911 } else if (VT
== MVT::v4i32
|| VT
== MVT::v2i32
|| VT
== MVT::v4f32
||
4913 SelectPostStoreLane(Node
, 3, AArch64::ST3i32_POST
);
4915 } else if (VT
== MVT::v2i64
|| VT
== MVT::v1i64
|| VT
== MVT::v2f64
||
4917 SelectPostStoreLane(Node
, 3, AArch64::ST3i64_POST
);
4922 case AArch64ISD::ST4LANEpost
: {
4923 VT
= Node
->getOperand(1).getValueType();
4924 if (VT
== MVT::v16i8
|| VT
== MVT::v8i8
) {
4925 SelectPostStoreLane(Node
, 4, AArch64::ST4i8_POST
);
4927 } else if (VT
== MVT::v8i16
|| VT
== MVT::v4i16
|| VT
== MVT::v4f16
||
4928 VT
== MVT::v8f16
|| VT
== MVT::v4bf16
|| VT
== MVT::v8bf16
) {
4929 SelectPostStoreLane(Node
, 4, AArch64::ST4i16_POST
);
4931 } else if (VT
== MVT::v4i32
|| VT
== MVT::v2i32
|| VT
== MVT::v4f32
||
4933 SelectPostStoreLane(Node
, 4, AArch64::ST4i32_POST
);
4935 } else if (VT
== MVT::v2i64
|| VT
== MVT::v1i64
|| VT
== MVT::v2f64
||
4937 SelectPostStoreLane(Node
, 4, AArch64::ST4i64_POST
);
4942 case AArch64ISD::SVE_LD2_MERGE_ZERO
: {
4943 if (VT
== MVT::nxv16i8
) {
4944 SelectPredicatedLoad(Node
, 2, 0, AArch64::LD2B_IMM
, AArch64::LD2B
);
4946 } else if (VT
== MVT::nxv8i16
|| VT
== MVT::nxv8f16
||
4947 VT
== MVT::nxv8bf16
) {
4948 SelectPredicatedLoad(Node
, 2, 1, AArch64::LD2H_IMM
, AArch64::LD2H
);
4950 } else if (VT
== MVT::nxv4i32
|| VT
== MVT::nxv4f32
) {
4951 SelectPredicatedLoad(Node
, 2, 2, AArch64::LD2W_IMM
, AArch64::LD2W
);
4953 } else if (VT
== MVT::nxv2i64
|| VT
== MVT::nxv2f64
) {
4954 SelectPredicatedLoad(Node
, 2, 3, AArch64::LD2D_IMM
, AArch64::LD2D
);
4959 case AArch64ISD::SVE_LD3_MERGE_ZERO
: {
4960 if (VT
== MVT::nxv16i8
) {
4961 SelectPredicatedLoad(Node
, 3, 0, AArch64::LD3B_IMM
, AArch64::LD3B
);
4963 } else if (VT
== MVT::nxv8i16
|| VT
== MVT::nxv8f16
||
4964 VT
== MVT::nxv8bf16
) {
4965 SelectPredicatedLoad(Node
, 3, 1, AArch64::LD3H_IMM
, AArch64::LD3H
);
4967 } else if (VT
== MVT::nxv4i32
|| VT
== MVT::nxv4f32
) {
4968 SelectPredicatedLoad(Node
, 3, 2, AArch64::LD3W_IMM
, AArch64::LD3W
);
4970 } else if (VT
== MVT::nxv2i64
|| VT
== MVT::nxv2f64
) {
4971 SelectPredicatedLoad(Node
, 3, 3, AArch64::LD3D_IMM
, AArch64::LD3D
);
4976 case AArch64ISD::SVE_LD4_MERGE_ZERO
: {
4977 if (VT
== MVT::nxv16i8
) {
4978 SelectPredicatedLoad(Node
, 4, 0, AArch64::LD4B_IMM
, AArch64::LD4B
);
4980 } else if (VT
== MVT::nxv8i16
|| VT
== MVT::nxv8f16
||
4981 VT
== MVT::nxv8bf16
) {
4982 SelectPredicatedLoad(Node
, 4, 1, AArch64::LD4H_IMM
, AArch64::LD4H
);
4984 } else if (VT
== MVT::nxv4i32
|| VT
== MVT::nxv4f32
) {
4985 SelectPredicatedLoad(Node
, 4, 2, AArch64::LD4W_IMM
, AArch64::LD4W
);
4987 } else if (VT
== MVT::nxv2i64
|| VT
== MVT::nxv2f64
) {
4988 SelectPredicatedLoad(Node
, 4, 3, AArch64::LD4D_IMM
, AArch64::LD4D
);
  }

  // Select the default instruction
  SelectCode(Node);
}

/// createAArch64ISelDag - This pass converts a legalized DAG into a
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}
/// When \p PredVT is a scalable vector predicate in the form
/// MVT::nx<M>xi1, it builds the corresponding scalable vector of
/// integers MVT::nx<M>xi<bits> s.t. M x bits = 128. When targeting
/// structured vectors (NumVec > 1), the output data type is
/// MVT::nx<M*NumVec>xi<bits> s.t. M x bits = 128. If the input
/// PredVT is not in the form MVT::nx<M>xi1, it returns an invalid
/// EVT.
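///
/// For example (illustrative): nxv16i1 maps to nxv16i8 and nxv4i1 maps to
/// nxv4i32; with NumVec = 2, nxv4i1 maps to nxv8i32.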
static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT,
                                                unsigned NumVec) {
  assert(NumVec > 0 && NumVec < 5 && "Invalid number of vectors.");
  if (!PredVT.isScalableVector() || PredVT.getVectorElementType() != MVT::i1)
    return EVT();

  if (PredVT != MVT::nxv16i1 && PredVT != MVT::nxv8i1 &&
      PredVT != MVT::nxv4i1 && PredVT != MVT::nxv2i1)
    return EVT();

  ElementCount EC = PredVT.getVectorElementCount();
  EVT ScalarVT =
      EVT::getIntegerVT(Ctx, AArch64::SVEBitsPerBlock / EC.getKnownMinValue());
  EVT MemVT = EVT::getVectorVT(Ctx, ScalarVT, EC * NumVec);

  return MemVT;
}
/// Return the EVT of the data associated to a memory operation in \p
/// Root. If such EVT cannot be retrieved, it returns an invalid EVT.
static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
  if (isa<MemSDNode>(Root))
    return cast<MemSDNode>(Root)->getMemoryVT();

  if (isa<MemIntrinsicSDNode>(Root))
    return cast<MemIntrinsicSDNode>(Root)->getMemoryVT();

  const unsigned Opcode = Root->getOpcode();
  // For custom ISD nodes, we have to look at them individually to extract the
  // type of the data moved to/from memory.
  switch (Opcode) {
  case AArch64ISD::LD1_MERGE_ZERO:
  case AArch64ISD::LD1S_MERGE_ZERO:
  case AArch64ISD::LDNF1_MERGE_ZERO:
  case AArch64ISD::LDNF1S_MERGE_ZERO:
    return cast<VTSDNode>(Root->getOperand(3))->getVT();
  case AArch64ISD::ST1_PRED:
    return cast<VTSDNode>(Root->getOperand(4))->getVT();
  case AArch64ISD::SVE_LD2_MERGE_ZERO:
    return getPackedVectorTypeFromPredicateType(
        Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/2);
  case AArch64ISD::SVE_LD3_MERGE_ZERO:
    return getPackedVectorTypeFromPredicateType(
        Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/3);
  case AArch64ISD::SVE_LD4_MERGE_ZERO:
    return getPackedVectorTypeFromPredicateType(
        Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/4);
  default:
    break;
  }

  if (Opcode != ISD::INTRINSIC_VOID)
    return EVT();

  const unsigned IntNo =
      cast<ConstantSDNode>(Root->getOperand(1))->getZExtValue();
  if (IntNo != Intrinsic::aarch64_sve_prf)
    return EVT();

  // We are using an SVE prefetch intrinsic. Type must be inferred
  // from the width of the predicate.
  return getPackedVectorTypeFromPredicateType(
      Ctx, Root->getOperand(2)->getValueType(0), /*NumVec=*/1);
}
/// SelectAddrModeIndexedSVE - Attempt selection of the addressing mode:
/// Base + OffImm * sizeof(MemVT) for Min <= OffImm <= Max
/// where Root is the memory access using N for its address.
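///
/// For example (illustrative): for a load of MemVT nxv4i32, an address of
/// the form "Base + vscale * 32" yields OffImm = 2, since nxv4i32 occupies
/// 16 bytes per vscale.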
template <int64_t Min, int64_t Max>
bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
                                                   SDValue &Base,
                                                   SDValue &OffImm) {
  const EVT MemVT = getMemVTFromNode(*(CurDAG->getContext()), Root);
  const DataLayout &DL = CurDAG->getDataLayout();

  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
    return true;
  }

  if (MemVT == EVT())
    return false;

  if (N.getOpcode() != ISD::ADD)
    return false;

  SDValue VScale = N.getOperand(1);
  if (VScale.getOpcode() != ISD::VSCALE)
    return false;

  TypeSize TS = MemVT.getSizeInBits();
  int64_t MemWidthBytes = static_cast<int64_t>(TS.getKnownMinSize()) / 8;
  int64_t MulImm = cast<ConstantSDNode>(VScale.getOperand(0))->getSExtValue();

  if ((MulImm % MemWidthBytes) != 0)
    return false;

  int64_t Offset = MulImm / MemWidthBytes;
  if (Offset < Min || Offset > Max)
    return false;

  Base = N.getOperand(0);
  if (Base.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(Base)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
  }

  OffImm = CurDAG->getTargetConstant(Offset, SDLoc(N), MVT::i64);
  return true;
}
/// Select register plus register addressing mode for SVE, with scaled
/// offset.
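///
/// For example (illustrative): with Scale = 2, an address "X + (Y << 2)" is
/// selected as Base = X and Offset = Y, matching the [Xn, Xm, LSL #2]
/// addressing form.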
bool AArch64DAGToDAGISel::SelectSVERegRegAddrMode(SDValue N, unsigned Scale,
                                                  SDValue &Base,
                                                  SDValue &Offset) {
  if (N.getOpcode() != ISD::ADD)
    return false;

  // Process an ADD node.
  const SDValue LHS = N.getOperand(0);
  const SDValue RHS = N.getOperand(1);

  // 8 bit data does not come with the SHL node, so it is treated
  // separately.
  if (Scale == 0) {
    Base = LHS;
    Offset = RHS;
    return true;
  }

  if (auto C = dyn_cast<ConstantSDNode>(RHS)) {
    int64_t ImmOff = C->getSExtValue();
    unsigned Size = 1 << Scale;

    // To use the reg+reg addressing mode, the immediate must be a multiple of
    // the vector element's byte size.
    if (ImmOff % Size)
      return false;

    SDLoc DL(N);
    Base = LHS;
    Offset = CurDAG->getTargetConstant(ImmOff >> Scale, DL, MVT::i64);
    SDValue Ops[] = {Offset};
    SDNode *MI = CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    Offset = SDValue(MI, 0);
    return true;
  }

  // Check if the RHS is a shift node with a constant.
  if (RHS.getOpcode() != ISD::SHL)
    return false;

  const SDValue ShiftRHS = RHS.getOperand(1);
  if (auto *C = dyn_cast<ConstantSDNode>(ShiftRHS))
    if (C->getZExtValue() == Scale) {
      Base = LHS;
      Offset = RHS.getOperand(0);
      return true;
    }

  return false;
}
bool AArch64DAGToDAGISel::SelectAllActivePredicate(SDValue N) {
  const AArch64TargetLowering *TLI =
      static_cast<const AArch64TargetLowering *>(getTargetLowering());

  return TLI->isAllActivePredicate(*CurDAG, N);
}