llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
1 //===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the AArch64 target.
11 //===----------------------------------------------------------------------===//
13 #include "AArch64MachineFunctionInfo.h"
14 #include "AArch64TargetMachine.h"
15 #include "MCTargetDesc/AArch64AddressingModes.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/CodeGen/SelectionDAGISel.h"
18 #include "llvm/IR/Function.h" // To access function attributes.
19 #include "llvm/IR/GlobalValue.h"
20 #include "llvm/IR/Intrinsics.h"
21 #include "llvm/IR/IntrinsicsAArch64.h"
22 #include "llvm/Support/Debug.h"
23 #include "llvm/Support/ErrorHandling.h"
24 #include "llvm/Support/KnownBits.h"
25 #include "llvm/Support/MathExtras.h"
26 #include "llvm/Support/raw_ostream.h"
28 using namespace llvm;
30 #define DEBUG_TYPE "aarch64-isel"
32 //===--------------------------------------------------------------------===//
33 /// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
34 /// instructions for SelectionDAG operations.
35 ///
36 namespace {
38 class AArch64DAGToDAGISel : public SelectionDAGISel {
40 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
41 /// make the right decision when generating code for different targets.
42 const AArch64Subtarget *Subtarget;
44 public:
45 explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
46 CodeGenOpt::Level OptLevel)
47 : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr) {}
49 StringRef getPassName() const override {
50 return "AArch64 Instruction Selection";
53 bool runOnMachineFunction(MachineFunction &MF) override {
54 Subtarget = &MF.getSubtarget<AArch64Subtarget>();
55 return SelectionDAGISel::runOnMachineFunction(MF);
58 void Select(SDNode *Node) override;
60 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
61 /// inline asm expressions.
62 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
63 unsigned ConstraintID,
64 std::vector<SDValue> &OutOps) override;
66 template <signed Low, signed High, signed Scale>
67 bool SelectRDVLImm(SDValue N, SDValue &Imm);
69 bool tryMLAV64LaneV128(SDNode *N);
70 bool tryMULLV64LaneV128(unsigned IntNo, SDNode *N);
71 bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
72 bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
73 bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
74 bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
75 return SelectShiftedRegister(N, false, Reg, Shift);
77 bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
78 return SelectShiftedRegister(N, true, Reg, Shift);
80 bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
81 return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
83 bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
84 return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
86 bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
87 return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
89 bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
90 return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
92 bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
93 return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
95 bool SelectAddrModeIndexedS9S128(SDValue N, SDValue &Base, SDValue &OffImm) {
96 return SelectAddrModeIndexedBitWidth(N, true, 9, 16, Base, OffImm);
98 bool SelectAddrModeIndexedU6S128(SDValue N, SDValue &Base, SDValue &OffImm) {
99 return SelectAddrModeIndexedBitWidth(N, false, 6, 16, Base, OffImm);
101 bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
102 return SelectAddrModeIndexed(N, 1, Base, OffImm);
104 bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
105 return SelectAddrModeIndexed(N, 2, Base, OffImm);
107 bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
108 return SelectAddrModeIndexed(N, 4, Base, OffImm);
110 bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
111 return SelectAddrModeIndexed(N, 8, Base, OffImm);
113 bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
114 return SelectAddrModeIndexed(N, 16, Base, OffImm);
116 bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
117 return SelectAddrModeUnscaled(N, 1, Base, OffImm);
119 bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
120 return SelectAddrModeUnscaled(N, 2, Base, OffImm);
122 bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
123 return SelectAddrModeUnscaled(N, 4, Base, OffImm);
125 bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
126 return SelectAddrModeUnscaled(N, 8, Base, OffImm);
128 bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
129 return SelectAddrModeUnscaled(N, 16, Base, OffImm);
131 template <unsigned Size, unsigned Max>
132 bool SelectAddrModeIndexedUImm(SDValue N, SDValue &Base, SDValue &OffImm) {
133 // Test if there is an appropriate addressing mode and check if the
134 // immediate fits.
135 bool Found = SelectAddrModeIndexed(N, Size, Base, OffImm);
136 if (Found) {
137 if (auto *CI = dyn_cast<ConstantSDNode>(OffImm)) {
138 int64_t C = CI->getSExtValue();
139 if (C <= Max)
140 return true;
144 // Otherwise, base only, materialize address in register.
145 Base = N;
146 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
147 return true;
150 template<int Width>
151 bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
152 SDValue &SignExtend, SDValue &DoShift) {
153 return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
156 template<int Width>
157 bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
158 SDValue &SignExtend, SDValue &DoShift) {
159 return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
162 bool SelectDupZeroOrUndef(SDValue N) {
163 switch(N->getOpcode()) {
164 case ISD::UNDEF:
165 return true;
166 case AArch64ISD::DUP:
167 case ISD::SPLAT_VECTOR: {
168 auto Opnd0 = N->getOperand(0);
169 if (auto CN = dyn_cast<ConstantSDNode>(Opnd0))
170 if (CN->isZero())
171 return true;
172 if (auto CN = dyn_cast<ConstantFPSDNode>(Opnd0))
173 if (CN->isZero())
174 return true;
175 break;
177 default:
178 break;
181 return false;
184 bool SelectDupZero(SDValue N) {
185 switch(N->getOpcode()) {
186 case AArch64ISD::DUP:
187 case ISD::SPLAT_VECTOR: {
188 auto Opnd0 = N->getOperand(0);
189 if (auto CN = dyn_cast<ConstantSDNode>(Opnd0))
190 if (CN->isZero())
191 return true;
192 if (auto CN = dyn_cast<ConstantFPSDNode>(Opnd0))
193 if (CN->isZero())
194 return true;
195 break;
199 return false;
202 template<MVT::SimpleValueType VT>
203 bool SelectSVEAddSubImm(SDValue N, SDValue &Imm, SDValue &Shift) {
204 return SelectSVEAddSubImm(N, VT, Imm, Shift);
207 template <MVT::SimpleValueType VT>
208 bool SelectSVECpyDupImm(SDValue N, SDValue &Imm, SDValue &Shift) {
209 return SelectSVECpyDupImm(N, VT, Imm, Shift);
212 template <MVT::SimpleValueType VT, bool Invert = false>
213 bool SelectSVELogicalImm(SDValue N, SDValue &Imm) {
214 return SelectSVELogicalImm(N, VT, Imm, Invert);
217 template <MVT::SimpleValueType VT>
218 bool SelectSVEArithImm(SDValue N, SDValue &Imm) {
219 return SelectSVEArithImm(N, VT, Imm);
222 template <unsigned Low, unsigned High, bool AllowSaturation = false>
223 bool SelectSVEShiftImm(SDValue N, SDValue &Imm) {
224 return SelectSVEShiftImm(N, Low, High, AllowSaturation, Imm);
227 // Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
228 template<signed Min, signed Max, signed Scale, bool Shift>
229 bool SelectCntImm(SDValue N, SDValue &Imm) {
230 if (!isa<ConstantSDNode>(N))
231 return false;
233 int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
234 if (Shift)
235 MulImm = 1LL << MulImm;
237 if ((MulImm % std::abs(Scale)) != 0)
238 return false;
240 MulImm /= Scale;
241 if ((MulImm >= Min) && (MulImm <= Max)) {
242 Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
243 return true;
246 return false;
249 template <signed Max, signed Scale>
250 bool SelectEXTImm(SDValue N, SDValue &Imm) {
251 if (!isa<ConstantSDNode>(N))
252 return false;
254 int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
256 if (MulImm >= 0 && MulImm <= Max) {
257 MulImm *= Scale;
258 Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
259 return true;
262 return false;
265 /// Form sequences of consecutive 64/128-bit registers for use in NEON
266 /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
267 /// between 1 and 4 elements. If it contains a single element, that element is
268 /// returned unchanged; otherwise a REG_SEQUENCE value is returned.
269 SDValue createDTuple(ArrayRef<SDValue> Vecs);
270 SDValue createQTuple(ArrayRef<SDValue> Vecs);
271 // Form a sequence of SVE registers for instructions using a list of vectors,
272 // e.g. structured loads and stores (ldN, stN).
273 SDValue createZTuple(ArrayRef<SDValue> Vecs);
275 /// Generic helper for the createDTuple/createQTuple
276 /// functions. Those should almost always be called instead.
277 SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
278 const unsigned SubRegs[]);
280 void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
282 bool tryIndexedLoad(SDNode *N);
284 bool trySelectStackSlotTagP(SDNode *N);
285 void SelectTagP(SDNode *N);
287 void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
288 unsigned SubRegIdx);
289 void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
290 unsigned SubRegIdx);
291 void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
292 void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
293 void SelectPredicatedLoad(SDNode *N, unsigned NumVecs, unsigned Scale,
294 unsigned Opc_rr, unsigned Opc_ri,
295 bool IsIntr = false);
297 bool SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base, SDValue &OffImm);
298 /// SVE Reg+Imm addressing mode.
299 template <int64_t Min, int64_t Max>
300 bool SelectAddrModeIndexedSVE(SDNode *Root, SDValue N, SDValue &Base,
301 SDValue &OffImm);
302 /// SVE Reg+Reg address mode.
303 template <unsigned Scale>
304 bool SelectSVERegRegAddrMode(SDValue N, SDValue &Base, SDValue &Offset) {
305 return SelectSVERegRegAddrMode(N, Scale, Base, Offset);
308 void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
309 void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
310 void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
311 void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
312 void SelectPredicatedStore(SDNode *N, unsigned NumVecs, unsigned Scale,
313 unsigned Opc_rr, unsigned Opc_ri);
314 std::tuple<unsigned, SDValue, SDValue>
315 findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr, unsigned Opc_ri,
316 const SDValue &OldBase, const SDValue &OldOffset,
317 unsigned Scale);
319 bool tryBitfieldExtractOp(SDNode *N);
320 bool tryBitfieldExtractOpFromSExt(SDNode *N);
321 bool tryBitfieldInsertOp(SDNode *N);
322 bool tryBitfieldInsertInZeroOp(SDNode *N);
323 bool tryShiftAmountMod(SDNode *N);
324 bool tryHighFPExt(SDNode *N);
326 bool tryReadRegister(SDNode *N);
327 bool tryWriteRegister(SDNode *N);
329 // Include the pieces autogenerated from the target description.
330 #include "AArch64GenDAGISel.inc"
332 private:
333 bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
334 SDValue &Shift);
335 bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
336 SDValue &OffImm) {
337 return SelectAddrModeIndexedBitWidth(N, true, 7, Size, Base, OffImm);
339 bool SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm, unsigned BW,
340 unsigned Size, SDValue &Base,
341 SDValue &OffImm);
342 bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
343 SDValue &OffImm);
344 bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
345 SDValue &OffImm);
346 bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
347 SDValue &Offset, SDValue &SignExtend,
348 SDValue &DoShift);
349 bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
350 SDValue &Offset, SDValue &SignExtend,
351 SDValue &DoShift);
352 bool isWorthFolding(SDValue V) const;
353 bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
354 SDValue &Offset, SDValue &SignExtend);
356 template<unsigned RegWidth>
357 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
358 return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
361 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
363 bool SelectCMP_SWAP(SDNode *N);
365 bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
366 bool SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
367 bool SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm, bool Invert);
369 bool SelectSVESignedArithImm(SDValue N, SDValue &Imm);
370 bool SelectSVEShiftImm(SDValue N, uint64_t Low, uint64_t High,
371 bool AllowSaturation, SDValue &Imm);
373 bool SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm);
374 bool SelectSVERegRegAddrMode(SDValue N, unsigned Scale, SDValue &Base,
375 SDValue &Offset);
377 bool SelectAllActivePredicate(SDValue N);
379 } // end anonymous namespace
381 /// isIntImmediate - This method tests to see if the node is a constant
382 /// operand. If so Imm will receive the 32-bit value.
383 static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
384 if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
385 Imm = C->getZExtValue();
386 return true;
388 return false;
391 // isIntImmediate - This method tests to see if the node is a constant operand.
392 // If so, Imm will receive the value.
393 static bool isIntImmediate(SDValue N, uint64_t &Imm) {
394 return isIntImmediate(N.getNode(), Imm);
397 // isOpcWithIntImmediate - This method tests to see if the node is a specific
398 // opcode and that it has an immediate integer right operand.
399 // If so, Imm will receive the 32-bit value.
400 static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
401 uint64_t &Imm) {
402 return N->getOpcode() == Opc &&
403 isIntImmediate(N->getOperand(1).getNode(), Imm);
406 bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
407 const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
408 switch(ConstraintID) {
409 default:
410 llvm_unreachable("Unexpected asm memory constraint");
411 case InlineAsm::Constraint_m:
412 case InlineAsm::Constraint_o:
413 case InlineAsm::Constraint_Q:
414 // We need to make sure that this one operand does not end up in XZR, thus
415 // require the address to be in a PointerRegClass register.
416 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
417 const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF);
418 SDLoc dl(Op);
419 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i64);
420 SDValue NewOp =
421 SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
422 dl, Op.getValueType(),
423 Op, RC), 0);
424 OutOps.push_back(NewOp);
425 return false;
427 return true;
430 /// SelectArithImmed - Select an immediate value that can be represented as
431 /// a 12-bit value shifted left by either 0 or 12. If so, return true with
432 /// Val set to the 12-bit value and Shift set to the shifter operand.
433 bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
434 SDValue &Shift) {
435 // This function is called from the addsub_shifted_imm ComplexPattern,
436 // which lists [imm] as the list of opcodes it's interested in; however,
437 // we still need to check whether the operand is actually an immediate
438 // here because the ComplexPattern opcode list is only used in
439 // root-level opcode matching.
440 if (!isa<ConstantSDNode>(N.getNode()))
441 return false;
443 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
444 unsigned ShiftAmt;
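// Illustrative examples of the two encodable forms: 0x123 is selected as-is
// with LSL #0, while 0x123000 has its low 12 bits clear and fits in 24 bits,
// so it is selected as 0x123 with LSL #12; a value such as 0x1234567 matches
// neither form and is rejected.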
446 if (Immed >> 12 == 0) {
447 ShiftAmt = 0;
448 } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
449 ShiftAmt = 12;
450 Immed = Immed >> 12;
451 } else
452 return false;
454 unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
455 SDLoc dl(N);
456 Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
457 Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
458 return true;
461 /// SelectNegArithImmed - As above, but negates the value before trying to
462 /// select it.
463 bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
464 SDValue &Shift) {
465 // This function is called from the addsub_shifted_imm ComplexPattern,
466 // which lists [imm] as the list of opcodes it's interested in; however,
467 // we still need to check whether the operand is actually an immediate
468 // here because the ComplexPattern opcode list is only used in
469 // root-level opcode matching.
470 if (!isa<ConstantSDNode>(N.getNode()))
471 return false;
473 // The immediate operand must be a 24-bit zero-extended immediate.
474 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
476 // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
477 // have the opposite effect on the C flag, so this pattern mustn't match under
478 // those circumstances.
479 if (Immed == 0)
480 return false;
482 if (N.getValueType() == MVT::i32)
483 Immed = ~((uint32_t)Immed) + 1;
484 else
485 Immed = ~Immed + 1ULL;
486 if (Immed & 0xFFFFFFFFFF000000ULL)
487 return false;
489 Immed &= 0xFFFFFFULL;
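// Worked example: for an i32 add of -4096 the zero-extended constant is
// 0xfffff000; negating modulo 2^32 gives 0x1000, which SelectArithImmed then
// encodes as #1, LSL #12 (letting an add of a negative constant be selected
// as a subtract, and vice versa).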
490 return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
491 Shift);
494 /// getShiftTypeForNode - Translate a shift node to the corresponding
495 /// ShiftType value.
496 static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
497 switch (N.getOpcode()) {
498 default:
499 return AArch64_AM::InvalidShiftExtend;
500 case ISD::SHL:
501 return AArch64_AM::LSL;
502 case ISD::SRL:
503 return AArch64_AM::LSR;
504 case ISD::SRA:
505 return AArch64_AM::ASR;
506 case ISD::ROTR:
507 return AArch64_AM::ROR;
511 /// Determine whether it is worth it to fold SHL into the addressing
512 /// mode.
513 static bool isWorthFoldingSHL(SDValue V) {
514 assert(V.getOpcode() == ISD::SHL && "invalid opcode");
515 // It is worth folding logical shift of up to three places.
516 auto *CSD = dyn_cast<ConstantSDNode>(V.getOperand(1));
517 if (!CSD)
518 return false;
519 unsigned ShiftVal = CSD->getZExtValue();
520 if (ShiftVal > 3)
521 return false;
523 // Check if this particular node is reused in any non-memory related
524 // operation. If yes, do not try to fold this node into the address
525 // computation, since the computation will be kept.
526 const SDNode *Node = V.getNode();
527 for (SDNode *UI : Node->uses())
528 if (!isa<MemSDNode>(*UI))
529 for (SDNode *UII : UI->uses())
530 if (!isa<MemSDNode>(*UII))
531 return false;
532 return true;
535 /// Determine whether it is worth folding V into an extended register.
536 bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
537 // Trivial if we are optimizing for code size or if there is only
538 // one use of the value.
539 if (CurDAG->shouldOptForSize() || V.hasOneUse())
540 return true;
541 // If a subtarget has a fastpath LSL we can fold a logical shift into
542 // the addressing mode and save a cycle.
543 if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::SHL &&
544 isWorthFoldingSHL(V))
545 return true;
546 if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::ADD) {
547 const SDValue LHS = V.getOperand(0);
548 const SDValue RHS = V.getOperand(1);
549 if (LHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(LHS))
550 return true;
551 if (RHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(RHS))
552 return true;
555 // It hurts otherwise, since the value will be reused.
556 return false;
559 /// SelectShiftedRegister - Select a "shifted register" operand. If the value
560 /// is not shifted, set the Shift operand to the default of "LSL 0". The logical
561 /// instructions allow the shifted register to be rotated, but the arithmetic
562 /// instructions do not. The AllowROR parameter specifies whether ROR is
563 /// supported.
564 bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
565 SDValue &Reg, SDValue &Shift) {
566 AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
567 if (ShType == AArch64_AM::InvalidShiftExtend)
568 return false;
569 if (!AllowROR && ShType == AArch64_AM::ROR)
570 return false;
572 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
573 unsigned BitSize = N.getValueSizeInBits();
574 unsigned Val = RHS->getZExtValue() & (BitSize - 1);
575 unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);
577 Reg = N.getOperand(0);
578 Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
579 return isWorthFolding(N);
582 return false;
585 /// getExtendTypeForNode - Translate an extend node to the corresponding
586 /// ExtendType value.
587 static AArch64_AM::ShiftExtendType
588 getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
589 if (N.getOpcode() == ISD::SIGN_EXTEND ||
590 N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
591 EVT SrcVT;
592 if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
593 SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
594 else
595 SrcVT = N.getOperand(0).getValueType();
597 if (!IsLoadStore && SrcVT == MVT::i8)
598 return AArch64_AM::SXTB;
599 else if (!IsLoadStore && SrcVT == MVT::i16)
600 return AArch64_AM::SXTH;
601 else if (SrcVT == MVT::i32)
602 return AArch64_AM::SXTW;
603 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
605 return AArch64_AM::InvalidShiftExtend;
606 } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
607 N.getOpcode() == ISD::ANY_EXTEND) {
608 EVT SrcVT = N.getOperand(0).getValueType();
609 if (!IsLoadStore && SrcVT == MVT::i8)
610 return AArch64_AM::UXTB;
611 else if (!IsLoadStore && SrcVT == MVT::i16)
612 return AArch64_AM::UXTH;
613 else if (SrcVT == MVT::i32)
614 return AArch64_AM::UXTW;
615 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
617 return AArch64_AM::InvalidShiftExtend;
618 } else if (N.getOpcode() == ISD::AND) {
619 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
620 if (!CSD)
621 return AArch64_AM::InvalidShiftExtend;
622 uint64_t AndMask = CSD->getZExtValue();
624 switch (AndMask) {
625 default:
626 return AArch64_AM::InvalidShiftExtend;
627 case 0xFF:
628 return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
629 case 0xFFFF:
630 return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
631 case 0xFFFFFFFF:
632 return AArch64_AM::UXTW;
636 return AArch64_AM::InvalidShiftExtend;
639 // Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
640 static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
641 if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
642 DL->getOpcode() != AArch64ISD::DUPLANE32)
643 return false;
645 SDValue SV = DL->getOperand(0);
646 if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
647 return false;
649 SDValue EV = SV.getOperand(1);
650 if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
651 return false;
653 ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
654 ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
655 LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
656 LaneOp = EV.getOperand(0);
658 return true;
661 // Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is a
662 // high lane extract.
663 static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
664 SDValue &LaneOp, int &LaneIdx) {
666 if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
667 std::swap(Op0, Op1);
668 if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
669 return false;
671 StdOp = Op1;
672 return true;
675 /// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
676 /// is a lane in the upper half of a 128-bit vector. Recognize and select this
677 /// so that we don't emit unnecessary lane extracts.
678 bool AArch64DAGToDAGISel::tryMLAV64LaneV128(SDNode *N) {
679 SDLoc dl(N);
680 SDValue Op0 = N->getOperand(0);
681 SDValue Op1 = N->getOperand(1);
682 SDValue MLAOp1; // Will hold ordinary multiplicand for MLA.
683 SDValue MLAOp2; // Will hold lane-accessed multiplicand for MLA.
684 int LaneIdx = -1; // Will hold the lane index.
686 if (Op1.getOpcode() != ISD::MUL ||
687 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
688 LaneIdx)) {
689 std::swap(Op0, Op1);
690 if (Op1.getOpcode() != ISD::MUL ||
691 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
692 LaneIdx))
693 return false;
696 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
698 SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };
700 unsigned MLAOpc = ~0U;
702 switch (N->getSimpleValueType(0).SimpleTy) {
703 default:
704 llvm_unreachable("Unrecognized MLA.");
705 case MVT::v4i16:
706 MLAOpc = AArch64::MLAv4i16_indexed;
707 break;
708 case MVT::v8i16:
709 MLAOpc = AArch64::MLAv8i16_indexed;
710 break;
711 case MVT::v2i32:
712 MLAOpc = AArch64::MLAv2i32_indexed;
713 break;
714 case MVT::v4i32:
715 MLAOpc = AArch64::MLAv4i32_indexed;
716 break;
719 ReplaceNode(N, CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops));
720 return true;
723 bool AArch64DAGToDAGISel::tryMULLV64LaneV128(unsigned IntNo, SDNode *N) {
724 SDLoc dl(N);
725 SDValue SMULLOp0;
726 SDValue SMULLOp1;
727 int LaneIdx;
729 if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
730 LaneIdx))
731 return false;
733 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
735 SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };
737 unsigned SMULLOpc = ~0U;
739 if (IntNo == Intrinsic::aarch64_neon_smull) {
740 switch (N->getSimpleValueType(0).SimpleTy) {
741 default:
742 llvm_unreachable("Unrecognized SMULL.");
743 case MVT::v4i32:
744 SMULLOpc = AArch64::SMULLv4i16_indexed;
745 break;
746 case MVT::v2i64:
747 SMULLOpc = AArch64::SMULLv2i32_indexed;
748 break;
750 } else if (IntNo == Intrinsic::aarch64_neon_umull) {
751 switch (N->getSimpleValueType(0).SimpleTy) {
752 default:
753 llvm_unreachable("Unrecognized SMULL.");
754 case MVT::v4i32:
755 SMULLOpc = AArch64::UMULLv4i16_indexed;
756 break;
757 case MVT::v2i64:
758 SMULLOpc = AArch64::UMULLv2i32_indexed;
759 break;
761 } else
762 llvm_unreachable("Unrecognized intrinsic.");
764 ReplaceNode(N, CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops));
765 return true;
768 /// Instructions that accept extend modifiers like UXTW expect the register
769 /// being extended to be a GPR32, but the incoming DAG might be acting on a
770 /// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
771 /// this is the case.
772 static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
773 if (N.getValueType() == MVT::i32)
774 return N;
776 SDLoc dl(N);
777 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
778 MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
779 dl, MVT::i32, N, SubReg);
780 return SDValue(Node, 0);
783 // Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
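// For example, for an RDVL instantiation (Scale assumed to be 16, since RDVL
// reads the vector length in bytes, i.e. VSCALE*16), a request for VSCALE*64
// yields a multiplier of 4.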
784 template<signed Low, signed High, signed Scale>
785 bool AArch64DAGToDAGISel::SelectRDVLImm(SDValue N, SDValue &Imm) {
786 if (!isa<ConstantSDNode>(N))
787 return false;
789 int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
790 if ((MulImm % std::abs(Scale)) == 0) {
791 int64_t RDVLImm = MulImm / Scale;
792 if ((RDVLImm >= Low) && (RDVLImm <= High)) {
793 Imm = CurDAG->getTargetConstant(RDVLImm, SDLoc(N), MVT::i32);
794 return true;
798 return false;
801 /// SelectArithExtendedRegister - Select a "extended register" operand. This
802 /// operand folds in an extend followed by an optional left shift.
803 bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
804 SDValue &Shift) {
805 unsigned ShiftVal = 0;
806 AArch64_AM::ShiftExtendType Ext;
808 if (N.getOpcode() == ISD::SHL) {
809 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
810 if (!CSD)
811 return false;
812 ShiftVal = CSD->getZExtValue();
813 if (ShiftVal > 4)
814 return false;
816 Ext = getExtendTypeForNode(N.getOperand(0));
817 if (Ext == AArch64_AM::InvalidShiftExtend)
818 return false;
820 Reg = N.getOperand(0).getOperand(0);
821 } else {
822 Ext = getExtendTypeForNode(N);
823 if (Ext == AArch64_AM::InvalidShiftExtend)
824 return false;
826 Reg = N.getOperand(0);
828 // Don't match if free 32-bit -> 64-bit zext can be used instead.
829 if (Ext == AArch64_AM::UXTW &&
830 Reg->getValueType(0).getSizeInBits() == 32 && isDef32(*Reg.getNode()))
831 return false;
834 // AArch64 mandates that the RHS of the operation must use the smallest
835 // register class that could contain the size being extended from. Thus,
836 // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
837 // there might not be an actual 32-bit value in the program. We can
838 // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
839 assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
840 Reg = narrowIfNeeded(CurDAG, Reg);
841 Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
842 MVT::i32);
843 return isWorthFolding(N);
846 /// If there's a use of this ADDlow that's not itself a load/store then we'll
847 /// need to create a real ADD instruction from it anyway and there's no point in
848 /// folding it into the mem op. Theoretically, it shouldn't matter, but there's
849 /// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
850 /// leads to duplicated ADRP instructions.
851 static bool isWorthFoldingADDlow(SDValue N) {
852 for (auto Use : N->uses()) {
853 if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
854 Use->getOpcode() != ISD::ATOMIC_LOAD &&
855 Use->getOpcode() != ISD::ATOMIC_STORE)
856 return false;
858 // ldar and stlr have much more restrictive addressing modes (just a
859 // register).
860 if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getSuccessOrdering()))
861 return false;
864 return true;
867 /// SelectAddrModeIndexedBitWidth - Select a "register plus scaled (un)signed BW-bit
868 /// immediate" address. The "Size" argument is the size in bytes of the memory
869 /// reference, which determines the scale.
870 bool AArch64DAGToDAGISel::SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm,
871 unsigned BW, unsigned Size,
872 SDValue &Base,
873 SDValue &OffImm) {
874 SDLoc dl(N);
875 const DataLayout &DL = CurDAG->getDataLayout();
876 const TargetLowering *TLI = getTargetLowering();
877 if (N.getOpcode() == ISD::FrameIndex) {
878 int FI = cast<FrameIndexSDNode>(N)->getIndex();
879 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
880 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
881 return true;
884 // As opposed to the (12-bit) Indexed addressing mode below, the 7/9-bit signed
885 // addressing mode selected here doesn't support labels/immediates, only base+offset.
886 if (CurDAG->isBaseWithConstantOffset(N)) {
887 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
888 if (IsSignedImm) {
889 int64_t RHSC = RHS->getSExtValue();
890 unsigned Scale = Log2_32(Size);
891 int64_t Range = 0x1LL << (BW - 1);
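// E.g. for BW == 7 and an 8-byte access (Scale == 3) this accepts 8-byte
// aligned offsets in [-512, 504] and encodes them as OffImm in [-64, 63].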
893 if ((RHSC & (Size - 1)) == 0 && RHSC >= -(Range << Scale) &&
894 RHSC < (Range << Scale)) {
895 Base = N.getOperand(0);
896 if (Base.getOpcode() == ISD::FrameIndex) {
897 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
898 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
900 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
901 return true;
903 } else {
904 // unsigned Immediate
905 uint64_t RHSC = RHS->getZExtValue();
906 unsigned Scale = Log2_32(Size);
907 uint64_t Range = 0x1ULL << BW;
909 if ((RHSC & (Size - 1)) == 0 && RHSC < (Range << Scale)) {
910 Base = N.getOperand(0);
911 if (Base.getOpcode() == ISD::FrameIndex) {
912 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
913 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
915 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
916 return true;
921 // Base only. The address will be materialized into a register before
922 // the memory is accessed.
923 // add x0, Xbase, #offset
924 // stp x1, x2, [x0]
925 Base = N;
926 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
927 return true;
930 /// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
931 /// immediate" address. The "Size" argument is the size in bytes of the memory
932 /// reference, which determines the scale.
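/// For example, for an 8-byte access the offset must be a multiple of 8 in
/// [0, 32760] and is encoded as offset/8 in the 12-bit immediate field.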
933 bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
934 SDValue &Base, SDValue &OffImm) {
935 SDLoc dl(N);
936 const DataLayout &DL = CurDAG->getDataLayout();
937 const TargetLowering *TLI = getTargetLowering();
938 if (N.getOpcode() == ISD::FrameIndex) {
939 int FI = cast<FrameIndexSDNode>(N)->getIndex();
940 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
941 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
942 return true;
945 if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
946 GlobalAddressSDNode *GAN =
947 dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
948 Base = N.getOperand(0);
949 OffImm = N.getOperand(1);
950 if (!GAN)
951 return true;
953 if (GAN->getOffset() % Size == 0 &&
954 GAN->getGlobal()->getPointerAlignment(DL) >= Size)
955 return true;
958 if (CurDAG->isBaseWithConstantOffset(N)) {
959 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
960 int64_t RHSC = (int64_t)RHS->getZExtValue();
961 unsigned Scale = Log2_32(Size);
962 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
963 Base = N.getOperand(0);
964 if (Base.getOpcode() == ISD::FrameIndex) {
965 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
966 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
968 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
969 return true;
974 // Before falling back to our general case, check if the unscaled
975 // instructions can handle this. If so, that's preferable.
976 if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
977 return false;
979 // Base only. The address will be materialized into a register before
980 // the memory is accessed.
981 // add x0, Xbase, #offset
982 // ldr x0, [x0]
983 Base = N;
984 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
985 return true;
988 /// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
989 /// immediate" address. This should only match when there is an offset that
990 /// is not valid for a scaled immediate addressing mode. The "Size" argument
991 /// is the size in bytes of the memory reference, which is needed here to know
992 /// what is valid for a scaled immediate.
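/// For example, for a 4-byte access an offset of #3 cannot be scaled, but it
/// does fit the unscaled signed 9-bit range [-256, 255] that is matched here.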
993 bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
994 SDValue &Base,
995 SDValue &OffImm) {
996 if (!CurDAG->isBaseWithConstantOffset(N))
997 return false;
998 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
999 int64_t RHSC = RHS->getSExtValue();
1000 // If the offset is valid as a scaled immediate, don't match here.
1001 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
1002 RHSC < (0x1000 << Log2_32(Size)))
1003 return false;
1004 if (RHSC >= -256 && RHSC < 256) {
1005 Base = N.getOperand(0);
1006 if (Base.getOpcode() == ISD::FrameIndex) {
1007 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1008 const TargetLowering *TLI = getTargetLowering();
1009 Base = CurDAG->getTargetFrameIndex(
1010 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
1012 OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
1013 return true;
1016 return false;
1019 static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
1020 SDLoc dl(N);
1021 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
1022 SDValue ImpDef = SDValue(
1023 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
1024 MachineSDNode *Node = CurDAG->getMachineNode(
1025 TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
1026 return SDValue(Node, 0);
1029 /// Check if the given SHL node (\p N) can be used to form an
1030 /// extended register for an addressing mode.
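/// For example, folding (shl (sext_inreg x, i32), 3) into an 8-byte load
/// allows the "[Xbase, Woffset, sxtw #3]" register-offset form to be used.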
1031 bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
1032 bool WantExtend, SDValue &Offset,
1033 SDValue &SignExtend) {
1034 assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
1035 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
1036 if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
1037 return false;
1039 SDLoc dl(N);
1040 if (WantExtend) {
1041 AArch64_AM::ShiftExtendType Ext =
1042 getExtendTypeForNode(N.getOperand(0), true);
1043 if (Ext == AArch64_AM::InvalidShiftExtend)
1044 return false;
1046 Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
1047 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
1048 MVT::i32);
1049 } else {
1050 Offset = N.getOperand(0);
1051 SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
1054 unsigned LegalShiftVal = Log2_32(Size);
1055 unsigned ShiftVal = CSD->getZExtValue();
1057 if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
1058 return false;
1060 return isWorthFolding(N);
1063 bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
1064 SDValue &Base, SDValue &Offset,
1065 SDValue &SignExtend,
1066 SDValue &DoShift) {
1067 if (N.getOpcode() != ISD::ADD)
1068 return false;
1069 SDValue LHS = N.getOperand(0);
1070 SDValue RHS = N.getOperand(1);
1071 SDLoc dl(N);
1073 // We don't want to match immediate adds here, because they are better lowered
1074 // to the register-immediate addressing modes.
1075 if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
1076 return false;
1078 // Check if this particular node is reused in any non-memory related
1079 // operation. If yes, do not try to fold this node into the address
1080 // computation, since the computation will be kept.
1081 const SDNode *Node = N.getNode();
1082 for (SDNode *UI : Node->uses()) {
1083 if (!isa<MemSDNode>(*UI))
1084 return false;
1087 // Remember if it is worth folding N when it produces an extended register.
1088 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
1090 // Try to match a shifted extend on the RHS.
1091 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
1092 SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
1093 Base = LHS;
1094 DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
1095 return true;
1098 // Try to match a shifted extend on the LHS.
1099 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
1100 SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
1101 Base = RHS;
1102 DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
1103 return true;
1106 // There was no shift, whatever else we find.
1107 DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);
1109 AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
1110 // Try to match an unshifted extend on the LHS.
1111 if (IsExtendedRegisterWorthFolding &&
1112 (Ext = getExtendTypeForNode(LHS, true)) !=
1113 AArch64_AM::InvalidShiftExtend) {
1114 Base = RHS;
1115 Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
1116 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
1117 MVT::i32);
1118 if (isWorthFolding(LHS))
1119 return true;
1122 // Try to match an unshifted extend on the RHS.
1123 if (IsExtendedRegisterWorthFolding &&
1124 (Ext = getExtendTypeForNode(RHS, true)) !=
1125 AArch64_AM::InvalidShiftExtend) {
1126 Base = LHS;
1127 Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
1128 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
1129 MVT::i32);
1130 if (isWorthFolding(RHS))
1131 return true;
1134 return false;
1137 // Check if the given immediate is preferred by ADD. If an immediate can be
1138 // encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
1139 // encoded by a single MOVZ, return true.
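// Illustrative cases: 0xfff -> true (plain ADD); 0x123000 -> true (ADD with
// LSL #12, and not a single MOVZ); 0x230000 -> false (a single MOVZ with
// LSL #16 is cheaper); 0x7000 -> false (fits a single 16-bit MOVZ).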
1140 static bool isPreferredADD(int64_t ImmOff) {
1141 // Constant in [0x0, 0xfff] can be encoded in ADD.
1142 if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
1143 return true;
1144 // Check if it can be encoded in an "ADD LSL #12".
1145 if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
1146 // As a single MOVZ is faster than an "ADD ..., LSL #12", ignore such constants.
1147 return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
1148 (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
1149 return false;
1152 bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
1153 SDValue &Base, SDValue &Offset,
1154 SDValue &SignExtend,
1155 SDValue &DoShift) {
1156 if (N.getOpcode() != ISD::ADD)
1157 return false;
1158 SDValue LHS = N.getOperand(0);
1159 SDValue RHS = N.getOperand(1);
1160 SDLoc DL(N);
1162 // Check if this particular node is reused in any non-memory related
1163 // operation. If yes, do not try to fold this node into the address
1164 // computation, since the computation will be kept.
1165 const SDNode *Node = N.getNode();
1166 for (SDNode *UI : Node->uses()) {
1167 if (!isa<MemSDNode>(*UI))
1168 return false;
1171 // Watch out if RHS is a wide immediate: it cannot be selected into the
1172 // [BaseReg+Imm] addressing mode and may not be encodable in an ADD/SUB
1173 // either. In that case the [BaseReg + 0] address mode is used, generating
1174 // instructions like:
1175 // MOV X0, WideImmediate
1176 // ADD X1, BaseReg, X0
1177 // LDR X2, [X1, 0]
1178 // In such a situation, using the [BaseReg, XReg] addressing mode can save one
1179 // ADD/SUB:
1180 // MOV X0, WideImmediate
1181 // LDR X2, [BaseReg, X0]
1182 if (isa<ConstantSDNode>(RHS)) {
1183 int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
1184 unsigned Scale = Log2_32(Size);
1185 // Skip if the immediate can be selected by a load/store addressing mode.
1186 // Also skip if the immediate can be encoded by a single ADD (SUB is also
1187 // checked by using -ImmOff).
1188 if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
1189 isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
1190 return false;
1192 SDValue Ops[] = { RHS };
1193 SDNode *MOVI =
1194 CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
1195 SDValue MOVIV = SDValue(MOVI, 0);
1196 // This ADD of two X registers will be selected into [Reg+Reg] mode.
1197 N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
1200 // Remember if it is worth folding N when it produces an extended register.
1201 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
1203 // Try to match a shifted extend on the RHS.
1204 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
1205 SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
1206 Base = LHS;
1207 DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
1208 return true;
1211 // Try to match a shifted extend on the LHS.
1212 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
1213 SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
1214 Base = RHS;
1215 DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
1216 return true;
1219 // Match any non-shifted, non-extend, non-immediate add expression.
1220 Base = LHS;
1221 Offset = RHS;
1222 SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
1223 DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
1224 // Reg1 + Reg2 is free: no check needed.
1225 return true;
1228 SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
1229 static const unsigned RegClassIDs[] = {
1230 AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
1231 static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
1232 AArch64::dsub2, AArch64::dsub3};
1234 return createTuple(Regs, RegClassIDs, SubRegs);
1237 SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
1238 static const unsigned RegClassIDs[] = {
1239 AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
1240 static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
1241 AArch64::qsub2, AArch64::qsub3};
1243 return createTuple(Regs, RegClassIDs, SubRegs);
1246 SDValue AArch64DAGToDAGISel::createZTuple(ArrayRef<SDValue> Regs) {
1247 static const unsigned RegClassIDs[] = {AArch64::ZPR2RegClassID,
1248 AArch64::ZPR3RegClassID,
1249 AArch64::ZPR4RegClassID};
1250 static const unsigned SubRegs[] = {AArch64::zsub0, AArch64::zsub1,
1251 AArch64::zsub2, AArch64::zsub3};
1253 return createTuple(Regs, RegClassIDs, SubRegs);
1256 SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
1257 const unsigned RegClassIDs[],
1258 const unsigned SubRegs[]) {
1259 // There's no special register-class for a vector-list of 1 element: it's just
1260 // a vector.
1261 if (Regs.size() == 1)
1262 return Regs[0];
1264 assert(Regs.size() >= 2 && Regs.size() <= 4);
1266 SDLoc DL(Regs[0]);
1268 SmallVector<SDValue, 4> Ops;
1270 // First operand of REG_SEQUENCE is the desired RegClass.
1271 Ops.push_back(
1272 CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));
1274 // Then we get pairs of source & subregister-position for the components.
1275 for (unsigned i = 0; i < Regs.size(); ++i) {
1276 Ops.push_back(Regs[i]);
1277 Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
1280 SDNode *N =
1281 CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
1282 return SDValue(N, 0);
1285 void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
1286 bool isExt) {
1287 SDLoc dl(N);
1288 EVT VT = N->getValueType(0);
1290 unsigned ExtOff = isExt;
1292 // Form a REG_SEQUENCE to force register allocation.
1293 unsigned Vec0Off = ExtOff + 1;
1294 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
1295 N->op_begin() + Vec0Off + NumVecs);
1296 SDValue RegSeq = createQTuple(Regs);
1298 SmallVector<SDValue, 6> Ops;
1299 if (isExt)
1300 Ops.push_back(N->getOperand(1));
1301 Ops.push_back(RegSeq);
1302 Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
1303 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
1306 bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
1307 LoadSDNode *LD = cast<LoadSDNode>(N);
1308 if (LD->isUnindexed())
1309 return false;
1310 EVT VT = LD->getMemoryVT();
1311 EVT DstVT = N->getValueType(0);
1312 ISD::MemIndexedMode AM = LD->getAddressingMode();
1313 bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
1315 // We're not doing validity checking here. That was done when checking
1316 // if we should mark the load as indexed or not. We're just selecting
1317 // the right instruction.
1318 unsigned Opcode = 0;
1320 ISD::LoadExtType ExtType = LD->getExtensionType();
1321 bool InsertTo64 = false;
1322 if (VT == MVT::i64)
1323 Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
1324 else if (VT == MVT::i32) {
1325 if (ExtType == ISD::NON_EXTLOAD)
1326 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1327 else if (ExtType == ISD::SEXTLOAD)
1328 Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
1329 else {
1330 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1331 InsertTo64 = true;
1332 // The result of the load is only i32. It's the subreg_to_reg that makes
1333 // it into an i64.
1334 DstVT = MVT::i32;
1336 } else if (VT == MVT::i16) {
1337 if (ExtType == ISD::SEXTLOAD) {
1338 if (DstVT == MVT::i64)
1339 Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
1340 else
1341 Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
1342 } else {
1343 Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
1344 InsertTo64 = DstVT == MVT::i64;
1345 // The result of the load is only i32. It's the subreg_to_reg that makes
1346 // it into an i64.
1347 DstVT = MVT::i32;
1349 } else if (VT == MVT::i8) {
1350 if (ExtType == ISD::SEXTLOAD) {
1351 if (DstVT == MVT::i64)
1352 Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
1353 else
1354 Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
1355 } else {
1356 Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
1357 InsertTo64 = DstVT == MVT::i64;
1358 // The result of the load is only i32. It's the subreg_to_reg that makes
1359 // it into an i64.
1360 DstVT = MVT::i32;
1362 } else if (VT == MVT::f16) {
1363 Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
1364 } else if (VT == MVT::bf16) {
1365 Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
1366 } else if (VT == MVT::f32) {
1367 Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
1368 } else if (VT == MVT::f64 || VT.is64BitVector()) {
1369 Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
1370 } else if (VT.is128BitVector()) {
1371 Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
1372 } else
1373 return false;
1374 SDValue Chain = LD->getChain();
1375 SDValue Base = LD->getBasePtr();
1376 ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
1377 int OffsetVal = (int)OffsetOp->getZExtValue();
1378 SDLoc dl(N);
1379 SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
1380 SDValue Ops[] = { Base, Offset, Chain };
1381 SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
1382 MVT::Other, Ops);
1384 // Transfer memoperands.
1385 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
1386 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Res), {MemOp});
1388 // Either way, we're replacing the node, so tell the caller that.
1389 SDValue LoadedVal = SDValue(Res, 1);
1390 if (InsertTo64) {
1391 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
1392 LoadedVal =
1393 SDValue(CurDAG->getMachineNode(
1394 AArch64::SUBREG_TO_REG, dl, MVT::i64,
1395 CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
1396 SubReg),
1400 ReplaceUses(SDValue(N, 0), LoadedVal);
1401 ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
1402 ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
1403 CurDAG->RemoveDeadNode(N);
1404 return true;
1407 void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
1408 unsigned SubRegIdx) {
1409 SDLoc dl(N);
1410 EVT VT = N->getValueType(0);
1411 SDValue Chain = N->getOperand(0);
1413 SDValue Ops[] = {N->getOperand(2), // Mem operand;
1414 Chain};
1416 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1418 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1419 SDValue SuperReg = SDValue(Ld, 0);
1420 for (unsigned i = 0; i < NumVecs; ++i)
1421 ReplaceUses(SDValue(N, i),
1422 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1424 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1426 // Transfer memoperands. In the case of AArch64::LD64B, there won't be one,
1427 // because it's too simple to have needed special treatment during lowering.
1428 if (auto *MemIntr = dyn_cast<MemIntrinsicSDNode>(N)) {
1429 MachineMemOperand *MemOp = MemIntr->getMemOperand();
1430 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});
1433 CurDAG->RemoveDeadNode(N);
1436 void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
1437 unsigned Opc, unsigned SubRegIdx) {
1438 SDLoc dl(N);
1439 EVT VT = N->getValueType(0);
1440 SDValue Chain = N->getOperand(0);
1442 SDValue Ops[] = {N->getOperand(1), // Mem operand
1443 N->getOperand(2), // Incremental
1444 Chain};
1446 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1447 MVT::Untyped, MVT::Other};
1449 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1451 // Update uses of write back register
1452 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1454 // Update uses of vector list
1455 SDValue SuperReg = SDValue(Ld, 1);
1456 if (NumVecs == 1)
1457 ReplaceUses(SDValue(N, 0), SuperReg);
1458 else
1459 for (unsigned i = 0; i < NumVecs; ++i)
1460 ReplaceUses(SDValue(N, i),
1461 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1463 // Update the chain
1464 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1465 CurDAG->RemoveDeadNode(N);
1468 /// Optimize \param OldBase and \param OldOffset selecting the best addressing
1469 /// mode. Returns a tuple consisting of an Opcode, an SDValue representing the
1470 /// new Base and an SDValue representing the new offset.
1471 std::tuple<unsigned, SDValue, SDValue>
1472 AArch64DAGToDAGISel::findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr,
1473 unsigned Opc_ri,
1474 const SDValue &OldBase,
1475 const SDValue &OldOffset,
1476 unsigned Scale) {
1477 SDValue NewBase = OldBase;
1478 SDValue NewOffset = OldOffset;
1479 // Detect a possible Reg+Imm addressing mode.
1480 const bool IsRegImm = SelectAddrModeIndexedSVE</*Min=*/-8, /*Max=*/7>(
1481 N, OldBase, NewBase, NewOffset);
1483 // Detect a possible reg+reg addressing mode, but only if we haven't already
1484 // detected a Reg+Imm one.
1485 const bool IsRegReg =
1486 !IsRegImm && SelectSVERegRegAddrMode(OldBase, Scale, NewBase, NewOffset);
1488 // Select the instruction.
1489 return std::make_tuple(IsRegReg ? Opc_rr : Opc_ri, NewBase, NewOffset);
1492 void AArch64DAGToDAGISel::SelectPredicatedLoad(SDNode *N, unsigned NumVecs,
1493 unsigned Scale, unsigned Opc_ri,
1494 unsigned Opc_rr, bool IsIntr) {
1495 assert(Scale < 4 && "Invalid scaling value.");
1496 SDLoc DL(N);
1497 EVT VT = N->getValueType(0);
1498 SDValue Chain = N->getOperand(0);
1500 // Optimize addressing mode.
1501 SDValue Base, Offset;
1502 unsigned Opc;
1503 std::tie(Opc, Base, Offset) = findAddrModeSVELoadStore(
1504 N, Opc_rr, Opc_ri, N->getOperand(IsIntr ? 3 : 2),
1505 CurDAG->getTargetConstant(0, DL, MVT::i64), Scale);
1507 SDValue Ops[] = {N->getOperand(IsIntr ? 2 : 1), // Predicate
1508 Base, // Memory operand
1509 Offset, Chain};
1511 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1513 SDNode *Load = CurDAG->getMachineNode(Opc, DL, ResTys, Ops);
1514 SDValue SuperReg = SDValue(Load, 0);
1515 for (unsigned i = 0; i < NumVecs; ++i)
1516 ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
1517 AArch64::zsub0 + i, DL, VT, SuperReg));
1519 // Copy chain
1520 unsigned ChainIdx = NumVecs;
1521 ReplaceUses(SDValue(N, ChainIdx), SDValue(Load, 1));
1522 CurDAG->RemoveDeadNode(N);
1525 void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
1526 unsigned Opc) {
1527 SDLoc dl(N);
1528 EVT VT = N->getOperand(2)->getValueType(0);
1530 // Form a REG_SEQUENCE to force register allocation.
1531 bool Is128Bit = VT.getSizeInBits() == 128;
1532 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1533 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1535 SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
1536 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
1538 // Transfer memoperands.
1539 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1540 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
1542 ReplaceNode(N, St);
1545 void AArch64DAGToDAGISel::SelectPredicatedStore(SDNode *N, unsigned NumVecs,
1546 unsigned Scale, unsigned Opc_rr,
1547 unsigned Opc_ri) {
1548 SDLoc dl(N);
1550 // Form a REG_SEQUENCE to force register allocation.
1551 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1552 SDValue RegSeq = createZTuple(Regs);
1554 // Optimize addressing mode.
1555 unsigned Opc;
1556 SDValue Offset, Base;
1557 std::tie(Opc, Base, Offset) = findAddrModeSVELoadStore(
1558 N, Opc_rr, Opc_ri, N->getOperand(NumVecs + 3),
1559 CurDAG->getTargetConstant(0, dl, MVT::i64), Scale);
1561 SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), // predicate
1562 Base, // address
1563 Offset, // offset
1564 N->getOperand(0)}; // chain
1565 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
1567 ReplaceNode(N, St);
1570 bool AArch64DAGToDAGISel::SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base,
1571 SDValue &OffImm) {
1572 SDLoc dl(N);
1573 const DataLayout &DL = CurDAG->getDataLayout();
1574 const TargetLowering *TLI = getTargetLowering();
1576 // Try to match it for the frame address
1577 if (auto FINode = dyn_cast<FrameIndexSDNode>(N)) {
1578 int FI = FINode->getIndex();
1579 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
1580 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
1581 return true;
1584 return false;
1587 void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
1588 unsigned Opc) {
1589 SDLoc dl(N);
1590 EVT VT = N->getOperand(2)->getValueType(0);
1591 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1592 MVT::Other}; // Type for the Chain
1594 // Form a REG_SEQUENCE to force register allocation.
1595 bool Is128Bit = VT.getSizeInBits() == 128;
1596 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1597 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1599 SDValue Ops[] = {RegSeq,
1600 N->getOperand(NumVecs + 1), // base register
1601 N->getOperand(NumVecs + 2), // Incremental
1602 N->getOperand(0)}; // Chain
1603 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1605 ReplaceNode(N, St);
1608 namespace {
1609 /// WidenVector - Given a value in the V64 register class, produce the
1610 /// equivalent value in the V128 register class.
1611 class WidenVector {
1612 SelectionDAG &DAG;
1614 public:
1615 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
1617 SDValue operator()(SDValue V64Reg) {
1618 EVT VT = V64Reg.getValueType();
1619 unsigned NarrowSize = VT.getVectorNumElements();
1620 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1621 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
1622 SDLoc DL(V64Reg);
1624 SDValue Undef =
1625 SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
1626 return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
1629 } // namespace
1631 /// NarrowVector - Given a value in the V128 register class, produce the
1632 /// equivalent value in the V64 register class.
1633 static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
1634 EVT VT = V128Reg.getValueType();
1635 unsigned WideSize = VT.getVectorNumElements();
1636 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1637 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
1639 return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
1640 V128Reg);
1643 void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
1644 unsigned Opc) {
1645 SDLoc dl(N);
1646 EVT VT = N->getValueType(0);
1647 bool Narrow = VT.getSizeInBits() == 64;
1649 // Form a REG_SEQUENCE to force register allocation.
1650 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1652 if (Narrow)
1653 transform(Regs, Regs.begin(),
1654 WidenVector(*CurDAG));
1656 SDValue RegSeq = createQTuple(Regs);
1658 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1660 unsigned LaneNo =
1661 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1663 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1664 N->getOperand(NumVecs + 3), N->getOperand(0)};
1665 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1666 SDValue SuperReg = SDValue(Ld, 0);
1668 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1669 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
1670 AArch64::qsub2, AArch64::qsub3 };
1671 for (unsigned i = 0; i < NumVecs; ++i) {
1672 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
1673 if (Narrow)
1674 NV = NarrowVector(NV, *CurDAG);
1675 ReplaceUses(SDValue(N, i), NV);
1678 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1679 CurDAG->RemoveDeadNode(N);
1682 void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
1683 unsigned Opc) {
1684 SDLoc dl(N);
1685 EVT VT = N->getValueType(0);
1686 bool Narrow = VT.getSizeInBits() == 64;
1688 // Form a REG_SEQUENCE to force register allocation.
1689 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1691 if (Narrow)
1692 transform(Regs, Regs.begin(),
1693 WidenVector(*CurDAG));
1695 SDValue RegSeq = createQTuple(Regs);
1697 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1698 RegSeq->getValueType(0), MVT::Other};
1700 unsigned LaneNo =
1701 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1703 SDValue Ops[] = {RegSeq,
1704 CurDAG->getTargetConstant(LaneNo, dl,
1705 MVT::i64), // Lane Number
1706 N->getOperand(NumVecs + 2), // Base register
1707 N->getOperand(NumVecs + 3), // Incremental
1708 N->getOperand(0)};
1709 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1711 // Update uses of the write back register
1712 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1714 // Update uses of the vector list
1715 SDValue SuperReg = SDValue(Ld, 1);
1716 if (NumVecs == 1) {
1717 ReplaceUses(SDValue(N, 0),
1718 Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
1719 } else {
1720 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1721 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
1722 AArch64::qsub2, AArch64::qsub3 };
1723 for (unsigned i = 0; i < NumVecs; ++i) {
1724 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
1725 SuperReg);
1726 if (Narrow)
1727 NV = NarrowVector(NV, *CurDAG);
1728 ReplaceUses(SDValue(N, i), NV);
1732 // Update the Chain
1733 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1734 CurDAG->RemoveDeadNode(N);
1737 void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
1738 unsigned Opc) {
1739 SDLoc dl(N);
1740 EVT VT = N->getOperand(2)->getValueType(0);
1741 bool Narrow = VT.getSizeInBits() == 64;
1743 // Form a REG_SEQUENCE to force register allocation.
1744 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1746 if (Narrow)
1747 transform(Regs, Regs.begin(),
1748 WidenVector(*CurDAG));
1750 SDValue RegSeq = createQTuple(Regs);
1752 unsigned LaneNo =
1753 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1755 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1756 N->getOperand(NumVecs + 3), N->getOperand(0)};
1757 SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
1759 // Transfer memoperands.
1760 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1761 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
1763 ReplaceNode(N, St);
1766 void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
1767 unsigned Opc) {
1768 SDLoc dl(N);
1769 EVT VT = N->getOperand(2)->getValueType(0);
1770 bool Narrow = VT.getSizeInBits() == 64;
1772 // Form a REG_SEQUENCE to force register allocation.
1773 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1775 if (Narrow)
1776 transform(Regs, Regs.begin(),
1777 WidenVector(*CurDAG));
1779 SDValue RegSeq = createQTuple(Regs);
1781 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1782 MVT::Other};
1784 unsigned LaneNo =
1785 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1787 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1788 N->getOperand(NumVecs + 2), // Base Register
1789 N->getOperand(NumVecs + 3), // Incremental
1790 N->getOperand(0)};
1791 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1793 // Transfer memoperands.
1794 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1795 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
1797 ReplaceNode(N, St);
1800 static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1801 unsigned &Opc, SDValue &Opd0,
1802 unsigned &LSB, unsigned &MSB,
1803 unsigned NumberOfIgnoredLowBits,
1804 bool BiggerPattern) {
1805 assert(N->getOpcode() == ISD::AND &&
1806 "N must be a AND operation to call this function");
1808 EVT VT = N->getValueType(0);
1810 // Here we could test the type of VT and return false when the type does not
1811 // match, but since that check is done prior to this call in the current
1812 // context, we turned it into an assert to avoid redundant code.
1813 assert((VT == MVT::i32 || VT == MVT::i64) &&
1814 "Type checking must have been done before calling this function");
1816 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1817 // changed the AND node to a 32-bit mask operation. We'll have to
1818 // undo that as part of the transform here if we want to catch all
1819 // the opportunities.
1820 // Currently the NumberOfIgnoredLowBits argument helps to recover
1821 // from these situations when matching a bigger pattern (bitfield insert).
1823 // For unsigned extracts, check for a shift right and mask
1824 uint64_t AndImm = 0;
1825 if (!isOpcWithIntImmediate(N, ISD::AND, AndImm))
1826 return false;
1828 const SDNode *Op0 = N->getOperand(0).getNode();
1830 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
1831 // simplified. Try to undo that
1832 AndImm |= maskTrailingOnes<uint64_t>(NumberOfIgnoredLowBits);
1834 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
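// For example, 0x00ff is accepted (0xff & 0x100 == 0), while 0x0ff0 is
// rejected (0xff0 & 0xff1 != 0).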
1835 if (AndImm & (AndImm + 1))
1836 return false;
1838 bool ClampMSB = false;
1839 uint64_t SrlImm = 0;
1840 // Handle the SRL + ANY_EXTEND case.
1841 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
1842 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, SrlImm)) {
1843 // Extend the incoming operand of the SRL to 64-bit.
1844 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
1845 // Make sure to clamp the MSB so that we preserve the semantics of the
1846 // original operations.
1847 ClampMSB = true;
1848 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
1849 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
1850 SrlImm)) {
1851 // If the shift result was truncated, we can still combine them.
1852 Opd0 = Op0->getOperand(0).getOperand(0);
1854 // Use the type of SRL node.
1855 VT = Opd0->getValueType(0);
1856 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, SrlImm)) {
1857 Opd0 = Op0->getOperand(0);
1858 ClampMSB = (VT == MVT::i32);
1859 } else if (BiggerPattern) {
1860 // Let's pretend a 0 shift right has been performed.
1861 // The resulting code will be at least as good as the original one
1862 // plus it may expose more opportunities for bitfield insert pattern.
1863 // FIXME: Currently we limit this to the bigger pattern, because
1864 // some optimizations expect AND and not UBFM.
1865 Opd0 = N->getOperand(0);
1866 } else
1867 return false;
1869 // Bail out on large immediates. This happens when no proper
1870 // combining/constant folding was performed.
1871 if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) {
1872 LLVM_DEBUG(
1873 (dbgs() << N
1874 << ": Found large shift immediate, this should not happen\n"));
1875 return false;
1878 LSB = SrlImm;
1879 MSB = SrlImm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(AndImm)
1880 : countTrailingOnes<uint64_t>(AndImm)) -
1881 1;
1882 if (ClampMSB)
1883 // Since we're moving the extend before the right shift operation, we need
1884 // to clamp the MSB to make sure we don't shift in undefined bits instead of
1885 // the zeros which would get shifted in with the original right shift
1886 // operation.
1887 MSB = MSB > 31 ? 31 : MSB;
1889 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1890 return true;
1893 static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
1894 SDValue &Opd0, unsigned &Immr,
1895 unsigned &Imms) {
1896 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
1898 EVT VT = N->getValueType(0);
1899 unsigned BitWidth = VT.getSizeInBits();
1900 assert((VT == MVT::i32 || VT == MVT::i64) &&
1901 "Type checking must have been done before calling this function");
1903 SDValue Op = N->getOperand(0);
1904 if (Op->getOpcode() == ISD::TRUNCATE) {
1905 Op = Op->getOperand(0);
1906 VT = Op->getValueType(0);
1907 BitWidth = VT.getSizeInBits();
1910 uint64_t ShiftImm;
1911 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRL, ShiftImm) &&
1912 !isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
1913 return false;
1915 unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
1916 if (ShiftImm + Width > BitWidth)
1917 return false;
1919 Opc = (VT == MVT::i32) ? AArch64::SBFMWri : AArch64::SBFMXri;
1920 Opd0 = Op.getOperand(0);
1921 Immr = ShiftImm;
1922 Imms = ShiftImm + Width - 1;
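// For example, a sign_extend_inreg to i8 of (srl x, #3) gives Immr = 3 and
// Imms = 3 + 8 - 1 = 10, i.e. an 8-bit sbfx starting at bit 3.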
1923 return true;
1926 static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
1927 SDValue &Opd0, unsigned &LSB,
1928 unsigned &MSB) {
1929 // We are looking for the following pattern, which basically extracts several
1930 // contiguous bits from the source value and places them at the LSB of the
1931 // destination value, with all other bits of the destination value set to zero:
1933 // Value2 = AND Value, MaskImm
1934 // SRL Value2, ShiftImm
1936 // where MaskImm >> ShiftImm determines the bit width.
1938 // This gets selected into a single UBFM:
1940 // UBFM Value, ShiftImm, BitWide + SrlImm - 1
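// For example, (srl (and Value, 0xff0), 4) has BitWide == 8 and SrlImm == 4,
// and is selected as UBFM Value, 4, 11 (i.e. ubfx Value, #4, #8).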
1943 if (N->getOpcode() != ISD::SRL)
1944 return false;
1946 uint64_t AndMask = 0;
1947 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, AndMask))
1948 return false;
1950 Opd0 = N->getOperand(0).getOperand(0);
1952 uint64_t SrlImm = 0;
1953 if (!isIntImmediate(N->getOperand(1), SrlImm))
1954 return false;
1956 // Check whether we really extract several bits here.
1957 unsigned BitWide = 64 - countLeadingOnes(~(AndMask >> SrlImm));
1958 if (BitWide && isMask_64(AndMask >> SrlImm)) {
1959 if (N->getValueType(0) == MVT::i32)
1960 Opc = AArch64::UBFMWri;
1961 else
1962 Opc = AArch64::UBFMXri;
1964 LSB = SrlImm;
1965 MSB = BitWide + SrlImm - 1;
1966 return true;
1969 return false;
1972 static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1973 unsigned &Immr, unsigned &Imms,
1974 bool BiggerPattern) {
1975 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1976 "N must be a SHR/SRA operation to call this function");
1978 EVT VT = N->getValueType(0);
1980 // Here we could test the type of VT and return false when the type does not
1981 // match, but since that check is done prior to this call in the current
1982 // context, we turned it into an assert to avoid redundant code.
1983 assert((VT == MVT::i32 || VT == MVT::i64) &&
1984 "Type checking must have been done before calling this function");
1986 // Check for AND + SRL doing several bits extract.
1987 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
1988 return true;
1990 // We're looking for a shift of a shift.
1991 uint64_t ShlImm = 0;
1992 uint64_t TruncBits = 0;
1993 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, ShlImm)) {
1994 Opd0 = N->getOperand(0).getOperand(0);
1995 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
1996 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
1997 // We are looking for a shift of truncate. Truncate from i64 to i32 could
1998 // be considered as setting high 32 bits as zero. Our strategy here is to
1999 // always generate 64bit UBFM. This consistency will help the CSE pass
2000 // later find more redundancy.
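// For example, (i32 (srl (trunc (i64 x)), 5)) is selected as a 64-bit UBFM of
// x, with TruncBits == 32 folded into Imms so that only bits 31:5 of x are
// extracted.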
2001 Opd0 = N->getOperand(0).getOperand(0);
2002 TruncBits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
2003 VT = Opd0.getValueType();
2004 assert(VT == MVT::i64 && "the promoted type should be i64");
2005 } else if (BiggerPattern) {
2006 // Let's pretend a 0 shift left has been performed.
2007 // FIXME: Currently we limit this to the bigger pattern case,
2008 // because some optimizations expect AND and not UBFM
2009 Opd0 = N->getOperand(0);
2010 } else
2011 return false;
2013 // Missing combines/constant folding may have left us with strange
2014 // constants.
2015 if (ShlImm >= VT.getSizeInBits()) {
2016 LLVM_DEBUG(
2017 (dbgs() << N
2018 << ": Found large shift immediate, this should not happen\n"));
2019 return false;
2022 uint64_t SrlImm = 0;
2023 if (!isIntImmediate(N->getOperand(1), SrlImm))
2024 return false;
2026 assert(SrlImm > 0 && SrlImm < VT.getSizeInBits() &&
2027 "bad amount in shift node!");
2028 int immr = SrlImm - ShlImm;
2029 Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
2030 Imms = VT.getSizeInBits() - ShlImm - TruncBits - 1;
2031 // SRA requires a signed extraction
2032 if (VT == MVT::i32)
2033 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
2034 else
2035 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
2036 return true;
2039 bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) {
2040 assert(N->getOpcode() == ISD::SIGN_EXTEND);
2042 EVT VT = N->getValueType(0);
2043 EVT NarrowVT = N->getOperand(0)->getValueType(0);
2044 if (VT != MVT::i64 || NarrowVT != MVT::i32)
2045 return false;
2047 uint64_t ShiftImm;
2048 SDValue Op = N->getOperand(0);
2049 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
2050 return false;
2052 SDLoc dl(N);
2053 // Extend the incoming operand of the shift to 64-bits.
2054 SDValue Opd0 = Widen(CurDAG, Op.getOperand(0));
2055 unsigned Immr = ShiftImm;
2056 unsigned Imms = NarrowVT.getSizeInBits() - 1;
2057 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
2058 CurDAG->getTargetConstant(Imms, dl, VT)};
2059 CurDAG->SelectNodeTo(N, AArch64::SBFMXri, VT, Ops);
2060 return true;
2063 /// Try to form fcvtl2 instructions from a floating-point extend of a high-half
2064 /// extract of a subvector.
2065 bool AArch64DAGToDAGISel::tryHighFPExt(SDNode *N) {
2066 assert(N->getOpcode() == ISD::FP_EXTEND);
2068 // There are 2 forms of fcvtl2 - extend to double or extend to float.
2069 SDValue Extract = N->getOperand(0);
2070 EVT VT = N->getValueType(0);
2071 EVT NarrowVT = Extract.getValueType();
2072 if ((VT != MVT::v2f64 || NarrowVT != MVT::v2f32) &&
2073 (VT != MVT::v4f32 || NarrowVT != MVT::v4f16))
2074 return false;
2076 // Optionally look past a bitcast.
2077 Extract = peekThroughBitcasts(Extract);
2078 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2079 return false;
2081 // Match extract from start of high half index.
2082 // Example: v8i16 -> v4i16 means the extract must begin at index 4.
2083 unsigned ExtractIndex = Extract.getConstantOperandVal(1);
2084 if (ExtractIndex != Extract.getValueType().getVectorNumElements())
2085 return false;
2087 auto Opcode = VT == MVT::v2f64 ? AArch64::FCVTLv4i32 : AArch64::FCVTLv8i16;
2088 CurDAG->SelectNodeTo(N, Opcode, VT, Extract.getOperand(0));
2089 return true;
2092 static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
2093 SDValue &Opd0, unsigned &Immr, unsigned &Imms,
2094 unsigned NumberOfIgnoredLowBits = 0,
2095 bool BiggerPattern = false) {
2096 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
2097 return false;
2099 switch (N->getOpcode()) {
2100 default:
2101 if (!N->isMachineOpcode())
2102 return false;
2103 break;
2104 case ISD::AND:
2105 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
2106 NumberOfIgnoredLowBits, BiggerPattern);
2107 case ISD::SRL:
2108 case ISD::SRA:
2109 return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
2111 case ISD::SIGN_EXTEND_INREG:
2112 return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms);
2115 unsigned NOpc = N->getMachineOpcode();
2116 switch (NOpc) {
2117 default:
2118 return false;
2119 case AArch64::SBFMWri:
2120 case AArch64::UBFMWri:
2121 case AArch64::SBFMXri:
2122 case AArch64::UBFMXri:
2123 Opc = NOpc;
2124 Opd0 = N->getOperand(0);
2125 Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
2126 Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
2127 return true;
2129 // Unreachable
2130 return false;
2133 bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {
2134 unsigned Opc, Immr, Imms;
2135 SDValue Opd0;
2136 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
2137 return false;
2139 EVT VT = N->getValueType(0);
2140 SDLoc dl(N);
2142 // If the bit extract operation is 64bit but the original type is 32bit, we
2143 // need to add one EXTRACT_SUBREG.
2144 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
2145 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
2146 CurDAG->getTargetConstant(Imms, dl, MVT::i64)};
2148 SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
2149 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
2150 ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
2151 MVT::i32, SDValue(BFM, 0), SubReg));
2152 return true;
2155 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
2156 CurDAG->getTargetConstant(Imms, dl, VT)};
2157 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2158 return true;
2161 /// Does DstMask form a complementary pair with the mask provided by
2162 /// BitsToBeInserted, suitable for use in a BFI instruction? Roughly speaking,
2163 /// this asks whether DstMask zeroes precisely those bits that will be set by
2164 /// the other half.
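/// For example, on i32 (and with NumberOfIgnoredHighBits == 0) a DstMask of
/// 0xffff00ff pairs with bits to be inserted at 0x0000ff00: the mask zeroes
/// exactly the byte that the insertion fills.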
2165 static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
2166 unsigned NumberOfIgnoredHighBits, EVT VT) {
2167 assert((VT == MVT::i32 || VT == MVT::i64) &&
2168 "i32 or i64 mask type expected!");
2169 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
2171 APInt SignificantDstMask = APInt(BitWidth, DstMask);
2172 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
2174 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
2175 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnes();
2178 // Look for bits that will be useful for later uses.
2179 // A bit is considered useless as soon as it is dropped and is never used
2180 // before it has been dropped.
2181 // E.g., looking for the useful bits of x:
2182 // 1. y = x & 0x7
2183 // 2. z = y >> 2
2184 // After #1, the useful bits of x are 0x7; these useful bits of x live through
2185 // y.
2186 // After #2, the useful bits of x are 0x4.
2187 // However, if x is used by an unpredictable instruction, then all of its bits
2188 // are useful.
2189 // E.g.
2190 // 1. y = x & 0x7
2191 // 2. z = y >> 2
2192 // 3. str x, [@x]
2193 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
2195 static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
2196 unsigned Depth) {
2197 uint64_t Imm =
2198 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
2199 Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
2200 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
2201 getUsefulBits(Op, UsefulBits, Depth + 1);
2204 static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
2205 uint64_t Imm, uint64_t MSB,
2206 unsigned Depth) {
2207 // inherit the bitwidth value
2208 APInt OpUsefulBits(UsefulBits);
2209 OpUsefulBits = 1;
2211 if (MSB >= Imm) {
2212 OpUsefulBits <<= MSB - Imm + 1;
2213 --OpUsefulBits;
2214 // The interesting part will be in the lower part of the result
2215 getUsefulBits(Op, OpUsefulBits, Depth + 1);
2216 // The interesting part was starting at Imm in the argument
2217 OpUsefulBits <<= Imm;
2218 } else {
2219 OpUsefulBits <<= MSB + 1;
2220 --OpUsefulBits;
2221 // The interesting part will be shifted in the result
2222 OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
2223 getUsefulBits(Op, OpUsefulBits, Depth + 1);
2224 // The interesting part was at zero in the argument
2225 OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
2228 UsefulBits &= OpUsefulBits;
2231 static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
2232 unsigned Depth) {
2233 uint64_t Imm =
2234 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
2235 uint64_t MSB =
2236 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
2238 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
2241 static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
2242 unsigned Depth) {
2243 uint64_t ShiftTypeAndValue =
2244 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
2245 APInt Mask(UsefulBits);
2246 Mask.clearAllBits();
2247 Mask.flipAllBits();
2249 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
2250 // Shift Left
2251 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
2252 Mask <<= ShiftAmt;
2253 getUsefulBits(Op, Mask, Depth + 1);
2254 Mask.lshrInPlace(ShiftAmt);
2255 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
2256 // Shift Right
2257 // We do not handle AArch64_AM::ASR, because the sign will change the
2258 // number of useful bits
2259 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
2260 Mask.lshrInPlace(ShiftAmt);
2261 getUsefulBits(Op, Mask, Depth + 1);
2262 Mask <<= ShiftAmt;
2263 } else
2264 return;
2266 UsefulBits &= Mask;
2269 static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
2270 unsigned Depth) {
2271 uint64_t Imm =
2272 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
2273 uint64_t MSB =
2274 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
2276 APInt OpUsefulBits(UsefulBits);
2277 OpUsefulBits = 1;
2279 APInt ResultUsefulBits(UsefulBits.getBitWidth(), 0);
2280 ResultUsefulBits.flipAllBits();
2281 APInt Mask(UsefulBits.getBitWidth(), 0);
2283 getUsefulBits(Op, ResultUsefulBits, Depth + 1);
2285 if (MSB >= Imm) {
2286 // The instruction is a BFXIL.
2287 uint64_t Width = MSB - Imm + 1;
2288 uint64_t LSB = Imm;
2290 OpUsefulBits <<= Width;
2291 --OpUsefulBits;
2293 if (Op.getOperand(1) == Orig) {
2294 // Copy the low bits from the result to bits starting from LSB.
2295 Mask = ResultUsefulBits & OpUsefulBits;
2296 Mask <<= LSB;
2299 if (Op.getOperand(0) == Orig)
2300 // Bits starting from LSB in the input contribute to the result.
2301 Mask |= (ResultUsefulBits & ~OpUsefulBits);
2302 } else {
2303 // The instruction is a BFI.
2304 uint64_t Width = MSB + 1;
2305 uint64_t LSB = UsefulBits.getBitWidth() - Imm;
2307 OpUsefulBits <<= Width;
2308 --OpUsefulBits;
2309 OpUsefulBits <<= LSB;
2311 if (Op.getOperand(1) == Orig) {
2312 // Copy the bits from the result to the zero bits.
2313 Mask = ResultUsefulBits & OpUsefulBits;
2314 Mask.lshrInPlace(LSB);
2317 if (Op.getOperand(0) == Orig)
2318 Mask |= (ResultUsefulBits & ~OpUsefulBits);
2321 UsefulBits &= Mask;
2324 static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
2325 SDValue Orig, unsigned Depth) {
2327 // Users of this node should have already been instruction selected
2328 // FIXME: Can we turn that into an assert?
2329 if (!UserNode->isMachineOpcode())
2330 return;
2332 switch (UserNode->getMachineOpcode()) {
2333 default:
2334 return;
2335 case AArch64::ANDSWri:
2336 case AArch64::ANDSXri:
2337 case AArch64::ANDWri:
2338 case AArch64::ANDXri:
2339 // We increment Depth only when we call the getUsefulBits
2340 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
2341 Depth);
2342 case AArch64::UBFMWri:
2343 case AArch64::UBFMXri:
2344 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
2346 case AArch64::ORRWrs:
2347 case AArch64::ORRXrs:
2348 if (UserNode->getOperand(0) != Orig && UserNode->getOperand(1) == Orig)
2349 getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
2350 Depth);
2351 return;
2352 case AArch64::BFMWri:
2353 case AArch64::BFMXri:
2354 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
2356 case AArch64::STRBBui:
2357 case AArch64::STURBBi:
2358 if (UserNode->getOperand(0) != Orig)
2359 return;
2360 UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);
2361 return;
2363 case AArch64::STRHHui:
2364 case AArch64::STURHHi:
2365 if (UserNode->getOperand(0) != Orig)
2366 return;
2367 UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xffff);
2368 return;
2372 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
2373 if (Depth >= SelectionDAG::MaxRecursionDepth)
2374 return;
2375 // Initialize UsefulBits
2376 if (!Depth) {
2377 unsigned Bitwidth = Op.getScalarValueSizeInBits();
2378 // At the beginning, assume every produced bit is useful
2379 UsefulBits = APInt(Bitwidth, 0);
2380 UsefulBits.flipAllBits();
2382 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
2384 for (SDNode *Node : Op.getNode()->uses()) {
2385 // A use cannot produce useful bits
2386 APInt UsefulBitsForUse = APInt(UsefulBits);
2387 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
2388 UsersUsefulBits |= UsefulBitsForUse;
2390 // UsefulBits contains the produced bits that are meaningful for the
2391 // current definition, thus a user cannot make a bit meaningful at
2392 // this point
2393 UsefulBits &= UsersUsefulBits;
2396 /// Create a machine node performing a notional SHL of Op by ShlAmount. If
2397 /// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
2398 /// 0, return Op unchanged.
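/// For example, on i32 getLeftShift(Op, 8) emits UBFMWri Op, #24, #23 (an
/// lsl #8), while getLeftShift(Op, -8) emits UBFMWri Op, #8, #31 (an lsr #8).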
2399 static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
2400 if (ShlAmount == 0)
2401 return Op;
2403 EVT VT = Op.getValueType();
2404 SDLoc dl(Op);
2405 unsigned BitWidth = VT.getSizeInBits();
2406 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
2408 SDNode *ShiftNode;
2409 if (ShlAmount > 0) {
2410 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
2411 ShiftNode = CurDAG->getMachineNode(
2412 UBFMOpc, dl, VT, Op,
2413 CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
2414 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
2415 } else {
2416 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
2417 assert(ShlAmount < 0 && "expected right shift");
2418 int ShrAmount = -ShlAmount;
2419 ShiftNode = CurDAG->getMachineNode(
2420 UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
2421 CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
2424 return SDValue(ShiftNode, 0);
2427 /// Does this tree qualify as an attempt to move a bitfield into position,
2428 /// essentially "(and (shl VAL, N), Mask)".
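/// For example, (and (shl x, 3), 0x7f8) positions an 8-bit field of x at bits
/// 10:3, giving Src = x, ShiftAmount = 3 and MaskWidth = 8.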
2429 static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
2430 bool BiggerPattern,
2431 SDValue &Src, int &ShiftAmount,
2432 int &MaskWidth) {
2433 EVT VT = Op.getValueType();
2434 unsigned BitWidth = VT.getSizeInBits();
2435 (void)BitWidth;
2436 assert(BitWidth == 32 || BitWidth == 64);
2438 KnownBits Known = CurDAG->computeKnownBits(Op);
2440 // Non-zero in the sense that they're not provably zero, which is the key
2441 // point if we want to use this value
2442 uint64_t NonZeroBits = (~Known.Zero).getZExtValue();
2444 // Discard a constant AND mask if present. It's safe because the node will
2445 // already have been factored into the computeKnownBits calculation above.
2446 uint64_t AndImm;
2447 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
2448 assert((~APInt(BitWidth, AndImm) & ~Known.Zero) == 0);
2449 Op = Op.getOperand(0);
2452 // Don't match if the SHL has more than one use, since then we'll end up
2453 // generating SHL+UBFIZ instead of just keeping SHL+AND.
2454 if (!BiggerPattern && !Op.hasOneUse())
2455 return false;
2457 uint64_t ShlImm;
2458 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
2459 return false;
2460 Op = Op.getOperand(0);
2462 if (!isShiftedMask_64(NonZeroBits))
2463 return false;
2465 ShiftAmount = countTrailingZeros(NonZeroBits);
2466 MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
2468 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
2469 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
2470 // amount. BiggerPattern is true when this pattern is being matched for BFI,
2471 // BiggerPattern is false when this pattern is being matched for UBFIZ, in
2472 // which case it is not profitable to insert an extra shift.
2473 if (ShlImm - ShiftAmount != 0 && !BiggerPattern)
2474 return false;
2475 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
2477 return true;
2480 static bool isShiftedMask(uint64_t Mask, EVT VT) {
2481 assert(VT == MVT::i32 || VT == MVT::i64);
2482 if (VT == MVT::i32)
2483 return isShiftedMask_32(Mask);
2484 return isShiftedMask_64(Mask);
2487 // Generate a BFI/BFXIL from 'or (and X, MaskImm), OrImm' iff the value being
2488 // inserted only sets known zero bits.
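// For example, assuming nothing else is known about x, on i32
// (or (and x, 0xfffffff0), 0x5) becomes a MOV of #5 followed by a BFXIL of its
// low 4 bits into x, since 0x5 cannot be encoded as a logical immediate.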
2489 static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {
2490 assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
2492 EVT VT = N->getValueType(0);
2493 if (VT != MVT::i32 && VT != MVT::i64)
2494 return false;
2496 unsigned BitWidth = VT.getSizeInBits();
2498 uint64_t OrImm;
2499 if (!isOpcWithIntImmediate(N, ISD::OR, OrImm))
2500 return false;
2502 // Skip this transformation if the ORR immediate can be encoded in the ORR.
2503 // Otherwise, we'll trade an AND+ORR for ORR+BFI/BFXIL, which is most likely
2504 // performance neutral.
2505 if (AArch64_AM::isLogicalImmediate(OrImm, BitWidth))
2506 return false;
2508 uint64_t MaskImm;
2509 SDValue And = N->getOperand(0);
2510 // Must be a single use AND with an immediate operand.
2511 if (!And.hasOneUse() ||
2512 !isOpcWithIntImmediate(And.getNode(), ISD::AND, MaskImm))
2513 return false;
2515 // Compute the Known Zero for the AND as this allows us to catch more general
2516 // cases than just looking for AND with imm.
2517 KnownBits Known = CurDAG->computeKnownBits(And);
2519 // Non-zero in the sense that they're not provably zero, which is the key
2520 // point if we want to use this value.
2521 uint64_t NotKnownZero = (~Known.Zero).getZExtValue();
2523 // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
2524 if (!isShiftedMask(Known.Zero.getZExtValue(), VT))
2525 return false;
2527 // The bits being inserted must only set those bits that are known to be zero.
2528 if ((OrImm & NotKnownZero) != 0) {
2529 // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
2530 // currently handle this case.
2531 return false;
2534 // BFI/BFXIL dst, src, #lsb, #width.
2535 int LSB = countTrailingOnes(NotKnownZero);
2536 int Width = BitWidth - APInt(BitWidth, NotKnownZero).countPopulation();
2538 // BFI/BFXIL is an alias of BFM, so translate to BFM operands.
2539 unsigned ImmR = (BitWidth - LSB) % BitWidth;
2540 unsigned ImmS = Width - 1;
2542 // If we're creating a BFI instruction avoid cases where we need more
2543 // instructions to materialize the BFI constant as compared to the original
2544 // ORR. A BFXIL will use the same constant as the original ORR, so the code
2545 // should be no worse in this case.
2546 bool IsBFI = LSB != 0;
2547 uint64_t BFIImm = OrImm >> LSB;
2548 if (IsBFI && !AArch64_AM::isLogicalImmediate(BFIImm, BitWidth)) {
2549 // We have a BFI instruction and we know the constant can't be materialized
2550 // with a ORR-immediate with the zero register.
2551 unsigned OrChunks = 0, BFIChunks = 0;
2552 for (unsigned Shift = 0; Shift < BitWidth; Shift += 16) {
2553 if (((OrImm >> Shift) & 0xFFFF) != 0)
2554 ++OrChunks;
2555 if (((BFIImm >> Shift) & 0xFFFF) != 0)
2556 ++BFIChunks;
2558 if (BFIChunks > OrChunks)
2559 return false;
2562 // Materialize the constant to be inserted.
2563 SDLoc DL(N);
2564 unsigned MOVIOpc = VT == MVT::i32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
2565 SDNode *MOVI = CurDAG->getMachineNode(
2566 MOVIOpc, DL, VT, CurDAG->getTargetConstant(BFIImm, DL, VT));
2568 // Create the BFI/BFXIL instruction.
2569 SDValue Ops[] = {And.getOperand(0), SDValue(MOVI, 0),
2570 CurDAG->getTargetConstant(ImmR, DL, VT),
2571 CurDAG->getTargetConstant(ImmS, DL, VT)};
2572 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2573 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2574 return true;
2577 static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
2578 SelectionDAG *CurDAG) {
2579 assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
2581 EVT VT = N->getValueType(0);
2582 if (VT != MVT::i32 && VT != MVT::i64)
2583 return false;
2585 unsigned BitWidth = VT.getSizeInBits();
2587 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
2588 // have the expected shape. Try to undo that.
2590 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
2591 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
2593 // Given an OR operation, check if we have the following pattern:
2594 // ubfm c, b, imm, imm2 (or something that does the same job, see
2595 // isBitfieldExtractOp)
2596 // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
2597 // countTrailingZeros(mask2) == imm2 - imm + 1
2598 // f = d | c
2599 // if yes, replace the OR instruction with:
2600 // f = BFM Opd0, Opd1, LSB, MSB ; where LSB = imm, and MSB = imm2
2602 // OR is commutative, check all combinations of operand order and values of
2603 // BiggerPattern, i.e.
2604 // Opd0, Opd1, BiggerPattern=false
2605 // Opd1, Opd0, BiggerPattern=false
2606 // Opd0, Opd1, BiggerPattern=true
2607 // Opd1, Opd0, BiggerPattern=true
2608 // Several of these combinations may match, so check with BiggerPattern=false
2609 // first since that will produce better results by matching more instructions
2610 // and/or inserting fewer extra instructions.
2611 for (int I = 0; I < 4; ++I) {
2613 SDValue Dst, Src;
2614 unsigned ImmR, ImmS;
2615 bool BiggerPattern = I / 2;
2616 SDValue OrOpd0Val = N->getOperand(I % 2);
2617 SDNode *OrOpd0 = OrOpd0Val.getNode();
2618 SDValue OrOpd1Val = N->getOperand((I + 1) % 2);
2619 SDNode *OrOpd1 = OrOpd1Val.getNode();
2621 unsigned BFXOpc;
2622 int DstLSB, Width;
2623 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
2624 NumberOfIgnoredLowBits, BiggerPattern)) {
2625 // Check that the returned opcode is compatible with the pattern,
2626 // i.e., same type and zero extended (U and not S)
2627 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
2628 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
2629 continue;
2631 // Compute the width of the bitfield insertion
2632 DstLSB = 0;
2633 Width = ImmS - ImmR + 1;
2634 // FIXME: This constraint is to catch bitfield insertion only; we may
2635 // want to widen the pattern if we want to grab the general bitfield
2636 // move case.
2637 if (Width <= 0)
2638 continue;
2640 // If the mask on the insertee is correct, we have a BFXIL operation. We
2641 // can share the ImmR and ImmS values from the already-computed UBFM.
2642 } else if (isBitfieldPositioningOp(CurDAG, OrOpd0Val,
2643 BiggerPattern,
2644 Src, DstLSB, Width)) {
2645 ImmR = (BitWidth - DstLSB) % BitWidth;
2646 ImmS = Width - 1;
2647 } else
2648 continue;
2650 // Check the second part of the pattern
2651 EVT VT = OrOpd1Val.getValueType();
2652 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
2654 // Compute the Known Zero for the candidate of the first operand.
2655 // This allows us to catch more general cases than just looking for
2656 // an AND with an immediate. Indeed, simplify-demanded-bits may have removed
2657 // the AND instruction because it proved it was useless.
2658 KnownBits Known = CurDAG->computeKnownBits(OrOpd1Val);
2660 // Check if there is enough room for the second operand to appear
2661 // in the first one
2662 APInt BitsToBeInserted =
2663 APInt::getBitsSet(Known.getBitWidth(), DstLSB, DstLSB + Width);
2665 if ((BitsToBeInserted & ~Known.Zero) != 0)
2666 continue;
2668 // Set the first operand
2669 uint64_t Imm;
2670 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
2671 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
2672 // In that case, we can eliminate the AND
2673 Dst = OrOpd1->getOperand(0);
2674 else
2675 // Maybe the AND has been removed by simplify-demanded-bits
2676 // or is useful because it discards more bits
2677 Dst = OrOpd1Val;
2679 // both parts match
2680 SDLoc DL(N);
2681 SDValue Ops[] = {Dst, Src, CurDAG->getTargetConstant(ImmR, DL, VT),
2682 CurDAG->getTargetConstant(ImmS, DL, VT)};
2683 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2684 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2685 return true;
2688 // Generate a BFXIL from 'or (and X, Mask0Imm), (and Y, Mask1Imm)' iff
2689 // Mask0Imm and ~Mask1Imm are equivalent and one of the MaskImms is a shifted
2690 // mask (e.g., 0x000ffff0).
2691 uint64_t Mask0Imm, Mask1Imm;
2692 SDValue And0 = N->getOperand(0);
2693 SDValue And1 = N->getOperand(1);
2694 if (And0.hasOneUse() && And1.hasOneUse() &&
2695 isOpcWithIntImmediate(And0.getNode(), ISD::AND, Mask0Imm) &&
2696 isOpcWithIntImmediate(And1.getNode(), ISD::AND, Mask1Imm) &&
2697 APInt(BitWidth, Mask0Imm) == ~APInt(BitWidth, Mask1Imm) &&
2698 (isShiftedMask(Mask0Imm, VT) || isShiftedMask(Mask1Imm, VT))) {
2700 // ORR is commutative, so canonicalize to the form 'or (and X, Mask0Imm),
2701 // (and Y, Mask1Imm)' where Mask1Imm is the shifted mask masking off the
2702 // bits to be inserted.
2703 if (isShiftedMask(Mask0Imm, VT)) {
2704 std::swap(And0, And1);
2705 std::swap(Mask0Imm, Mask1Imm);
2708 SDValue Src = And1->getOperand(0);
2709 SDValue Dst = And0->getOperand(0);
2710 unsigned LSB = countTrailingZeros(Mask1Imm);
2711 int Width = BitWidth - APInt(BitWidth, Mask0Imm).countPopulation();
2713 // The BFXIL inserts the low-order bits from a source register, so right
2714 // shift the needed bits into place.
2715 SDLoc DL(N);
2716 unsigned ShiftOpc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
2717 SDNode *LSR = CurDAG->getMachineNode(
2718 ShiftOpc, DL, VT, Src, CurDAG->getTargetConstant(LSB, DL, VT),
2719 CurDAG->getTargetConstant(BitWidth - 1, DL, VT));
2721 // BFXIL is an alias of BFM, so translate to BFM operands.
2722 unsigned ImmR = (BitWidth - LSB) % BitWidth;
2723 unsigned ImmS = Width - 1;
2725 // Create the BFXIL instruction.
2726 SDValue Ops[] = {Dst, SDValue(LSR, 0),
2727 CurDAG->getTargetConstant(ImmR, DL, VT),
2728 CurDAG->getTargetConstant(ImmS, DL, VT)};
2729 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2730 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2731 return true;
2734 return false;
2737 bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
2738 if (N->getOpcode() != ISD::OR)
2739 return false;
2741 APInt NUsefulBits;
2742 getUsefulBits(SDValue(N, 0), NUsefulBits);
2744 // If all bits are not useful, just return UNDEF.
2745 if (!NUsefulBits) {
2746 CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
2747 return true;
2750 if (tryBitfieldInsertOpFromOr(N, NUsefulBits, CurDAG))
2751 return true;
2753 return tryBitfieldInsertOpFromOrAndImm(N, CurDAG);
2756 /// SelectBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
2757 /// equivalent of a left shift by a constant amount followed by an and masking
2758 /// out a contiguous set of bits.
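/// For example, on i32 (and (shl x, 5), 0x3fe0) becomes UBFMWri x, #27, #8,
/// i.e. ubfiz x, #5, #9.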
2759 bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
2760 if (N->getOpcode() != ISD::AND)
2761 return false;
2763 EVT VT = N->getValueType(0);
2764 if (VT != MVT::i32 && VT != MVT::i64)
2765 return false;
2767 SDValue Op0;
2768 int DstLSB, Width;
2769 if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
2770 Op0, DstLSB, Width))
2771 return false;
2773 // ImmR is the rotate right amount.
2774 unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
2775 // ImmS is the most significant bit of the source to be moved.
2776 unsigned ImmS = Width - 1;
2778 SDLoc DL(N);
2779 SDValue Ops[] = {Op0, CurDAG->getTargetConstant(ImmR, DL, VT),
2780 CurDAG->getTargetConstant(ImmS, DL, VT)};
2781 unsigned Opc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
2782 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2783 return true;
2786 /// tryShiftAmountMod - Take advantage of built-in mod of shift amount in
2787 /// variable shift/rotate instructions.
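/// For example, (srl x, (and y, 63)) on i64 is selected as LSRVXr x, y, since
/// the LSRV instruction only uses the low 6 bits of the shift amount anyway.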
2788 bool AArch64DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
2789 EVT VT = N->getValueType(0);
2791 unsigned Opc;
2792 switch (N->getOpcode()) {
2793 case ISD::ROTR:
2794 Opc = (VT == MVT::i32) ? AArch64::RORVWr : AArch64::RORVXr;
2795 break;
2796 case ISD::SHL:
2797 Opc = (VT == MVT::i32) ? AArch64::LSLVWr : AArch64::LSLVXr;
2798 break;
2799 case ISD::SRL:
2800 Opc = (VT == MVT::i32) ? AArch64::LSRVWr : AArch64::LSRVXr;
2801 break;
2802 case ISD::SRA:
2803 Opc = (VT == MVT::i32) ? AArch64::ASRVWr : AArch64::ASRVXr;
2804 break;
2805 default:
2806 return false;
2809 uint64_t Size;
2810 uint64_t Bits;
2811 if (VT == MVT::i32) {
2812 Bits = 5;
2813 Size = 32;
2814 } else if (VT == MVT::i64) {
2815 Bits = 6;
2816 Size = 64;
2817 } else
2818 return false;
2820 SDValue ShiftAmt = N->getOperand(1);
2821 SDLoc DL(N);
2822 SDValue NewShiftAmt;
2824 // Skip over an extend of the shift amount.
2825 if (ShiftAmt->getOpcode() == ISD::ZERO_EXTEND ||
2826 ShiftAmt->getOpcode() == ISD::ANY_EXTEND)
2827 ShiftAmt = ShiftAmt->getOperand(0);
2829 if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
2830 SDValue Add0 = ShiftAmt->getOperand(0);
2831 SDValue Add1 = ShiftAmt->getOperand(1);
2832 uint64_t Add0Imm;
2833 uint64_t Add1Imm;
2834 if (isIntImmediate(Add1, Add1Imm) && (Add1Imm % Size == 0)) {
2835 // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
2836 // to avoid the ADD/SUB.
2837 NewShiftAmt = Add0;
2838 } else if (ShiftAmt->getOpcode() == ISD::SUB &&
2839 isIntImmediate(Add0, Add0Imm) && Add0Imm != 0 &&
2840 (Add0Imm % Size == 0)) {
2841 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
2842 // to generate a NEG instead of a SUB from a constant.
2843 unsigned NegOpc;
2844 unsigned ZeroReg;
2845 EVT SubVT = ShiftAmt->getValueType(0);
2846 if (SubVT == MVT::i32) {
2847 NegOpc = AArch64::SUBWrr;
2848 ZeroReg = AArch64::WZR;
2849 } else {
2850 assert(SubVT == MVT::i64);
2851 NegOpc = AArch64::SUBXrr;
2852 ZeroReg = AArch64::XZR;
2854 SDValue Zero =
2855 CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, ZeroReg, SubVT);
2856 MachineSDNode *Neg =
2857 CurDAG->getMachineNode(NegOpc, DL, SubVT, Zero, Add1);
2858 NewShiftAmt = SDValue(Neg, 0);
2859 } else if (ShiftAmt->getOpcode() == ISD::SUB &&
2860 isIntImmediate(Add0, Add0Imm) && (Add0Imm % Size == Size - 1)) {
2861 // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
2862 // to generate a NOT instead of a SUB from a constant.
2863 unsigned NotOpc;
2864 unsigned ZeroReg;
2865 EVT SubVT = ShiftAmt->getValueType(0);
2866 if (SubVT == MVT::i32) {
2867 NotOpc = AArch64::ORNWrr;
2868 ZeroReg = AArch64::WZR;
2869 } else {
2870 assert(SubVT == MVT::i64);
2871 NotOpc = AArch64::ORNXrr;
2872 ZeroReg = AArch64::XZR;
2874 SDValue Zero =
2875 CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, ZeroReg, SubVT);
2876 MachineSDNode *Not =
2877 CurDAG->getMachineNode(NotOpc, DL, SubVT, Zero, Add1);
2878 NewShiftAmt = SDValue(Not, 0);
2879 } else
2880 return false;
2881 } else {
2882 // If the shift amount is masked with an AND, check that the mask covers the
2883 // bits that are implicitly ANDed off by the above opcodes and if so, skip
2884 // the AND.
2885 uint64_t MaskImm;
2886 if (!isOpcWithIntImmediate(ShiftAmt.getNode(), ISD::AND, MaskImm) &&
2887 !isOpcWithIntImmediate(ShiftAmt.getNode(), AArch64ISD::ANDS, MaskImm))
2888 return false;
2890 if (countTrailingOnes(MaskImm) < Bits)
2891 return false;
2893 NewShiftAmt = ShiftAmt->getOperand(0);
2896 // Narrow/widen the shift amount to match the size of the shift operation.
2897 if (VT == MVT::i32)
2898 NewShiftAmt = narrowIfNeeded(CurDAG, NewShiftAmt);
2899 else if (VT == MVT::i64 && NewShiftAmt->getValueType(0) == MVT::i32) {
2900 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, DL, MVT::i32);
2901 MachineSDNode *Ext = CurDAG->getMachineNode(
2902 AArch64::SUBREG_TO_REG, DL, VT,
2903 CurDAG->getTargetConstant(0, DL, MVT::i64), NewShiftAmt, SubReg);
2904 NewShiftAmt = SDValue(Ext, 0);
2907 SDValue Ops[] = {N->getOperand(0), NewShiftAmt};
2908 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2909 return true;
2912 bool
2913 AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
2914 unsigned RegWidth) {
2915 APFloat FVal(0.0);
2916 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
2917 FVal = CN->getValueAPF();
2918 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2919 // Some otherwise illegal constants are allowed in this case.
2920 if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
2921 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2922 return false;
2924 ConstantPoolSDNode *CN =
2925 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
2926 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
2927 } else
2928 return false;
2930 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
2931 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
2932 // x-register.
2934 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
2935 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
2936 // integers.
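// For example, (fp_to_sint (fmul Val, 65536.0)) with a w-register destination
// reaches here with THIS_NODE == 65536.0 == 2^16, so FBits ends up as 16.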
2937 bool IsExact;
2939 // fbits is between 1 and 64 in the worst-case, which means the fmul
2940 // could have 2^64 as an actual operand. Need 65 bits of precision.
2941 APSInt IntVal(65, true);
2942 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
2944 // N.b. isPowerOf2 also checks for > 0.
2945 if (!IsExact || !IntVal.isPowerOf2()) return false;
2946 unsigned FBits = IntVal.logBase2();
2948 // Checks above should have guaranteed that we haven't lost information in
2949 // finding FBits, but it must still be in range.
2950 if (FBits == 0 || FBits > RegWidth) return false;
2952 FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
2953 return true;
2956 // Inspects a register string of the form o0:op1:CRn:CRm:op2, gets the fields
2957 // of the string, obtains the integer values from them, and combines these
2958 // into a single value to be used in the MRS/MSR instruction.
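// For example, "1:2:7:14:0" yields (1 << 14) | (2 << 11) | (7 << 7) |
// (14 << 3) | 0 == 0x53f0.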
2959 static int getIntOperandFromRegisterString(StringRef RegString) {
2960 SmallVector<StringRef, 5> Fields;
2961 RegString.split(Fields, ':');
2963 if (Fields.size() == 1)
2964 return -1;
2966 assert(Fields.size() == 5
2967 && "Invalid number of fields in read register string");
2969 SmallVector<int, 5> Ops;
2970 bool AllIntFields = true;
2972 for (StringRef Field : Fields) {
2973 unsigned IntField;
2974 AllIntFields &= !Field.getAsInteger(10, IntField);
2975 Ops.push_back(IntField);
2978 assert(AllIntFields &&
2979 "Unexpected non-integer value in special register string.");
2980 (void)AllIntFields;
2982 // Need to combine the integer fields of the string into a single value
2983 // based on the bit encoding of the MRS/MSR instruction.
2984 return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
2985 (Ops[3] << 3) | (Ops[4]);
2988 // Lower the read_register intrinsic to an MRS instruction node if the special
2989 // register string argument is either of the form detailed in the ALCE (the
2990 // form described in getIntOperandFromRegisterString) or is a named register
2991 // known by the MRS SysReg mapper.
2992 bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
2993 const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
2994 const auto *RegString = cast<MDString>(MD->getMD()->getOperand(0));
2995 SDLoc DL(N);
2997 int Reg = getIntOperandFromRegisterString(RegString->getString());
2998 if (Reg != -1) {
2999 ReplaceNode(N, CurDAG->getMachineNode(
3000 AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
3001 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
3002 N->getOperand(0)));
3003 return true;
3006 // Use the sysreg mapper to map the remaining possible strings to the
3007 // value for the register to be used for the instruction operand.
3008 auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
3009 if (TheReg && TheReg->Readable &&
3010 TheReg->haveFeatures(Subtarget->getFeatureBits()))
3011 Reg = TheReg->Encoding;
3012 else
3013 Reg = AArch64SysReg::parseGenericRegister(RegString->getString());
3015 if (Reg != -1) {
3016 ReplaceNode(N, CurDAG->getMachineNode(
3017 AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
3018 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
3019 N->getOperand(0)));
3020 return true;
3023 if (RegString->getString() == "pc") {
3024 ReplaceNode(N, CurDAG->getMachineNode(
3025 AArch64::ADR, DL, N->getSimpleValueType(0), MVT::Other,
3026 CurDAG->getTargetConstant(0, DL, MVT::i32),
3027 N->getOperand(0)));
3028 return true;
3031 return false;
3034 // Lower the write_register intrinsic to an MSR instruction node if the special
3035 // register string argument is either of the form detailed in the ALCE (the
3036 // form described in getIntOperandFromRegisterString) or is a named register
3037 // known by the MSR SysReg mapper.
3038 bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
3039 const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
3040 const auto *RegString = cast<MDString>(MD->getMD()->getOperand(0));
3041 SDLoc DL(N);
3043 int Reg = getIntOperandFromRegisterString(RegString->getString());
3044 if (Reg != -1) {
3045 ReplaceNode(
3046 N, CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
3047 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
3048 N->getOperand(2), N->getOperand(0)));
3049 return true;
3052 // Check if the register was one of those allowed as the pstatefield value in
3053 // the MSR (immediate) instruction. To accept the values allowed in the
3054 // pstatefield for the MSR (immediate) instruction, we also require that an
3055 // immediate value has been provided as an argument; we know that this is
3056 // the case, as it has been ensured by semantic checking.
3057 auto PMapper = AArch64PState::lookupPStateByName(RegString->getString());
3058 if (PMapper) {
3059 assert (isa<ConstantSDNode>(N->getOperand(2))
3060 && "Expected a constant integer expression.");
3061 unsigned Reg = PMapper->Encoding;
3062 uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
3063 unsigned State;
3064 if (Reg == AArch64PState::PAN || Reg == AArch64PState::UAO || Reg == AArch64PState::SSBS) {
3065 assert(Immed < 2 && "Bad imm");
3066 State = AArch64::MSRpstateImm1;
3067 } else {
3068 assert(Immed < 16 && "Bad imm");
3069 State = AArch64::MSRpstateImm4;
3071 ReplaceNode(N, CurDAG->getMachineNode(
3072 State, DL, MVT::Other,
3073 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
3074 CurDAG->getTargetConstant(Immed, DL, MVT::i16),
3075 N->getOperand(0)));
3076 return true;
3079 // Use the sysreg mapper to attempt to map the remaining possible strings
3080 // to the value for the register to be used for the MSR (register)
3081 // instruction operand.
3082 auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
3083 if (TheReg && TheReg->Writeable &&
3084 TheReg->haveFeatures(Subtarget->getFeatureBits()))
3085 Reg = TheReg->Encoding;
3086 else
3087 Reg = AArch64SysReg::parseGenericRegister(RegString->getString());
3088 if (Reg != -1) {
3089 ReplaceNode(N, CurDAG->getMachineNode(
3090 AArch64::MSR, DL, MVT::Other,
3091 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
3092 N->getOperand(2), N->getOperand(0)));
3093 return true;
3096 return false;
3099 /// We've got special pseudo-instructions for these
3100 bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
3101 unsigned Opcode;
3102 EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();
3104 // Leave IR for LSE if subtarget supports it.
3105 if (Subtarget->hasLSE()) return false;
3107 if (MemTy == MVT::i8)
3108 Opcode = AArch64::CMP_SWAP_8;
3109 else if (MemTy == MVT::i16)
3110 Opcode = AArch64::CMP_SWAP_16;
3111 else if (MemTy == MVT::i32)
3112 Opcode = AArch64::CMP_SWAP_32;
3113 else if (MemTy == MVT::i64)
3114 Opcode = AArch64::CMP_SWAP_64;
3115 else
3116 llvm_unreachable("Unknown AtomicCmpSwap type");
3118 MVT RegTy = MemTy == MVT::i64 ? MVT::i64 : MVT::i32;
3119 SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
3120 N->getOperand(0)};
3121 SDNode *CmpSwap = CurDAG->getMachineNode(
3122 Opcode, SDLoc(N),
3123 CurDAG->getVTList(RegTy, MVT::i32, MVT::Other), Ops);
3125 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
3126 CurDAG->setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
3128 ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
3129 ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
3130 CurDAG->RemoveDeadNode(N);
3132 return true;
3135 bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm,
3136 SDValue &Shift) {
3137 if (!isa<ConstantSDNode>(N))
3138 return false;
3140 SDLoc DL(N);
3141 uint64_t Val = cast<ConstantSDNode>(N)
3142 ->getAPIntValue()
3143 .truncOrSelf(VT.getFixedSizeInBits())
3144 .getZExtValue();
3146 switch (VT.SimpleTy) {
3147 case MVT::i8:
3148 // All immediates are supported.
3149 Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
3150 Imm = CurDAG->getTargetConstant(Val, DL, MVT::i32);
3151 return true;
3152 case MVT::i16:
3153 case MVT::i32:
3154 case MVT::i64:
3155 // Support 8bit unsigned immediates.
3156 if (Val <= 255) {
3157 Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
3158 Imm = CurDAG->getTargetConstant(Val, DL, MVT::i32);
3159 return true;
3161 // Support 16bit unsigned immediates that are a multiple of 256.
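// For example, 0x4500 is selected as immediate 0x45 with a left shift of 8.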
3162 if (Val <= 65280 && Val % 256 == 0) {
3163 Shift = CurDAG->getTargetConstant(8, DL, MVT::i32);
3164 Imm = CurDAG->getTargetConstant(Val >> 8, DL, MVT::i32);
3165 return true;
3166 }
3167 break;
3168 default:
3169 break;
3170 }
3172 return false;
3173 }
3175 bool AArch64DAGToDAGISel::SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm,
3176 SDValue &Shift) {
3177 if (!isa<ConstantSDNode>(N))
3178 return false;
3180 SDLoc DL(N);
3181 int64_t Val = cast<ConstantSDNode>(N)
3182 ->getAPIntValue()
3183 .truncOrSelf(VT.getFixedSizeInBits())
3184 .getSExtValue();
3186 switch (VT.SimpleTy) {
3187 case MVT::i8:
3188 // All immediates are supported.
3189 Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
3190 Imm = CurDAG->getTargetConstant(Val & 0xFF, DL, MVT::i32);
3191 return true;
3192 case MVT::i16:
3193 case MVT::i32:
3194 case MVT::i64:
3195 // Support 8bit signed immediates.
3196 if (Val >= -128 && Val <= 127) {
3197 Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
3198 Imm = CurDAG->getTargetConstant(Val & 0xFF, DL, MVT::i32);
3199 return true;
3200 }
3201 // Support 16bit signed immediates that are a multiple of 256.
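// e.g. a value of -512 would be emitted as Imm=0xFE (i.e. -2) with Shift=8.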
3202 if (Val >= -32768 && Val <= 32512 && Val % 256 == 0) {
3203 Shift = CurDAG->getTargetConstant(8, DL, MVT::i32);
3204 Imm = CurDAG->getTargetConstant((Val >> 8) & 0xFF, DL, MVT::i32);
3205 return true;
3206 }
3207 break;
3208 default:
3209 break;
3210 }
3212 return false;
3213 }
3215 bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDValue N, SDValue &Imm) {
3216 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
3217 int64_t ImmVal = CNode->getSExtValue();
3218 SDLoc DL(N);
3219 if (ImmVal >= -128 && ImmVal < 128) {
3220 Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
3221 return true;
3222 }
3223 }
3224 return false;
3225 }
3227 bool AArch64DAGToDAGISel::SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm) {
3228 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
3229 uint64_t ImmVal = CNode->getZExtValue();
3231 switch (VT.SimpleTy) {
3232 case MVT::i8:
3233 ImmVal &= 0xFF;
3234 break;
3235 case MVT::i16:
3236 ImmVal &= 0xFFFF;
3237 break;
3238 case MVT::i32:
3239 ImmVal &= 0xFFFFFFFF;
3240 break;
3241 case MVT::i64:
3242 break;
3243 default:
3244 llvm_unreachable("Unexpected type");
3245 }
3247 if (ImmVal < 256) {
3248 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i32);
3249 return true;
3250 }
3251 }
3252 return false;
3253 }
3255 bool AArch64DAGToDAGISel::SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm,
3256 bool Invert) {
3257 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
3258 uint64_t ImmVal = CNode->getZExtValue();
3259 SDLoc DL(N);
3261 if (Invert)
3262 ImmVal = ~ImmVal;
3264 // Shift mask depending on type size.
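// The element-sized value is replicated across all 64 bits so that, below,
// processLogicalImmediate can validate and encode it as a 64-bit logical immediate
// (e.g. an i8 pattern 0x0F becomes 0x0F0F0F0F0F0F0F0F).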
3265 switch (VT.SimpleTy) {
3266 case MVT::i8:
3267 ImmVal &= 0xFF;
3268 ImmVal |= ImmVal << 8;
3269 ImmVal |= ImmVal << 16;
3270 ImmVal |= ImmVal << 32;
3271 break;
3272 case MVT::i16:
3273 ImmVal &= 0xFFFF;
3274 ImmVal |= ImmVal << 16;
3275 ImmVal |= ImmVal << 32;
3276 break;
3277 case MVT::i32:
3278 ImmVal &= 0xFFFFFFFF;
3279 ImmVal |= ImmVal << 32;
3280 break;
3281 case MVT::i64:
3282 break;
3283 default:
3284 llvm_unreachable("Unexpected type");
3285 }
3287 uint64_t encoding;
3288 if (AArch64_AM::processLogicalImmediate(ImmVal, 64, encoding)) {
3289 Imm = CurDAG->getTargetConstant(encoding, DL, MVT::i64);
3290 return true;
3291 }
3292 }
3293 return false;
3294 }
3296 // SVE shift intrinsics allow shift amounts larger than the element's bitwidth.
3297 // Rather than attempt to normalise everything we can sometimes saturate the
3298 // shift amount during selection. This function also allows for consistent
3299 // isel patterns by ensuring the resulting "Imm" node is of the i32 type
3300 // required by the instructions.
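// For example, a right shift on 16-bit elements would typically use Low=1, High=16; a shift
// amount of 20 is then rejected unless AllowSaturation is set, in which case it is clamped
// to 16 (which produces the same all-zero/all-sign result anyway).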
3301 bool AArch64DAGToDAGISel::SelectSVEShiftImm(SDValue N, uint64_t Low,
3302 uint64_t High, bool AllowSaturation,
3303 SDValue &Imm) {
3304 if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
3305 uint64_t ImmVal = CN->getZExtValue();
3307 // Reject shift amounts that are too small.
3308 if (ImmVal < Low)
3309 return false;
3311 // Reject or saturate shift amounts that are too big.
3312 if (ImmVal > High) {
3313 if (!AllowSaturation)
3314 return false;
3315 ImmVal = High;
3316 }
3318 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i32);
3319 return true;
3320 }
3322 return false;
3323 }
3325 bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) {
3326 // tagp(FrameIndex, IRGstack, tag_offset):
3327 // since the offset between FrameIndex and IRGstack is a compile-time
3328 // constant, this can be lowered to a single ADDG instruction.
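// (TAGPstack is understood to be the frame-index form of ADDG; the FI operand should be
// turned into an SP-relative offset once frame lowering has run.)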
3329 if (!(isa<FrameIndexSDNode>(N->getOperand(1)))) {
3330 return false;
3331 }
3333 SDValue IRG_SP = N->getOperand(2);
3334 if (IRG_SP->getOpcode() != ISD::INTRINSIC_W_CHAIN ||
3335 cast<ConstantSDNode>(IRG_SP->getOperand(1))->getZExtValue() !=
3336 Intrinsic::aarch64_irg_sp) {
3337 return false;
3338 }
3340 const TargetLowering *TLI = getTargetLowering();
3341 SDLoc DL(N);
3342 int FI = cast<FrameIndexSDNode>(N->getOperand(1))->getIndex();
3343 SDValue FiOp = CurDAG->getTargetFrameIndex(
3344 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
3345 int TagOffset = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
3347 SDNode *Out = CurDAG->getMachineNode(
3348 AArch64::TAGPstack, DL, MVT::i64,
3349 {FiOp, CurDAG->getTargetConstant(0, DL, MVT::i64), N->getOperand(2),
3350 CurDAG->getTargetConstant(TagOffset, DL, MVT::i64)});
3351 ReplaceNode(N, Out);
3352 return true;
3353 }
3355 void AArch64DAGToDAGISel::SelectTagP(SDNode *N) {
3356 assert(isa<ConstantSDNode>(N->getOperand(3)) &&
3357 "llvm.aarch64.tagp third argument must be an immediate");
3358 if (trySelectStackSlotTagP(N))
3359 return;
3360 // FIXME: above applies in any case when offset between Op1 and Op2 is a
3361 // compile-time constant, not just for stack allocations.
3363 // General case for unrelated pointers in Op1 and Op2.
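// The emitted sequence is: SUBP computes the untagged distance Op1 - Op2, ADDXrr adds it
// back onto Op2 (so the result has Op1's address but Op2's tag), and ADDG then applies the
// constant tag offset.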
3364 SDLoc DL(N);
3365 int TagOffset = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
3366 SDNode *N1 = CurDAG->getMachineNode(AArch64::SUBP, DL, MVT::i64,
3367 {N->getOperand(1), N->getOperand(2)});
3368 SDNode *N2 = CurDAG->getMachineNode(AArch64::ADDXrr, DL, MVT::i64,
3369 {SDValue(N1, 0), N->getOperand(2)});
3370 SDNode *N3 = CurDAG->getMachineNode(
3371 AArch64::ADDG, DL, MVT::i64,
3372 {SDValue(N2, 0), CurDAG->getTargetConstant(0, DL, MVT::i64),
3373 CurDAG->getTargetConstant(TagOffset, DL, MVT::i64)});
3374 ReplaceNode(N, N3);
3375 }
3377 // NOTE: We cannot use EXTRACT_SUBREG in all cases because the fixed length
3378 // vector types larger than NEON don't have a matching SubRegIndex.
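// e.g. a 64-bit fixed-length result comes out via dsub and a 128-bit one via zsub, while
// anything wider is simply re-classed into ZPR with COPY_TO_REGCLASS (see the switch below).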
3379 static SDNode *extractSubReg(SelectionDAG *DAG, EVT VT, SDValue V) {
3380 assert(V.getValueType().isScalableVector() &&
3381 V.getValueType().getSizeInBits().getKnownMinSize() ==
3382 AArch64::SVEBitsPerBlock &&
3383 "Expected to extract from a packed scalable vector!");
3384 assert(VT.isFixedLengthVector() &&
3385 "Expected to extract a fixed length vector!");
3387 SDLoc DL(V);
3388 switch (VT.getSizeInBits()) {
3389 case 64: {
3390 auto SubReg = DAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
3391 return DAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, VT, V, SubReg);
3392 }
3393 case 128: {
3394 auto SubReg = DAG->getTargetConstant(AArch64::zsub, DL, MVT::i32);
3395 return DAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, VT, V, SubReg);
3396 }
3397 default: {
3398 auto RC = DAG->getTargetConstant(AArch64::ZPRRegClassID, DL, MVT::i64);
3399 return DAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
3400 }
3401 }
3402 }
3404 // NOTE: We cannot use INSERT_SUBREG in all cases because the fixed length
3405 // vector types larger than NEON don't have a matching SubRegIndex.
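// Mirrors extractSubReg above: a dsub/zsub INSERT_SUBREG into an IMPLICIT_DEF container for
// 64/128-bit sources, COPY_TO_REGCLASS for anything wider.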
3406 static SDNode *insertSubReg(SelectionDAG *DAG, EVT VT, SDValue V) {
3407 assert(VT.isScalableVector() &&
3408 VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock &&
3409 "Expected to insert into a packed scalable vector!");
3410 assert(V.getValueType().isFixedLengthVector() &&
3411 "Expected to insert a fixed length vector!");
3413 SDLoc DL(V);
3414 switch (V.getValueType().getSizeInBits()) {
3415 case 64: {
3416 auto SubReg = DAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
3417 auto Container = DAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
3418 return DAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, VT,
3419 SDValue(Container, 0), V, SubReg);
3420 }
3421 case 128: {
3422 auto SubReg = DAG->getTargetConstant(AArch64::zsub, DL, MVT::i32);
3423 auto Container = DAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
3424 return DAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, VT,
3425 SDValue(Container, 0), V, SubReg);
3426 }
3427 default: {
3428 auto RC = DAG->getTargetConstant(AArch64::ZPRRegClassID, DL, MVT::i64);
3429 return DAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
3430 }
3431 }
3432 }
3434 void AArch64DAGToDAGISel::Select(SDNode *Node) {
3435 // If we have a custom node, we already have selected!
3436 if (Node->isMachineOpcode()) {
3437 LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
3438 Node->setNodeId(-1);
3439 return;
3440 }
3442 // A few opcodes need custom selection.
3443 EVT VT = Node->getValueType(0);
3445 switch (Node->getOpcode()) {
3446 default:
3447 break;
3449 case ISD::ATOMIC_CMP_SWAP:
3450 if (SelectCMP_SWAP(Node))
3451 return;
3452 break;
3454 case ISD::READ_REGISTER:
3455 if (tryReadRegister(Node))
3456 return;
3457 break;
3459 case ISD::WRITE_REGISTER:
3460 if (tryWriteRegister(Node))
3461 return;
3462 break;
3464 case ISD::ADD:
3465 if (tryMLAV64LaneV128(Node))
3466 return;
3467 break;
3469 case ISD::LOAD: {
3470 // Try to select as an indexed load. Fall through to normal processing
3471 // if we can't.
3472 if (tryIndexedLoad(Node))
3473 return;
3474 break;
3475 }
3477 case ISD::SRL:
3478 case ISD::AND:
3479 case ISD::SRA:
3480 case ISD::SIGN_EXTEND_INREG:
3481 if (tryBitfieldExtractOp(Node))
3482 return;
3483 if (tryBitfieldInsertInZeroOp(Node))
3484 return;
3485 LLVM_FALLTHROUGH;
3486 case ISD::ROTR:
3487 case ISD::SHL:
3488 if (tryShiftAmountMod(Node))
3489 return;
3490 break;
3492 case ISD::SIGN_EXTEND:
3493 if (tryBitfieldExtractOpFromSExt(Node))
3494 return;
3495 break;
3497 case ISD::FP_EXTEND:
3498 if (tryHighFPExt(Node))
3499 return;
3500 break;
3502 case ISD::OR:
3503 if (tryBitfieldInsertOp(Node))
3504 return;
3505 break;
3507 case ISD::EXTRACT_SUBVECTOR: {
3508 // Bail when this is not a "cast"-like extract_subvector.
3509 if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue() != 0)
3510 break;
3512 // Bail when normal isel can do the job.
3513 EVT InVT = Node->getOperand(0).getValueType();
3514 if (VT.isScalableVector() || InVT.isFixedLengthVector())
3515 break;
3517 // NOTE: We can only get here when doing fixed length SVE code generation.
3518 // We do manual selection because the types involved are not linked to real
3519 // registers (despite being legal) and must be coerced into SVE registers.
3521 // NOTE: If the above changes, be aware that selection will still not work
3522 // because the td definition of extract_vector does not support extracting
3523 // a fixed length vector from a scalable vector.
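// (For instance, when compiling with 256-bit SVE vectors, extracting a v8f32 at index 0 from
// an nxv4f32 operand lands here and is handled by extractSubReg above.)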
3525 ReplaceNode(Node, extractSubReg(CurDAG, VT, Node->getOperand(0)));
3526 return;
3527 }
3529 case ISD::INSERT_SUBVECTOR: {
3530 // Bail when this is not a "cast"-like insert_subvector.
3531 if (cast<ConstantSDNode>(Node->getOperand(2))->getZExtValue() != 0)
3532 break;
3533 if (!Node->getOperand(0).isUndef())
3534 break;
3536 // Bail when normal isel should do the job.
3537 EVT InVT = Node->getOperand(1).getValueType();
3538 if (VT.isFixedLengthVector() || InVT.isScalableVector())
3539 break;
3541 // NOTE: We can only get here when doing fixed length SVE code generation.
3542 // We do manual selection because the types involved are not linked to real
3543 // registers (despite being legal) and must be coerced into SVE registers.
3545 // NOTE: If the above changes, be aware that selection will still not work
3546 // because the td definition of insert_vector does not support inserting a
3547 // fixed length vector into a scalable vector.
3549 ReplaceNode(Node, insertSubReg(CurDAG, VT, Node->getOperand(1)));
3550 return;
3551 }
3553 case ISD::Constant: {
3554 // Materialize zero constants as copies from WZR/XZR. This allows
3555 // the coalescer to propagate these into other instructions.
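// e.g. an i32 zero becomes a plain copy from WZR rather than a separately materialized
// immediate, which later passes can often fold into other instructions or coalesce away.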
3556 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
3557 if (ConstNode->isZero()) {
3558 if (VT == MVT::i32) {
3559 SDValue New = CurDAG->getCopyFromReg(
3560 CurDAG->getEntryNode(), SDLoc(Node), AArch64::WZR, MVT::i32);
3561 ReplaceNode(Node, New.getNode());
3562 return;
3563 } else if (VT == MVT::i64) {
3564 SDValue New = CurDAG->getCopyFromReg(
3565 CurDAG->getEntryNode(), SDLoc(Node), AArch64::XZR, MVT::i64);
3566 ReplaceNode(Node, New.getNode());
3567 return;
3568 }
3569 }
3570 break;
3571 }
3573 case ISD::FrameIndex: {
3574 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
3575 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
3576 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
3577 const TargetLowering *TLI = getTargetLowering();
3578 SDValue TFI = CurDAG->getTargetFrameIndex(
3579 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
3580 SDLoc DL(Node);
3581 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
3582 CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
3583 CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
3584 return;
3585 }
3586 case ISD::INTRINSIC_W_CHAIN: {
3587 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
3588 switch (IntNo) {
3589 default:
3590 break;
3591 case Intrinsic::aarch64_ldaxp:
3592 case Intrinsic::aarch64_ldxp: {
3593 unsigned Op =
3594 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
3595 SDValue MemAddr = Node->getOperand(2);
3596 SDLoc DL(Node);
3597 SDValue Chain = Node->getOperand(0);
3599 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
3600 MVT::Other, MemAddr, Chain);
3602 // Transfer memoperands.
3603 MachineMemOperand *MemOp =
3604 cast<MemIntrinsicSDNode>(Node)->getMemOperand();
3605 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});
3606 ReplaceNode(Node, Ld);
3607 return;
3609 case Intrinsic::aarch64_stlxp:
3610 case Intrinsic::aarch64_stxp: {
3611 unsigned Op =
3612 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
3613 SDLoc DL(Node);
3614 SDValue Chain = Node->getOperand(0);
3615 SDValue ValLo = Node->getOperand(2);
3616 SDValue ValHi = Node->getOperand(3);
3617 SDValue MemAddr = Node->getOperand(4);
3619 // Place arguments in the right order.
3620 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
3622 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
3623 // Transfer memoperands.
3624 MachineMemOperand *MemOp =
3625 cast<MemIntrinsicSDNode>(Node)->getMemOperand();
3626 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
3628 ReplaceNode(Node, St);
3629 return;
3631 case Intrinsic::aarch64_neon_ld1x2:
3632 if (VT == MVT::v8i8) {
3633 SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
3634 return;
3635 } else if (VT == MVT::v16i8) {
3636 SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
3637 return;
3638 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3639 SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
3640 return;
3641 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3642 SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
3643 return;
3644 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3645 SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
3646 return;
3647 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3648 SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
3649 return;
3650 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3651 SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
3652 return;
3653 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3654 SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
3655 return;
3657 break;
3658 case Intrinsic::aarch64_neon_ld1x3:
3659 if (VT == MVT::v8i8) {
3660 SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
3661 return;
3662 } else if (VT == MVT::v16i8) {
3663 SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
3664 return;
3665 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3666 SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
3667 return;
3668 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3669 SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
3670 return;
3671 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3672 SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
3673 return;
3674 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3675 SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
3676 return;
3677 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3678 SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
3679 return;
3680 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3681 SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
3682 return;
3684 break;
3685 case Intrinsic::aarch64_neon_ld1x4:
3686 if (VT == MVT::v8i8) {
3687 SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
3688 return;
3689 } else if (VT == MVT::v16i8) {
3690 SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
3691 return;
3692 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3693 SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
3694 return;
3695 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3696 SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
3697 return;
3698 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3699 SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
3700 return;
3701 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3702 SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
3703 return;
3704 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3705 SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
3706 return;
3707 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3708 SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
3709 return;
3711 break;
3712 case Intrinsic::aarch64_neon_ld2:
3713 if (VT == MVT::v8i8) {
3714 SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
3715 return;
3716 } else if (VT == MVT::v16i8) {
3717 SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
3718 return;
3719 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3720 SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
3721 return;
3722 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3723 SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
3724 return;
3725 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3726 SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
3727 return;
3728 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3729 SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
3730 return;
3731 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3732 SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
3733 return;
3734 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3735 SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
3736 return;
3738 break;
3739 case Intrinsic::aarch64_neon_ld3:
3740 if (VT == MVT::v8i8) {
3741 SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
3742 return;
3743 } else if (VT == MVT::v16i8) {
3744 SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
3745 return;
3746 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3747 SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
3748 return;
3749 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3750 SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
3751 return;
3752 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3753 SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
3754 return;
3755 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3756 SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
3757 return;
3758 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3759 SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
3760 return;
3761 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3762 SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
3763 return;
3765 break;
3766 case Intrinsic::aarch64_neon_ld4:
3767 if (VT == MVT::v8i8) {
3768 SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
3769 return;
3770 } else if (VT == MVT::v16i8) {
3771 SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
3772 return;
3773 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3774 SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
3775 return;
3776 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3777 SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
3778 return;
3779 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3780 SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
3781 return;
3782 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3783 SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
3784 return;
3785 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3786 SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
3787 return;
3788 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3789 SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
3790 return;
3792 break;
3793 case Intrinsic::aarch64_neon_ld2r:
3794 if (VT == MVT::v8i8) {
3795 SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
3796 return;
3797 } else if (VT == MVT::v16i8) {
3798 SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
3799 return;
3800 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3801 SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
3802 return;
3803 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3804 SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
3805 return;
3806 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3807 SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
3808 return;
3809 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3810 SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
3811 return;
3812 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3813 SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
3814 return;
3815 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3816 SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
3817 return;
3819 break;
3820 case Intrinsic::aarch64_neon_ld3r:
3821 if (VT == MVT::v8i8) {
3822 SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
3823 return;
3824 } else if (VT == MVT::v16i8) {
3825 SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
3826 return;
3827 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3828 SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
3829 return;
3830 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3831 SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
3832 return;
3833 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3834 SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
3835 return;
3836 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3837 SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
3838 return;
3839 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3840 SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
3841 return;
3842 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3843 SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
3844 return;
3846 break;
3847 case Intrinsic::aarch64_neon_ld4r:
3848 if (VT == MVT::v8i8) {
3849 SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
3850 return;
3851 } else if (VT == MVT::v16i8) {
3852 SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
3853 return;
3854 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3855 SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
3856 return;
3857 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3858 SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
3859 return;
3860 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3861 SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
3862 return;
3863 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3864 SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
3865 return;
3866 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3867 SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
3868 return;
3869 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3870 SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
3871 return;
3873 break;
3874 case Intrinsic::aarch64_neon_ld2lane:
3875 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3876 SelectLoadLane(Node, 2, AArch64::LD2i8);
3877 return;
3878 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3879 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
3880 SelectLoadLane(Node, 2, AArch64::LD2i16);
3881 return;
3882 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3883 VT == MVT::v2f32) {
3884 SelectLoadLane(Node, 2, AArch64::LD2i32);
3885 return;
3886 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3887 VT == MVT::v1f64) {
3888 SelectLoadLane(Node, 2, AArch64::LD2i64);
3889 return;
3891 break;
3892 case Intrinsic::aarch64_neon_ld3lane:
3893 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3894 SelectLoadLane(Node, 3, AArch64::LD3i8);
3895 return;
3896 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3897 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
3898 SelectLoadLane(Node, 3, AArch64::LD3i16);
3899 return;
3900 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3901 VT == MVT::v2f32) {
3902 SelectLoadLane(Node, 3, AArch64::LD3i32);
3903 return;
3904 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3905 VT == MVT::v1f64) {
3906 SelectLoadLane(Node, 3, AArch64::LD3i64);
3907 return;
3909 break;
3910 case Intrinsic::aarch64_neon_ld4lane:
3911 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3912 SelectLoadLane(Node, 4, AArch64::LD4i8);
3913 return;
3914 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3915 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
3916 SelectLoadLane(Node, 4, AArch64::LD4i16);
3917 return;
3918 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3919 VT == MVT::v2f32) {
3920 SelectLoadLane(Node, 4, AArch64::LD4i32);
3921 return;
3922 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3923 VT == MVT::v1f64) {
3924 SelectLoadLane(Node, 4, AArch64::LD4i64);
3925 return;
3927 break;
3928 case Intrinsic::aarch64_ld64b:
3929 SelectLoad(Node, 8, AArch64::LD64B, AArch64::x8sub_0);
3930 return;
3931 case Intrinsic::aarch64_sve_ld2_sret: {
3932 if (VT == MVT::nxv16i8) {
3933 SelectPredicatedLoad(Node, 2, 0, AArch64::LD2B_IMM, AArch64::LD2B,
3934 true);
3935 return;
3936 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
3937 VT == MVT::nxv8bf16) {
3938 SelectPredicatedLoad(Node, 2, 1, AArch64::LD2H_IMM, AArch64::LD2H,
3939 true);
3940 return;
3941 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
3942 SelectPredicatedLoad(Node, 2, 2, AArch64::LD2W_IMM, AArch64::LD2W,
3943 true);
3944 return;
3945 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
3946 SelectPredicatedLoad(Node, 2, 3, AArch64::LD2D_IMM, AArch64::LD2D,
3947 true);
3948 return;
3950 break;
3952 case Intrinsic::aarch64_sve_ld3_sret: {
3953 if (VT == MVT::nxv16i8) {
3954 SelectPredicatedLoad(Node, 3, 0, AArch64::LD3B_IMM, AArch64::LD3B,
3955 true);
3956 return;
3957 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
3958 VT == MVT::nxv8bf16) {
3959 SelectPredicatedLoad(Node, 3, 1, AArch64::LD3H_IMM, AArch64::LD3H,
3960 true);
3961 return;
3962 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
3963 SelectPredicatedLoad(Node, 3, 2, AArch64::LD3W_IMM, AArch64::LD3W,
3964 true);
3965 return;
3966 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
3967 SelectPredicatedLoad(Node, 3, 3, AArch64::LD3D_IMM, AArch64::LD3D,
3968 true);
3969 return;
3971 break;
3973 case Intrinsic::aarch64_sve_ld4_sret: {
3974 if (VT == MVT::nxv16i8) {
3975 SelectPredicatedLoad(Node, 4, 0, AArch64::LD4B_IMM, AArch64::LD4B,
3976 true);
3977 return;
3978 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
3979 VT == MVT::nxv8bf16) {
3980 SelectPredicatedLoad(Node, 4, 1, AArch64::LD4H_IMM, AArch64::LD4H,
3981 true);
3982 return;
3983 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
3984 SelectPredicatedLoad(Node, 4, 2, AArch64::LD4W_IMM, AArch64::LD4W,
3985 true);
3986 return;
3987 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
3988 SelectPredicatedLoad(Node, 4, 3, AArch64::LD4D_IMM, AArch64::LD4D,
3989 true);
3990 return;
3992 break;
3995 } break;
3996 case ISD::INTRINSIC_WO_CHAIN: {
3997 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
3998 switch (IntNo) {
3999 default:
4000 break;
4001 case Intrinsic::aarch64_tagp:
4002 SelectTagP(Node);
4003 return;
4004 case Intrinsic::aarch64_neon_tbl2:
4005 SelectTable(Node, 2,
4006 VT == MVT::v8i8 ? AArch64::TBLv8i8Two : AArch64::TBLv16i8Two,
4007 false);
4008 return;
4009 case Intrinsic::aarch64_neon_tbl3:
4010 SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
4011 : AArch64::TBLv16i8Three,
4012 false);
4013 return;
4014 case Intrinsic::aarch64_neon_tbl4:
4015 SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
4016 : AArch64::TBLv16i8Four,
4017 false);
4018 return;
4019 case Intrinsic::aarch64_neon_tbx2:
4020 SelectTable(Node, 2,
4021 VT == MVT::v8i8 ? AArch64::TBXv8i8Two : AArch64::TBXv16i8Two,
4022 true);
4023 return;
4024 case Intrinsic::aarch64_neon_tbx3:
4025 SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
4026 : AArch64::TBXv16i8Three,
4027 true);
4028 return;
4029 case Intrinsic::aarch64_neon_tbx4:
4030 SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
4031 : AArch64::TBXv16i8Four,
4032 true);
4033 return;
4034 case Intrinsic::aarch64_neon_smull:
4035 case Intrinsic::aarch64_neon_umull:
4036 if (tryMULLV64LaneV128(IntNo, Node))
4037 return;
4038 break;
4039 case Intrinsic::swift_async_context_addr: {
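// The address is computed as FP - 8, i.e. the slot immediately below the frame pointer
// where the Swift async context is expected to live in the extended frame record.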
4040 SDLoc DL(Node);
4041 CurDAG->SelectNodeTo(Node, AArch64::SUBXri, MVT::i64,
4042 CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
4043 AArch64::FP, MVT::i64),
4044 CurDAG->getTargetConstant(8, DL, MVT::i32),
4045 CurDAG->getTargetConstant(0, DL, MVT::i32));
4046 auto &MF = CurDAG->getMachineFunction();
4047 MF.getFrameInfo().setFrameAddressIsTaken(true);
4048 MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
4049 return;
4050 }
4051 }
4052 break;
4053 }
4054 case ISD::INTRINSIC_VOID: {
4055 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
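// For these store-style intrinsics the interesting type is that of the data being stored
// (operand 2), not the node's (void) result, so switch VT over to it when present.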
4056 if (Node->getNumOperands() >= 3)
4057 VT = Node->getOperand(2)->getValueType(0);
4058 switch (IntNo) {
4059 default:
4060 break;
4061 case Intrinsic::aarch64_neon_st1x2: {
4062 if (VT == MVT::v8i8) {
4063 SelectStore(Node, 2, AArch64::ST1Twov8b);
4064 return;
4065 } else if (VT == MVT::v16i8) {
4066 SelectStore(Node, 2, AArch64::ST1Twov16b);
4067 return;
4068 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
4069 VT == MVT::v4bf16) {
4070 SelectStore(Node, 2, AArch64::ST1Twov4h);
4071 return;
4072 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
4073 VT == MVT::v8bf16) {
4074 SelectStore(Node, 2, AArch64::ST1Twov8h);
4075 return;
4076 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4077 SelectStore(Node, 2, AArch64::ST1Twov2s);
4078 return;
4079 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4080 SelectStore(Node, 2, AArch64::ST1Twov4s);
4081 return;
4082 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4083 SelectStore(Node, 2, AArch64::ST1Twov2d);
4084 return;
4085 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4086 SelectStore(Node, 2, AArch64::ST1Twov1d);
4087 return;
4089 break;
4091 case Intrinsic::aarch64_neon_st1x3: {
4092 if (VT == MVT::v8i8) {
4093 SelectStore(Node, 3, AArch64::ST1Threev8b);
4094 return;
4095 } else if (VT == MVT::v16i8) {
4096 SelectStore(Node, 3, AArch64::ST1Threev16b);
4097 return;
4098 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
4099 VT == MVT::v4bf16) {
4100 SelectStore(Node, 3, AArch64::ST1Threev4h);
4101 return;
4102 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
4103 VT == MVT::v8bf16) {
4104 SelectStore(Node, 3, AArch64::ST1Threev8h);
4105 return;
4106 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4107 SelectStore(Node, 3, AArch64::ST1Threev2s);
4108 return;
4109 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4110 SelectStore(Node, 3, AArch64::ST1Threev4s);
4111 return;
4112 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4113 SelectStore(Node, 3, AArch64::ST1Threev2d);
4114 return;
4115 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4116 SelectStore(Node, 3, AArch64::ST1Threev1d);
4117 return;
4119 break;
4121 case Intrinsic::aarch64_neon_st1x4: {
4122 if (VT == MVT::v8i8) {
4123 SelectStore(Node, 4, AArch64::ST1Fourv8b);
4124 return;
4125 } else if (VT == MVT::v16i8) {
4126 SelectStore(Node, 4, AArch64::ST1Fourv16b);
4127 return;
4128 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
4129 VT == MVT::v4bf16) {
4130 SelectStore(Node, 4, AArch64::ST1Fourv4h);
4131 return;
4132 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
4133 VT == MVT::v8bf16) {
4134 SelectStore(Node, 4, AArch64::ST1Fourv8h);
4135 return;
4136 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4137 SelectStore(Node, 4, AArch64::ST1Fourv2s);
4138 return;
4139 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4140 SelectStore(Node, 4, AArch64::ST1Fourv4s);
4141 return;
4142 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4143 SelectStore(Node, 4, AArch64::ST1Fourv2d);
4144 return;
4145 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4146 SelectStore(Node, 4, AArch64::ST1Fourv1d);
4147 return;
4149 break;
4151 case Intrinsic::aarch64_neon_st2: {
4152 if (VT == MVT::v8i8) {
4153 SelectStore(Node, 2, AArch64::ST2Twov8b);
4154 return;
4155 } else if (VT == MVT::v16i8) {
4156 SelectStore(Node, 2, AArch64::ST2Twov16b);
4157 return;
4158 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
4159 VT == MVT::v4bf16) {
4160 SelectStore(Node, 2, AArch64::ST2Twov4h);
4161 return;
4162 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
4163 VT == MVT::v8bf16) {
4164 SelectStore(Node, 2, AArch64::ST2Twov8h);
4165 return;
4166 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4167 SelectStore(Node, 2, AArch64::ST2Twov2s);
4168 return;
4169 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4170 SelectStore(Node, 2, AArch64::ST2Twov4s);
4171 return;
4172 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4173 SelectStore(Node, 2, AArch64::ST2Twov2d);
4174 return;
4175 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4176 SelectStore(Node, 2, AArch64::ST1Twov1d);
4177 return;
4179 break;
4181 case Intrinsic::aarch64_neon_st3: {
4182 if (VT == MVT::v8i8) {
4183 SelectStore(Node, 3, AArch64::ST3Threev8b);
4184 return;
4185 } else if (VT == MVT::v16i8) {
4186 SelectStore(Node, 3, AArch64::ST3Threev16b);
4187 return;
4188 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
4189 VT == MVT::v4bf16) {
4190 SelectStore(Node, 3, AArch64::ST3Threev4h);
4191 return;
4192 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
4193 VT == MVT::v8bf16) {
4194 SelectStore(Node, 3, AArch64::ST3Threev8h);
4195 return;
4196 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4197 SelectStore(Node, 3, AArch64::ST3Threev2s);
4198 return;
4199 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4200 SelectStore(Node, 3, AArch64::ST3Threev4s);
4201 return;
4202 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4203 SelectStore(Node, 3, AArch64::ST3Threev2d);
4204 return;
4205 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4206 SelectStore(Node, 3, AArch64::ST1Threev1d);
4207 return;
4209 break;
4211 case Intrinsic::aarch64_neon_st4: {
4212 if (VT == MVT::v8i8) {
4213 SelectStore(Node, 4, AArch64::ST4Fourv8b);
4214 return;
4215 } else if (VT == MVT::v16i8) {
4216 SelectStore(Node, 4, AArch64::ST4Fourv16b);
4217 return;
4218 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
4219 VT == MVT::v4bf16) {
4220 SelectStore(Node, 4, AArch64::ST4Fourv4h);
4221 return;
4222 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
4223 VT == MVT::v8bf16) {
4224 SelectStore(Node, 4, AArch64::ST4Fourv8h);
4225 return;
4226 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4227 SelectStore(Node, 4, AArch64::ST4Fourv2s);
4228 return;
4229 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4230 SelectStore(Node, 4, AArch64::ST4Fourv4s);
4231 return;
4232 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4233 SelectStore(Node, 4, AArch64::ST4Fourv2d);
4234 return;
4235 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4236 SelectStore(Node, 4, AArch64::ST1Fourv1d);
4237 return;
4239 break;
4241 case Intrinsic::aarch64_neon_st2lane: {
4242 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4243 SelectStoreLane(Node, 2, AArch64::ST2i8);
4244 return;
4245 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4246 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4247 SelectStoreLane(Node, 2, AArch64::ST2i16);
4248 return;
4249 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4250 VT == MVT::v2f32) {
4251 SelectStoreLane(Node, 2, AArch64::ST2i32);
4252 return;
4253 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4254 VT == MVT::v1f64) {
4255 SelectStoreLane(Node, 2, AArch64::ST2i64);
4256 return;
4258 break;
4260 case Intrinsic::aarch64_neon_st3lane: {
4261 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4262 SelectStoreLane(Node, 3, AArch64::ST3i8);
4263 return;
4264 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4265 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4266 SelectStoreLane(Node, 3, AArch64::ST3i16);
4267 return;
4268 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4269 VT == MVT::v2f32) {
4270 SelectStoreLane(Node, 3, AArch64::ST3i32);
4271 return;
4272 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4273 VT == MVT::v1f64) {
4274 SelectStoreLane(Node, 3, AArch64::ST3i64);
4275 return;
4277 break;
4279 case Intrinsic::aarch64_neon_st4lane: {
4280 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4281 SelectStoreLane(Node, 4, AArch64::ST4i8);
4282 return;
4283 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4284 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4285 SelectStoreLane(Node, 4, AArch64::ST4i16);
4286 return;
4287 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4288 VT == MVT::v2f32) {
4289 SelectStoreLane(Node, 4, AArch64::ST4i32);
4290 return;
4291 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4292 VT == MVT::v1f64) {
4293 SelectStoreLane(Node, 4, AArch64::ST4i64);
4294 return;
4296 break;
4298 case Intrinsic::aarch64_sve_st2: {
4299 if (VT == MVT::nxv16i8) {
4300 SelectPredicatedStore(Node, 2, 0, AArch64::ST2B, AArch64::ST2B_IMM);
4301 return;
4302 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4303 VT == MVT::nxv8bf16) {
4304 SelectPredicatedStore(Node, 2, 1, AArch64::ST2H, AArch64::ST2H_IMM);
4305 return;
4306 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4307 SelectPredicatedStore(Node, 2, 2, AArch64::ST2W, AArch64::ST2W_IMM);
4308 return;
4309 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4310 SelectPredicatedStore(Node, 2, 3, AArch64::ST2D, AArch64::ST2D_IMM);
4311 return;
4313 break;
4315 case Intrinsic::aarch64_sve_st3: {
4316 if (VT == MVT::nxv16i8) {
4317 SelectPredicatedStore(Node, 3, 0, AArch64::ST3B, AArch64::ST3B_IMM);
4318 return;
4319 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4320 VT == MVT::nxv8bf16) {
4321 SelectPredicatedStore(Node, 3, 1, AArch64::ST3H, AArch64::ST3H_IMM);
4322 return;
4323 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4324 SelectPredicatedStore(Node, 3, 2, AArch64::ST3W, AArch64::ST3W_IMM);
4325 return;
4326 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4327 SelectPredicatedStore(Node, 3, 3, AArch64::ST3D, AArch64::ST3D_IMM);
4328 return;
4330 break;
4332 case Intrinsic::aarch64_sve_st4: {
4333 if (VT == MVT::nxv16i8) {
4334 SelectPredicatedStore(Node, 4, 0, AArch64::ST4B, AArch64::ST4B_IMM);
4335 return;
4336 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4337 VT == MVT::nxv8bf16) {
4338 SelectPredicatedStore(Node, 4, 1, AArch64::ST4H, AArch64::ST4H_IMM);
4339 return;
4340 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4341 SelectPredicatedStore(Node, 4, 2, AArch64::ST4W, AArch64::ST4W_IMM);
4342 return;
4343 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4344 SelectPredicatedStore(Node, 4, 3, AArch64::ST4D, AArch64::ST4D_IMM);
4345 return;
4347 break;
4350 break;
4352 case AArch64ISD::LD2post: {
4353 if (VT == MVT::v8i8) {
4354 SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
4355 return;
4356 } else if (VT == MVT::v16i8) {
4357 SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
4358 return;
4359 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4360 SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
4361 return;
4362 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4363 SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
4364 return;
4365 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4366 SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
4367 return;
4368 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4369 SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
4370 return;
4371 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4372 SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
4373 return;
4374 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4375 SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
4376 return;
4378 break;
4380 case AArch64ISD::LD3post: {
4381 if (VT == MVT::v8i8) {
4382 SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
4383 return;
4384 } else if (VT == MVT::v16i8) {
4385 SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
4386 return;
4387 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4388 SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
4389 return;
4390 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4391 SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
4392 return;
4393 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4394 SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
4395 return;
4396 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4397 SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
4398 return;
4399 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4400 SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
4401 return;
4402 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4403 SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
4404 return;
4406 break;
4408 case AArch64ISD::LD4post: {
4409 if (VT == MVT::v8i8) {
4410 SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
4411 return;
4412 } else if (VT == MVT::v16i8) {
4413 SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
4414 return;
4415 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4416 SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
4417 return;
4418 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4419 SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
4420 return;
4421 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4422 SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
4423 return;
4424 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4425 SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
4426 return;
4427 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4428 SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
4429 return;
4430 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4431 SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
4432 return;
4434 break;
4436 case AArch64ISD::LD1x2post: {
4437 if (VT == MVT::v8i8) {
4438 SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
4439 return;
4440 } else if (VT == MVT::v16i8) {
4441 SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
4442 return;
4443 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4444 SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
4445 return;
4446 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4447 SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
4448 return;
4449 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4450 SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
4451 return;
4452 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4453 SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
4454 return;
4455 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4456 SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
4457 return;
4458 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4459 SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
4460 return;
4462 break;
4464 case AArch64ISD::LD1x3post: {
4465 if (VT == MVT::v8i8) {
4466 SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
4467 return;
4468 } else if (VT == MVT::v16i8) {
4469 SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
4470 return;
4471 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4472 SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
4473 return;
4474 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4475 SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
4476 return;
4477 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4478 SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
4479 return;
4480 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4481 SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
4482 return;
4483 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4484 SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
4485 return;
4486 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4487 SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
4488 return;
4490 break;
4492 case AArch64ISD::LD1x4post: {
4493 if (VT == MVT::v8i8) {
4494 SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
4495 return;
4496 } else if (VT == MVT::v16i8) {
4497 SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
4498 return;
4499 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4500 SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
4501 return;
4502 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4503 SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
4504 return;
4505 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4506 SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
4507 return;
4508 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4509 SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
4510 return;
4511 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4512 SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
4513 return;
4514 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4515 SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
4516 return;
4518 break;
4520 case AArch64ISD::LD1DUPpost: {
4521 if (VT == MVT::v8i8) {
4522 SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
4523 return;
4524 } else if (VT == MVT::v16i8) {
4525 SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
4526 return;
4527 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4528 SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
4529 return;
4530 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4531 SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
4532 return;
4533 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4534 SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
4535 return;
4536 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4537 SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
4538 return;
4539 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4540 SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
4541 return;
4542 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4543 SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
4544 return;
4546 break;
4548 case AArch64ISD::LD2DUPpost: {
4549 if (VT == MVT::v8i8) {
4550 SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
4551 return;
4552 } else if (VT == MVT::v16i8) {
4553 SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
4554 return;
4555 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4556 SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
4557 return;
4558 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4559 SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
4560 return;
4561 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4562 SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
4563 return;
4564 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4565 SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
4566 return;
4567 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4568 SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
4569 return;
4570 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4571 SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
4572 return;
4574 break;
4576 case AArch64ISD::LD3DUPpost: {
4577 if (VT == MVT::v8i8) {
4578 SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
4579 return;
4580 } else if (VT == MVT::v16i8) {
4581 SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
4582 return;
4583 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4584 SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
4585 return;
4586 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4587 SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
4588 return;
4589 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4590 SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
4591 return;
4592 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4593 SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
4594 return;
4595 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4596 SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
4597 return;
4598 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4599 SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
4600 return;
4602 break;
4604 case AArch64ISD::LD4DUPpost: {
4605 if (VT == MVT::v8i8) {
4606 SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
4607 return;
4608 } else if (VT == MVT::v16i8) {
4609 SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
4610 return;
4611 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4612 SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
4613 return;
4614 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4615 SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
4616 return;
4617 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4618 SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
4619 return;
4620 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4621 SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
4622 return;
4623 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4624 SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
4625 return;
4626 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4627 SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
4628 return;
4630 break;
4632 case AArch64ISD::LD1LANEpost: {
4633 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4634 SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
4635 return;
4636 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4637 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4638 SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
4639 return;
4640 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4641 VT == MVT::v2f32) {
4642 SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
4643 return;
4644 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4645 VT == MVT::v1f64) {
4646 SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
4647 return;
4649 break;
4651 case AArch64ISD::LD2LANEpost: {
4652 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4653 SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
4654 return;
4655 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4656 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4657 SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
4658 return;
4659 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4660 VT == MVT::v2f32) {
4661 SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
4662 return;
4663 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4664 VT == MVT::v1f64) {
4665 SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
4666 return;
4668 break;
4670 case AArch64ISD::LD3LANEpost: {
4671 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4672 SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
4673 return;
4674 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4675 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4676 SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
4677 return;
4678 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4679 VT == MVT::v2f32) {
4680 SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
4681 return;
4682 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4683 VT == MVT::v1f64) {
4684 SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
4685 return;
4687 break;
4689 case AArch64ISD::LD4LANEpost: {
4690 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4691 SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
4692 return;
4693 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4694 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4695 SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
4696 return;
4697 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4698 VT == MVT::v2f32) {
4699 SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
4700 return;
4701 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4702 VT == MVT::v1f64) {
4703 SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
4704 return;
4706 break;
4708 case AArch64ISD::ST2post: {
4709 VT = Node->getOperand(1).getValueType();
4710 if (VT == MVT::v8i8) {
4711 SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
4712 return;
4713 } else if (VT == MVT::v16i8) {
4714 SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
4715 return;
4716 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4717 SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
4718 return;
4719 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4720 SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
4721 return;
4722 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4723 SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
4724 return;
4725 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4726 SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
4727 return;
4728 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4729 SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
4730 return;
4731 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4732 SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
4733 return;
4735 break;
4737 case AArch64ISD::ST3post: {
4738 VT = Node->getOperand(1).getValueType();
4739 if (VT == MVT::v8i8) {
4740 SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
4741 return;
4742 } else if (VT == MVT::v16i8) {
4743 SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
4744 return;
4745 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4746 SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
4747 return;
4748 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4749 SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
4750 return;
4751 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4752 SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
4753 return;
4754 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4755 SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
4756 return;
4757 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4758 SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
4759 return;
4760 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4761 SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
4762 return;
4763 }
4764 break;
4765 }
4766 case AArch64ISD::ST4post: {
4767 VT = Node->getOperand(1).getValueType();
4768 if (VT == MVT::v8i8) {
4769 SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
4770 return;
4771 } else if (VT == MVT::v16i8) {
4772 SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
4773 return;
4774 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4775 SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
4776 return;
4777 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4778 SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
4779 return;
4780 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4781 SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
4782 return;
4783 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4784 SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
4785 return;
4786 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4787 SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
4788 return;
4789 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4790 SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
4791 return;
4792 }
4793 break;
4794 }
4795 case AArch64ISD::ST1x2post: {
4796 VT = Node->getOperand(1).getValueType();
4797 if (VT == MVT::v8i8) {
4798 SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
4799 return;
4800 } else if (VT == MVT::v16i8) {
4801 SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
4802 return;
4803 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4804 SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
4805 return;
4806 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4807 SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
4808 return;
4809 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4810 SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
4811 return;
4812 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4813 SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
4814 return;
4815 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4816 SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
4817 return;
4818 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4819 SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
4820 return;
4821 }
4822 break;
4823 }
4824 case AArch64ISD::ST1x3post: {
4825 VT = Node->getOperand(1).getValueType();
4826 if (VT == MVT::v8i8) {
4827 SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
4828 return;
4829 } else if (VT == MVT::v16i8) {
4830 SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
4831 return;
4832 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4833 SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
4834 return;
4835 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4836 SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
4837 return;
4838 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4839 SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
4840 return;
4841 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4842 SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
4843 return;
4844 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4845 SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
4846 return;
4847 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4848 SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
4849 return;
4850 }
4851 break;
4852 }
4853 case AArch64ISD::ST1x4post: {
4854 VT = Node->getOperand(1).getValueType();
4855 if (VT == MVT::v8i8) {
4856 SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
4857 return;
4858 } else if (VT == MVT::v16i8) {
4859 SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
4860 return;
4861 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4862 SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
4863 return;
4864 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4865 SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
4866 return;
4867 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4868 SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
4869 return;
4870 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4871 SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
4872 return;
4873 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4874 SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
4875 return;
4876 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4877 SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
4878 return;
4879 }
4880 break;
4881 }
4882 case AArch64ISD::ST2LANEpost: {
4883 VT = Node->getOperand(1).getValueType();
4884 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4885 SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
4886 return;
4887 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4888 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4889 SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
4890 return;
4891 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4892 VT == MVT::v2f32) {
4893 SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
4894 return;
4895 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4896 VT == MVT::v1f64) {
4897 SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
4898 return;
4899 }
4900 break;
4901 }
4902 case AArch64ISD::ST3LANEpost: {
4903 VT = Node->getOperand(1).getValueType();
4904 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4905 SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
4906 return;
4907 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4908 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4909 SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
4910 return;
4911 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4912 VT == MVT::v2f32) {
4913 SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
4914 return;
4915 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4916 VT == MVT::v1f64) {
4917 SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
4918 return;
4919 }
4920 break;
4921 }
4922 case AArch64ISD::ST4LANEpost: {
4923 VT = Node->getOperand(1).getValueType();
4924 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4925 SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
4926 return;
4927 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4928 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4929 SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
4930 return;
4931 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4932 VT == MVT::v2f32) {
4933 SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
4934 return;
4935 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4936 VT == MVT::v1f64) {
4937 SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
4938 return;
4939 }
4940 break;
4941 }
4942 case AArch64ISD::SVE_LD2_MERGE_ZERO: {
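// Note: for these SVE structured loads the arguments to SelectPredicatedLoad
// are believed to be (NumVecs, log2 element size in bytes, immediate-offset
// opcode, register-offset opcode); e.g. a scale of 1 selects the halfword
// (.h) forms below.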
4943 if (VT == MVT::nxv16i8) {
4944 SelectPredicatedLoad(Node, 2, 0, AArch64::LD2B_IMM, AArch64::LD2B);
4945 return;
4946 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4947 VT == MVT::nxv8bf16) {
4948 SelectPredicatedLoad(Node, 2, 1, AArch64::LD2H_IMM, AArch64::LD2H);
4949 return;
4950 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4951 SelectPredicatedLoad(Node, 2, 2, AArch64::LD2W_IMM, AArch64::LD2W);
4952 return;
4953 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4954 SelectPredicatedLoad(Node, 2, 3, AArch64::LD2D_IMM, AArch64::LD2D);
4955 return;
4956 }
4957 break;
4958 }
4959 case AArch64ISD::SVE_LD3_MERGE_ZERO: {
4960 if (VT == MVT::nxv16i8) {
4961 SelectPredicatedLoad(Node, 3, 0, AArch64::LD3B_IMM, AArch64::LD3B);
4962 return;
4963 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4964 VT == MVT::nxv8bf16) {
4965 SelectPredicatedLoad(Node, 3, 1, AArch64::LD3H_IMM, AArch64::LD3H);
4966 return;
4967 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4968 SelectPredicatedLoad(Node, 3, 2, AArch64::LD3W_IMM, AArch64::LD3W);
4969 return;
4970 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4971 SelectPredicatedLoad(Node, 3, 3, AArch64::LD3D_IMM, AArch64::LD3D);
4972 return;
4973 }
4974 break;
4975 }
4976 case AArch64ISD::SVE_LD4_MERGE_ZERO: {
4977 if (VT == MVT::nxv16i8) {
4978 SelectPredicatedLoad(Node, 4, 0, AArch64::LD4B_IMM, AArch64::LD4B);
4979 return;
4980 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4981 VT == MVT::nxv8bf16) {
4982 SelectPredicatedLoad(Node, 4, 1, AArch64::LD4H_IMM, AArch64::LD4H);
4983 return;
4984 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4985 SelectPredicatedLoad(Node, 4, 2, AArch64::LD4W_IMM, AArch64::LD4W);
4986 return;
4987 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4988 SelectPredicatedLoad(Node, 4, 3, AArch64::LD4D_IMM, AArch64::LD4D);
4989 return;
4990 }
4991 break;
4992 }
4993 }
4995 // Select the default instruction
4996 SelectCode(Node);
4997 }
4999 /// createAArch64ISelDag - This pass converts a legalized DAG into an
5000 /// AArch64-specific DAG, ready for instruction scheduling.
5001 FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
5002 CodeGenOpt::Level OptLevel) {
5003 return new AArch64DAGToDAGISel(TM, OptLevel);
5004 }
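// Illustrative sketch of how this factory is typically wired into the
// codegen pipeline; the actual hook lives in AArch64TargetMachine.cpp and
// may differ slightly:
//
//   bool AArch64PassConfig::addInstSelector() {
//     addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));
//     ...
//     return false;
//   }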
5006 /// When \p PredVT is a scalable vector predicate in the form
5007 /// MVT::nx<M>xi1, it builds the corresponding scalable vector of
5008 /// integers MVT::nx<M>xi<bits> s.t. M x bits = 128. When targeting
5009 /// structured vectors (NumVec >1), the output data type is
5010 /// MVT::nx<M*NumVec>xi<bits> s.t. M x bits = 128. If the input
5011 /// PredVT is not in the form MVT::nx<M>xi1, it returns an invalid
5012 /// EVT.
5013 static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT,
5014 unsigned NumVec) {
5015 assert(NumVec > 0 && NumVec < 5 && "Invalid number of vectors.");
5016 if (!PredVT.isScalableVector() || PredVT.getVectorElementType() != MVT::i1)
5017 return EVT();
5019 if (PredVT != MVT::nxv16i1 && PredVT != MVT::nxv8i1 &&
5020 PredVT != MVT::nxv4i1 && PredVT != MVT::nxv2i1)
5021 return EVT();
5023 ElementCount EC = PredVT.getVectorElementCount();
5024 EVT ScalarVT =
5025 EVT::getIntegerVT(Ctx, AArch64::SVEBitsPerBlock / EC.getKnownMinValue());
5026 EVT MemVT = EVT::getVectorVT(Ctx, ScalarVT, EC * NumVec);
5028 return MemVT;
5029 }
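// Illustrative example (assuming PredVT == MVT::nxv4i1 and NumVec == 2):
//   EC       = vscale x 4
//   ScalarVT = i32            // 128 / 4
//   MemVT    = nxv8i32        // element count doubled for two vectors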
5031 /// Return the EVT of the data associated with a memory operation in \p
5032 /// Root. If such an EVT cannot be retrieved, it returns an invalid EVT.
5033 static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
5034 if (isa<MemSDNode>(Root))
5035 return cast<MemSDNode>(Root)->getMemoryVT();
5037 if (isa<MemIntrinsicSDNode>(Root))
5038 return cast<MemIntrinsicSDNode>(Root)->getMemoryVT();
5040 const unsigned Opcode = Root->getOpcode();
5041 // For custom ISD nodes, we have to look at them individually to extract the
5042 // type of the data moved to/from memory.
5043 switch (Opcode) {
5044 case AArch64ISD::LD1_MERGE_ZERO:
5045 case AArch64ISD::LD1S_MERGE_ZERO:
5046 case AArch64ISD::LDNF1_MERGE_ZERO:
5047 case AArch64ISD::LDNF1S_MERGE_ZERO:
5048 return cast<VTSDNode>(Root->getOperand(3))->getVT();
5049 case AArch64ISD::ST1_PRED:
5050 return cast<VTSDNode>(Root->getOperand(4))->getVT();
5051 case AArch64ISD::SVE_LD2_MERGE_ZERO:
5052 return getPackedVectorTypeFromPredicateType(
5053 Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/2);
5054 case AArch64ISD::SVE_LD3_MERGE_ZERO:
5055 return getPackedVectorTypeFromPredicateType(
5056 Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/3);
5057 case AArch64ISD::SVE_LD4_MERGE_ZERO:
5058 return getPackedVectorTypeFromPredicateType(
5059 Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/4);
5060 default:
5061 break;
5062 }
5064 if (Opcode != ISD::INTRINSIC_VOID)
5065 return EVT();
5067 const unsigned IntNo =
5068 cast<ConstantSDNode>(Root->getOperand(1))->getZExtValue();
5069 if (IntNo != Intrinsic::aarch64_sve_prf)
5070 return EVT();
5072 // We are using an SVE prefetch intrinsic. The memory type must be inferred
5073 // from the width of the governing predicate.
5074 return getPackedVectorTypeFromPredicateType(
5075 Ctx, Root->getOperand(2)->getValueType(0), /*NumVec=*/1);
5076 }
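// Illustrative example: a prefetch governed by an nxv8i1 predicate yields
// nxv8i16 here (eight 16-bit lanes per 128-bit block); the value is only
// used to scale the immediate addressing mode selected below.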
5078 /// SelectAddrModeIndexedSVE - Attempt selection of the addressing mode:
5079 /// Base + OffImm * sizeof(MemVT) for Min <= OffImm <= Max
5080 /// where Root is the memory access using N for its address.
5081 template <int64_t Min, int64_t Max>
5082 bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
5083 SDValue &Base,
5084 SDValue &OffImm) {
5085 const EVT MemVT = getMemVTFromNode(*(CurDAG->getContext()), Root);
5086 const DataLayout &DL = CurDAG->getDataLayout();
5088 if (N.getOpcode() == ISD::FrameIndex) {
5089 int FI = cast<FrameIndexSDNode>(N)->getIndex();
5090 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
5091 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
5092 return true;
5093 }
5095 if (MemVT == EVT())
5096 return false;
5098 if (N.getOpcode() != ISD::ADD)
5099 return false;
5101 SDValue VScale = N.getOperand(1);
5102 if (VScale.getOpcode() != ISD::VSCALE)
5103 return false;
5105 TypeSize TS = MemVT.getSizeInBits();
5106 int64_t MemWidthBytes = static_cast<int64_t>(TS.getKnownMinSize()) / 8;
5107 int64_t MulImm = cast<ConstantSDNode>(VScale.getOperand(0))->getSExtValue();
5109 if ((MulImm % MemWidthBytes) != 0)
5110 return false;
5112 int64_t Offset = MulImm / MemWidthBytes;
5113 if (Offset < Min || Offset > Max)
5114 return false;
5116 Base = N.getOperand(0);
5117 if (Base.getOpcode() == ISD::FrameIndex) {
5118 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
5119 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
5120 }
5122 OffImm = CurDAG->getTargetConstant(Offset, SDLoc(N), MVT::i64);
5123 return true;
5124 }
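// Illustrative example (assuming MemVT == nxv4i32, i.e. MemWidthBytes == 16,
// and Min <= 2 <= Max):
//   N = (add X, (vscale 32))  ->  MulImm = 32, Offset = 32 / 16 = 2
//   Base = X, OffImm = 2, matching the "[Xn, #2, mul vl]" immediate form.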
5126 /// Select register plus register addressing mode for SVE, with scaled
5127 /// offset.
5128 bool AArch64DAGToDAGISel::SelectSVERegRegAddrMode(SDValue N, unsigned Scale,
5129 SDValue &Base,
5130 SDValue &Offset) {
5131 if (N.getOpcode() != ISD::ADD)
5132 return false;
5134 // Process an ADD node.
5135 const SDValue LHS = N.getOperand(0);
5136 const SDValue RHS = N.getOperand(1);
5138 // 8-bit data does not come with an SHL node (the offset is unscaled), so it
5139 // is handled separately.
5140 if (Scale == 0) {
5141 Base = LHS;
5142 Offset = RHS;
5143 return true;
5144 }
5146 if (auto C = dyn_cast<ConstantSDNode>(RHS)) {
5147 int64_t ImmOff = C->getSExtValue();
5148 unsigned Size = 1 << Scale;
5150 // To use the reg+reg addressing mode, the immediate must be a multiple of
5151 // the vector element's byte size.
5152 if (ImmOff % Size)
5153 return false;
5155 SDLoc DL(N);
5156 Base = LHS;
5157 Offset = CurDAG->getTargetConstant(ImmOff >> Scale, DL, MVT::i64);
5158 SDValue Ops[] = {Offset};
5159 SDNode *MI = CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
5160 Offset = SDValue(MI, 0);
5161 return true;
5162 }
5164 // Check if the RHS is a shift node with a constant.
5165 if (RHS.getOpcode() != ISD::SHL)
5166 return false;
5168 const SDValue ShiftRHS = RHS.getOperand(1);
5169 if (auto *C = dyn_cast<ConstantSDNode>(ShiftRHS))
5170 if (C->getZExtValue() == Scale) {
5171 Base = LHS;
5172 Offset = RHS.getOperand(0);
5173 return true;
5174 }
5176 return false;
5177 }
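// Illustrative examples for Scale == 2 (32-bit elements):
//   (add X, (shl Idx, 2))  ->  Base = X, Offset = Idx       // [Xn, Xm, lsl #2]
//   (add X, 64)            ->  Base = X, Offset = MOV #16   // 64 >> 2, built
//                                                           // with MOVi64imm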
5179 bool AArch64DAGToDAGISel::SelectAllActivePredicate(SDValue N) {
5180 const AArch64TargetLowering *TLI =
5181 static_cast<const AArch64TargetLowering *>(getTargetLowering());
5183 return TLI->isAllActivePredicate(*CurDAG, N);
5184 }