//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//
#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"
//===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr) {}
  StringRef getPassName() const override {
    return "AArch64 Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    Subtarget = &MF.getSubtarget<AArch64Subtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  void Select(SDNode *Node) override;
  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  template <signed Low, signed High, signed Scale>
  bool SelectRDVLImm(SDValue N, SDValue &Imm);

  bool tryMLAV64LaneV128(SDNode *N);
  bool tryMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexedS9S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexedBitWidth(N, true, 9, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexedU6S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexedBitWidth(N, false, 6, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }
  template <unsigned Size, unsigned Max>
  bool SelectAddrModeIndexedUImm(SDValue N, SDValue &Base, SDValue &OffImm) {
    // Test if there is an appropriate addressing mode and check if the
    // immediate fits.
    bool Found = SelectAddrModeIndexed(N, Size, Base, OffImm);
    if (Found) {
      if (auto *CI = dyn_cast<ConstantSDNode>(OffImm)) {
        int64_t C = CI->getSExtValue();
        if (C <= Max)
          return true;
      }
    }

    // Otherwise, base only, materialize address in register.
    Base = N;
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
    return true;
  }
  template<int Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  template<int Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

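  // Returns true if N is an undef or a splat of constant zero (integer or
  // floating point) built with AArch64ISD::DUP or ISD::SPLAT_VECTOR.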
  bool SelectDupZeroOrUndef(SDValue N) {
    switch(N->getOpcode()) {
    case ISD::UNDEF:
      return true;
    case AArch64ISD::DUP:
    case ISD::SPLAT_VECTOR: {
      auto Opnd0 = N->getOperand(0);
      if (auto CN = dyn_cast<ConstantSDNode>(Opnd0))
        if (CN->isNullValue())
          return true;
      if (auto CN = dyn_cast<ConstantFPSDNode>(Opnd0))
        if (CN->isZero())
          return true;
      break;
    }
    default:
      break;
    }

    return false;
  }

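  // As above, but does not accept undef: N must be a splat of constant zero.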
  bool SelectDupZero(SDValue N) {
    switch(N->getOpcode()) {
    case AArch64ISD::DUP:
    case ISD::SPLAT_VECTOR: {
      auto Opnd0 = N->getOperand(0);
      if (auto CN = dyn_cast<ConstantSDNode>(Opnd0))
        if (CN->isNullValue())
          return true;
      if (auto CN = dyn_cast<ConstantFPSDNode>(Opnd0))
        if (CN->isZero())
          return true;
      break;
    }
    }

    return false;
  }
  template<MVT::SimpleValueType VT>
  bool SelectSVEAddSubImm(SDValue N, SDValue &Imm, SDValue &Shift) {
    return SelectSVEAddSubImm(N, VT, Imm, Shift);
  }

  template <MVT::SimpleValueType VT, bool Invert = false>
  bool SelectSVELogicalImm(SDValue N, SDValue &Imm) {
    return SelectSVELogicalImm(N, VT, Imm, Invert);
  }

  template <MVT::SimpleValueType VT>
  bool SelectSVEArithImm(SDValue N, SDValue &Imm) {
    return SelectSVEArithImm(N, VT, Imm);
  }

  template <unsigned Low, unsigned High, bool AllowSaturation = false>
  bool SelectSVEShiftImm(SDValue N, SDValue &Imm) {
    return SelectSVEShiftImm(N, Low, High, AllowSaturation, Imm);
  }
  // Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
  template<signed Min, signed Max, signed Scale, bool Shift>
  bool SelectCntImm(SDValue N, SDValue &Imm) {
    if (!isa<ConstantSDNode>(N))
      return false;

    int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
    if (Shift)
      MulImm = 1LL << MulImm;

    if ((MulImm % std::abs(Scale)) != 0)
      return false;

    MulImm /= Scale;
    if ((MulImm >= Min) && (MulImm <= Max)) {
      Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
      return true;
    }

    return false;
  }

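  // Returns the byte-offset immediate for SVE EXT: accepts a constant element
  // index in [0, Max] and scales it by the element size Scale.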
  template <signed Max, signed Scale>
  bool SelectEXTImm(SDValue N, SDValue &Imm) {
    if (!isa<ConstantSDNode>(N))
      return false;

    int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();

    if (MulImm >= 0 && MulImm <= Max) {
      MulImm *= Scale;
      Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
      return true;
    }

    return false;
  }
  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If Vecs contains a single element, that element
  /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);
  // Form a sequence of SVE registers for instructions using a list of vectors,
  // e.g. structured loads and stores (ldN, stN).
  SDValue createZTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
                      const unsigned SubRegs[]);
  void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);

  bool tryIndexedLoad(SDNode *N);

  bool trySelectStackSlotTagP(SDNode *N);
  void SelectTagP(SDNode *N);

  void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                  unsigned SubRegIdx);
  void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                      unsigned SubRegIdx);
  void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPredicatedLoad(SDNode *N, unsigned NumVecs, unsigned Scale,
                            unsigned Opc_rr, unsigned Opc_ri);

  bool SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base, SDValue &OffImm);
  /// SVE Reg+Imm addressing mode.
  template <int64_t Min, int64_t Max>
  bool SelectAddrModeIndexedSVE(SDNode *Root, SDValue N, SDValue &Base,
                                SDValue &OffImm);
  /// SVE Reg+Reg address mode.
  template <unsigned Scale>
  bool SelectSVERegRegAddrMode(SDValue N, SDValue &Base, SDValue &Offset) {
    return SelectSVERegRegAddrMode(N, Scale, Base, Offset);
  }
  void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPredicatedStore(SDNode *N, unsigned NumVecs, unsigned Scale,
                             unsigned Opc_rr, unsigned Opc_ri);
  std::tuple<unsigned, SDValue, SDValue>
  findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr, unsigned Opc_ri,
                           const SDValue &OldBase, const SDValue &OldOffset,
                           unsigned Scale);

  bool tryBitfieldExtractOp(SDNode *N);
  bool tryBitfieldExtractOpFromSExt(SDNode *N);
  bool tryBitfieldInsertOp(SDNode *N);
  bool tryBitfieldInsertInZeroOp(SDNode *N);
  bool tryShiftAmountMod(SDNode *N);
  bool tryHighFPExt(SDNode *N);

  bool tryReadRegister(SDNode *N);
  bool tryWriteRegister(SDNode *N);

  // Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"
private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
                               SDValue &OffImm) {
    return SelectAddrModeIndexedBitWidth(N, true, 7, Size, Base, OffImm);
  }
  bool SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm, unsigned BW,
                                     unsigned Size, SDValue &Base,
                                     SDValue &OffImm);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
                         SDValue &Offset, SDValue &SignExtend);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);

  bool SelectCMP_SWAP(SDNode *N);

  bool SelectSVE8BitLslImm(SDValue N, SDValue &Imm, SDValue &Shift);

  bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);

  bool SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm, bool Invert);

  bool SelectSVESignedArithImm(SDValue N, SDValue &Imm);
  bool SelectSVEShiftImm(SDValue N, uint64_t Low, uint64_t High,
                         bool AllowSaturation, SDValue &Imm);

  bool SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm);
  bool SelectSVERegRegAddrMode(SDValue N, unsigned Scale, SDValue &Base,
                               SDValue &Offset);

  bool SelectAllActivePredicate(SDValue N);
};
} // end anonymous namespace
/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the operand is a constant.
// If so, Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node has the
// specified opcode and an immediate integer right operand.
// If so, Imm will receive the value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}
bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch(ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_o:
  case InlineAsm::Constraint_Q:
    // We need to make sure that this one operand does not end up in XZR, thus
    // require the address to be in a PointerRegClass register.
    const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
    const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF);
    SDLoc dl(Op);
    SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i64);
    SDValue NewOp =
        SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                       dl, Op.getValueType(),
                                       Op, RC), 0);
    OutOps.push_back(NewOp);
    return false;
  }
  return true;
}
/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                           SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  SDLoc dl(N);
  Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
  return true;
}
/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                              SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
                          Shift);
}
/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return AArch64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return AArch64_AM::LSL;
  case ISD::SRL:
    return AArch64_AM::LSR;
  case ISD::SRA:
    return AArch64_AM::ASR;
  case ISD::ROTR:
    return AArch64_AM::ROR;
  }
}
/// Determine whether it is worth it to fold SHL into the addressing
/// mode.
static bool isWorthFoldingSHL(SDValue V) {
  assert(V.getOpcode() == ISD::SHL && "invalid opcode");
  // It is worth folding a logical shift of up to three places.
  auto *CSD = dyn_cast<ConstantSDNode>(V.getOperand(1));
  if (!CSD)
    return false;
  unsigned ShiftVal = CSD->getZExtValue();
  if (ShiftVal > 3)
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = V.getNode();
  for (SDNode *UI : Node->uses())
    if (!isa<MemSDNode>(*UI))
      for (SDNode *UII : UI->uses())
        if (!isa<MemSDNode>(*UII))
          return false;
  return true;
}
/// Determine whether it is worth folding V into an extended register.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // Trivial if we are optimizing for code size or if there is only
  // one use of the value.
  if (CurDAG->shouldOptForSize() || V.hasOneUse())
    return true;
  // If a subtarget has a fastpath LSL we can fold a logical shift into
  // the addressing mode and save a cycle.
  if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::SHL &&
      isWorthFoldingSHL(V))
    return true;
  if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::ADD) {
    const SDValue LHS = V.getOperand(0);
    const SDValue RHS = V.getOperand(1);
    if (LHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(LHS))
      return true;
    if (RHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(RHS))
      return true;
  }

  // It hurts otherwise, since the value will be reused.
  return false;
}
/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to default of "LSL 0". The logical
/// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not. The AllowROR parameter specifies whether ROR is
/// supported.
bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                                SDValue &Reg, SDValue &Shift) {
  AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
  if (ShType == AArch64_AM::InvalidShiftExtend)
    return false;
  if (!AllowROR && ShType == AArch64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
    return isWorthFolding(N);
  }

  return false;
}
/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::SXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::UXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return AArch64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return AArch64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return AArch64_AM::UXTW;
    }
  }

  return AArch64_AM::InvalidShiftExtend;
}
// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
      DL->getOpcode() != AArch64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
  LaneOp = EV.getOperand(0);

  return true;
}
// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is
// a high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {

  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}
/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
bool AArch64DAGToDAGISel::tryMLAV64LaneV128(SDNode *N) {
  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return false;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = AArch64::MLAv4i16_indexed;
    break;
  case MVT::v8i16:
    MLAOpc = AArch64::MLAv8i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = AArch64::MLAv2i32_indexed;
    break;
  case MVT::v4i32:
    MLAOpc = AArch64::MLAv4i32_indexed;
    break;
  }

  ReplaceNode(N, CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops));
  return true;
}

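// Like tryMLAV64LaneV128, but for the smull/umull intrinsics: select the
// indexed SMULL/UMULL form when one multiplicand is a lane in the upper half
// of a 128-bit vector.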
bool AArch64DAGToDAGISel::tryMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDLoc dl(N);
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return false;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::aarch64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::aarch64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  ReplaceNode(N, CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops));
  return true;
}
/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32)
    return N;

  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                               dl, MVT::i32, N, SubReg);
  return SDValue(Node, 0);
}
// Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
template<signed Low, signed High, signed Scale>
bool AArch64DAGToDAGISel::SelectRDVLImm(SDValue N, SDValue &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
  if ((MulImm % std::abs(Scale)) == 0) {
    int64_t RDVLImm = MulImm / Scale;
    if ((RDVLImm >= Low) && (RDVLImm <= High)) {
      Imm = CurDAG->getTargetConstant(RDVLImm, SDLoc(N), MVT::i32);
      return true;
    }
  }

  return false;
}
/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                      SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);

    // Don't match if free 32-bit -> 64-bit zext can be used instead.
    if (Ext == AArch64_AM::UXTW &&
        Reg->getValueType(0).getSizeInBits() == 32 && isDef32(*Reg.getNode()))
      return false;
  }

  // AArch64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
  Reg = narrowIfNeeded(CurDAG, Reg);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
                                    MVT::i32);
  return isWorthFolding(N);
}
/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point in
/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
/// leads to duplicated ADRP instructions.
static bool isWorthFoldingADDlow(SDValue N) {
  for (auto Use : N->uses()) {
    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
        Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;

    // ldar and stlr have much more restrictive addressing modes (just a
    // register).
    if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getSuccessOrdering()))
      return false;
  }

  return true;
}
/// SelectAddrModeIndexedBitWidth - Select a "register plus scaled (un)signed BW-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm,
                                                        unsigned BW, unsigned Size,
                                                        SDValue &Base,
                                                        SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  // As opposed to the (12-bit) Indexed addressing mode below, the 7/9-bit
  // signed addressing modes selected here don't support labels/immediates,
  // only base+offset.
  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      if (IsSignedImm) {
        int64_t RHSC = RHS->getSExtValue();
        unsigned Scale = Log2_32(Size);
        int64_t Range = 0x1LL << (BW - 1);

        if ((RHSC & (Size - 1)) == 0 && RHSC >= -(Range << Scale) &&
            RHSC < (Range << Scale)) {
          Base = N.getOperand(0);
          if (Base.getOpcode() == ISD::FrameIndex) {
            int FI = cast<FrameIndexSDNode>(Base)->getIndex();
            Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
          }
          OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
          return true;
        }
      } else {
        // Unsigned immediate.
        uint64_t RHSC = RHS->getZExtValue();
        unsigned Scale = Log2_32(Size);
        uint64_t Range = 0x1ULL << BW;

        if ((RHSC & (Size - 1)) == 0 && RHSC < (Range << Scale)) {
          Base = N.getOperand(0);
          if (Base.getOpcode() == ISD::FrameIndex) {
            int FI = cast<FrameIndexSDNode>(Base)->getIndex();
            Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
          }
          OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
          return true;
        }
      }
    }
  }
  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    stp x1, x2, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}
/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                                SDValue &Base, SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    if (GAN->getOffset() % Size == 0 &&
        GAN->getGlobal()->getPointerAlignment(DL) >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}
/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                                 SDValue &Base,
                                                 SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
      return true;
    }
  }
  return false;
}
static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}
/// Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                            bool WantExtend, SDValue &Offset,
                                            SDValue &SignExtend) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
    return false;

  SDLoc dl(N);
  if (WantExtend) {
    AArch64_AM::ShiftExtendType Ext =
        getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
  } else {
    Offset = N.getOperand(0);
    SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
  }

  unsigned LegalShiftVal = Log2_32(Size);
  unsigned ShiftVal = CSD->getZExtValue();

  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
    return false;

  return isWorthFolding(N);
}

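// SelectAddrModeWRO - Select a base register plus a (possibly extended and
// shifted) 32-bit offset register address, e.g. [Xn, Wm, SXTW #imm].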
bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc dl(N);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // There was no shift, whatever else we find.
  DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);

  AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  return false;
}
// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and can not be
// encoded by one MOVZ, return true.
static bool isPreferredADD(int64_t ImmOff) {
  // Constant in [0x0, 0xfff] can be encoded in ADD.
  if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
    return true;
  // Check if it can be encoded in an "ADD LSL #12".
  if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
    // As a single MOVZ is faster than an "ADD LSL #12", ignore such constants.
    return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
           (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
  return false;
}

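// SelectAddrModeXRO - Select a base register plus a 64-bit offset register,
// optionally shifted by the access size, e.g. [Xn, Xm, LSL #imm].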
bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc DL(N);

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Watch out if RHS is a wide immediate: it can not be selected into the
  // [BaseReg+Imm] addressing mode, and it may not be encodable in an ADD/SUB
  // either. In that case the [BaseReg + 0] address mode is used and we
  // generate instructions like:
  //  MOV  X0, WideImmediate
  //  ADD  X1, BaseReg, X0
  //  LDR  X2, [X1, 0]
  // For such a situation, using the [BaseReg, XReg] addressing mode saves the
  // ADD/SUB:
  //  MOV  X0, WideImmediate
  //  LDR  X2, [BaseReg, X0]
  if (isa<ConstantSDNode>(RHS)) {
    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
    unsigned Scale = Log2_32(Size);
    // Skip immediates that can be selected in the load/store addressing mode,
    // and skip immediates that can be encoded by a single ADD (SUB is also
    // checked by using -ImmOff).
    if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
        isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
      return false;

    SDValue Ops[] = { RHS };
    SDNode *MOVI =
        CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    SDValue MOVIV = SDValue(MOVI, 0);
    // This ADD of two X registers will be selected into [Reg+Reg] mode.
    N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = LHS;
  Offset = RHS;
  SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
  DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}
SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
  static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
                                     AArch64::dsub2, AArch64::dsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
  static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
                                     AArch64::qsub2, AArch64::qsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createZTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {AArch64::ZPR2RegClassID,
                                         AArch64::ZPR3RegClassID,
                                         AArch64::ZPR4RegClassID};
  static const unsigned SubRegs[] = {AArch64::zsub0, AArch64::zsub1,
                                     AArch64::zsub2, AArch64::zsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}
SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         const unsigned RegClassIDs[],
                                         const unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0]);

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}

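// Select a NEON table lookup (TBL/TBX): build a Q-register tuple from the
// table vectors and emit the Opc machine node. isExt is true for the TBX
// form, which carries an extra source operand.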
void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
                                      bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
}

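// Try to select a pre- or post-indexed load into the corresponding
// write-back load instruction.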
bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return false;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;

  // We're not doing validity checking here. That was done when checking
  // if we should mark the load as indexed or not. We're just selecting
  // the right instruction.
  unsigned Opcode = 0;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  bool InsertTo64 = false;
  if (VT == MVT::i64)
    Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
  else if (VT == MVT::i32) {
    if (ExtType == ISD::NON_EXTLOAD)
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
    else if (ExtType == ISD::SEXTLOAD)
      Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
    else {
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
      InsertTo64 = true;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i16) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
      else
        Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i8) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
      else
        Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::f16) {
    Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
  } else if (VT == MVT::bf16) {
    Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
  } else if (VT == MVT::f32) {
    Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
  } else if (VT == MVT::f64 || VT.is64BitVector()) {
    Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
  } else if (VT.is128BitVector()) {
    Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
  } else
    return false;
  SDValue Chain = LD->getChain();
  SDValue Base = LD->getBasePtr();
  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
  int OffsetVal = (int)OffsetOp->getZExtValue();
  SDLoc dl(N);
  SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
  SDValue Ops[] = { Base, Offset, Chain };
  SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
                                       MVT::Other, Ops);

  // Transfer memoperands.
  MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Res), {MemOp});

  // Either way, we're replacing the node, so tell the caller that.
  SDValue LoadedVal = SDValue(Res, 1);
  if (InsertTo64) {
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    LoadedVal =
        SDValue(CurDAG->getMachineNode(
                    AArch64::SUBREG_TO_REG, dl, MVT::i64,
                    CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
                    SubReg),
                0);
  }

  ReplaceUses(SDValue(N, 0), LoadedVal);
  ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
  ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
  CurDAG->RemoveDeadNode(N);
  return true;
}

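// Select a NEON structured load (e.g. ldN): emit the multi-vector load Opc
// and extract each of the NumVecs results from the Untyped super-register,
// starting at SubRegIdx.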
void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                                     unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(2), // Mem operand;
                   Chain};

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i),
        CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));

  // Transfer memoperands. In the case of AArch64::LD64B, there won't be one,
  // because it's too simple to have needed special treatment during lowering.
  if (auto *MemIntr = dyn_cast<MemIntrinsicSDNode>(N)) {
    MachineMemOperand *MemOp = MemIntr->getMemOperand();
    CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});
  }

  CurDAG->RemoveDeadNode(N);
}

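// Select a post-incremented NEON structured load; result 0 of the machine
// node is the updated base register, which replaces the write-back result.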
void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                                         unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(1), // Mem operand
                   N->getOperand(2), // Incremental
                   Chain};

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1)
    ReplaceUses(SDValue(N, 0), SuperReg);
  else
    for (unsigned i = 0; i < NumVecs; ++i)
      ReplaceUses(SDValue(N, i),
          CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  // Update the chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  CurDAG->RemoveDeadNode(N);
}
/// Optimize \param OldBase and \param OldOffset selecting the best addressing
/// mode. Returns a tuple consisting of an Opcode, an SDValue representing the
/// new Base and an SDValue representing the new offset.
std::tuple<unsigned, SDValue, SDValue>
AArch64DAGToDAGISel::findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr,
                                              unsigned Opc_ri,
                                              const SDValue &OldBase,
                                              const SDValue &OldOffset,
                                              unsigned Scale) {
  SDValue NewBase = OldBase;
  SDValue NewOffset = OldOffset;
  // Detect a possible Reg+Imm addressing mode.
  const bool IsRegImm = SelectAddrModeIndexedSVE</*Min=*/-8, /*Max=*/7>(
      N, OldBase, NewBase, NewOffset);

  // Detect a possible reg+reg addressing mode, but only if we haven't already
  // detected a Reg+Imm one.
  const bool IsRegReg =
      !IsRegImm && SelectSVERegRegAddrMode(OldBase, Scale, NewBase, NewOffset);

  // Select the instruction.
  return std::make_tuple(IsRegReg ? Opc_rr : Opc_ri, NewBase, NewOffset);
}

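// Select an SVE predicated structured load (ldN), picking the reg+imm or
// reg+reg form via findAddrModeSVELoadStore.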
void AArch64DAGToDAGISel::SelectPredicatedLoad(SDNode *N, unsigned NumVecs,
                                               unsigned Scale, unsigned Opc_ri,
                                               unsigned Opc_rr) {
  assert(Scale < 4 && "Invalid scaling value.");
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  // Optimize addressing mode.
  SDValue Base, Offset;
  unsigned Opc;
  std::tie(Opc, Base, Offset) = findAddrModeSVELoadStore(
      N, Opc_rr, Opc_ri, N->getOperand(2),
      CurDAG->getTargetConstant(0, DL, MVT::i64), Scale);

  SDValue Ops[] = {N->getOperand(1), // Predicate
                   Base,             // Memory operand
                   Offset, Chain};

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  SDNode *Load = CurDAG->getMachineNode(Opc, DL, ResTys, Ops);
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
                                   AArch64::zsub0 + i, DL, VT, SuperReg));

  // Copy chain
  unsigned ChainIdx = NumVecs;
  ReplaceUses(SDValue(N, ChainIdx), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(N);
}

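// Select a NEON structured store (stN): form a D- or Q-register tuple from
// the source vectors and emit Opc.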
void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                      unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  // Transfer memoperands.
  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});

  ReplaceNode(N, St);
}

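// Select an SVE predicated structured store (stN), picking the reg+imm or
// reg+reg form via findAddrModeSVELoadStore.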
void AArch64DAGToDAGISel::SelectPredicatedStore(SDNode *N, unsigned NumVecs,
                                                unsigned Scale, unsigned Opc_rr,
                                                unsigned Opc_ri) {
  SDLoc dl(N);

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = createZTuple(Regs);

  // Optimize addressing mode.
  unsigned Opc;
  SDValue Offset, Base;
  std::tie(Opc, Base, Offset) = findAddrModeSVELoadStore(
      N, Opc_rr, Opc_ri, N->getOperand(NumVecs + 3),
      CurDAG->getTargetConstant(0, dl, MVT::i64), Scale);

  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), // predicate
                   Base,                               // address
                   Offset,                             // offset
                   N->getOperand(0)};                  // chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  ReplaceNode(N, St);
}

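// Match a bare frame index as the base of an SVE reg+imm address (offset 0).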
bool AArch64DAGToDAGISel::SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base,
                                                      SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();

  // Try to match it for the frame address
  if (auto FINode = dyn_cast<FrameIndexSDNode>(N)) {
    int FI = FINode->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  return false;
}

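// Select a post-incremented NEON structured store.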
void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  const EVT ResTys[] = {MVT::i64,    // Type of the write back register
                        MVT::Other}; // Type for the Chain

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq,
                   N->getOperand(NumVecs + 1), // base register
                   N->getOperand(NumVecs + 2), // Incremental
                   N->getOperand(0)};          // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  ReplaceNode(N, St);
}
1604 namespace {
1605 /// WidenVector - Given a value in the V64 register class, produce the
1606 /// equivalent value in the V128 register class.
1607 class WidenVector {
1608 SelectionDAG &DAG;
1610 public:
1611 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
1613 SDValue operator()(SDValue V64Reg) {
1614 EVT VT = V64Reg.getValueType();
1615 unsigned NarrowSize = VT.getVectorNumElements();
1616 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1617 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
1618 SDLoc DL(V64Reg);
1620 SDValue Undef =
1621 SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
1622 return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
1625 } // namespace
1627 /// NarrowVector - Given a value in the V128 register class, produce the
1628 /// equivalent value in the V64 register class.
1629 static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
1630 EVT VT = V128Reg.getValueType();
1631 unsigned WideSize = VT.getVectorNumElements();
1632 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1633 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
1635 return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
1636 V128Reg);
1639 void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
1640 unsigned Opc) {
1641 SDLoc dl(N);
1642 EVT VT = N->getValueType(0);
1643 bool Narrow = VT.getSizeInBits() == 64;
1645 // Form a REG_SEQUENCE to force register allocation.
1646 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1648 if (Narrow)
1649 transform(Regs, Regs.begin(),
1650 WidenVector(*CurDAG));
1652 SDValue RegSeq = createQTuple(Regs);
1654 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1656 unsigned LaneNo =
1657 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1659 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1660 N->getOperand(NumVecs + 3), N->getOperand(0)};
1661 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1662 SDValue SuperReg = SDValue(Ld, 0);
1664 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1665 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
1666 AArch64::qsub2, AArch64::qsub3 };
1667 for (unsigned i = 0; i < NumVecs; ++i) {
1668 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
1669 if (Narrow)
1670 NV = NarrowVector(NV, *CurDAG);
1671 ReplaceUses(SDValue(N, i), NV);
1674 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1675 CurDAG->RemoveDeadNode(N);
1678 void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
1679 unsigned Opc) {
1680 SDLoc dl(N);
1681 EVT VT = N->getValueType(0);
1682 bool Narrow = VT.getSizeInBits() == 64;
1684 // Form a REG_SEQUENCE to force register allocation.
1685 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1687 if (Narrow)
1688 transform(Regs, Regs.begin(),
1689 WidenVector(*CurDAG));
1691 SDValue RegSeq = createQTuple(Regs);
1693 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1694 RegSeq->getValueType(0), MVT::Other};
1696 unsigned LaneNo =
1697 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1699 SDValue Ops[] = {RegSeq,
1700 CurDAG->getTargetConstant(LaneNo, dl,
1701 MVT::i64), // Lane Number
1702 N->getOperand(NumVecs + 2), // Base register
1703 N->getOperand(NumVecs + 3), // Incremental
1704 N->getOperand(0)};
1705 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1707 // Update uses of the write back register
1708 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1710 // Update uses of the vector list
1711 SDValue SuperReg = SDValue(Ld, 1);
1712 if (NumVecs == 1) {
1713 ReplaceUses(SDValue(N, 0),
1714 Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
1715 } else {
1716 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1717 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
1718 AArch64::qsub2, AArch64::qsub3 };
1719 for (unsigned i = 0; i < NumVecs; ++i) {
1720 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
1721 SuperReg);
1722 if (Narrow)
1723 NV = NarrowVector(NV, *CurDAG);
1724 ReplaceUses(SDValue(N, i), NV);
1728 // Update the Chain
1729 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1730 CurDAG->RemoveDeadNode(N);
1733 void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
1734 unsigned Opc) {
1735 SDLoc dl(N);
1736 EVT VT = N->getOperand(2)->getValueType(0);
1737 bool Narrow = VT.getSizeInBits() == 64;
1739 // Form a REG_SEQUENCE to force register allocation.
1740 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1742 if (Narrow)
1743 transform(Regs, Regs.begin(),
1744 WidenVector(*CurDAG));
1746 SDValue RegSeq = createQTuple(Regs);
1748 unsigned LaneNo =
1749 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1751 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1752 N->getOperand(NumVecs + 3), N->getOperand(0)};
1753 SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
1755 // Transfer memoperands.
1756 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1757 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
1759 ReplaceNode(N, St);
1762 void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
1763 unsigned Opc) {
1764 SDLoc dl(N);
1765 EVT VT = N->getOperand(2)->getValueType(0);
1766 bool Narrow = VT.getSizeInBits() == 64;
1768 // Form a REG_SEQUENCE to force register allocation.
1769 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1771 if (Narrow)
1772 transform(Regs, Regs.begin(),
1773 WidenVector(*CurDAG));
1775 SDValue RegSeq = createQTuple(Regs);
1777 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1778 MVT::Other};
1780 unsigned LaneNo =
1781 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1783 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1784 N->getOperand(NumVecs + 2), // Base Register
1785 N->getOperand(NumVecs + 3), // Incremental
1786 N->getOperand(0)};
1787 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1789 // Transfer memoperands.
1790 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1791 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
1793 ReplaceNode(N, St);
1796 static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1797 unsigned &Opc, SDValue &Opd0,
1798 unsigned &LSB, unsigned &MSB,
1799 unsigned NumberOfIgnoredLowBits,
1800 bool BiggerPattern) {
1801 assert(N->getOpcode() == ISD::AND &&
1802 "N must be a AND operation to call this function");
1804 EVT VT = N->getValueType(0);
1806 // We could test the type of VT here and return false when it does not
1807 // match, but since that check is done prior to this call in the current
1808 // context, we turn it into an assert to avoid redundant code.
1809 assert((VT == MVT::i32 || VT == MVT::i64) &&
1810 "Type checking must have been done before calling this function");
1812 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1813 // changed the AND node to a 32-bit mask operation. We'll have to
1814 // undo that as part of the transform here if we want to catch all
1815 // the opportunities.
1816 // Currently the NumberOfIgnoredLowBits argument helps to recover
1817 // from these situations when matching a bigger pattern (bitfield insert).
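// For illustration (a worked example of the matching below): on i32,
// (srl x, 5) & 0xff gives SrlImm = 5 and AndImm = 0xff, so LSB = 5 and
// MSB = 5 + 8 - 1 = 12, i.e. UBFMWri Wd, Wn, #5, #12 (a ubfx of 8 bits
// starting at bit 5).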
1819 // For unsigned extracts, check for a shift right and mask
1820 uint64_t AndImm = 0;
1821 if (!isOpcWithIntImmediate(N, ISD::AND, AndImm))
1822 return false;
1824 const SDNode *Op0 = N->getOperand(0).getNode();
1826 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
1827 // simplified. Try to undo that
1828 AndImm |= maskTrailingOnes<uint64_t>(NumberOfIgnoredLowBits);
1830 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
1831 if (AndImm & (AndImm + 1))
1832 return false;
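// For example, 0xff & (0xff + 1) == 0, so 0xff is a mask of the low 8 bits,
// whereas 0xf0 & (0xf0 + 1) == 0xf0 != 0, so 0xf0 is rejected here.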
1834 bool ClampMSB = false;
1835 uint64_t SrlImm = 0;
1836 // Handle the SRL + ANY_EXTEND case.
1837 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
1838 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, SrlImm)) {
1839 // Extend the incoming operand of the SRL to 64-bit.
1840 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
1841 // Make sure to clamp the MSB so that we preserve the semantics of the
1842 // original operations.
1843 ClampMSB = true;
1844 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
1845 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
1846 SrlImm)) {
1847 // If the shift result was truncated, we can still combine them.
1848 Opd0 = Op0->getOperand(0).getOperand(0);
1850 // Use the type of the SRL node.
1851 VT = Opd0->getValueType(0);
1852 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, SrlImm)) {
1853 Opd0 = Op0->getOperand(0);
1854 } else if (BiggerPattern) {
1855 // Let's pretend a 0 shift right has been performed.
1856 // The resulting code will be at least as good as the original one
1857 // plus it may expose more opportunities for the bitfield insert pattern.
1858 // FIXME: Currently we limit this to the bigger pattern, because
1859 // some optimizations expect AND and not UBFM.
1860 Opd0 = N->getOperand(0);
1861 } else
1862 return false;
1864 // Bail out on large immediates. This happens when no proper
1865 // combining/constant folding was performed.
1866 if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) {
1867 LLVM_DEBUG(
1868 (dbgs() << N
1869 << ": Found large shift immediate, this should not happen\n"));
1870 return false;
1873 LSB = SrlImm;
1874 MSB = SrlImm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(AndImm)
1875 : countTrailingOnes<uint64_t>(AndImm)) - 1;
1877 if (ClampMSB)
1878 // Since we're moving the extend before the right shift operation, we need
1879 // to clamp the MSB to make sure we don't shift in undefined bits instead of
1880 // the zeros which would get shifted in with the original right shift
1881 // operation.
1882 MSB = MSB > 31 ? 31 : MSB;
1884 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1885 return true;
1888 static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
1889 SDValue &Opd0, unsigned &Immr,
1890 unsigned &Imms) {
1891 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
1893 EVT VT = N->getValueType(0);
1894 unsigned BitWidth = VT.getSizeInBits();
1895 assert((VT == MVT::i32 || VT == MVT::i64) &&
1896 "Type checking must have been done before calling this function");
1898 SDValue Op = N->getOperand(0);
1899 if (Op->getOpcode() == ISD::TRUNCATE) {
1900 Op = Op->getOperand(0);
1901 VT = Op->getValueType(0);
1902 BitWidth = VT.getSizeInBits();
1905 uint64_t ShiftImm;
1906 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRL, ShiftImm) &&
1907 !isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
1908 return false;
1910 unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
1911 if (ShiftImm + Width > BitWidth)
1912 return false;
1914 Opc = (VT == MVT::i32) ? AArch64::SBFMWri : AArch64::SBFMXri;
1915 Opd0 = Op.getOperand(0);
1916 Immr = ShiftImm;
1917 Imms = ShiftImm + Width - 1;
1918 return true;
1921 static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
1922 SDValue &Opd0, unsigned &LSB,
1923 unsigned &MSB) {
1924 // We are looking for the following pattern, which basically extracts several
1925 // contiguous bits from the source value and places them at the LSB of the
1926 // destination value; all other bits of the destination value are set to zero:
1928 // Value2 = AND Value, MaskImm
1929 // SRL Value2, ShiftImm
1931 // with MaskImm >> ShiftImm used to determine the bit width.
1933 // This gets selected into a single UBFM:
1935 // UBFM Value, ShiftImm, BitWide + SrlImm - 1
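// For illustration (a worked example of the matching below): on i64,
// (x & 0xff0) >> 4 gives AndMask = 0xff0 and SrlImm = 4, so BitWide = 8,
// LSB = 4 and MSB = 11, i.e. UBFMXri Xd, Xn, #4, #11 (a ubfx of 8 bits
// starting at bit 4).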
1938 if (N->getOpcode() != ISD::SRL)
1939 return false;
1941 uint64_t AndMask = 0;
1942 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, AndMask))
1943 return false;
1945 Opd0 = N->getOperand(0).getOperand(0);
1947 uint64_t SrlImm = 0;
1948 if (!isIntImmediate(N->getOperand(1), SrlImm))
1949 return false;
1952 // Check whether we really are extracting several bits here.
1952 unsigned BitWide = 64 - countLeadingOnes(~(AndMask >> SrlImm));
1953 if (BitWide && isMask_64(AndMask >> SrlImm)) {
1954 if (N->getValueType(0) == MVT::i32)
1955 Opc = AArch64::UBFMWri;
1956 else
1957 Opc = AArch64::UBFMXri;
1959 LSB = SrlImm;
1960 MSB = BitWide + SrlImm - 1;
1961 return true;
1964 return false;
1967 static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1968 unsigned &Immr, unsigned &Imms,
1969 bool BiggerPattern) {
1970 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1971 "N must be a SHR/SRA operation to call this function");
1973 EVT VT = N->getValueType(0);
1975 // We could test the type of VT here and return false when it does not
1976 // match, but since that check is done prior to this call in the current
1977 // context, we turn it into an assert to avoid redundant code.
1978 assert((VT == MVT::i32 || VT == MVT::i64) &&
1979 "Type checking must have been done before calling this function");
1981 // Check for AND + SRL doing several bits extract.
1982 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
1983 return true;
1985 // We're looking for a shift of a shift.
1986 uint64_t ShlImm = 0;
1987 uint64_t TruncBits = 0;
1988 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, ShlImm)) {
1989 Opd0 = N->getOperand(0).getOperand(0);
1990 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
1991 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
1992 // We are looking for a shift of a truncate. A truncate from i64 to i32 can
1993 // be considered as setting the high 32 bits to zero. Our strategy here is to
1994 // always generate a 64-bit UBFM. This consistency will help the CSE pass
1995 // later find more redundancy.
1996 Opd0 = N->getOperand(0).getOperand(0);
1997 TruncBits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
1998 VT = Opd0.getValueType();
1999 assert(VT == MVT::i64 && "the promoted type should be i64");
2000 } else if (BiggerPattern) {
2001 // Let's pretend a 0 shift left has been performed.
2002 // FIXME: Currently we limit this to the bigger pattern case,
2003 // because some optimizations expect AND and not UBFM
2004 Opd0 = N->getOperand(0);
2005 } else
2006 return false;
2008 // Missing combines/constant folding may have left us with strange
2009 // constants.
2010 if (ShlImm >= VT.getSizeInBits()) {
2011 LLVM_DEBUG(
2012 (dbgs() << N
2013 << ": Found large shift immediate, this should not happen\n"));
2014 return false;
2017 uint64_t SrlImm = 0;
2018 if (!isIntImmediate(N->getOperand(1), SrlImm))
2019 return false;
2021 assert(SrlImm > 0 && SrlImm < VT.getSizeInBits() &&
2022 "bad amount in shift node!");
2023 int immr = SrlImm - ShlImm;
2024 Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
2025 Imms = VT.getSizeInBits() - ShlImm - TruncBits - 1;
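// For example, on i64 with (srl (shl x, 8), 16): ShlImm = 8, SrlImm = 16 and
// TruncBits = 0, so Immr = 8 and Imms = 55, i.e. a ubfx of 48 bits starting
// at bit 8.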
2026 // SRA requires a signed extraction
2027 if (VT == MVT::i32)
2028 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
2029 else
2030 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
2031 return true;
2034 bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) {
2035 assert(N->getOpcode() == ISD::SIGN_EXTEND);
2037 EVT VT = N->getValueType(0);
2038 EVT NarrowVT = N->getOperand(0)->getValueType(0);
2039 if (VT != MVT::i64 || NarrowVT != MVT::i32)
2040 return false;
2042 uint64_t ShiftImm;
2043 SDValue Op = N->getOperand(0);
2044 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
2045 return false;
2047 SDLoc dl(N);
2048 // Extend the incoming operand of the shift to 64-bits.
2049 SDValue Opd0 = Widen(CurDAG, Op.getOperand(0));
2050 unsigned Immr = ShiftImm;
2051 unsigned Imms = NarrowVT.getSizeInBits() - 1;
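// For example, sext_i64(sra_i32 x, 3) gives Immr = 3 and Imms = 31, i.e. an
// SBFMXri on the widened operand (an sbfx of bits [3, 31], sign-extended to
// 64 bits).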
2052 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
2053 CurDAG->getTargetConstant(Imms, dl, VT)};
2054 CurDAG->SelectNodeTo(N, AArch64::SBFMXri, VT, Ops);
2055 return true;
2058 /// Try to form fcvtl2 instructions from a floating-point extend of a high-half
2059 /// extract of a subvector.
2060 bool AArch64DAGToDAGISel::tryHighFPExt(SDNode *N) {
2061 assert(N->getOpcode() == ISD::FP_EXTEND);
2063 // There are 2 forms of fcvtl2 - extend to double or extend to float.
2064 SDValue Extract = N->getOperand(0);
2065 EVT VT = N->getValueType(0);
2066 EVT NarrowVT = Extract.getValueType();
2067 if ((VT != MVT::v2f64 || NarrowVT != MVT::v2f32) &&
2068 (VT != MVT::v4f32 || NarrowVT != MVT::v4f16))
2069 return false;
2071 // Optionally look past a bitcast.
2072 Extract = peekThroughBitcasts(Extract);
2073 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2074 return false;
2076 // Match extract from start of high half index.
2077 // Example: v8i16 -> v4i16 means the extract must begin at index 4.
2078 unsigned ExtractIndex = Extract.getConstantOperandVal(1);
2079 if (ExtractIndex != Extract.getValueType().getVectorNumElements())
2080 return false;
2082 auto Opcode = VT == MVT::v2f64 ? AArch64::FCVTLv4i32 : AArch64::FCVTLv8i16;
2083 CurDAG->SelectNodeTo(N, Opcode, VT, Extract.getOperand(0));
2084 return true;
2087 static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
2088 SDValue &Opd0, unsigned &Immr, unsigned &Imms,
2089 unsigned NumberOfIgnoredLowBits = 0,
2090 bool BiggerPattern = false) {
2091 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
2092 return false;
2094 switch (N->getOpcode()) {
2095 default:
2096 if (!N->isMachineOpcode())
2097 return false;
2098 break;
2099 case ISD::AND:
2100 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
2101 NumberOfIgnoredLowBits, BiggerPattern);
2102 case ISD::SRL:
2103 case ISD::SRA:
2104 return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
2106 case ISD::SIGN_EXTEND_INREG:
2107 return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms);
2110 unsigned NOpc = N->getMachineOpcode();
2111 switch (NOpc) {
2112 default:
2113 return false;
2114 case AArch64::SBFMWri:
2115 case AArch64::UBFMWri:
2116 case AArch64::SBFMXri:
2117 case AArch64::UBFMXri:
2118 Opc = NOpc;
2119 Opd0 = N->getOperand(0);
2120 Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
2121 Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
2122 return true;
2124 // Unreachable
2125 return false;
2128 bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {
2129 unsigned Opc, Immr, Imms;
2130 SDValue Opd0;
2131 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
2132 return false;
2134 EVT VT = N->getValueType(0);
2135 SDLoc dl(N);
2137 // If the bit extract operation is 64-bit but the original type is 32-bit, we
2138 // need to add an EXTRACT_SUBREG.
2139 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
2140 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
2141 CurDAG->getTargetConstant(Imms, dl, MVT::i64)};
2143 SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
2144 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
2145 ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
2146 MVT::i32, SDValue(BFM, 0), SubReg));
2147 return true;
2150 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
2151 CurDAG->getTargetConstant(Imms, dl, VT)};
2152 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2153 return true;
2156 /// Does DstMask form a complementary pair with the mask provided by
2157 /// BitsToBeInserted, suitable for use in a BFI instruction? Roughly speaking,
2158 /// this asks whether DstMask zeroes precisely those bits that will be set by
2159 /// the other half.
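/// For example, on i32 with no ignored high bits, DstMask = 0xffff0000 pairs
/// with BitsToBeInserted = 0x0000ffff: the two do not overlap and together
/// cover the whole register, so this returns true.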
2160 static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
2161 unsigned NumberOfIgnoredHighBits, EVT VT) {
2162 assert((VT == MVT::i32 || VT == MVT::i64) &&
2163 "i32 or i64 mask type expected!");
2164 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
2166 APInt SignificantDstMask = APInt(BitWidth, DstMask);
2167 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
2169 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
2170 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
2173 // Look for bits that will be useful for later uses.
2174 // A bit is considered useless as soon as it is dropped and was never used
2175 // before being dropped.
2176 // E.g., looking for the useful bits of x:
2177 // 1. y = x & 0x7
2178 // 2. z = y >> 2
2179 // After #1, the useful bits of x are 0x7; these useful bits of x live through
2180 // y.
2181 // After #2, the useful bits of x are 0x4.
2182 // However, if x is used by an unpredictable instruction, then all its bits
2183 // are useful.
2184 // E.g.
2185 // 1. y = x & 0x7
2186 // 2. z = y >> 2
2187 // 3. str x, [@x]
2188 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
2190 static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
2191 unsigned Depth) {
2192 uint64_t Imm =
2193 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
2194 Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
2195 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
2196 getUsefulBits(Op, UsefulBits, Depth + 1);
2199 static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
2200 uint64_t Imm, uint64_t MSB,
2201 unsigned Depth) {
2202 // inherit the bitwidth value
2203 APInt OpUsefulBits(UsefulBits);
2204 OpUsefulBits = 1;
2206 if (MSB >= Imm) {
2207 OpUsefulBits <<= MSB - Imm + 1;
2208 --OpUsefulBits;
2209 // The interesting part will be in the lower part of the result
2210 getUsefulBits(Op, OpUsefulBits, Depth + 1);
2211 // The interesting part was starting at Imm in the argument
2212 OpUsefulBits <<= Imm;
2213 } else {
2214 OpUsefulBits <<= MSB + 1;
2215 --OpUsefulBits;
2216 // The interesting part will be shifted in the result
2217 OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
2218 getUsefulBits(Op, OpUsefulBits, Depth + 1);
2219 // The interesting part was at zero in the argument
2220 OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
2223 UsefulBits &= OpUsefulBits;
2226 static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
2227 unsigned Depth) {
2228 uint64_t Imm =
2229 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
2230 uint64_t MSB =
2231 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
2233 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
2236 static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
2237 unsigned Depth) {
2238 uint64_t ShiftTypeAndValue =
2239 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
2240 APInt Mask(UsefulBits);
2241 Mask.clearAllBits();
2242 Mask.flipAllBits();
2244 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
2245 // Shift Left
2246 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
2247 Mask <<= ShiftAmt;
2248 getUsefulBits(Op, Mask, Depth + 1);
2249 Mask.lshrInPlace(ShiftAmt);
2250 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
2251 // Shift Right
2252 // We do not handle AArch64_AM::ASR, because the sign will change the
2253 // number of useful bits
2254 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
2255 Mask.lshrInPlace(ShiftAmt);
2256 getUsefulBits(Op, Mask, Depth + 1);
2257 Mask <<= ShiftAmt;
2258 } else
2259 return;
2261 UsefulBits &= Mask;
2264 static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
2265 unsigned Depth) {
2266 uint64_t Imm =
2267 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
2268 uint64_t MSB =
2269 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
2271 APInt OpUsefulBits(UsefulBits);
2272 OpUsefulBits = 1;
2274 APInt ResultUsefulBits(UsefulBits.getBitWidth(), 0);
2275 ResultUsefulBits.flipAllBits();
2276 APInt Mask(UsefulBits.getBitWidth(), 0);
2278 getUsefulBits(Op, ResultUsefulBits, Depth + 1);
2280 if (MSB >= Imm) {
2281 // The instruction is a BFXIL.
2282 uint64_t Width = MSB - Imm + 1;
2283 uint64_t LSB = Imm;
2285 OpUsefulBits <<= Width;
2286 --OpUsefulBits;
2288 if (Op.getOperand(1) == Orig) {
2289 // Copy the low bits from the result to bits starting from LSB.
2290 Mask = ResultUsefulBits & OpUsefulBits;
2291 Mask <<= LSB;
2294 if (Op.getOperand(0) == Orig)
2295 // Bits starting from LSB in the input contribute to the result.
2296 Mask |= (ResultUsefulBits & ~OpUsefulBits);
2297 } else {
2298 // The instruction is a BFI.
2299 uint64_t Width = MSB + 1;
2300 uint64_t LSB = UsefulBits.getBitWidth() - Imm;
2302 OpUsefulBits <<= Width;
2303 --OpUsefulBits;
2304 OpUsefulBits <<= LSB;
2306 if (Op.getOperand(1) == Orig) {
2307 // Copy the bits from the result to the zero bits.
2308 Mask = ResultUsefulBits & OpUsefulBits;
2309 Mask.lshrInPlace(LSB);
2312 if (Op.getOperand(0) == Orig)
2313 Mask |= (ResultUsefulBits & ~OpUsefulBits);
2316 UsefulBits &= Mask;
2319 static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
2320 SDValue Orig, unsigned Depth) {
2322 // Users of this node should have already been instruction selected
2323 // FIXME: Can we turn that into an assert?
2324 if (!UserNode->isMachineOpcode())
2325 return;
2327 switch (UserNode->getMachineOpcode()) {
2328 default:
2329 return;
2330 case AArch64::ANDSWri:
2331 case AArch64::ANDSXri:
2332 case AArch64::ANDWri:
2333 case AArch64::ANDXri:
2334 // We increment Depth only when we call getUsefulBits.
2335 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
2336 Depth);
2337 case AArch64::UBFMWri:
2338 case AArch64::UBFMXri:
2339 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
2341 case AArch64::ORRWrs:
2342 case AArch64::ORRXrs:
2343 if (UserNode->getOperand(0) != Orig && UserNode->getOperand(1) == Orig)
2344 getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
2345 Depth);
2346 return;
2347 case AArch64::BFMWri:
2348 case AArch64::BFMXri:
2349 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
2351 case AArch64::STRBBui:
2352 case AArch64::STURBBi:
2353 if (UserNode->getOperand(0) != Orig)
2354 return;
2355 UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);
2356 return;
2358 case AArch64::STRHHui:
2359 case AArch64::STURHHi:
2360 if (UserNode->getOperand(0) != Orig)
2361 return;
2362 UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xffff);
2363 return;
2367 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
2368 if (Depth >= SelectionDAG::MaxRecursionDepth)
2369 return;
2370 // Initialize UsefulBits
2371 if (!Depth) {
2372 unsigned Bitwidth = Op.getScalarValueSizeInBits();
2373 // At the beginning, assume every produced bit is useful
2374 UsefulBits = APInt(Bitwidth, 0);
2375 UsefulBits.flipAllBits();
2377 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
2379 for (SDNode *Node : Op.getNode()->uses()) {
2380 // A use cannot produce useful bits
2381 APInt UsefulBitsForUse = APInt(UsefulBits);
2382 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
2383 UsersUsefulBits |= UsefulBitsForUse;
2385 // UsefulBits contains the produced bits that are meaningful for the
2386 // current definition, thus a user cannot make a bit meaningful at
2387 // this point
2388 UsefulBits &= UsersUsefulBits;
2391 /// Create a machine node performing a notional SHL of Op by ShlAmount. If
2392 /// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
2393 /// 0, return Op unchanged.
2394 static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
2395 if (ShlAmount == 0)
2396 return Op;
2398 EVT VT = Op.getValueType();
2399 SDLoc dl(Op);
2400 unsigned BitWidth = VT.getSizeInBits();
2401 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
2403 SDNode *ShiftNode;
2404 if (ShlAmount > 0) {
2405 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
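// For example, lsl w0, w1, #4 == ubfm w0, w1, #28, #27 on a 32-bit register.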
2406 ShiftNode = CurDAG->getMachineNode(
2407 UBFMOpc, dl, VT, Op,
2408 CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
2409 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
2410 } else {
2411 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
2412 assert(ShlAmount < 0 && "expected right shift");
2413 int ShrAmount = -ShlAmount;
2414 ShiftNode = CurDAG->getMachineNode(
2415 UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
2416 CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
2419 return SDValue(ShiftNode, 0);
2422 /// Does this tree qualify as an attempt to move a bitfield into position,
2423 /// essentially "(and (shl VAL, N), Mask)".
2424 static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
2425 bool BiggerPattern,
2426 SDValue &Src, int &ShiftAmount,
2427 int &MaskWidth) {
2428 EVT VT = Op.getValueType();
2429 unsigned BitWidth = VT.getSizeInBits();
2430 (void)BitWidth;
2431 assert(BitWidth == 32 || BitWidth == 64);
2433 KnownBits Known = CurDAG->computeKnownBits(Op);
2435 // Non-zero in the sense that they're not provably zero, which is the key
2436 // point if we want to use this value
2437 uint64_t NonZeroBits = (~Known.Zero).getZExtValue();
2439 // Discard a constant AND mask if present. It's safe because the node will
2440 // already have been factored into the computeKnownBits calculation above.
2441 uint64_t AndImm;
2442 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
2443 assert((~APInt(BitWidth, AndImm) & ~Known.Zero) == 0);
2444 Op = Op.getOperand(0);
2447 // Don't match if the SHL has more than one use, since then we'll end up
2448 // generating SHL+UBFIZ instead of just keeping SHL+AND.
2449 if (!BiggerPattern && !Op.hasOneUse())
2450 return false;
2452 uint64_t ShlImm;
2453 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
2454 return false;
2455 Op = Op.getOperand(0);
2457 if (!isShiftedMask_64(NonZeroBits))
2458 return false;
2460 ShiftAmount = countTrailingZeros(NonZeroBits);
2461 MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
2463 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
2464 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
2465 // amount. BiggerPattern is true when this pattern is being matched for BFI,
2466 // BiggerPattern is false when this pattern is being matched for UBFIZ, in
2467 // which case it is not profitable to insert an extra shift.
2468 if (ShlImm - ShiftAmount != 0 && !BiggerPattern)
2469 return false;
2470 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
2472 return true;
2475 static bool isShiftedMask(uint64_t Mask, EVT VT) {
2476 assert(VT == MVT::i32 || VT == MVT::i64);
2477 if (VT == MVT::i32)
2478 return isShiftedMask_32(Mask);
2479 return isShiftedMask_64(Mask);
2482 // Generate a BFI/BFXIL from 'or (and X, MaskImm), OrImm' iff the value being
2483 // inserted only sets known zero bits.
2484 static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {
2485 assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
2487 EVT VT = N->getValueType(0);
2488 if (VT != MVT::i32 && VT != MVT::i64)
2489 return false;
2491 unsigned BitWidth = VT.getSizeInBits();
2493 uint64_t OrImm;
2494 if (!isOpcWithIntImmediate(N, ISD::OR, OrImm))
2495 return false;
2497 // Skip this transformation if the ORR immediate can be encoded directly in
2498 // the ORR; in that case we would only trade an AND+ORR for an ORR+BFI/BFXIL,
2499 // which is most likely performance neutral.
2500 if (AArch64_AM::isLogicalImmediate(OrImm, BitWidth))
2501 return false;
2503 uint64_t MaskImm;
2504 SDValue And = N->getOperand(0);
2505 // Must be a single use AND with an immediate operand.
2506 if (!And.hasOneUse() ||
2507 !isOpcWithIntImmediate(And.getNode(), ISD::AND, MaskImm))
2508 return false;
2510 // Compute the Known Zero for the AND as this allows us to catch more general
2511 // cases than just looking for AND with imm.
2512 KnownBits Known = CurDAG->computeKnownBits(And);
2514 // Non-zero in the sense that they're not provably zero, which is the key
2515 // point if we want to use this value.
2516 uint64_t NotKnownZero = (~Known.Zero).getZExtValue();
2518 // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
2519 if (!isShiftedMask(Known.Zero.getZExtValue(), VT))
2520 return false;
2522 // The bits being inserted must only set those bits that are known to be zero.
2523 if ((OrImm & NotKnownZero) != 0) {
2524 // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
2525 // currently handle this case.
2526 return false;
2529 // BFI/BFXIL dst, src, #lsb, #width.
2530 int LSB = countTrailingOnes(NotKnownZero);
2531 int Width = BitWidth - APInt(BitWidth, NotKnownZero).countPopulation();
2533 // BFI/BFXIL is an alias of BFM, so translate to BFM operands.
2534 unsigned ImmR = (BitWidth - LSB) % BitWidth;
2535 unsigned ImmS = Width - 1;
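// For example, on i32 a BFI at lsb 8 of width 8 corresponds to BFMWri with
// ImmR = (32 - 8) % 32 = 24 and ImmS = 8 - 1 = 7.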
2537 // If we're creating a BFI instruction, avoid cases where we need more
2538 // instructions to materialize the BFI constant as compared to the original
2539 // ORR. A BFXIL will use the same constant as the original ORR, so the code
2540 // should be no worse in this case.
2541 bool IsBFI = LSB != 0;
2542 uint64_t BFIImm = OrImm >> LSB;
2543 if (IsBFI && !AArch64_AM::isLogicalImmediate(BFIImm, BitWidth)) {
2544 // We have a BFI instruction and we know the constant can't be materialized
2545 // with an ORR-immediate with the zero register.
2546 unsigned OrChunks = 0, BFIChunks = 0;
2547 for (unsigned Shift = 0; Shift < BitWidth; Shift += 16) {
2548 if (((OrImm >> Shift) & 0xFFFF) != 0)
2549 ++OrChunks;
2550 if (((BFIImm >> Shift) & 0xFFFF) != 0)
2551 ++BFIChunks;
2553 if (BFIChunks > OrChunks)
2554 return false;
2557 // Materialize the constant to be inserted.
2558 SDLoc DL(N);
2559 unsigned MOVIOpc = VT == MVT::i32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
2560 SDNode *MOVI = CurDAG->getMachineNode(
2561 MOVIOpc, DL, VT, CurDAG->getTargetConstant(BFIImm, DL, VT));
2563 // Create the BFI/BFXIL instruction.
2564 SDValue Ops[] = {And.getOperand(0), SDValue(MOVI, 0),
2565 CurDAG->getTargetConstant(ImmR, DL, VT),
2566 CurDAG->getTargetConstant(ImmS, DL, VT)};
2567 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2568 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2569 return true;
2572 static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
2573 SelectionDAG *CurDAG) {
2574 assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
2576 EVT VT = N->getValueType(0);
2577 if (VT != MVT::i32 && VT != MVT::i64)
2578 return false;
2580 unsigned BitWidth = VT.getSizeInBits();
2582 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
2583 // have the expected shape. Try to undo that.
2585 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
2586 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
2588 // Given an OR operation, check if we have the following pattern
2589 // ubfm c, b, imm, imm2 (or something that does the same job, see
2590 // isBitfieldExtractOp)
2591 // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
2592 // countTrailingZeros(mask2) == imm2 - imm + 1
2593 // f = d | c
2594 // if yes, replace the OR instruction with:
2595 // f = BFM Opd0, Opd1, LSB, MSB ; where LSB = imm, and MSB = imm2
2597 // OR is commutative, check all combinations of operand order and values of
2598 // BiggerPattern, i.e.
2599 // Opd0, Opd1, BiggerPattern=false
2600 // Opd1, Opd0, BiggerPattern=false
2601 // Opd0, Opd1, BiggerPattern=true
2602 // Opd1, Opd0, BiggerPattern=true
2603 // Several of these combinations may match, so check with BiggerPattern=false
2604 // first since that will produce better results by matching more instructions
2605 // and/or inserting fewer extra instructions.
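// For illustration (a rough worked example of the loop below, assuming all
// bits of the result are useful): on i32, with c = UBFMWri b, #0, #7 (a ubfx
// of 8 bits), d = e & 0xffffff00 and f = d | c, the OR is replaced by
// BFMWri e, b, #0, #7, i.e. a bfxil of the low 8 bits of b into e.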
2606 for (int I = 0; I < 4; ++I) {
2608 SDValue Dst, Src;
2609 unsigned ImmR, ImmS;
2610 bool BiggerPattern = I / 2;
2611 SDValue OrOpd0Val = N->getOperand(I % 2);
2612 SDNode *OrOpd0 = OrOpd0Val.getNode();
2613 SDValue OrOpd1Val = N->getOperand((I + 1) % 2);
2614 SDNode *OrOpd1 = OrOpd1Val.getNode();
2616 unsigned BFXOpc;
2617 int DstLSB, Width;
2618 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
2619 NumberOfIgnoredLowBits, BiggerPattern)) {
2620 // Check that the returned opcode is compatible with the pattern,
2621 // i.e., same type and zero extended (U and not S)
2622 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
2623 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
2624 continue;
2626 // Compute the width of the bitfield insertion
2627 DstLSB = 0;
2628 Width = ImmS - ImmR + 1;
2629 // FIXME: This constraint is to catch bitfield insertion; we may
2630 // want to widen the pattern if we want to grab the general bitfield
2631 // move case.
2632 if (Width <= 0)
2633 continue;
2635 // If the mask on the insertee is correct, we have a BFXIL operation. We
2636 // can share the ImmR and ImmS values from the already-computed UBFM.
2637 } else if (isBitfieldPositioningOp(CurDAG, OrOpd0Val,
2638 BiggerPattern,
2639 Src, DstLSB, Width)) {
2640 ImmR = (BitWidth - DstLSB) % BitWidth;
2641 ImmS = Width - 1;
2642 } else
2643 continue;
2645 // Check the second part of the pattern
2646 EVT VT = OrOpd1Val.getValueType();
2647 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
2649 // Compute the Known Zero for the candidate of the first operand.
2650 // This allows us to catch more general cases than just looking for
2651 // an AND with an immediate. Indeed, simplify-demanded-bits may have removed
2652 // the AND instruction because it proved it was useless.
2653 KnownBits Known = CurDAG->computeKnownBits(OrOpd1Val);
2655 // Check if there is enough room for the second operand to appear
2656 // in the first one
2657 APInt BitsToBeInserted =
2658 APInt::getBitsSet(Known.getBitWidth(), DstLSB, DstLSB + Width);
2660 if ((BitsToBeInserted & ~Known.Zero) != 0)
2661 continue;
2663 // Set the first operand
2664 uint64_t Imm;
2665 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
2666 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
2667 // In that case, we can eliminate the AND
2668 Dst = OrOpd1->getOperand(0);
2669 else
2670 // Maybe the AND has been removed by simplify-demanded-bits
2671 // or is useful because it discards more bits
2672 Dst = OrOpd1Val;
2674 // both parts match
2675 SDLoc DL(N);
2676 SDValue Ops[] = {Dst, Src, CurDAG->getTargetConstant(ImmR, DL, VT),
2677 CurDAG->getTargetConstant(ImmS, DL, VT)};
2678 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2679 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2680 return true;
2683 // Generate a BFXIL from 'or (and X, Mask0Imm), (and Y, Mask1Imm)' iff
2684 // Mask0Imm and ~Mask1Imm are equivalent and one of the MaskImms is a shifted
2685 // mask (e.g., 0x000ffff0).
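// For illustration (a worked example of the case below): on i32,
// (x & 0xfff0000f) | (y & 0x000ffff0) has Mask1Imm = 0x000ffff0 as the
// shifted mask, so LSB = 4 and Width = 16; y is shifted right by 4 and the
// OR becomes BFMWri with ImmR = 28 and ImmS = 15.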
2686 uint64_t Mask0Imm, Mask1Imm;
2687 SDValue And0 = N->getOperand(0);
2688 SDValue And1 = N->getOperand(1);
2689 if (And0.hasOneUse() && And1.hasOneUse() &&
2690 isOpcWithIntImmediate(And0.getNode(), ISD::AND, Mask0Imm) &&
2691 isOpcWithIntImmediate(And1.getNode(), ISD::AND, Mask1Imm) &&
2692 APInt(BitWidth, Mask0Imm) == ~APInt(BitWidth, Mask1Imm) &&
2693 (isShiftedMask(Mask0Imm, VT) || isShiftedMask(Mask1Imm, VT))) {
2695 // ORR is commutative, so canonicalize to the form 'or (and X, Mask0Imm),
2696 // (and Y, Mask1Imm)' where Mask1Imm is the shifted mask masking off the
2697 // bits to be inserted.
2698 if (isShiftedMask(Mask0Imm, VT)) {
2699 std::swap(And0, And1);
2700 std::swap(Mask0Imm, Mask1Imm);
2703 SDValue Src = And1->getOperand(0);
2704 SDValue Dst = And0->getOperand(0);
2705 unsigned LSB = countTrailingZeros(Mask1Imm);
2706 int Width = BitWidth - APInt(BitWidth, Mask0Imm).countPopulation();
2708 // The BFXIL inserts the low-order bits from a source register, so right
2709 // shift the needed bits into place.
2710 SDLoc DL(N);
2711 unsigned ShiftOpc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
2712 SDNode *LSR = CurDAG->getMachineNode(
2713 ShiftOpc, DL, VT, Src, CurDAG->getTargetConstant(LSB, DL, VT),
2714 CurDAG->getTargetConstant(BitWidth - 1, DL, VT));
2716 // BFXIL is an alias of BFM, so translate to BFM operands.
2717 unsigned ImmR = (BitWidth - LSB) % BitWidth;
2718 unsigned ImmS = Width - 1;
2720 // Create the BFXIL instruction.
2721 SDValue Ops[] = {Dst, SDValue(LSR, 0),
2722 CurDAG->getTargetConstant(ImmR, DL, VT),
2723 CurDAG->getTargetConstant(ImmS, DL, VT)};
2724 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2725 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2726 return true;
2729 return false;
2732 bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
2733 if (N->getOpcode() != ISD::OR)
2734 return false;
2736 APInt NUsefulBits;
2737 getUsefulBits(SDValue(N, 0), NUsefulBits);
2739 // If none of the bits are useful, just return UNDEF.
2740 if (!NUsefulBits) {
2741 CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
2742 return true;
2745 if (tryBitfieldInsertOpFromOr(N, NUsefulBits, CurDAG))
2746 return true;
2748 return tryBitfieldInsertOpFromOrAndImm(N, CurDAG);
2751 /// SelectBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
2752 /// equivalent of a left shift by a constant amount followed by an and masking
2753 /// out a contiguous set of bits.
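/// For example, on i32 (x << 3) & 0x1f8 gives DstLSB = 3 and Width = 6, so it
/// is selected as UBFMWri with ImmR = 29 and ImmS = 5, i.e. ubfiz wd, wn, #3, #6.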
2754 bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
2755 if (N->getOpcode() != ISD::AND)
2756 return false;
2758 EVT VT = N->getValueType(0);
2759 if (VT != MVT::i32 && VT != MVT::i64)
2760 return false;
2762 SDValue Op0;
2763 int DstLSB, Width;
2764 if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
2765 Op0, DstLSB, Width))
2766 return false;
2768 // ImmR is the rotate right amount.
2769 unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
2770 // ImmS is the most significant bit of the source to be moved.
2771 unsigned ImmS = Width - 1;
2773 SDLoc DL(N);
2774 SDValue Ops[] = {Op0, CurDAG->getTargetConstant(ImmR, DL, VT),
2775 CurDAG->getTargetConstant(ImmS, DL, VT)};
2776 unsigned Opc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
2777 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2778 return true;
2781 /// tryShiftAmountMod - Take advantage of built-in mod of shift amount in
2782 /// variable shift/rotate instructions.
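/// For example, on i64 (shl x, (and amt, 63)) can drop the AND because LSLVXr
/// only reads the low 6 bits of the shift register, and (srl x, (sub 64, amt))
/// can use a NEG of amt instead of materializing the SUB.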
2783 bool AArch64DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
2784 EVT VT = N->getValueType(0);
2786 unsigned Opc;
2787 switch (N->getOpcode()) {
2788 case ISD::ROTR:
2789 Opc = (VT == MVT::i32) ? AArch64::RORVWr : AArch64::RORVXr;
2790 break;
2791 case ISD::SHL:
2792 Opc = (VT == MVT::i32) ? AArch64::LSLVWr : AArch64::LSLVXr;
2793 break;
2794 case ISD::SRL:
2795 Opc = (VT == MVT::i32) ? AArch64::LSRVWr : AArch64::LSRVXr;
2796 break;
2797 case ISD::SRA:
2798 Opc = (VT == MVT::i32) ? AArch64::ASRVWr : AArch64::ASRVXr;
2799 break;
2800 default:
2801 return false;
2804 uint64_t Size;
2805 uint64_t Bits;
2806 if (VT == MVT::i32) {
2807 Bits = 5;
2808 Size = 32;
2809 } else if (VT == MVT::i64) {
2810 Bits = 6;
2811 Size = 64;
2812 } else
2813 return false;
2815 SDValue ShiftAmt = N->getOperand(1);
2816 SDLoc DL(N);
2817 SDValue NewShiftAmt;
2819 // Skip over an extend of the shift amount.
2820 if (ShiftAmt->getOpcode() == ISD::ZERO_EXTEND ||
2821 ShiftAmt->getOpcode() == ISD::ANY_EXTEND)
2822 ShiftAmt = ShiftAmt->getOperand(0);
2824 if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
2825 SDValue Add0 = ShiftAmt->getOperand(0);
2826 SDValue Add1 = ShiftAmt->getOperand(1);
2827 uint64_t Add0Imm;
2828 uint64_t Add1Imm;
2829 // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
2830 // to avoid the ADD/SUB.
2831 if (isIntImmediate(Add1, Add1Imm) && (Add1Imm % Size == 0))
2832 NewShiftAmt = Add0;
2833 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to
2834 // generate a NEG instead of a SUB of a constant.
2835 else if (ShiftAmt->getOpcode() == ISD::SUB &&
2836 isIntImmediate(Add0, Add0Imm) && Add0Imm != 0 &&
2837 (Add0Imm % Size == 0)) {
2838 unsigned NegOpc;
2839 unsigned ZeroReg;
2840 EVT SubVT = ShiftAmt->getValueType(0);
2841 if (SubVT == MVT::i32) {
2842 NegOpc = AArch64::SUBWrr;
2843 ZeroReg = AArch64::WZR;
2844 } else {
2845 assert(SubVT == MVT::i64);
2846 NegOpc = AArch64::SUBXrr;
2847 ZeroReg = AArch64::XZR;
2849 SDValue Zero =
2850 CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, ZeroReg, SubVT);
2851 MachineSDNode *Neg =
2852 CurDAG->getMachineNode(NegOpc, DL, SubVT, Zero, Add1);
2853 NewShiftAmt = SDValue(Neg, 0);
2854 } else
2855 return false;
2856 } else {
2857 // If the shift amount is masked with an AND, check that the mask covers the
2858 // bits that are implicitly ANDed off by the above opcodes and if so, skip
2859 // the AND.
2860 uint64_t MaskImm;
2861 if (!isOpcWithIntImmediate(ShiftAmt.getNode(), ISD::AND, MaskImm) &&
2862 !isOpcWithIntImmediate(ShiftAmt.getNode(), AArch64ISD::ANDS, MaskImm))
2863 return false;
2865 if (countTrailingOnes(MaskImm) < Bits)
2866 return false;
2868 NewShiftAmt = ShiftAmt->getOperand(0);
2871 // Narrow/widen the shift amount to match the size of the shift operation.
2872 if (VT == MVT::i32)
2873 NewShiftAmt = narrowIfNeeded(CurDAG, NewShiftAmt);
2874 else if (VT == MVT::i64 && NewShiftAmt->getValueType(0) == MVT::i32) {
2875 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, DL, MVT::i32);
2876 MachineSDNode *Ext = CurDAG->getMachineNode(
2877 AArch64::SUBREG_TO_REG, DL, VT,
2878 CurDAG->getTargetConstant(0, DL, MVT::i64), NewShiftAmt, SubReg);
2879 NewShiftAmt = SDValue(Ext, 0);
2882 SDValue Ops[] = {N->getOperand(0), NewShiftAmt};
2883 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2884 return true;
2887 bool
2888 AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
2889 unsigned RegWidth) {
2890 APFloat FVal(0.0);
2891 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
2892 FVal = CN->getValueAPF();
2893 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2894 // Some otherwise illegal constants are allowed in this case.
2895 if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
2896 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2897 return false;
2899 ConstantPoolSDNode *CN =
2900 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
2901 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
2902 } else
2903 return false;
2905 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
2906 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
2907 // x-register.
2909 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
2910 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
2911 // integers.
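// For example, if the fmul multiplies by 16.0 then IntVal is 16 and FBits is
// 4, which becomes the #4 fixed-point operand of the resulting fcvtzs/fcvtzu.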
2912 bool IsExact;
2914 // fbits is between 1 and 64 in the worst-case, which means the fmul
2915 // could have 2^64 as an actual operand. Need 65 bits of precision.
2916 APSInt IntVal(65, true);
2917 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
2919 // N.b. isPowerOf2 also checks for > 0.
2920 if (!IsExact || !IntVal.isPowerOf2()) return false;
2921 unsigned FBits = IntVal.logBase2();
2923 // Checks above should have guaranteed that we haven't lost information in
2924 // finding FBits, but it must still be in range.
2925 if (FBits == 0 || FBits > RegWidth) return false;
2927 FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
2928 return true;
2931 // Inspects a register string of the form o0:op1:CRn:CRm:op2, gets the fields
2932 // of the string, obtains the integer values from them, and combines these
2933 // into a single value to be used in the MRS/MSR instruction.
2934 static int getIntOperandFromRegisterString(StringRef RegString) {
2935 SmallVector<StringRef, 5> Fields;
2936 RegString.split(Fields, ':');
2938 if (Fields.size() == 1)
2939 return -1;
2941 assert(Fields.size() == 5
2942 && "Invalid number of fields in read register string");
2944 SmallVector<int, 5> Ops;
2945 bool AllIntFields = true;
2947 for (StringRef Field : Fields) {
2948 unsigned IntField;
2949 AllIntFields &= !Field.getAsInteger(10, IntField);
2950 Ops.push_back(IntField);
2953 assert(AllIntFields &&
2954 "Unexpected non-integer value in special register string.");
2955 (void)AllIntFields;
2957 // Need to combine the integer fields of the string into a single value
2958 // based on the bit encoding of the MRS/MSR instruction.
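// For example, "3:3:13:0:2" (the encoding of TPIDR_EL0) combines to
// (3 << 14) | (3 << 11) | (13 << 7) | (0 << 3) | 2 = 0xde82.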
2959 return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
2960 (Ops[3] << 3) | (Ops[4]);
2963 // Lower the read_register intrinsic to an MRS instruction node if the special
2964 // register string argument is either of the form detailed in the ACLE (the
2965 // form described in getIntOperandFromRegisterString) or is a named register
2966 // known by the MRS SysReg mapper.
2967 bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
2968 const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
2969 const auto *RegString = cast<MDString>(MD->getMD()->getOperand(0));
2970 SDLoc DL(N);
2972 int Reg = getIntOperandFromRegisterString(RegString->getString());
2973 if (Reg != -1) {
2974 ReplaceNode(N, CurDAG->getMachineNode(
2975 AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
2976 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2977 N->getOperand(0)));
2978 return true;
2981 // Use the sysreg mapper to map the remaining possible strings to the
2982 // value for the register to be used for the instruction operand.
2983 auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
2984 if (TheReg && TheReg->Readable &&
2985 TheReg->haveFeatures(Subtarget->getFeatureBits()))
2986 Reg = TheReg->Encoding;
2987 else
2988 Reg = AArch64SysReg::parseGenericRegister(RegString->getString());
2990 if (Reg != -1) {
2991 ReplaceNode(N, CurDAG->getMachineNode(
2992 AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
2993 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2994 N->getOperand(0)));
2995 return true;
2998 if (RegString->getString() == "pc") {
2999 ReplaceNode(N, CurDAG->getMachineNode(
3000 AArch64::ADR, DL, N->getSimpleValueType(0), MVT::Other,
3001 CurDAG->getTargetConstant(0, DL, MVT::i32),
3002 N->getOperand(0)));
3003 return true;
3006 return false;
3009 // Lower the write_register intrinsic to an MSR instruction node if the special
3010 // register string argument is either of the form detailed in the ACLE (the
3011 // form described in getIntOperandFromRegisterString) or is a named register
3012 // known by the MSR SysReg mapper.
3013 bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
3014 const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
3015 const auto *RegString = cast<MDString>(MD->getMD()->getOperand(0));
3016 SDLoc DL(N);
3018 int Reg = getIntOperandFromRegisterString(RegString->getString());
3019 if (Reg != -1) {
3020 ReplaceNode(
3021 N, CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
3022 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
3023 N->getOperand(2), N->getOperand(0)));
3024 return true;
3027 // Check if the register was one of those allowed as the pstatefield value in
3028 // the MSR (immediate) instruction. To accept the values allowed in the
3029 // pstatefield for the MSR (immediate) instruction, we also require that an
3030 // immediate value has been provided as an argument; we know that this is
3031 // the case, as it has been ensured by semantic checking.
3032 auto PMapper = AArch64PState::lookupPStateByName(RegString->getString());
3033 if (PMapper) {
3034 assert (isa<ConstantSDNode>(N->getOperand(2))
3035 && "Expected a constant integer expression.");
3036 unsigned Reg = PMapper->Encoding;
3037 uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
3038 unsigned State;
3039 if (Reg == AArch64PState::PAN || Reg == AArch64PState::UAO || Reg == AArch64PState::SSBS) {
3040 assert(Immed < 2 && "Bad imm");
3041 State = AArch64::MSRpstateImm1;
3042 } else {
3043 assert(Immed < 16 && "Bad imm");
3044 State = AArch64::MSRpstateImm4;
3046 ReplaceNode(N, CurDAG->getMachineNode(
3047 State, DL, MVT::Other,
3048 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
3049 CurDAG->getTargetConstant(Immed, DL, MVT::i16),
3050 N->getOperand(0)));
3051 return true;
3054 // Use the sysreg mapper to attempt to map the remaining possible strings
3055 // to the value for the register to be used for the MSR (register)
3056 // instruction operand.
3057 auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
3058 if (TheReg && TheReg->Writeable &&
3059 TheReg->haveFeatures(Subtarget->getFeatureBits()))
3060 Reg = TheReg->Encoding;
3061 else
3062 Reg = AArch64SysReg::parseGenericRegister(RegString->getString());
3063 if (Reg != -1) {
3064 ReplaceNode(N, CurDAG->getMachineNode(
3065 AArch64::MSR, DL, MVT::Other,
3066 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
3067 N->getOperand(2), N->getOperand(0)));
3068 return true;
3071 return false;
3074 /// We've got special pseudo-instructions for these
3075 bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
3076 unsigned Opcode;
3077 EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();
3079 // Leave IR for LSE if subtarget supports it.
3080 if (Subtarget->hasLSE()) return false;
3082 if (MemTy == MVT::i8)
3083 Opcode = AArch64::CMP_SWAP_8;
3084 else if (MemTy == MVT::i16)
3085 Opcode = AArch64::CMP_SWAP_16;
3086 else if (MemTy == MVT::i32)
3087 Opcode = AArch64::CMP_SWAP_32;
3088 else if (MemTy == MVT::i64)
3089 Opcode = AArch64::CMP_SWAP_64;
3090 else
3091 llvm_unreachable("Unknown AtomicCmpSwap type");
3093 MVT RegTy = MemTy == MVT::i64 ? MVT::i64 : MVT::i32;
3094 SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
3095 N->getOperand(0)};
3096 SDNode *CmpSwap = CurDAG->getMachineNode(
3097 Opcode, SDLoc(N),
3098 CurDAG->getVTList(RegTy, MVT::i32, MVT::Other), Ops);
3100 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
3101 CurDAG->setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
3103 ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
3104 ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
3105 CurDAG->RemoveDeadNode(N);
3107 return true;
3110 bool AArch64DAGToDAGISel::SelectSVE8BitLslImm(SDValue N, SDValue &Base,
3111 SDValue &Offset) {
3112 auto C = dyn_cast<ConstantSDNode>(N);
3113 if (!C)
3114 return false;
3116 auto Ty = N->getValueType(0);
3118 int64_t Imm = C->getSExtValue();
3119 SDLoc DL(N);
3121 if ((Imm >= -128) && (Imm <= 127)) {
3122 Base = CurDAG->getTargetConstant(Imm, DL, Ty);
3123 Offset = CurDAG->getTargetConstant(0, DL, Ty);
3124 return true;
3127 if (((Imm % 256) == 0) && (Imm >= -32768) && (Imm <= 32512)) {
3128 Base = CurDAG->getTargetConstant(Imm/256, DL, Ty);
3129 Offset = CurDAG->getTargetConstant(8, DL, Ty);
3130 return true;
3133 return false;
3136 bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift) {
3137 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
3138 const int64_t ImmVal = CNode->getSExtValue();
3139 SDLoc DL(N);
3141 switch (VT.SimpleTy) {
3142 case MVT::i8:
3143 // Can always select i8s with no shift; mask the immediate value to
3144 // deal with the sign-extended value from lowering.
3145 Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
3146 Imm = CurDAG->getTargetConstant(ImmVal & 0xFF, DL, MVT::i32);
3147 return true;
3148 case MVT::i16:
3149 // i16 values get sign-extended to 32-bits during lowering.
3150 if ((ImmVal & 0xFF) == ImmVal) {
3151 Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
3152 Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
3153 return true;
3154 } else if ((ImmVal & 0xFF) == 0) {
3155 Shift = CurDAG->getTargetConstant(8, DL, MVT::i32);
3156 Imm = CurDAG->getTargetConstant((ImmVal >> 8) & 0xFF, DL, MVT::i32);
3157 return true;
3159 break;
3160 case MVT::i32:
3161 case MVT::i64:
3162 // The range of the immediate won't trigger signedness problems for 32/64-bit.
3163 if ((ImmVal & 0xFF) == ImmVal) {
3164 Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
3165 Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
3166 return true;
3167 } else if ((ImmVal & 0xFF00) == ImmVal) {
3168 Shift = CurDAG->getTargetConstant(8, DL, MVT::i32);
3169 Imm = CurDAG->getTargetConstant(ImmVal >> 8, DL, MVT::i32);
3170 return true;
3172 break;
3173 default:
3174 break;
3178 return false;
3181 bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDValue N, SDValue &Imm) {
3182 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
3183 int64_t ImmVal = CNode->getSExtValue();
3184 SDLoc DL(N);
3185 if (ImmVal >= -128 && ImmVal < 128) {
3186 Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
3187 return true;
3190 return false;
3193 bool AArch64DAGToDAGISel::SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm) {
3194 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
3195 uint64_t ImmVal = CNode->getZExtValue();
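// Truncate the constant to the element width, then only accept it if it fits
// the instruction's unsigned 8-bit immediate field.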
3197 switch (VT.SimpleTy) {
3198 case MVT::i8:
3199 ImmVal &= 0xFF;
3200 break;
3201 case MVT::i16:
3202 ImmVal &= 0xFFFF;
3203 break;
3204 case MVT::i32:
3205 ImmVal &= 0xFFFFFFFF;
3206 break;
3207 case MVT::i64:
3208 break;
3209 default:
3210 llvm_unreachable("Unexpected type");
3213 if (ImmVal < 256) {
3214 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i32);
3215 return true;
3218 return false;
3221 bool AArch64DAGToDAGISel::SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm,
3222 bool Invert) {
3223 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
3224 uint64_t ImmVal = CNode->getZExtValue();
3225 SDLoc DL(N);
3227 if (Invert)
3228 ImmVal = ~ImmVal;
3230 // Replicate the value across 64 bits according to the element type size.
3231 switch (VT.SimpleTy) {
3232 case MVT::i8:
3233 ImmVal &= 0xFF;
3234 ImmVal |= ImmVal << 8;
3235 ImmVal |= ImmVal << 16;
3236 ImmVal |= ImmVal << 32;
3237 break;
3238 case MVT::i16:
3239 ImmVal &= 0xFFFF;
3240 ImmVal |= ImmVal << 16;
3241 ImmVal |= ImmVal << 32;
3242 break;
3243 case MVT::i32:
3244 ImmVal &= 0xFFFFFFFF;
3245 ImmVal |= ImmVal << 32;
3246 break;
3247 case MVT::i64:
3248 break;
3249 default:
3250 llvm_unreachable("Unexpected type");
3253 uint64_t encoding;
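// e.g. for an i16 element value of 0x00FF, ImmVal is now 0x00FF00FF00FF00FF,
// which processLogicalImmediate can test as a 64-bit bitmask immediate.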
3254 if (AArch64_AM::processLogicalImmediate(ImmVal, 64, encoding)) {
3255 Imm = CurDAG->getTargetConstant(encoding, DL, MVT::i64);
3256 return true;
3259 return false;
3262 // SVE shift intrinsics allow shift amounts larger than the element's bitwidth.
3263 // Rather than attempt to normalise everything we can sometimes saturate the
3264 // shift amount during selection. This function also allows for consistent
3265 // isel patterns by ensuring the resulting "Imm" node is of the i32 type
3266 // required by the instructions.
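// For example, with Low = 1 and High = 16 (a right shift of .h elements), an
// amount of 20 is clamped to 16 when AllowSaturation is set and rejected
// otherwise.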
3267 bool AArch64DAGToDAGISel::SelectSVEShiftImm(SDValue N, uint64_t Low,
3268 uint64_t High, bool AllowSaturation,
3269 SDValue &Imm) {
3270 if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
3271 uint64_t ImmVal = CN->getZExtValue();
3273 // Reject shift amounts that are too small.
3274 if (ImmVal < Low)
3275 return false;
3277 // Reject or saturate shift amounts that are too big.
3278 if (ImmVal > High) {
3279 if (!AllowSaturation)
3280 return false;
3281 ImmVal = High;
3284 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i32);
3285 return true;
3288 return false;
3291 bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) {
3292 // tagp(FrameIndex, IRGstack, tag_offset):
3293 // since the offset between FrameIndex and IRGstack is a compile-time
3294 // constant, this can be lowered to a single ADDG instruction.
3295 if (!(isa<FrameIndexSDNode>(N->getOperand(1)))) {
3296 return false;
3299 SDValue IRG_SP = N->getOperand(2);
3300 if (IRG_SP->getOpcode() != ISD::INTRINSIC_W_CHAIN ||
3301 cast<ConstantSDNode>(IRG_SP->getOperand(1))->getZExtValue() !=
3302 Intrinsic::aarch64_irg_sp) {
3303 return false;
3306 const TargetLowering *TLI = getTargetLowering();
3307 SDLoc DL(N);
3308 int FI = cast<FrameIndexSDNode>(N->getOperand(1))->getIndex();
3309 SDValue FiOp = CurDAG->getTargetFrameIndex(
3310 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
3311 int TagOffset = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
3313 SDNode *Out = CurDAG->getMachineNode(
3314 AArch64::TAGPstack, DL, MVT::i64,
3315 {FiOp, CurDAG->getTargetConstant(0, DL, MVT::i64), N->getOperand(2),
3316 CurDAG->getTargetConstant(TagOffset, DL, MVT::i64)});
3317 ReplaceNode(N, Out);
3318 return true;
3321 void AArch64DAGToDAGISel::SelectTagP(SDNode *N) {
3322 assert(isa<ConstantSDNode>(N->getOperand(3)) &&
3323 "llvm.aarch64.tagp third argument must be an immediate");
3324 if (trySelectStackSlotTagP(N))
3325 return;
3326 // FIXME: the above also applies whenever the offset between Op1 and Op2 is a
3327 // compile-time constant, not just for stack allocations.
3329 // General case for unrelated pointers in Op1 and Op2.
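// Roughly: SUBP computes the untagged address difference Op1 - Op2, ADDXrr
// adds Op2 back so the result has Op1's address with Op2's tag, and ADDG then
// adds tag_offset to the logical tag.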
3330 SDLoc DL(N);
3331 int TagOffset = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
3332 SDNode *N1 = CurDAG->getMachineNode(AArch64::SUBP, DL, MVT::i64,
3333 {N->getOperand(1), N->getOperand(2)});
3334 SDNode *N2 = CurDAG->getMachineNode(AArch64::ADDXrr, DL, MVT::i64,
3335 {SDValue(N1, 0), N->getOperand(2)});
3336 SDNode *N3 = CurDAG->getMachineNode(
3337 AArch64::ADDG, DL, MVT::i64,
3338 {SDValue(N2, 0), CurDAG->getTargetConstant(0, DL, MVT::i64),
3339 CurDAG->getTargetConstant(TagOffset, DL, MVT::i64)});
3340 ReplaceNode(N, N3);
3343 // NOTE: We cannot use EXTRACT_SUBREG in all cases because the fixed length
3344 // vector types larger than NEON don't have a matching SubRegIndex.
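// e.g. a 256-bit fixed-length vector has no dsub/zsub-style index into the SVE
// register, so the default case below falls back to COPY_TO_REGCLASS.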
3345 static SDNode *extractSubReg(SelectionDAG *DAG, EVT VT, SDValue V) {
3346 assert(V.getValueType().isScalableVector() &&
3347 V.getValueType().getSizeInBits().getKnownMinSize() ==
3348 AArch64::SVEBitsPerBlock &&
3349 "Expected to extract from a packed scalable vector!");
3350 assert(VT.isFixedLengthVector() &&
3351 "Expected to extract a fixed length vector!");
3353 SDLoc DL(V);
3354 switch (VT.getSizeInBits()) {
3355 case 64: {
3356 auto SubReg = DAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
3357 return DAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, VT, V, SubReg);
3359 case 128: {
3360 auto SubReg = DAG->getTargetConstant(AArch64::zsub, DL, MVT::i32);
3361 return DAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, VT, V, SubReg);
3363 default: {
3364 auto RC = DAG->getTargetConstant(AArch64::ZPRRegClassID, DL, MVT::i64);
3365 return DAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
3370 // NOTE: We cannot use INSERT_SUBREG in all cases because the fixed length
3371 // vector types larger than NEON don't have a matching SubRegIndex.
3372 static SDNode *insertSubReg(SelectionDAG *DAG, EVT VT, SDValue V) {
3373 assert(VT.isScalableVector() &&
3374 VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock &&
3375 "Expected to insert into a packed scalable vector!");
3376 assert(V.getValueType().isFixedLengthVector() &&
3377 "Expected to insert a fixed length vector!");
3379 SDLoc DL(V);
3380 switch (V.getValueType().getSizeInBits()) {
3381 case 64: {
3382 auto SubReg = DAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
3383 auto Container = DAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
3384 return DAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, VT,
3385 SDValue(Container, 0), V, SubReg);
3387 case 128: {
3388 auto SubReg = DAG->getTargetConstant(AArch64::zsub, DL, MVT::i32);
3389 auto Container = DAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
3390 return DAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, VT,
3391 SDValue(Container, 0), V, SubReg);
3393 default: {
3394 auto RC = DAG->getTargetConstant(AArch64::ZPRRegClassID, DL, MVT::i64);
3395 return DAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
3400 void AArch64DAGToDAGISel::Select(SDNode *Node) {
3401 // If we have a custom node, we already have selected!
3402 if (Node->isMachineOpcode()) {
3403 LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
3404 Node->setNodeId(-1);
3405 return;
3408 // A few cases need custom selection.
3409 EVT VT = Node->getValueType(0);
3411 switch (Node->getOpcode()) {
3412 default:
3413 break;
3415 case ISD::ATOMIC_CMP_SWAP:
3416 if (SelectCMP_SWAP(Node))
3417 return;
3418 break;
3420 case ISD::READ_REGISTER:
3421 if (tryReadRegister(Node))
3422 return;
3423 break;
3425 case ISD::WRITE_REGISTER:
3426 if (tryWriteRegister(Node))
3427 return;
3428 break;
3430 case ISD::ADD:
3431 if (tryMLAV64LaneV128(Node))
3432 return;
3433 break;
3435 case ISD::LOAD: {
3436 // Try to select as an indexed load. Fall through to normal processing
3437 // if we can't.
3438 if (tryIndexedLoad(Node))
3439 return;
3440 break;
3443 case ISD::SRL:
3444 case ISD::AND:
3445 case ISD::SRA:
3446 case ISD::SIGN_EXTEND_INREG:
3447 if (tryBitfieldExtractOp(Node))
3448 return;
3449 if (tryBitfieldInsertInZeroOp(Node))
3450 return;
3451 LLVM_FALLTHROUGH;
3452 case ISD::ROTR:
3453 case ISD::SHL:
3454 if (tryShiftAmountMod(Node))
3455 return;
3456 break;
3458 case ISD::SIGN_EXTEND:
3459 if (tryBitfieldExtractOpFromSExt(Node))
3460 return;
3461 break;
3463 case ISD::FP_EXTEND:
3464 if (tryHighFPExt(Node))
3465 return;
3466 break;
3468 case ISD::OR:
3469 if (tryBitfieldInsertOp(Node))
3470 return;
3471 break;
3473 case ISD::EXTRACT_SUBVECTOR: {
3474 // Bail when not a "cast" like extract_subvector.
3475 if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue() != 0)
3476 break;
3478 // Bail when normal isel can do the job.
3479 EVT InVT = Node->getOperand(0).getValueType();
3480 if (VT.isScalableVector() || InVT.isFixedLengthVector())
3481 break;
3483 // NOTE: We can only get here when doing fixed length SVE code generation.
3484 // We do manual selection because the types involved are not linked to real
3485 // registers (despite being legal) and must be coerced into SVE registers.
3487 // NOTE: If the above changes, be aware that selection will still not work
3488 // because the td definition of extract_vector does not support extracting
3489 // a fixed length vector from a scalable vector.
3491 ReplaceNode(Node, extractSubReg(CurDAG, VT, Node->getOperand(0)));
3492 return;
3495 case ISD::INSERT_SUBVECTOR: {
3496 // Bail when not a "cast" like insert_subvector.
3497 if (cast<ConstantSDNode>(Node->getOperand(2))->getZExtValue() != 0)
3498 break;
3499 if (!Node->getOperand(0).isUndef())
3500 break;
3502 // Bail when normal isel should do the job.
3503 EVT InVT = Node->getOperand(1).getValueType();
3504 if (VT.isFixedLengthVector() || InVT.isScalableVector())
3505 break;
3507 // NOTE: We can only get here when doing fixed length SVE code generation.
3508 // We do manual selection because the types involved are not linked to real
3509 // registers (despite being legal) and must be coerced into SVE registers.
3511 // NOTE: If the above changes, be aware that selection will still not work
3512 // because the td definition of insert_vector does not support inserting a
3513 // fixed length vector into a scalable vector.
3515 ReplaceNode(Node, insertSubReg(CurDAG, VT, Node->getOperand(1)));
3516 return;
3519 case ISD::Constant: {
3520 // Materialize zero constants as copies from WZR/XZR. This allows
3521 // the coalescer to propagate these into other instructions.
3522 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
3523 if (ConstNode->isNullValue()) {
3524 if (VT == MVT::i32) {
3525 SDValue New = CurDAG->getCopyFromReg(
3526 CurDAG->getEntryNode(), SDLoc(Node), AArch64::WZR, MVT::i32);
3527 ReplaceNode(Node, New.getNode());
3528 return;
3529 } else if (VT == MVT::i64) {
3530 SDValue New = CurDAG->getCopyFromReg(
3531 CurDAG->getEntryNode(), SDLoc(Node), AArch64::XZR, MVT::i64);
3532 ReplaceNode(Node, New.getNode());
3533 return;
3536 break;
3539 case ISD::FrameIndex: {
3540 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
3541 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
3542 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
3543 const TargetLowering *TLI = getTargetLowering();
3544 SDValue TFI = CurDAG->getTargetFrameIndex(
3545 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
3546 SDLoc DL(Node);
3547 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
3548 CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
3549 CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
3550 return;
3552 case ISD::INTRINSIC_W_CHAIN: {
3553 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
3554 switch (IntNo) {
3555 default:
3556 break;
3557 case Intrinsic::aarch64_ldaxp:
3558 case Intrinsic::aarch64_ldxp: {
3559 unsigned Op =
3560 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
3561 SDValue MemAddr = Node->getOperand(2);
3562 SDLoc DL(Node);
3563 SDValue Chain = Node->getOperand(0);
3565 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
3566 MVT::Other, MemAddr, Chain);
3568 // Transfer memoperands.
3569 MachineMemOperand *MemOp =
3570 cast<MemIntrinsicSDNode>(Node)->getMemOperand();
3571 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});
3572 ReplaceNode(Node, Ld);
3573 return;
3575 case Intrinsic::aarch64_stlxp:
3576 case Intrinsic::aarch64_stxp: {
3577 unsigned Op =
3578 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
3579 SDLoc DL(Node);
3580 SDValue Chain = Node->getOperand(0);
3581 SDValue ValLo = Node->getOperand(2);
3582 SDValue ValHi = Node->getOperand(3);
3583 SDValue MemAddr = Node->getOperand(4);
3585 // Place arguments in the right order.
3586 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
3588 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
3589 // Transfer memoperands.
3590 MachineMemOperand *MemOp =
3591 cast<MemIntrinsicSDNode>(Node)->getMemOperand();
3592 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
3594 ReplaceNode(Node, St);
3595 return;
3597 case Intrinsic::aarch64_neon_ld1x2:
3598 if (VT == MVT::v8i8) {
3599 SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
3600 return;
3601 } else if (VT == MVT::v16i8) {
3602 SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
3603 return;
3604 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3605 SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
3606 return;
3607 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3608 SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
3609 return;
3610 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3611 SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
3612 return;
3613 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3614 SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
3615 return;
3616 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3617 SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
3618 return;
3619 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3620 SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
3621 return;
3623 break;
3624 case Intrinsic::aarch64_neon_ld1x3:
3625 if (VT == MVT::v8i8) {
3626 SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
3627 return;
3628 } else if (VT == MVT::v16i8) {
3629 SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
3630 return;
3631 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3632 SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
3633 return;
3634 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3635 SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
3636 return;
3637 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3638 SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
3639 return;
3640 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3641 SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
3642 return;
3643 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3644 SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
3645 return;
3646 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3647 SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
3648 return;
3650 break;
3651 case Intrinsic::aarch64_neon_ld1x4:
3652 if (VT == MVT::v8i8) {
3653 SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
3654 return;
3655 } else if (VT == MVT::v16i8) {
3656 SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
3657 return;
3658 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3659 SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
3660 return;
3661 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3662 SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
3663 return;
3664 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3665 SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
3666 return;
3667 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3668 SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
3669 return;
3670 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3671 SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
3672 return;
3673 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3674 SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
3675 return;
3677 break;
3678 case Intrinsic::aarch64_neon_ld2:
3679 if (VT == MVT::v8i8) {
3680 SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
3681 return;
3682 } else if (VT == MVT::v16i8) {
3683 SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
3684 return;
3685 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3686 SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
3687 return;
3688 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3689 SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
3690 return;
3691 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3692 SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
3693 return;
3694 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3695 SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
3696 return;
3697 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3698 SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
3699 return;
3700 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3701 SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
3702 return;
3704 break;
3705 case Intrinsic::aarch64_neon_ld3:
3706 if (VT == MVT::v8i8) {
3707 SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
3708 return;
3709 } else if (VT == MVT::v16i8) {
3710 SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
3711 return;
3712 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3713 SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
3714 return;
3715 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3716 SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
3717 return;
3718 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3719 SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
3720 return;
3721 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3722 SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
3723 return;
3724 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3725 SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
3726 return;
3727 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3728 SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
3729 return;
3731 break;
3732 case Intrinsic::aarch64_neon_ld4:
3733 if (VT == MVT::v8i8) {
3734 SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
3735 return;
3736 } else if (VT == MVT::v16i8) {
3737 SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
3738 return;
3739 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3740 SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
3741 return;
3742 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3743 SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
3744 return;
3745 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3746 SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
3747 return;
3748 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3749 SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
3750 return;
3751 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3752 SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
3753 return;
3754 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3755 SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
3756 return;
3758 break;
3759 case Intrinsic::aarch64_neon_ld2r:
3760 if (VT == MVT::v8i8) {
3761 SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
3762 return;
3763 } else if (VT == MVT::v16i8) {
3764 SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
3765 return;
3766 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3767 SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
3768 return;
3769 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3770 SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
3771 return;
3772 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3773 SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
3774 return;
3775 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3776 SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
3777 return;
3778 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3779 SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
3780 return;
3781 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3782 SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
3783 return;
3785 break;
3786 case Intrinsic::aarch64_neon_ld3r:
3787 if (VT == MVT::v8i8) {
3788 SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
3789 return;
3790 } else if (VT == MVT::v16i8) {
3791 SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
3792 return;
3793 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3794 SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
3795 return;
3796 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3797 SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
3798 return;
3799 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3800 SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
3801 return;
3802 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3803 SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
3804 return;
3805 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3806 SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
3807 return;
3808 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3809 SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
3810 return;
3812 break;
3813 case Intrinsic::aarch64_neon_ld4r:
3814 if (VT == MVT::v8i8) {
3815 SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
3816 return;
3817 } else if (VT == MVT::v16i8) {
3818 SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
3819 return;
3820 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3821 SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
3822 return;
3823 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3824 SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
3825 return;
3826 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3827 SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
3828 return;
3829 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3830 SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
3831 return;
3832 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3833 SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
3834 return;
3835 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3836 SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
3837 return;
3839 break;
3840 case Intrinsic::aarch64_neon_ld2lane:
3841 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3842 SelectLoadLane(Node, 2, AArch64::LD2i8);
3843 return;
3844 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3845 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
3846 SelectLoadLane(Node, 2, AArch64::LD2i16);
3847 return;
3848 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3849 VT == MVT::v2f32) {
3850 SelectLoadLane(Node, 2, AArch64::LD2i32);
3851 return;
3852 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3853 VT == MVT::v1f64) {
3854 SelectLoadLane(Node, 2, AArch64::LD2i64);
3855 return;
3857 break;
3858 case Intrinsic::aarch64_neon_ld3lane:
3859 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3860 SelectLoadLane(Node, 3, AArch64::LD3i8);
3861 return;
3862 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3863 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
3864 SelectLoadLane(Node, 3, AArch64::LD3i16);
3865 return;
3866 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3867 VT == MVT::v2f32) {
3868 SelectLoadLane(Node, 3, AArch64::LD3i32);
3869 return;
3870 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3871 VT == MVT::v1f64) {
3872 SelectLoadLane(Node, 3, AArch64::LD3i64);
3873 return;
3875 break;
3876 case Intrinsic::aarch64_neon_ld4lane:
3877 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3878 SelectLoadLane(Node, 4, AArch64::LD4i8);
3879 return;
3880 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3881 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
3882 SelectLoadLane(Node, 4, AArch64::LD4i16);
3883 return;
3884 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3885 VT == MVT::v2f32) {
3886 SelectLoadLane(Node, 4, AArch64::LD4i32);
3887 return;
3888 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3889 VT == MVT::v1f64) {
3890 SelectLoadLane(Node, 4, AArch64::LD4i64);
3891 return;
3893 break;
3894 case Intrinsic::aarch64_ld64b:
3895 SelectLoad(Node, 8, AArch64::LD64B, AArch64::x8sub_0);
3896 return;
3898 } break;
3899 case ISD::INTRINSIC_WO_CHAIN: {
3900 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
3901 switch (IntNo) {
3902 default:
3903 break;
3904 case Intrinsic::aarch64_tagp:
3905 SelectTagP(Node);
3906 return;
3907 case Intrinsic::aarch64_neon_tbl2:
3908 SelectTable(Node, 2,
3909 VT == MVT::v8i8 ? AArch64::TBLv8i8Two : AArch64::TBLv16i8Two,
3910 false);
3911 return;
3912 case Intrinsic::aarch64_neon_tbl3:
3913 SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
3914 : AArch64::TBLv16i8Three,
3915 false);
3916 return;
3917 case Intrinsic::aarch64_neon_tbl4:
3918 SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
3919 : AArch64::TBLv16i8Four,
3920 false);
3921 return;
3922 case Intrinsic::aarch64_neon_tbx2:
3923 SelectTable(Node, 2,
3924 VT == MVT::v8i8 ? AArch64::TBXv8i8Two : AArch64::TBXv16i8Two,
3925 true);
3926 return;
3927 case Intrinsic::aarch64_neon_tbx3:
3928 SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
3929 : AArch64::TBXv16i8Three,
3930 true);
3931 return;
3932 case Intrinsic::aarch64_neon_tbx4:
3933 SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
3934 : AArch64::TBXv16i8Four,
3935 true);
3936 return;
3937 case Intrinsic::aarch64_neon_smull:
3938 case Intrinsic::aarch64_neon_umull:
3939 if (tryMULLV64LaneV128(IntNo, Node))
3940 return;
3941 break;
3942 case Intrinsic::swift_async_context_addr: {
3943 SDLoc DL(Node);
3944 CurDAG->SelectNodeTo(Node, AArch64::SUBXri, MVT::i64,
3945 CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
3946 AArch64::FP, MVT::i64),
3947 CurDAG->getTargetConstant(8, DL, MVT::i32),
3948 CurDAG->getTargetConstant(0, DL, MVT::i32));
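// The Swift async context lives in the extended frame record immediately
// below the frame pointer, hence the FP - 8 computed above.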
3949 auto &MF = CurDAG->getMachineFunction();
3950 MF.getFrameInfo().setFrameAddressIsTaken(true);
3951 MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
3952 return;
3955 break;
3957 case ISD::INTRINSIC_VOID: {
3958 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
3959 if (Node->getNumOperands() >= 3)
3960 VT = Node->getOperand(2)->getValueType(0);
3961 switch (IntNo) {
3962 default:
3963 break;
3964 case Intrinsic::aarch64_neon_st1x2: {
3965 if (VT == MVT::v8i8) {
3966 SelectStore(Node, 2, AArch64::ST1Twov8b);
3967 return;
3968 } else if (VT == MVT::v16i8) {
3969 SelectStore(Node, 2, AArch64::ST1Twov16b);
3970 return;
3971 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
3972 VT == MVT::v4bf16) {
3973 SelectStore(Node, 2, AArch64::ST1Twov4h);
3974 return;
3975 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
3976 VT == MVT::v8bf16) {
3977 SelectStore(Node, 2, AArch64::ST1Twov8h);
3978 return;
3979 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3980 SelectStore(Node, 2, AArch64::ST1Twov2s);
3981 return;
3982 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3983 SelectStore(Node, 2, AArch64::ST1Twov4s);
3984 return;
3985 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3986 SelectStore(Node, 2, AArch64::ST1Twov2d);
3987 return;
3988 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3989 SelectStore(Node, 2, AArch64::ST1Twov1d);
3990 return;
3992 break;
3994 case Intrinsic::aarch64_neon_st1x3: {
3995 if (VT == MVT::v8i8) {
3996 SelectStore(Node, 3, AArch64::ST1Threev8b);
3997 return;
3998 } else if (VT == MVT::v16i8) {
3999 SelectStore(Node, 3, AArch64::ST1Threev16b);
4000 return;
4001 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
4002 VT == MVT::v4bf16) {
4003 SelectStore(Node, 3, AArch64::ST1Threev4h);
4004 return;
4005 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
4006 VT == MVT::v8bf16) {
4007 SelectStore(Node, 3, AArch64::ST1Threev8h);
4008 return;
4009 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4010 SelectStore(Node, 3, AArch64::ST1Threev2s);
4011 return;
4012 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4013 SelectStore(Node, 3, AArch64::ST1Threev4s);
4014 return;
4015 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4016 SelectStore(Node, 3, AArch64::ST1Threev2d);
4017 return;
4018 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4019 SelectStore(Node, 3, AArch64::ST1Threev1d);
4020 return;
4022 break;
4024 case Intrinsic::aarch64_neon_st1x4: {
4025 if (VT == MVT::v8i8) {
4026 SelectStore(Node, 4, AArch64::ST1Fourv8b);
4027 return;
4028 } else if (VT == MVT::v16i8) {
4029 SelectStore(Node, 4, AArch64::ST1Fourv16b);
4030 return;
4031 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
4032 VT == MVT::v4bf16) {
4033 SelectStore(Node, 4, AArch64::ST1Fourv4h);
4034 return;
4035 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
4036 VT == MVT::v8bf16) {
4037 SelectStore(Node, 4, AArch64::ST1Fourv8h);
4038 return;
4039 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4040 SelectStore(Node, 4, AArch64::ST1Fourv2s);
4041 return;
4042 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4043 SelectStore(Node, 4, AArch64::ST1Fourv4s);
4044 return;
4045 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4046 SelectStore(Node, 4, AArch64::ST1Fourv2d);
4047 return;
4048 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4049 SelectStore(Node, 4, AArch64::ST1Fourv1d);
4050 return;
4052 break;
4054 case Intrinsic::aarch64_neon_st2: {
4055 if (VT == MVT::v8i8) {
4056 SelectStore(Node, 2, AArch64::ST2Twov8b);
4057 return;
4058 } else if (VT == MVT::v16i8) {
4059 SelectStore(Node, 2, AArch64::ST2Twov16b);
4060 return;
4061 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
4062 VT == MVT::v4bf16) {
4063 SelectStore(Node, 2, AArch64::ST2Twov4h);
4064 return;
4065 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
4066 VT == MVT::v8bf16) {
4067 SelectStore(Node, 2, AArch64::ST2Twov8h);
4068 return;
4069 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4070 SelectStore(Node, 2, AArch64::ST2Twov2s);
4071 return;
4072 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4073 SelectStore(Node, 2, AArch64::ST2Twov4s);
4074 return;
4075 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4076 SelectStore(Node, 2, AArch64::ST2Twov2d);
4077 return;
4078 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4079 SelectStore(Node, 2, AArch64::ST1Twov1d);
4080 return;
4082 break;
4084 case Intrinsic::aarch64_neon_st3: {
4085 if (VT == MVT::v8i8) {
4086 SelectStore(Node, 3, AArch64::ST3Threev8b);
4087 return;
4088 } else if (VT == MVT::v16i8) {
4089 SelectStore(Node, 3, AArch64::ST3Threev16b);
4090 return;
4091 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
4092 VT == MVT::v4bf16) {
4093 SelectStore(Node, 3, AArch64::ST3Threev4h);
4094 return;
4095 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
4096 VT == MVT::v8bf16) {
4097 SelectStore(Node, 3, AArch64::ST3Threev8h);
4098 return;
4099 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4100 SelectStore(Node, 3, AArch64::ST3Threev2s);
4101 return;
4102 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4103 SelectStore(Node, 3, AArch64::ST3Threev4s);
4104 return;
4105 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4106 SelectStore(Node, 3, AArch64::ST3Threev2d);
4107 return;
4108 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4109 SelectStore(Node, 3, AArch64::ST1Threev1d);
4110 return;
4112 break;
4114 case Intrinsic::aarch64_neon_st4: {
4115 if (VT == MVT::v8i8) {
4116 SelectStore(Node, 4, AArch64::ST4Fourv8b);
4117 return;
4118 } else if (VT == MVT::v16i8) {
4119 SelectStore(Node, 4, AArch64::ST4Fourv16b);
4120 return;
4121 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
4122 VT == MVT::v4bf16) {
4123 SelectStore(Node, 4, AArch64::ST4Fourv4h);
4124 return;
4125 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
4126 VT == MVT::v8bf16) {
4127 SelectStore(Node, 4, AArch64::ST4Fourv8h);
4128 return;
4129 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4130 SelectStore(Node, 4, AArch64::ST4Fourv2s);
4131 return;
4132 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4133 SelectStore(Node, 4, AArch64::ST4Fourv4s);
4134 return;
4135 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4136 SelectStore(Node, 4, AArch64::ST4Fourv2d);
4137 return;
4138 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4139 SelectStore(Node, 4, AArch64::ST1Fourv1d);
4140 return;
4142 break;
4144 case Intrinsic::aarch64_neon_st2lane: {
4145 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4146 SelectStoreLane(Node, 2, AArch64::ST2i8);
4147 return;
4148 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4149 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4150 SelectStoreLane(Node, 2, AArch64::ST2i16);
4151 return;
4152 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4153 VT == MVT::v2f32) {
4154 SelectStoreLane(Node, 2, AArch64::ST2i32);
4155 return;
4156 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4157 VT == MVT::v1f64) {
4158 SelectStoreLane(Node, 2, AArch64::ST2i64);
4159 return;
4161 break;
4163 case Intrinsic::aarch64_neon_st3lane: {
4164 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4165 SelectStoreLane(Node, 3, AArch64::ST3i8);
4166 return;
4167 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4168 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4169 SelectStoreLane(Node, 3, AArch64::ST3i16);
4170 return;
4171 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4172 VT == MVT::v2f32) {
4173 SelectStoreLane(Node, 3, AArch64::ST3i32);
4174 return;
4175 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4176 VT == MVT::v1f64) {
4177 SelectStoreLane(Node, 3, AArch64::ST3i64);
4178 return;
4180 break;
4182 case Intrinsic::aarch64_neon_st4lane: {
4183 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4184 SelectStoreLane(Node, 4, AArch64::ST4i8);
4185 return;
4186 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4187 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4188 SelectStoreLane(Node, 4, AArch64::ST4i16);
4189 return;
4190 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4191 VT == MVT::v2f32) {
4192 SelectStoreLane(Node, 4, AArch64::ST4i32);
4193 return;
4194 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4195 VT == MVT::v1f64) {
4196 SelectStoreLane(Node, 4, AArch64::ST4i64);
4197 return;
4199 break;
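// In the SVE structure-store cases below, the third SelectPredicatedStore
// argument is the log2 element size (0 for .b up to 3 for .d), and the two
// opcodes are the register-offset and vl-scaled-immediate forms.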
4201 case Intrinsic::aarch64_sve_st2: {
4202 if (VT == MVT::nxv16i8) {
4203 SelectPredicatedStore(Node, 2, 0, AArch64::ST2B, AArch64::ST2B_IMM);
4204 return;
4205 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4206 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
4207 SelectPredicatedStore(Node, 2, 1, AArch64::ST2H, AArch64::ST2H_IMM);
4208 return;
4209 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4210 SelectPredicatedStore(Node, 2, 2, AArch64::ST2W, AArch64::ST2W_IMM);
4211 return;
4212 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4213 SelectPredicatedStore(Node, 2, 3, AArch64::ST2D, AArch64::ST2D_IMM);
4214 return;
4216 break;
4218 case Intrinsic::aarch64_sve_st3: {
4219 if (VT == MVT::nxv16i8) {
4220 SelectPredicatedStore(Node, 3, 0, AArch64::ST3B, AArch64::ST3B_IMM);
4221 return;
4222 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4223 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
4224 SelectPredicatedStore(Node, 3, 1, AArch64::ST3H, AArch64::ST3H_IMM);
4225 return;
4226 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4227 SelectPredicatedStore(Node, 3, 2, AArch64::ST3W, AArch64::ST3W_IMM);
4228 return;
4229 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4230 SelectPredicatedStore(Node, 3, 3, AArch64::ST3D, AArch64::ST3D_IMM);
4231 return;
4233 break;
4235 case Intrinsic::aarch64_sve_st4: {
4236 if (VT == MVT::nxv16i8) {
4237 SelectPredicatedStore(Node, 4, 0, AArch64::ST4B, AArch64::ST4B_IMM);
4238 return;
4239 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4240 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
4241 SelectPredicatedStore(Node, 4, 1, AArch64::ST4H, AArch64::ST4H_IMM);
4242 return;
4243 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4244 SelectPredicatedStore(Node, 4, 2, AArch64::ST4W, AArch64::ST4W_IMM);
4245 return;
4246 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4247 SelectPredicatedStore(Node, 4, 3, AArch64::ST4D, AArch64::ST4D_IMM);
4248 return;
4250 break;
4253 break;
4255 case AArch64ISD::LD2post: {
4256 if (VT == MVT::v8i8) {
4257 SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
4258 return;
4259 } else if (VT == MVT::v16i8) {
4260 SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
4261 return;
4262 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4263 SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
4264 return;
4265 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4266 SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
4267 return;
4268 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4269 SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
4270 return;
4271 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4272 SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
4273 return;
4274 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4275 SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
4276 return;
4277 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4278 SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
4279 return;
4281 break;
4283 case AArch64ISD::LD3post: {
4284 if (VT == MVT::v8i8) {
4285 SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
4286 return;
4287 } else if (VT == MVT::v16i8) {
4288 SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
4289 return;
4290 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4291 SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
4292 return;
4293 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4294 SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
4295 return;
4296 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4297 SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
4298 return;
4299 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4300 SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
4301 return;
4302 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4303 SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
4304 return;
4305 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4306 SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
4307 return;
4309 break;
4311 case AArch64ISD::LD4post: {
4312 if (VT == MVT::v8i8) {
4313 SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
4314 return;
4315 } else if (VT == MVT::v16i8) {
4316 SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
4317 return;
4318 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4319 SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
4320 return;
4321 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4322 SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
4323 return;
4324 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4325 SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
4326 return;
4327 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4328 SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
4329 return;
4330 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4331 SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
4332 return;
4333 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4334 SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
4335 return;
4337 break;
4339 case AArch64ISD::LD1x2post: {
4340 if (VT == MVT::v8i8) {
4341 SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
4342 return;
4343 } else if (VT == MVT::v16i8) {
4344 SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
4345 return;
4346 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4347 SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
4348 return;
4349 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4350 SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
4351 return;
4352 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4353 SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
4354 return;
4355 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4356 SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
4357 return;
4358 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4359 SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
4360 return;
4361 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4362 SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
4363 return;
4365 break;
4367 case AArch64ISD::LD1x3post: {
4368 if (VT == MVT::v8i8) {
4369 SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
4370 return;
4371 } else if (VT == MVT::v16i8) {
4372 SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
4373 return;
4374 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4375 SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
4376 return;
4377 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4378 SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
4379 return;
4380 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4381 SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
4382 return;
4383 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4384 SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
4385 return;
4386 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4387 SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
4388 return;
4389 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4390 SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
4391 return;
4393 break;
4395 case AArch64ISD::LD1x4post: {
4396 if (VT == MVT::v8i8) {
4397 SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
4398 return;
4399 } else if (VT == MVT::v16i8) {
4400 SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
4401 return;
4402 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4403 SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
4404 return;
4405 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4406 SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
4407 return;
4408 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4409 SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
4410 return;
4411 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4412 SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
4413 return;
4414 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4415 SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
4416 return;
4417 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4418 SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
4419 return;
4421 break;
4423 case AArch64ISD::LD1DUPpost: {
4424 if (VT == MVT::v8i8) {
4425 SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
4426 return;
4427 } else if (VT == MVT::v16i8) {
4428 SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
4429 return;
4430 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4431 SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
4432 return;
4433 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4434 SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
4435 return;
4436 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4437 SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
4438 return;
4439 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4440 SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
4441 return;
4442 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4443 SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
4444 return;
4445 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4446 SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
4447 return;
4449 break;
4451 case AArch64ISD::LD2DUPpost: {
4452 if (VT == MVT::v8i8) {
4453 SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
4454 return;
4455 } else if (VT == MVT::v16i8) {
4456 SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
4457 return;
4458 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4459 SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
4460 return;
4461 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4462 SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
4463 return;
4464 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4465 SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
4466 return;
4467 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4468 SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
4469 return;
4470 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4471 SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
4472 return;
4473 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4474 SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
4475 return;
4477 break;
4479 case AArch64ISD::LD3DUPpost: {
4480 if (VT == MVT::v8i8) {
4481 SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
4482 return;
4483 } else if (VT == MVT::v16i8) {
4484 SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
4485 return;
4486 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4487 SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
4488 return;
4489 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4490 SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
4491 return;
4492 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4493 SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
4494 return;
4495 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4496 SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
4497 return;
4498 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4499 SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
4500 return;
4501 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4502 SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
4503 return;
4505 break;
4507 case AArch64ISD::LD4DUPpost: {
4508 if (VT == MVT::v8i8) {
4509 SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
4510 return;
4511 } else if (VT == MVT::v16i8) {
4512 SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
4513 return;
4514 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4515 SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
4516 return;
4517 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4518 SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
4519 return;
4520 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4521 SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
4522 return;
4523 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4524 SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
4525 return;
4526 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4527 SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
4528 return;
4529 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4530 SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
4531 return;
4533 break;
4535 case AArch64ISD::LD1LANEpost: {
4536 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4537 SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
4538 return;
4539 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4540 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4541 SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
4542 return;
4543 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4544 VT == MVT::v2f32) {
4545 SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
4546 return;
4547 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4548 VT == MVT::v1f64) {
4549 SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
4550 return;
4552 break;
4554 case AArch64ISD::LD2LANEpost: {
4555 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4556 SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
4557 return;
4558 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4559 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4560 SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
4561 return;
4562 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4563 VT == MVT::v2f32) {
4564 SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
4565 return;
4566 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4567 VT == MVT::v1f64) {
4568 SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
4569 return;
4571 break;
4573 case AArch64ISD::LD3LANEpost: {
4574 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4575 SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
4576 return;
4577 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4578 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4579 SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
4580 return;
4581 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4582 VT == MVT::v2f32) {
4583 SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
4584 return;
4585 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4586 VT == MVT::v1f64) {
4587 SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
4588 return;
4590 break;
4592 case AArch64ISD::LD4LANEpost: {
4593 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4594 SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
4595 return;
4596 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4597 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4598 SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
4599 return;
4600 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4601 VT == MVT::v2f32) {
4602 SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
4603 return;
4604 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4605 VT == MVT::v1f64) {
4606 SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
4607 return;
4609 break;
4611 case AArch64ISD::ST2post: {
4612 VT = Node->getOperand(1).getValueType();
4613 if (VT == MVT::v8i8) {
4614 SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
4615 return;
4616 } else if (VT == MVT::v16i8) {
4617 SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
4618 return;
4619 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4620 SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
4621 return;
4622 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4623 SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
4624 return;
4625 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4626 SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
4627 return;
4628 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4629 SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
4630 return;
4631 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4632 SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
4633 return;
4634 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4635 SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
4636 return;
4638 break;
4640 case AArch64ISD::ST3post: {
4641 VT = Node->getOperand(1).getValueType();
4642 if (VT == MVT::v8i8) {
4643 SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
4644 return;
4645 } else if (VT == MVT::v16i8) {
4646 SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
4647 return;
4648 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4649 SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
4650 return;
4651 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4652 SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
4653 return;
4654 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4655 SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
4656 return;
4657 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4658 SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
4659 return;
4660 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4661 SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
4662 return;
4663 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4664 SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
4665 return;
4667 break;
4669 case AArch64ISD::ST4post: {
4670 VT = Node->getOperand(1).getValueType();
4671 if (VT == MVT::v8i8) {
4672 SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
4673 return;
4674 } else if (VT == MVT::v16i8) {
4675 SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
4676 return;
4677 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4678 SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
4679 return;
4680 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4681 SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
4682 return;
4683 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4684 SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
4685 return;
4686 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4687 SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
4688 return;
4689 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4690 SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
4691 return;
4692 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4693 SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
4694 return;
4696 break;
4698 case AArch64ISD::ST1x2post: {
4699 VT = Node->getOperand(1).getValueType();
4700 if (VT == MVT::v8i8) {
4701 SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
4702 return;
4703 } else if (VT == MVT::v16i8) {
4704 SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
4705 return;
4706 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4707 SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
4708 return;
4709 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4710 SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
4711 return;
4712 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4713 SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
4714 return;
4715 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4716 SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
4717 return;
4718 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4719 SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
4720 return;
4721 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4722 SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
4723 return;
4725 break;
4727 case AArch64ISD::ST1x3post: {
4728 VT = Node->getOperand(1).getValueType();
4729 if (VT == MVT::v8i8) {
4730 SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
4731 return;
4732 } else if (VT == MVT::v16i8) {
4733 SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
4734 return;
4735 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4736 SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
4737 return;
4738 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4739 SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
4740 return;
4741 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4742 SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
4743 return;
4744 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4745 SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
4746 return;
4747 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4748 SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
4749 return;
4750 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4751 SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
4752 return;
4753 }
4754 break;
4755 }
4756 case AArch64ISD::ST1x4post: {
4757 VT = Node->getOperand(1).getValueType();
4758 if (VT == MVT::v8i8) {
4759 SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
4760 return;
4761 } else if (VT == MVT::v16i8) {
4762 SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
4763 return;
4764 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4765 SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
4766 return;
4767 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4768 SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
4769 return;
4770 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4771 SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
4772 return;
4773 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4774 SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
4775 return;
4776 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4777 SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
4778 return;
4779 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4780 SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
4781 return;
4782 }
4783 break;
4784 }
4785 case AArch64ISD::ST2LANEpost: {
4786 VT = Node->getOperand(1).getValueType();
4787 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4788 SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
4789 return;
4790 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4791 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4792 SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
4793 return;
4794 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4795 VT == MVT::v2f32) {
4796 SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
4797 return;
4798 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4799 VT == MVT::v1f64) {
4800 SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
4801 return;
4802 }
4803 break;
4804 }
4805 case AArch64ISD::ST3LANEpost: {
4806 VT = Node->getOperand(1).getValueType();
4807 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4808 SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
4809 return;
4810 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4811 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4812 SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
4813 return;
4814 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4815 VT == MVT::v2f32) {
4816 SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
4817 return;
4818 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4819 VT == MVT::v1f64) {
4820 SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
4821 return;
4822 }
4823 break;
4824 }
4825 case AArch64ISD::ST4LANEpost: {
4826 VT = Node->getOperand(1).getValueType();
4827 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4828 SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
4829 return;
4830 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4831 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4832 SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
4833 return;
4834 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4835 VT == MVT::v2f32) {
4836 SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
4837 return;
4838 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4839 VT == MVT::v1f64) {
4840 SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
4841 return;
4842 }
4843 break;
4844 }
4845 case AArch64ISD::SVE_LD2_MERGE_ZERO: {
4846 if (VT == MVT::nxv16i8) {
4847 SelectPredicatedLoad(Node, 2, 0, AArch64::LD2B_IMM, AArch64::LD2B);
4848 return;
4849 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4850 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
4851 SelectPredicatedLoad(Node, 2, 1, AArch64::LD2H_IMM, AArch64::LD2H);
4852 return;
4853 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4854 SelectPredicatedLoad(Node, 2, 2, AArch64::LD2W_IMM, AArch64::LD2W);
4855 return;
4856 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4857 SelectPredicatedLoad(Node, 2, 3, AArch64::LD2D_IMM, AArch64::LD2D);
4858 return;
4859 }
4860 break;
4861 }
4862 case AArch64ISD::SVE_LD3_MERGE_ZERO: {
4863 if (VT == MVT::nxv16i8) {
4864 SelectPredicatedLoad(Node, 3, 0, AArch64::LD3B_IMM, AArch64::LD3B);
4865 return;
4866 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4867 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
4868 SelectPredicatedLoad(Node, 3, 1, AArch64::LD3H_IMM, AArch64::LD3H);
4869 return;
4870 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4871 SelectPredicatedLoad(Node, 3, 2, AArch64::LD3W_IMM, AArch64::LD3W);
4872 return;
4873 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4874 SelectPredicatedLoad(Node, 3, 3, AArch64::LD3D_IMM, AArch64::LD3D);
4875 return;
4876 }
4877 break;
4878 }
4879 case AArch64ISD::SVE_LD4_MERGE_ZERO: {
4880 if (VT == MVT::nxv16i8) {
4881 SelectPredicatedLoad(Node, 4, 0, AArch64::LD4B_IMM, AArch64::LD4B);
4882 return;
4883 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4884 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
4885 SelectPredicatedLoad(Node, 4, 1, AArch64::LD4H_IMM, AArch64::LD4H);
4886 return;
4887 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4888 SelectPredicatedLoad(Node, 4, 2, AArch64::LD4W_IMM, AArch64::LD4W);
4889 return;
4890 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4891 SelectPredicatedLoad(Node, 4, 3, AArch64::LD4D_IMM, AArch64::LD4D);
4892 return;
4893 }
4894 break;
4895 }
4896 }
4898 // Select the default instruction
4899 SelectCode(Node);
4900 }
4902 /// createAArch64ISelDag - This pass converts a legalized DAG into an
4903 /// AArch64-specific DAG, ready for instruction scheduling.
4904 FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
4905 CodeGenOpt::Level OptLevel) {
4906 return new AArch64DAGToDAGISel(TM, OptLevel);
4907 }
4909 /// When \p PredVT is a scalable vector predicate in the form
4910 /// MVT::nx<M>xi1, it builds the corresponding scalable vector of
4911 /// integers MVT::nx<M>xi<bits> s.t. M x bits = 128. When targeting
4912 /// structured vectors (NumVec > 1), the output data type is
4913 /// MVT::nx<M*NumVec>xi<bits> s.t. M x bits = 128. If the input
4914 /// PredVT is not in the form MVT::nx<M>xi1, it returns an invalid
4915 /// EVT.
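/// For example, PredVT == MVT::nxv4i1 yields MVT::nxv4i32 when NumVec == 1
/// (4 x 32 = 128), and MVT::nxv8i32 when NumVec == 2.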
4916 static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT,
4917 unsigned NumVec) {
4918 assert(NumVec > 0 && NumVec < 5 && "Invalid number of vectors.");
4919 if (!PredVT.isScalableVector() || PredVT.getVectorElementType() != MVT::i1)
4920 return EVT();
4922 if (PredVT != MVT::nxv16i1 && PredVT != MVT::nxv8i1 &&
4923 PredVT != MVT::nxv4i1 && PredVT != MVT::nxv2i1)
4924 return EVT();
4926 ElementCount EC = PredVT.getVectorElementCount();
4927 EVT ScalarVT =
4928 EVT::getIntegerVT(Ctx, AArch64::SVEBitsPerBlock / EC.getKnownMinValue());
4929 EVT MemVT = EVT::getVectorVT(Ctx, ScalarVT, EC * NumVec);
4931 return MemVT;
4932 }
4934 /// Return the EVT of the data associated with a memory operation in \p
4935 /// Root. If such EVT cannot be retrieved, it returns an invalid EVT.
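/// For example, an AArch64ISD::SVE_LD2_MERGE_ZERO node whose governing
/// predicate has type MVT::nxv16i1 reports a memory type of MVT::nxv32i8.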
4936 static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
4937 if (isa<MemSDNode>(Root))
4938 return cast<MemSDNode>(Root)->getMemoryVT();
4940 if (isa<MemIntrinsicSDNode>(Root))
4941 return cast<MemIntrinsicSDNode>(Root)->getMemoryVT();
4943 const unsigned Opcode = Root->getOpcode();
4944 // For custom ISD nodes, we have to look at them individually to extract the
4945 // type of the data moved to/from memory.
4946 switch (Opcode) {
4947 case AArch64ISD::LD1_MERGE_ZERO:
4948 case AArch64ISD::LD1S_MERGE_ZERO:
4949 case AArch64ISD::LDNF1_MERGE_ZERO:
4950 case AArch64ISD::LDNF1S_MERGE_ZERO:
4951 return cast<VTSDNode>(Root->getOperand(3))->getVT();
4952 case AArch64ISD::ST1_PRED:
4953 return cast<VTSDNode>(Root->getOperand(4))->getVT();
4954 case AArch64ISD::SVE_LD2_MERGE_ZERO:
4955 return getPackedVectorTypeFromPredicateType(
4956 Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/2);
4957 case AArch64ISD::SVE_LD3_MERGE_ZERO:
4958 return getPackedVectorTypeFromPredicateType(
4959 Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/3);
4960 case AArch64ISD::SVE_LD4_MERGE_ZERO:
4961 return getPackedVectorTypeFromPredicateType(
4962 Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/4);
4963 default:
4964 break;
4965 }
4967 if (Opcode != ISD::INTRINSIC_VOID)
4968 return EVT();
4970 const unsigned IntNo =
4971 cast<ConstantSDNode>(Root->getOperand(1))->getZExtValue();
4972 if (IntNo != Intrinsic::aarch64_sve_prf)
4973 return EVT();
4975 // We are using an SVE prefetch intrinsic. Type must be inferred
4976 // from the width of the predicate.
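// For example, a predicate of type MVT::nxv8i1 implies a packed data type
// of MVT::nxv8i16.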
4977 return getPackedVectorTypeFromPredicateType(
4978 Ctx, Root->getOperand(2)->getValueType(0), /*NumVec=*/1);
4979 }
4981 /// SelectAddrModeIndexedSVE - Attempt selection of the addressing mode:
4982 /// Base + OffImm * sizeof(MemVT) for Min <= OffImm <= Max,
4983 /// where Root is the memory access using N for its address.
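/// For example, with MemVT == MVT::nxv4i32 (a known-minimum size of 16
/// bytes), an address of the form (add Base, (vscale * 32)) is selected as
/// Base with OffImm == 2, provided 2 lies within [Min, Max].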
4984 template <int64_t Min, int64_t Max>
4985 bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
4986 SDValue &Base,
4987 SDValue &OffImm) {
4988 const EVT MemVT = getMemVTFromNode(*(CurDAG->getContext()), Root);
4990 if (MemVT == EVT())
4991 return false;
4993 if (N.getOpcode() != ISD::ADD)
4994 return false;
4996 SDValue VScale = N.getOperand(1);
4997 if (VScale.getOpcode() != ISD::VSCALE)
4998 return false;
5000 TypeSize TS = MemVT.getSizeInBits();
5001 int64_t MemWidthBytes = static_cast<int64_t>(TS.getKnownMinSize()) / 8;
5002 int64_t MulImm = cast<ConstantSDNode>(VScale.getOperand(0))->getSExtValue();
5004 if ((MulImm % MemWidthBytes) != 0)
5005 return false;
5007 int64_t Offset = MulImm / MemWidthBytes;
5008 if (Offset < Min || Offset > Max)
5009 return false;
5011 Base = N.getOperand(0);
5012 OffImm = CurDAG->getTargetConstant(Offset, SDLoc(N), MVT::i64);
5013 return true;
5014 }
5016 /// Select register plus register addressing mode for SVE, with scaled
5017 /// offset.
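/// For example, with Scale == 2 an address of the form (add X, (shl Y, 2))
/// selects Base == X and Offset == Y, matching the [Xn, Xm, LSL #2] form.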
5018 bool AArch64DAGToDAGISel::SelectSVERegRegAddrMode(SDValue N, unsigned Scale,
5019 SDValue &Base,
5020 SDValue &Offset) {
5021 if (N.getOpcode() != ISD::ADD)
5022 return false;
5024 // Process an ADD node.
5025 const SDValue LHS = N.getOperand(0);
5026 const SDValue RHS = N.getOperand(1);
5028 // 8 bit data does not come with the SHL node, so it is treated
5029 // separately.
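// For Scale == 0 the offset is used unscaled, so an (add X, Y) address
// selects Base == X and Offset == Y directly.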
5030 if (Scale == 0) {
5031 Base = LHS;
5032 Offset = RHS;
5033 return true;
5034 }
5036 if (auto C = dyn_cast<ConstantSDNode>(RHS)) {
5037 int64_t ImmOff = C->getSExtValue();
5038 unsigned Size = 1 << Scale;
5040 // To use the reg+reg addressing mode, the immediate must be a multiple of
5041 // the vector element's byte size.
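// For example, with Scale == 2 (4-byte elements) an immediate offset of 12
// becomes an index of 3, which is materialized into a register below.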
5042 if (ImmOff % Size)
5043 return false;
5045 SDLoc DL(N);
5046 Base = LHS;
5047 Offset = CurDAG->getTargetConstant(ImmOff >> Scale, DL, MVT::i64);
5048 SDValue Ops[] = {Offset};
5049 SDNode *MI = CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
5050 Offset = SDValue(MI, 0);
5051 return true;
5052 }
5054 // Check if the RHS is a shift node with a constant.
5055 if (RHS.getOpcode() != ISD::SHL)
5056 return false;
5058 const SDValue ShiftRHS = RHS.getOperand(1);
5059 if (auto *C = dyn_cast<ConstantSDNode>(ShiftRHS))
5060 if (C->getZExtValue() == Scale) {
5061 Base = LHS;
5062 Offset = RHS.getOperand(0);
5063 return true;
5064 }
5066 return false;
5067 }
5069 bool AArch64DAGToDAGISel::SelectAllActivePredicate(SDValue N) {
5070 const AArch64TargetLowering *TLI =
5071 static_cast<const AArch64TargetLowering *>(getTargetLowering());
5073 return TLI->isAllActivePredicate(N);
5074 }