1 //===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the RISCV target.
11 //===----------------------------------------------------------------------===//
13 #include "RISCVISelDAGToDAG.h"
14 #include "MCTargetDesc/RISCVMCTargetDesc.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCVISelLowering.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/IR/IntrinsicsRISCV.h"
20 #include "llvm/Support/Alignment.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/KnownBits.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Support/raw_ostream.h"
26 using namespace llvm;
28 #define DEBUG_TYPE "riscv-isel"
30 namespace llvm {
31 namespace RISCV {
32 #define GET_RISCVVSSEGTable_IMPL
33 #define GET_RISCVVLSEGTable_IMPL
34 #define GET_RISCVVLXSEGTable_IMPL
35 #define GET_RISCVVSXSEGTable_IMPL
36 #define GET_RISCVVLETable_IMPL
37 #define GET_RISCVVSETable_IMPL
38 #define GET_RISCVVLXTable_IMPL
39 #define GET_RISCVVSXTable_IMPL
40 #include "RISCVGenSearchableTables.inc"
41 } // namespace RISCV
42 } // namespace llvm
44 void RISCVDAGToDAGISel::PreprocessISelDAG() {
45 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
46 E = CurDAG->allnodes_end();
47 I != E;) {
48 SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.
50 // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
51 // load. Done after lowering and combining so that we have a chance to
52 // optimize this to VMV_V_X_VL when the upper bits aren't needed.
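// Concretely, the code below stores Lo and Hi to a stack slot and reloads
// the pair with a stride-0 (x0 stride) riscv_vlse intrinsic, so every
// element of the result vector observes the same 64-bit value.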
53 if (N->getOpcode() != RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL)
54 continue;
56 assert(N->getNumOperands() == 3 && "Unexpected number of operands");
57 MVT VT = N->getSimpleValueType(0);
58 SDValue Lo = N->getOperand(0);
59 SDValue Hi = N->getOperand(1);
60 SDValue VL = N->getOperand(2);
61 assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
62 Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
63 "Unexpected VTs!");
64 MachineFunction &MF = CurDAG->getMachineFunction();
65 RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
66 SDLoc DL(N);
68 // We use the same frame index we use for moving two i32s into 64-bit FPR.
69 // This is an analogous operation.
70 int FI = FuncInfo->getMoveF64FrameIndex(MF);
71 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
72 const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
73 SDValue StackSlot =
74 CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));
76 SDValue Chain = CurDAG->getEntryNode();
77 Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
79 SDValue OffsetSlot =
80 CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
81 Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
82 Align(8));
84 Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
86 SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
87 SDValue IntID =
88 CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
89 SDValue Ops[] = {Chain, IntID, StackSlot,
90 CurDAG->getRegister(RISCV::X0, MVT::i64), VL};
92 SDValue Result = CurDAG->getMemIntrinsicNode(
93 ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8),
94 MachineMemOperand::MOLoad);
96 // We're about to replace all uses of the SPLAT_VECTOR_SPLIT_I64 with the
97 // vlse we created. This will cause general havoc on the DAG because
98 // anything below the conversion could be folded into other existing nodes.
99 // To avoid invalidating 'I', back it up to the convert node.
100 --I;
101 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
103 // Now that we did that, the node is dead. Increment the iterator to the
104 // next node to process, then delete N.
105 ++I;
106 CurDAG->DeleteNode(N);
107 }
108 }
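// Run the post-isel peepholes below (doPeepholeSExtW and
// doPeepholeLoadStoreADDI) over every selected machine node, then clean up
// any nodes they leave dead.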
110 void RISCVDAGToDAGISel::PostprocessISelDAG() {
111 SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
113 bool MadeChange = false;
114 while (Position != CurDAG->allnodes_begin()) {
115 SDNode *N = &*--Position;
116 // Skip dead nodes and any non-machine opcodes.
117 if (N->use_empty() || !N->isMachineOpcode())
118 continue;
120 MadeChange |= doPeepholeSExtW(N);
121 MadeChange |= doPeepholeLoadStoreADDI(N);
122 }
124 if (MadeChange)
125 CurDAG->RemoveDeadNodes();
126 }
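// Materialize the immediate Imm in an XLenVT register using the instruction
// sequence computed by RISCVMatInt::generateInstSeq, feeding each
// instruction's result into the next (only the first starts from X0).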
128 static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
129 const RISCVSubtarget &Subtarget) {
130 MVT XLenVT = Subtarget.getXLenVT();
131 RISCVMatInt::InstSeq Seq =
132 RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
134 SDNode *Result = nullptr;
135 SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
136 for (RISCVMatInt::Inst &Inst : Seq) {
137 SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
138 if (Inst.Opc == RISCV::LUI)
139 Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
140 else if (Inst.Opc == RISCV::ADDUW)
141 Result = CurDAG->getMachineNode(RISCV::ADDUW, DL, XLenVT, SrcReg,
142 CurDAG->getRegister(RISCV::X0, XLenVT));
143 else
144 Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);
146 // Only the first instruction has X0 as its source.
147 SrcReg = SDValue(Result, 0);
148 }
150 return Result;
151 }
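// Pack 2-8 vector registers into a single register of class RegClassID with
// a REG_SEQUENCE node; SubReg0 is the sub-register index of the first
// element of the tuple.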
153 static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
154 unsigned RegClassID, unsigned SubReg0) {
155 assert(Regs.size() >= 2 && Regs.size() <= 8);
157 SDLoc DL(Regs[0]);
158 SmallVector<SDValue, 8> Ops;
160 Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
162 for (unsigned I = 0; I < Regs.size(); ++I) {
163 Ops.push_back(Regs[I]);
164 Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
165 }
166 SDNode *N =
167 CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
168 return SDValue(N, 0);
169 }
171 static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
172 unsigned NF) {
173 static const unsigned RegClassIDs[] = {
174 RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
175 RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
176 RISCV::VRN8M1RegClassID};
178 return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
179 }
181 static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
182 unsigned NF) {
183 static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
184 RISCV::VRN3M2RegClassID,
185 RISCV::VRN4M2RegClassID};
187 return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
188 }
190 static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
191 unsigned NF) {
192 return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
193 RISCV::sub_vrm4_0);
194 }
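// Build the register tuple for an NF-field segment operation, choosing the
// M1, M2 or M4 tuple register class that matches LMUL.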
196 static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
197 unsigned NF, RISCVII::VLMUL LMUL) {
198 switch (LMUL) {
199 default:
200 llvm_unreachable("Invalid LMUL.");
201 case RISCVII::VLMUL::LMUL_F8:
202 case RISCVII::VLMUL::LMUL_F4:
203 case RISCVII::VLMUL::LMUL_F2:
204 case RISCVII::VLMUL::LMUL_1:
205 return createM1Tuple(CurDAG, Regs, NF);
206 case RISCVII::VLMUL::LMUL_2:
207 return createM2Tuple(CurDAG, Regs, NF);
208 case RISCVII::VLMUL::LMUL_4:
209 return createM4Tuple(CurDAG, Regs, NF);
210 }
211 }
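// Append the operands shared by the vector load/store pseudos: the base
// pointer, an optional stride or index, an optional mask (copied into V0),
// VL, SEW, the chain, and glue when a mask copy was created.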
213 void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
214 SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
215 bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
216 MVT *IndexVT) {
217 SDValue Chain = Node->getOperand(0);
218 SDValue Glue;
220 SDValue Base;
221 SelectBaseAddr(Node->getOperand(CurOp++), Base);
222 Operands.push_back(Base); // Base pointer.
224 if (IsStridedOrIndexed) {
225 Operands.push_back(Node->getOperand(CurOp++)); // Index.
226 if (IndexVT)
227 *IndexVT = Operands.back()->getSimpleValueType(0);
228 }
230 if (IsMasked) {
231 // Mask needs to be copied to V0.
232 SDValue Mask = Node->getOperand(CurOp++);
233 Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
234 Glue = Chain.getValue(1);
235 Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
236 }
237 SDValue VL;
238 selectVLOp(Node->getOperand(CurOp++), VL);
239 Operands.push_back(VL);
241 MVT XLenVT = Subtarget->getXLenVT();
242 SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
243 Operands.push_back(SEWOp);
245 Operands.push_back(Chain); // Chain.
246 if (Glue)
247 Operands.push_back(Glue);
248 }
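// Select a unit-stride or strided segment load (vlseg<nf>/vlsseg<nf>): the
// pseudo produces a register tuple from which the NF results are extracted
// by sub-register index.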
250 void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
251 bool IsStrided) {
252 SDLoc DL(Node);
253 unsigned NF = Node->getNumValues() - 1;
254 MVT VT = Node->getSimpleValueType(0);
255 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
256 RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
258 unsigned CurOp = 2;
259 SmallVector<SDValue, 8> Operands;
260 if (IsMasked) {
261 SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
262 Node->op_begin() + CurOp + NF);
263 SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
264 Operands.push_back(MaskedOff);
265 CurOp += NF;
266 }
268 addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
269 Operands);
271 const RISCV::VLSEGPseudo *P =
272 RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
273 static_cast<unsigned>(LMUL));
274 MachineSDNode *Load =
275 CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
277 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
278 CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
280 SDValue SuperReg = SDValue(Load, 0);
281 for (unsigned I = 0; I < NF; ++I) {
282 unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
283 ReplaceUses(SDValue(Node, I),
284 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
285 }
287 ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
288 CurDAG->RemoveDeadNode(Node);
289 }
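// Select a fault-only-first segment load (vlseg<nf>ff): besides the NF
// results, the trimmed VL is recovered through a glued PseudoReadVL.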
291 void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
292 SDLoc DL(Node);
293 unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
294 MVT VT = Node->getSimpleValueType(0);
295 MVT XLenVT = Subtarget->getXLenVT();
296 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
297 RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
299 unsigned CurOp = 2;
300 SmallVector<SDValue, 7> Operands;
301 if (IsMasked) {
302 SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
303 Node->op_begin() + CurOp + NF);
304 SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
305 Operands.push_back(MaskedOff);
306 CurOp += NF;
307 }
309 addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
310 /*IsStridedOrIndexed*/ false, Operands);
312 const RISCV::VLSEGPseudo *P =
313 RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
314 Log2SEW, static_cast<unsigned>(LMUL));
315 MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
316 MVT::Other, MVT::Glue, Operands);
317 SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
318 /*Glue*/ SDValue(Load, 2));
320 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
321 CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
323 SDValue SuperReg = SDValue(Load, 0);
324 for (unsigned I = 0; I < NF; ++I) {
325 unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
326 ReplaceUses(SDValue(Node, I),
327 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
328 }
330 ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0)); // VL
331 ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
332 CurDAG->RemoveDeadNode(Node);
333 }
335 void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
336 bool IsOrdered) {
337 SDLoc DL(Node);
338 unsigned NF = Node->getNumValues() - 1;
339 MVT VT = Node->getSimpleValueType(0);
340 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
341 RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
343 unsigned CurOp = 2;
344 SmallVector<SDValue, 8> Operands;
345 if (IsMasked) {
346 SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
347 Node->op_begin() + CurOp + NF);
348 SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
349 Operands.push_back(MaskedOff);
350 CurOp += NF;
351 }
353 MVT IndexVT;
354 addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
355 /*IsStridedOrIndexed*/ true, Operands, &IndexVT);
357 assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
358 "Element count mismatch");
360 RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
361 unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
362 const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
363 NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
364 static_cast<unsigned>(IndexLMUL));
365 MachineSDNode *Load =
366 CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
368 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
369 CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
371 SDValue SuperReg = SDValue(Load, 0);
372 for (unsigned I = 0; I < NF; ++I) {
373 unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
374 ReplaceUses(SDValue(Node, I),
375 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
376 }
378 ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
379 CurDAG->RemoveDeadNode(Node);
380 }
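// Select a unit-stride or strided segment store (vsseg<nf>/vssseg<nf>): the
// NF source vectors are first packed into a tuple register.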
382 void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
383 bool IsStrided) {
384 SDLoc DL(Node);
385 unsigned NF = Node->getNumOperands() - 4;
386 if (IsStrided)
387 NF--;
388 if (IsMasked)
389 NF--;
390 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
391 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
392 RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
393 SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
394 SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
396 SmallVector<SDValue, 8> Operands;
397 Operands.push_back(StoreVal);
398 unsigned CurOp = 2 + NF;
400 addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
401 Operands);
403 const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
404 NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
405 MachineSDNode *Store =
406 CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
408 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
409 CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
411 ReplaceNode(Node, Store);
412 }
414 void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
415 bool IsOrdered) {
416 SDLoc DL(Node);
417 unsigned NF = Node->getNumOperands() - 5;
418 if (IsMasked)
419 --NF;
420 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
421 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
422 RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
423 SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
424 SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
426 SmallVector<SDValue, 8> Operands;
427 Operands.push_back(StoreVal);
428 unsigned CurOp = 2 + NF;
430 MVT IndexVT;
431 addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
432 /*IsStridedOrIndexed*/ true, Operands, &IndexVT);
434 assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
435 "Element count mismatch");
437 RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
438 unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
439 const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
440 NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
441 static_cast<unsigned>(IndexLMUL));
442 MachineSDNode *Store =
443 CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
445 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
446 CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
448 ReplaceNode(Node, Store);
449 }
452 void RISCVDAGToDAGISel::Select(SDNode *Node) {
453 // If we have a custom node, we have already selected.
454 if (Node->isMachineOpcode()) {
455 LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
456 Node->setNodeId(-1);
457 return;
458 }
460 // Instruction Selection not handled by the auto-generated tablegen selection
461 // should be handled here.
462 unsigned Opcode = Node->getOpcode();
463 MVT XLenVT = Subtarget->getXLenVT();
464 SDLoc DL(Node);
465 MVT VT = Node->getSimpleValueType(0);
467 switch (Opcode) {
468 case ISD::Constant: {
469 auto *ConstNode = cast<ConstantSDNode>(Node);
470 if (VT == XLenVT && ConstNode->isNullValue()) {
471 SDValue New =
472 CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
473 ReplaceNode(Node, New.getNode());
474 return;
475 }
476 int64_t Imm = ConstNode->getSExtValue();
477 // If the upper XLen-16 bits are not used, try to convert this to a simm12
478 // by sign extending bit 15.
479 if (isUInt<16>(Imm) && isInt<12>(SignExtend64(Imm, 16)) &&
480 hasAllHUsers(Node))
481 Imm = SignExtend64(Imm, 16);
482 // If the upper 32-bits are not used try to convert this into a simm32 by
483 // sign extending bit 32.
484 if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
485 Imm = SignExtend64(Imm, 32);
487 ReplaceNode(Node, selectImm(CurDAG, DL, Imm, *Subtarget));
488 return;
489 }
490 case ISD::FrameIndex: {
491 SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
492 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
493 SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
494 ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
495 return;
496 }
497 case ISD::SRL: {
498 // We don't need this transform if zext.h is supported.
499 if (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())
500 break;
501 // Optimize (srl (and X, 0xffff), C) ->
502 // (srli (slli X, (XLen-16)), (XLen-16) + C)
503 // Taking into account that the 0xffff may have had lower bits unset by
504 // SimplifyDemandedBits. This avoids materializing the 0xffff immediate.
505 // This pattern occurs when type legalizing i16 right shifts.
506 // FIXME: This could be extended to other AND masks.
507 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
508 if (N1C) {
509 uint64_t ShAmt = N1C->getZExtValue();
510 SDValue N0 = Node->getOperand(0);
511 if (ShAmt < 16 && N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
512 isa<ConstantSDNode>(N0.getOperand(1))) {
513 uint64_t Mask = N0.getConstantOperandVal(1);
514 Mask |= maskTrailingOnes<uint64_t>(ShAmt);
515 if (Mask == 0xffff) {
516 unsigned LShAmt = Subtarget->getXLen() - 16;
517 SDNode *SLLI =
518 CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
519 CurDAG->getTargetConstant(LShAmt, DL, VT));
520 SDNode *SRLI = CurDAG->getMachineNode(
521 RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
522 CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
523 ReplaceNode(Node, SRLI);
524 return;
525 }
526 }
527 }
529 break;
530 }
531 case ISD::AND: {
532 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
533 if (!N1C)
534 break;
536 SDValue N0 = Node->getOperand(0);
538 bool LeftShift = N0.getOpcode() == ISD::SHL;
539 if (!LeftShift && N0.getOpcode() != ISD::SRL)
540 break;
542 auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
543 if (!C)
544 break;
545 uint64_t C2 = C->getZExtValue();
546 unsigned XLen = Subtarget->getXLen();
547 if (!C2 || C2 >= XLen)
548 break;
550 uint64_t C1 = N1C->getZExtValue();
552 // Keep track of whether this is an andi, zext.h, or zext.w.
553 bool ZExtOrANDI = isInt<12>(N1C->getSExtValue());
554 if (C1 == UINT64_C(0xFFFF) &&
555 (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp()))
556 ZExtOrANDI = true;
557 if (C1 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba())
558 ZExtOrANDI = true;
560 // Clear irrelevant bits in the mask.
561 if (LeftShift)
562 C1 &= maskTrailingZeros<uint64_t>(C2);
563 else
564 C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
566 // Some transforms should only be done if the shift has a single use or
567 // the AND would become (srli (slli X, 32), 32)
568 bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
570 SDValue X = N0.getOperand(0);
572 // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
573 // with c3 leading zeros.
574 if (!LeftShift && isMask_64(C1)) {
575 uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
576 if (C2 < C3) {
577 // If the number of leading zeros is C2+32 this can be SRLIW.
578 if (C2 + 32 == C3) {
579 SDNode *SRLIW =
580 CurDAG->getMachineNode(RISCV::SRLIW, DL, XLenVT, X,
581 CurDAG->getTargetConstant(C2, DL, XLenVT));
582 ReplaceNode(Node, SRLIW);
583 return;
584 }
586 // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
587 // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
589 // This pattern occurs when (i32 (srl (sra 31), c3 - 32)) is type
590 // legalized and goes through DAG combine.
591 SDValue Y;
592 if (C2 >= 32 && (C3 - C2) == 1 && N0.hasOneUse() &&
593 selectSExti32(X, Y)) {
594 SDNode *SRAIW =
595 CurDAG->getMachineNode(RISCV::SRAIW, DL, XLenVT, Y,
596 CurDAG->getTargetConstant(31, DL, XLenVT));
597 SDNode *SRLIW = CurDAG->getMachineNode(
598 RISCV::SRLIW, DL, XLenVT, SDValue(SRAIW, 0),
599 CurDAG->getTargetConstant(C3 - 32, DL, XLenVT));
600 ReplaceNode(Node, SRLIW);
601 return;
602 }
604 // (srli (slli x, c3-c2), c3).
605 if (OneUseOrZExtW && !ZExtOrANDI) {
606 SDNode *SLLI = CurDAG->getMachineNode(
607 RISCV::SLLI, DL, XLenVT, X,
608 CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
609 SDNode *SRLI =
610 CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
611 CurDAG->getTargetConstant(C3, DL, XLenVT));
612 ReplaceNode(Node, SRLI);
613 return;
614 }
615 }
616 }
618 // Turn (and (shl x, c2) c1) -> (srli (slli c2+c3), c3) if c1 is a mask
619 // shifted by c2 bits with c3 leading zeros.
620 if (LeftShift && isShiftedMask_64(C1)) {
621 uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
623 if (C2 + C3 < XLen &&
624 C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + C3)) << C2)) {
625 // Use slli.uw when possible.
626 if ((XLen - (C2 + C3)) == 32 && Subtarget->hasStdExtZba()) {
627 SDNode *SLLIUW =
628 CurDAG->getMachineNode(RISCV::SLLIUW, DL, XLenVT, X,
629 CurDAG->getTargetConstant(C2, DL, XLenVT));
630 ReplaceNode(Node, SLLIUW);
631 return;
632 }
634 // (srli (slli c2+c3), c3)
635 if (OneUseOrZExtW && !ZExtOrANDI) {
636 SDNode *SLLI = CurDAG->getMachineNode(
637 RISCV::SLLI, DL, XLenVT, X,
638 CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
639 SDNode *SRLI =
640 CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
641 CurDAG->getTargetConstant(C3, DL, XLenVT));
642 ReplaceNode(Node, SRLI);
643 return;
644 }
645 }
646 }
648 break;
649 }
650 case ISD::INTRINSIC_WO_CHAIN: {
651 unsigned IntNo = Node->getConstantOperandVal(0);
652 switch (IntNo) {
653 // By default we do not custom select any intrinsic.
654 default:
655 break;
656 case Intrinsic::riscv_vmsgeu:
657 case Intrinsic::riscv_vmsge: {
658 SDValue Src1 = Node->getOperand(1);
659 SDValue Src2 = Node->getOperand(2);
660 // Only custom select scalar second operand.
661 if (Src2.getValueType() != XLenVT)
662 break;
663 // Small constants are handled with patterns.
664 if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
665 int64_t CVal = C->getSExtValue();
666 if (CVal >= -15 && CVal <= 16)
667 break;
669 bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
670 MVT Src1VT = Src1.getSimpleValueType();
671 unsigned VMSLTOpcode, VMNANDOpcode;
672 switch (RISCVTargetLowering::getLMUL(Src1VT)) {
673 default:
674 llvm_unreachable("Unexpected LMUL!");
675 case RISCVII::VLMUL::LMUL_F8:
676 VMSLTOpcode =
677 IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
678 VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
679 break;
680 case RISCVII::VLMUL::LMUL_F4:
681 VMSLTOpcode =
682 IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
683 VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
684 break;
685 case RISCVII::VLMUL::LMUL_F2:
686 VMSLTOpcode =
687 IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
688 VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
689 break;
690 case RISCVII::VLMUL::LMUL_1:
691 VMSLTOpcode =
692 IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
693 VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
694 break;
695 case RISCVII::VLMUL::LMUL_2:
696 VMSLTOpcode =
697 IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
698 VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
699 break;
700 case RISCVII::VLMUL::LMUL_4:
701 VMSLTOpcode =
702 IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
703 VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
704 break;
705 case RISCVII::VLMUL::LMUL_8:
706 VMSLTOpcode =
707 IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
708 VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
709 break;
710 }
711 SDValue SEW = CurDAG->getTargetConstant(
712 Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
713 SDValue VL;
714 selectVLOp(Node->getOperand(3), VL);
716 // Expand to
717 // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
718 SDValue Cmp = SDValue(
719 CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
720 0);
721 ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
722 {Cmp, Cmp, VL, SEW}));
723 return;
724 }
725 case Intrinsic::riscv_vmsgeu_mask:
726 case Intrinsic::riscv_vmsge_mask: {
727 SDValue Src1 = Node->getOperand(2);
728 SDValue Src2 = Node->getOperand(3);
729 // Only custom select scalar second operand.
730 if (Src2.getValueType() != XLenVT)
731 break;
732 // Small constants are handled with patterns.
733 if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
734 int64_t CVal = C->getSExtValue();
735 if (CVal >= -15 && CVal <= 16)
736 break;
737 }
738 bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
739 MVT Src1VT = Src1.getSimpleValueType();
740 unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode;
741 switch (RISCVTargetLowering::getLMUL(Src1VT)) {
742 default:
743 llvm_unreachable("Unexpected LMUL!");
744 case RISCVII::VLMUL::LMUL_F8:
745 VMSLTOpcode =
746 IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
747 VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
748 : RISCV::PseudoVMSLT_VX_MF8_MASK;
749 break;
750 case RISCVII::VLMUL::LMUL_F4:
751 VMSLTOpcode =
752 IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
753 VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
754 : RISCV::PseudoVMSLT_VX_MF4_MASK;
755 break;
756 case RISCVII::VLMUL::LMUL_F2:
757 VMSLTOpcode =
758 IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
759 VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
760 : RISCV::PseudoVMSLT_VX_MF2_MASK;
761 break;
762 case RISCVII::VLMUL::LMUL_1:
763 VMSLTOpcode =
764 IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
765 VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
766 : RISCV::PseudoVMSLT_VX_M1_MASK;
767 break;
768 case RISCVII::VLMUL::LMUL_2:
769 VMSLTOpcode =
770 IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
771 VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
772 : RISCV::PseudoVMSLT_VX_M2_MASK;
773 break;
774 case RISCVII::VLMUL::LMUL_4:
775 VMSLTOpcode =
776 IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
777 VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
778 : RISCV::PseudoVMSLT_VX_M4_MASK;
779 break;
780 case RISCVII::VLMUL::LMUL_8:
781 VMSLTOpcode =
782 IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
783 VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
784 : RISCV::PseudoVMSLT_VX_M8_MASK;
785 break;
786 }
787 // Mask operations use the LMUL from the mask type.
788 switch (RISCVTargetLowering::getLMUL(VT)) {
789 default:
790 llvm_unreachable("Unexpected LMUL!");
791 case RISCVII::VLMUL::LMUL_F8:
792 VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
793 VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
794 break;
795 case RISCVII::VLMUL::LMUL_F4:
796 VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
797 VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
798 break;
799 case RISCVII::VLMUL::LMUL_F2:
800 VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
801 VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
802 break;
803 case RISCVII::VLMUL::LMUL_1:
804 VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
805 VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
806 break;
807 case RISCVII::VLMUL::LMUL_2:
808 VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
809 VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
810 break;
811 case RISCVII::VLMUL::LMUL_4:
812 VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
813 VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
814 break;
815 case RISCVII::VLMUL::LMUL_8:
816 VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
817 VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
818 break;
819 }
820 SDValue SEW = CurDAG->getTargetConstant(
821 Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
822 SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
823 SDValue VL;
824 selectVLOp(Node->getOperand(5), VL);
825 SDValue MaskedOff = Node->getOperand(1);
826 SDValue Mask = Node->getOperand(4);
827 // If the MaskedOff value and the Mask are the same value use
828 // vmslt{u}.vx vt, va, x; vmandnot.mm vd, vd, vt
829 // This avoids needing to copy v0 to vd before starting the next sequence.
830 if (Mask == MaskedOff) {
831 SDValue Cmp = SDValue(
832 CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
833 0);
834 ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
835 {Mask, Cmp, VL, MaskSEW}));
836 return;
837 }
839 // Mask needs to be copied to V0.
840 SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
841 RISCV::V0, Mask, SDValue());
842 SDValue Glue = Chain.getValue(1);
843 SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
845 // Otherwise use
846 // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
847 SDValue Cmp = SDValue(
848 CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
849 {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
850 0);
851 ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
852 {Cmp, Mask, VL, MaskSEW}));
853 return;
854 }
855 }
856 break;
857 }
858 case ISD::INTRINSIC_W_CHAIN: {
859 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
860 switch (IntNo) {
861 // By default we do not custom select any intrinsic.
862 default:
863 break;
865 case Intrinsic::riscv_vsetvli:
866 case Intrinsic::riscv_vsetvlimax: {
867 if (!Subtarget->hasStdExtV())
868 break;
870 bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
871 unsigned Offset = VLMax ? 2 : 3;
873 assert(Node->getNumOperands() == Offset + 2 &&
874 "Unexpected number of operands");
876 unsigned SEW =
877 RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
878 RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
879 Node->getConstantOperandVal(Offset + 1) & 0x7);
881 unsigned VTypeI = RISCVVType::encodeVTYPE(
882 VLMul, SEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
883 SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
885 SDValue VLOperand;
886 if (VLMax) {
887 VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
888 } else {
889 VLOperand = Node->getOperand(2);
891 if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
892 uint64_t AVL = C->getZExtValue();
893 if (isUInt<5>(AVL)) {
894 SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
895 ReplaceNode(
896 Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, XLenVT,
897 MVT::Other, VLImm, VTypeIOp,
898 /* Chain */ Node->getOperand(0)));
899 return;
900 }
901 }
902 }
904 ReplaceNode(Node,
905 CurDAG->getMachineNode(RISCV::PseudoVSETVLI, DL, XLenVT,
906 MVT::Other, VLOperand, VTypeIOp,
907 /* Chain */ Node->getOperand(0)));
908 return;
909 }
910 case Intrinsic::riscv_vlseg2:
911 case Intrinsic::riscv_vlseg3:
912 case Intrinsic::riscv_vlseg4:
913 case Intrinsic::riscv_vlseg5:
914 case Intrinsic::riscv_vlseg6:
915 case Intrinsic::riscv_vlseg7:
916 case Intrinsic::riscv_vlseg8: {
917 selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
918 return;
919 }
920 case Intrinsic::riscv_vlseg2_mask:
921 case Intrinsic::riscv_vlseg3_mask:
922 case Intrinsic::riscv_vlseg4_mask:
923 case Intrinsic::riscv_vlseg5_mask:
924 case Intrinsic::riscv_vlseg6_mask:
925 case Intrinsic::riscv_vlseg7_mask:
926 case Intrinsic::riscv_vlseg8_mask: {
927 selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
928 return;
929 }
930 case Intrinsic::riscv_vlsseg2:
931 case Intrinsic::riscv_vlsseg3:
932 case Intrinsic::riscv_vlsseg4:
933 case Intrinsic::riscv_vlsseg5:
934 case Intrinsic::riscv_vlsseg6:
935 case Intrinsic::riscv_vlsseg7:
936 case Intrinsic::riscv_vlsseg8: {
937 selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
938 return;
939 }
940 case Intrinsic::riscv_vlsseg2_mask:
941 case Intrinsic::riscv_vlsseg3_mask:
942 case Intrinsic::riscv_vlsseg4_mask:
943 case Intrinsic::riscv_vlsseg5_mask:
944 case Intrinsic::riscv_vlsseg6_mask:
945 case Intrinsic::riscv_vlsseg7_mask:
946 case Intrinsic::riscv_vlsseg8_mask: {
947 selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
948 return;
949 }
950 case Intrinsic::riscv_vloxseg2:
951 case Intrinsic::riscv_vloxseg3:
952 case Intrinsic::riscv_vloxseg4:
953 case Intrinsic::riscv_vloxseg5:
954 case Intrinsic::riscv_vloxseg6:
955 case Intrinsic::riscv_vloxseg7:
956 case Intrinsic::riscv_vloxseg8:
957 selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
958 return;
959 case Intrinsic::riscv_vluxseg2:
960 case Intrinsic::riscv_vluxseg3:
961 case Intrinsic::riscv_vluxseg4:
962 case Intrinsic::riscv_vluxseg5:
963 case Intrinsic::riscv_vluxseg6:
964 case Intrinsic::riscv_vluxseg7:
965 case Intrinsic::riscv_vluxseg8:
966 selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
967 return;
968 case Intrinsic::riscv_vloxseg2_mask:
969 case Intrinsic::riscv_vloxseg3_mask:
970 case Intrinsic::riscv_vloxseg4_mask:
971 case Intrinsic::riscv_vloxseg5_mask:
972 case Intrinsic::riscv_vloxseg6_mask:
973 case Intrinsic::riscv_vloxseg7_mask:
974 case Intrinsic::riscv_vloxseg8_mask:
975 selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
976 return;
977 case Intrinsic::riscv_vluxseg2_mask:
978 case Intrinsic::riscv_vluxseg3_mask:
979 case Intrinsic::riscv_vluxseg4_mask:
980 case Intrinsic::riscv_vluxseg5_mask:
981 case Intrinsic::riscv_vluxseg6_mask:
982 case Intrinsic::riscv_vluxseg7_mask:
983 case Intrinsic::riscv_vluxseg8_mask:
984 selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
985 return;
986 case Intrinsic::riscv_vlseg8ff:
987 case Intrinsic::riscv_vlseg7ff:
988 case Intrinsic::riscv_vlseg6ff:
989 case Intrinsic::riscv_vlseg5ff:
990 case Intrinsic::riscv_vlseg4ff:
991 case Intrinsic::riscv_vlseg3ff:
992 case Intrinsic::riscv_vlseg2ff: {
993 selectVLSEGFF(Node, /*IsMasked*/ false);
994 return;
995 }
996 case Intrinsic::riscv_vlseg8ff_mask:
997 case Intrinsic::riscv_vlseg7ff_mask:
998 case Intrinsic::riscv_vlseg6ff_mask:
999 case Intrinsic::riscv_vlseg5ff_mask:
1000 case Intrinsic::riscv_vlseg4ff_mask:
1001 case Intrinsic::riscv_vlseg3ff_mask:
1002 case Intrinsic::riscv_vlseg2ff_mask: {
1003 selectVLSEGFF(Node, /*IsMasked*/ true);
1004 return;
1005 }
1006 case Intrinsic::riscv_vloxei:
1007 case Intrinsic::riscv_vloxei_mask:
1008 case Intrinsic::riscv_vluxei:
1009 case Intrinsic::riscv_vluxei_mask: {
1010 bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
1011 IntNo == Intrinsic::riscv_vluxei_mask;
1012 bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
1013 IntNo == Intrinsic::riscv_vloxei_mask;
1015 MVT VT = Node->getSimpleValueType(0);
1016 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1018 unsigned CurOp = 2;
1019 SmallVector<SDValue, 8> Operands;
1020 if (IsMasked)
1021 Operands.push_back(Node->getOperand(CurOp++));
1023 MVT IndexVT;
1024 addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1025 /*IsStridedOrIndexed*/ true, Operands,
1026 &IndexVT);
1028 assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1029 "Element count mismatch");
1031 RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1032 RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1033 unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1034 const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
1035 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1036 static_cast<unsigned>(IndexLMUL));
1037 MachineSDNode *Load =
1038 CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1040 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1041 CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1043 ReplaceNode(Node, Load);
1044 return;
1045 }
1046 case Intrinsic::riscv_vle1:
1047 case Intrinsic::riscv_vle:
1048 case Intrinsic::riscv_vle_mask:
1049 case Intrinsic::riscv_vlse:
1050 case Intrinsic::riscv_vlse_mask: {
1051 bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
1052 IntNo == Intrinsic::riscv_vlse_mask;
1053 bool IsStrided =
1054 IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
1056 MVT VT = Node->getSimpleValueType(0);
1057 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1059 unsigned CurOp = 2;
1060 SmallVector<SDValue, 8> Operands;
1061 if (IsMasked)
1062 Operands.push_back(Node->getOperand(CurOp++));
1064 addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1065 Operands);
1067 RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1068 const RISCV::VLEPseudo *P =
1069 RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
1070 static_cast<unsigned>(LMUL));
1071 MachineSDNode *Load =
1072 CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1074 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1075 CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1077 ReplaceNode(Node, Load);
1078 return;
1079 }
1080 case Intrinsic::riscv_vleff:
1081 case Intrinsic::riscv_vleff_mask: {
1082 bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
1084 MVT VT = Node->getSimpleValueType(0);
1085 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1087 unsigned CurOp = 2;
1088 SmallVector<SDValue, 7> Operands;
1089 if (IsMasked)
1090 Operands.push_back(Node->getOperand(CurOp++));
1092 addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1093 /*IsStridedOrIndexed*/ false, Operands);
1095 RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1096 const RISCV::VLEPseudo *P =
1097 RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW,
1098 static_cast<unsigned>(LMUL));
1099 MachineSDNode *Load =
1100 CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
1101 MVT::Other, MVT::Glue, Operands);
1102 SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
1103 /*Glue*/ SDValue(Load, 2));
1105 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1106 CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1108 ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
1109 ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
1110 ReplaceUses(SDValue(Node, 2), SDValue(Load, 1)); // Chain
1111 CurDAG->RemoveDeadNode(Node);
1112 return;
1113 }
1114 }
1115 break;
1116 }
1117 case ISD::INTRINSIC_VOID: {
1118 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1119 switch (IntNo) {
1120 case Intrinsic::riscv_vsseg2:
1121 case Intrinsic::riscv_vsseg3:
1122 case Intrinsic::riscv_vsseg4:
1123 case Intrinsic::riscv_vsseg5:
1124 case Intrinsic::riscv_vsseg6:
1125 case Intrinsic::riscv_vsseg7:
1126 case Intrinsic::riscv_vsseg8: {
1127 selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
1128 return;
1129 }
1130 case Intrinsic::riscv_vsseg2_mask:
1131 case Intrinsic::riscv_vsseg3_mask:
1132 case Intrinsic::riscv_vsseg4_mask:
1133 case Intrinsic::riscv_vsseg5_mask:
1134 case Intrinsic::riscv_vsseg6_mask:
1135 case Intrinsic::riscv_vsseg7_mask:
1136 case Intrinsic::riscv_vsseg8_mask: {
1137 selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1138 return;
1139 }
1140 case Intrinsic::riscv_vssseg2:
1141 case Intrinsic::riscv_vssseg3:
1142 case Intrinsic::riscv_vssseg4:
1143 case Intrinsic::riscv_vssseg5:
1144 case Intrinsic::riscv_vssseg6:
1145 case Intrinsic::riscv_vssseg7:
1146 case Intrinsic::riscv_vssseg8: {
1147 selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1148 return;
1149 }
1150 case Intrinsic::riscv_vssseg2_mask:
1151 case Intrinsic::riscv_vssseg3_mask:
1152 case Intrinsic::riscv_vssseg4_mask:
1153 case Intrinsic::riscv_vssseg5_mask:
1154 case Intrinsic::riscv_vssseg6_mask:
1155 case Intrinsic::riscv_vssseg7_mask:
1156 case Intrinsic::riscv_vssseg8_mask: {
1157 selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1158 return;
1159 }
1160 case Intrinsic::riscv_vsoxseg2:
1161 case Intrinsic::riscv_vsoxseg3:
1162 case Intrinsic::riscv_vsoxseg4:
1163 case Intrinsic::riscv_vsoxseg5:
1164 case Intrinsic::riscv_vsoxseg6:
1165 case Intrinsic::riscv_vsoxseg7:
1166 case Intrinsic::riscv_vsoxseg8:
1167 selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1168 return;
1169 case Intrinsic::riscv_vsuxseg2:
1170 case Intrinsic::riscv_vsuxseg3:
1171 case Intrinsic::riscv_vsuxseg4:
1172 case Intrinsic::riscv_vsuxseg5:
1173 case Intrinsic::riscv_vsuxseg6:
1174 case Intrinsic::riscv_vsuxseg7:
1175 case Intrinsic::riscv_vsuxseg8:
1176 selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1177 return;
1178 case Intrinsic::riscv_vsoxseg2_mask:
1179 case Intrinsic::riscv_vsoxseg3_mask:
1180 case Intrinsic::riscv_vsoxseg4_mask:
1181 case Intrinsic::riscv_vsoxseg5_mask:
1182 case Intrinsic::riscv_vsoxseg6_mask:
1183 case Intrinsic::riscv_vsoxseg7_mask:
1184 case Intrinsic::riscv_vsoxseg8_mask:
1185 selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1186 return;
1187 case Intrinsic::riscv_vsuxseg2_mask:
1188 case Intrinsic::riscv_vsuxseg3_mask:
1189 case Intrinsic::riscv_vsuxseg4_mask:
1190 case Intrinsic::riscv_vsuxseg5_mask:
1191 case Intrinsic::riscv_vsuxseg6_mask:
1192 case Intrinsic::riscv_vsuxseg7_mask:
1193 case Intrinsic::riscv_vsuxseg8_mask:
1194 selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1195 return;
1196 case Intrinsic::riscv_vsoxei:
1197 case Intrinsic::riscv_vsoxei_mask:
1198 case Intrinsic::riscv_vsuxei:
1199 case Intrinsic::riscv_vsuxei_mask: {
1200 bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
1201 IntNo == Intrinsic::riscv_vsuxei_mask;
1202 bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
1203 IntNo == Intrinsic::riscv_vsoxei_mask;
1205 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1206 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1208 unsigned CurOp = 2;
1209 SmallVector<SDValue, 8> Operands;
1210 Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1212 MVT IndexVT;
1213 addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1214 /*IsStridedOrIndexed*/ true, Operands,
1215 &IndexVT);
1217 assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1218 "Element count mismatch");
1220 RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1221 RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1222 unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1223 const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
1224 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1225 static_cast<unsigned>(IndexLMUL));
1226 MachineSDNode *Store =
1227 CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1229 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1230 CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1232 ReplaceNode(Node, Store);
1233 return;
1234 }
1235 case Intrinsic::riscv_vse1:
1236 case Intrinsic::riscv_vse:
1237 case Intrinsic::riscv_vse_mask:
1238 case Intrinsic::riscv_vsse:
1239 case Intrinsic::riscv_vsse_mask: {
1240 bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1241 IntNo == Intrinsic::riscv_vsse_mask;
1242 bool IsStrided =
1243 IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1245 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1246 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1248 unsigned CurOp = 2;
1249 SmallVector<SDValue, 8> Operands;
1250 Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1252 addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1253 Operands);
1255 RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1256 const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1257 IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
1258 MachineSDNode *Store =
1259 CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1260 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1261 CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1263 ReplaceNode(Node, Store);
1264 return;
1265 }
1266 }
1267 break;
1268 }
1269 case ISD::BITCAST: {
1270 MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1271 // Just drop bitcasts between vectors if both are fixed or both are
1272 // scalable.
1273 if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1274 (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1275 ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1276 CurDAG->RemoveDeadNode(Node);
1277 return;
1278 }
1279 break;
1280 }
1281 case ISD::INSERT_SUBVECTOR: {
1282 SDValue V = Node->getOperand(0);
1283 SDValue SubV = Node->getOperand(1);
1284 SDLoc DL(SubV);
1285 auto Idx = Node->getConstantOperandVal(2);
1286 MVT SubVecVT = SubV.getSimpleValueType();
1288 const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1289 MVT SubVecContainerVT = SubVecVT;
1290 // Establish the correct scalable-vector types for any fixed-length type.
1291 if (SubVecVT.isFixedLengthVector())
1292 SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1293 if (VT.isFixedLengthVector())
1294 VT = TLI.getContainerForFixedLengthVector(VT);
1296 const auto *TRI = Subtarget->getRegisterInfo();
1297 unsigned SubRegIdx;
1298 std::tie(SubRegIdx, Idx) =
1299 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1300 VT, SubVecContainerVT, Idx, TRI);
1302 // If the Idx hasn't been completely eliminated then this is a subvector
1303 // insert which doesn't naturally align to a vector register. These must
1304 // be handled using instructions to manipulate the vector registers.
1305 if (Idx != 0)
1306 break;
1308 RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1309 bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1310 SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1311 SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1312 (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1313 assert((!IsSubVecPartReg || V.isUndef()) &&
1314 "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1315 "the subvector is smaller than a full-sized register");
1317 // If we haven't set a SubRegIdx, then we must be going between
1318 // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1319 if (SubRegIdx == RISCV::NoSubRegister) {
1320 unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1321 assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1322 InRegClassID &&
1323 "Unexpected subvector extraction");
1324 SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1325 SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1326 DL, VT, SubV, RC);
1327 ReplaceNode(Node, NewNode);
1328 return;
1329 }
1331 SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1332 ReplaceNode(Node, Insert.getNode());
1333 return;
1334 }
1335 case ISD::EXTRACT_SUBVECTOR: {
1336 SDValue V = Node->getOperand(0);
1337 auto Idx = Node->getConstantOperandVal(1);
1338 MVT InVT = V.getSimpleValueType();
1339 SDLoc DL(V);
1341 const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1342 MVT SubVecContainerVT = VT;
1343 // Establish the correct scalable-vector types for any fixed-length type.
1344 if (VT.isFixedLengthVector())
1345 SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1346 if (InVT.isFixedLengthVector())
1347 InVT = TLI.getContainerForFixedLengthVector(InVT);
1349 const auto *TRI = Subtarget->getRegisterInfo();
1350 unsigned SubRegIdx;
1351 std::tie(SubRegIdx, Idx) =
1352 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1353 InVT, SubVecContainerVT, Idx, TRI);
1355 // If the Idx hasn't been completely eliminated then this is a subvector
1356 // extract which doesn't naturally align to a vector register. These must
1357 // be handled using instructions to manipulate the vector registers.
1358 if (Idx != 0)
1359 break;
1361 // If we haven't set a SubRegIdx, then we must be going between
1362 // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1363 if (SubRegIdx == RISCV::NoSubRegister) {
1364 unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1365 assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1366 InRegClassID &&
1367 "Unexpected subvector extraction");
1368 SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1369 SDNode *NewNode =
1370 CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1371 ReplaceNode(Node, NewNode);
1372 return;
1373 }
1375 SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1376 ReplaceNode(Node, Extract.getNode());
1377 return;
1378 }
1379 case RISCVISD::VMV_V_X_VL:
1380 case RISCVISD::VFMV_V_F_VL: {
1381 // Try to match splat of a scalar load to a strided load with stride of x0.
1382 SDValue Src = Node->getOperand(0);
1383 auto *Ld = dyn_cast<LoadSDNode>(Src);
1384 if (!Ld)
1385 break;
1386 EVT MemVT = Ld->getMemoryVT();
1387 // The memory VT should be the same size as the element type.
1388 if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1389 break;
1390 if (!IsProfitableToFold(Src, Node, Node) ||
1391 !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1392 break;
1394 SDValue VL;
1395 selectVLOp(Node->getOperand(1), VL);
1397 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1398 SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1400 SDValue Operands[] = {Ld->getBasePtr(),
1401 CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1402 Ld->getChain()};
1404 RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1405 const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1406 /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, Log2SEW,
1407 static_cast<unsigned>(LMUL));
1408 MachineSDNode *Load =
1409 CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1411 if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1412 CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1414 ReplaceNode(Node, Load);
1415 return;
1416 }
1417 }
1419 // Select the default instruction.
1420 SelectCode(Node);
1421 }
1423 bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
1424 const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1425 switch (ConstraintID) {
1426 case InlineAsm::Constraint_m:
1427 // We just support simple memory operands that have a single address
1428 // operand and need no special handling.
1429 OutOps.push_back(Op);
1430 return false;
1431 case InlineAsm::Constraint_A:
1432 OutOps.push_back(Op);
1433 return false;
1434 default:
1435 break;
1436 }
1438 return true;
1439 }
1441 bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
1442 if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1443 Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1444 return true;
1445 }
1446 return false;
1447 }
1449 bool RISCVDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
1450 // If this is FrameIndex, select it directly. Otherwise just let it get
1451 // selected to a register independently.
1452 if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1453 Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1454 else
1455 Base = Addr;
1456 return true;
1457 }
1459 bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
1460 SDValue &ShAmt) {
1461 // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1462 // amount. If there is an AND on the shift amount, we can bypass it if it
1463 // doesn't affect any of those bits.
1464 if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1465 const APInt &AndMask = N->getConstantOperandAPInt(1);
1467 // Since the max shift amount is a power of 2 we can subtract 1 to make a
1468 // mask that covers the bits needed to represent all shift amounts.
1469 assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1470 APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1472 if (ShMask.isSubsetOf(AndMask)) {
1473 ShAmt = N.getOperand(0);
1474 return true;
1475 }
1477 // SimplifyDemandedBits may have optimized the mask so try restoring any
1478 // bits that are known zero.
1479 KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1480 if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1481 ShAmt = N.getOperand(0);
1482 return true;
1483 }
1484 }
1486 ShAmt = N;
1487 return true;
1488 }
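// Match a value whose bits above bit 31 are sign-extension copies of bit 31,
// either via an explicit sign_extend_inreg from i32 or as proven by
// ComputeNumSignBits, so it can be used where a sign-extended i32 is needed.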
1490 bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
1491 if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1492 cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1493 Val = N.getOperand(0);
1494 return true;
1495 }
1496 MVT VT = N.getSimpleValueType();
1497 if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
1498 Val = N;
1499 return true;
1500 }
1502 return false;
1503 }
1505 bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
1506 if (N.getOpcode() == ISD::AND) {
1507 auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
1508 if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1509 Val = N.getOperand(0);
1510 return true;
1511 }
1512 }
1513 MVT VT = N.getSimpleValueType();
1514 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
1515 if (CurDAG->MaskedValueIsZero(N, Mask)) {
1516 Val = N;
1517 return true;
1518 }
1520 return false;
1521 }
1523 // Return true if all users of this SDNode* only consume the lower \p Bits.
1524 // This can be used to form W instructions for add/sub/mul/shl even when the
1525 // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
1526 // SimplifyDemandedBits has made it so some users see a sext_inreg and some
1527 // don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
1528 // the add/sub/mul/shl to become non-W instructions. By checking the users we
1529 // may be able to use a W instruction and CSE with the other instruction if
1530 // this has happened. We could try to detect that the CSE opportunity exists
1531 // before doing this, but that would be more complicated.
1532 // TODO: Does this need to look through AND/OR/XOR to their users to find more
1533 // opportunities?
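// For example, an i64 ISD::ADD whose only users are SW stores or *W
// arithmetic instructions (which read just the low 32 bits) can safely be
// selected as ADDW.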
1534 bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
1535 assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
1536 Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
1537 isa<ConstantSDNode>(Node)) &&
1538 "Unexpected opcode");
1540 for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
1541 SDNode *User = *UI;
1542 // Users of this node should have already been instruction selected
1543 if (!User->isMachineOpcode())
1544 return false;
1546 // TODO: Add more opcodes?
1547 switch (User->getMachineOpcode()) {
1548 default:
1549 return false;
1550 case RISCV::ADDW:
1551 case RISCV::ADDIW:
1552 case RISCV::SUBW:
1553 case RISCV::MULW:
1554 case RISCV::SLLW:
1555 case RISCV::SLLIW:
1556 case RISCV::SRAW:
1557 case RISCV::SRAIW:
1558 case RISCV::SRLW:
1559 case RISCV::SRLIW:
1560 case RISCV::DIVW:
1561 case RISCV::DIVUW:
1562 case RISCV::REMW:
1563 case RISCV::REMUW:
1564 case RISCV::ROLW:
1565 case RISCV::RORW:
1566 case RISCV::RORIW:
1567 case RISCV::CLZW:
1568 case RISCV::CTZW:
1569 case RISCV::CPOPW:
1570 case RISCV::SLLIUW:
1571 if (Bits < 32)
1572 return false;
1573 break;
1574 case RISCV::SLLI:
1575 // SLLI only uses the lower (XLen - ShAmt) bits.
1576 if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
1577 return false;
1578 break;
1579 case RISCV::ADDUW:
1580 case RISCV::SH1ADDUW:
1581 case RISCV::SH2ADDUW:
1582 case RISCV::SH3ADDUW:
1583 // The first operand to add.uw/shXadd.uw is implicitly zero extended from
1584 // 32 bits.
1585 if (UI.getOperandNo() != 0 || Bits < 32)
1586 return false;
1587 break;
1588 case RISCV::SB:
1589 if (UI.getOperandNo() != 0 || Bits < 8)
1590 return false;
1591 break;
1592 case RISCV::SH:
1593 if (UI.getOperandNo() != 0 || Bits < 16)
1594 return false;
1595 break;
1596 case RISCV::SW:
1597 if (UI.getOperandNo() != 0 || Bits < 32)
1598 return false;
1599 break;
1600 }
1601 }
1603 return true;
1604 }
1606 // Select VL as a 5 bit immediate or a value that will become a register. This
1607 // allows us to choose between VSETIVLI or VSETVLI later.
bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
  auto *C = dyn_cast<ConstantSDNode>(N);
  if (C && isUInt<5>(C->getZExtValue()))
    VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
                                   N->getValueType(0));
  else
    VL = N;

  return true;
}
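
// Match a splat of a scalar: ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 or
// RISCVISD::VMV_V_X_VL. On success, SplatVal is the scalar being splatted.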
bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
  if (N.getOpcode() != ISD::SPLAT_VECTOR &&
      N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
      N.getOpcode() != RISCVISD::VMV_V_X_VL)
    return false;
  SplatVal = N.getOperand(0);
  return true;
}

using ValidateFn = bool (*)(int64_t);
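
// Shared helper for the selectVSplatSimm5* predicates below: match a constant
// splat whose immediate, truncated to the vector element type and then
// sign-extended, satisfies ValidateImm.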
static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
                                   SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget,
                                   ValidateFn ValidateImm) {
  if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
       N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
      !isa<ConstantSDNode>(N.getOperand(0)))
    return false;

  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();

  // ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 and RISCVISD::VMV_V_X_VL
  // share semantics when the operand type is wider than the resulting vector
  // element type: an implicit truncation first takes place. Therefore, perform
  // a manual truncation/sign-extension in order to ignore any truncated bits
  // and catch any zero-extended immediate.
  // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
  // sign-extending to (XLenVT -1).
  MVT XLenVT = Subtarget.getXLenVT();
  assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
         "Unexpected splat operand type");
  MVT EltVT = N.getSimpleValueType().getVectorElementType();
  if (EltVT.bitsLT(XLenVT))
    SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());

  if (!ValidateImm(SplatImm))
    return false;

  SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
  return true;
}

bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
  return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
                                [](int64_t Imm) { return isInt<5>(Imm); });
}
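
// Accept splat immediates in the range [-15, 16], i.e. values of the form
// simm5 + 1 (hence the "Plus1" in the name). The patterns using this
// predicate are presumably responsible for folding the off-by-one adjustment
// into the instruction they emit.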
bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
  return selectVSplatSimmHelper(
      N, SplatVal, *CurDAG, *Subtarget,
      [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
}
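
// Same as selectVSplatSimm5Plus1, but additionally reject a zero immediate.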
bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
                                                      SDValue &SplatVal) {
  return selectVSplatSimmHelper(
      N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
        return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
      });
}
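
// Match a constant splat whose immediate is an unsigned 5-bit value (uimm5).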
bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
  if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
       N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
      !isa<ConstantSDNode>(N.getOperand(0)))
    return false;

  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();

  if (!isUInt<5>(SplatImm))
    return false;

  SplatVal =
      CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());

  return true;
}
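
// Match a scalar constant that is a simm5 once sign-extended from Width bits.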
bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
                                       SDValue &Imm) {
  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
    int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);

    if (!isInt<5>(ImmVal))
      return false;

    Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
    return true;
  }

  return false;
}

// Merge an ADDI into the offset of a load/store instruction where possible.
// (load (addi base, off1), off2) -> (load base, off1+off2)
// (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
// This is possible when off1+off2 fits a 12-bit signed immediate.
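// For example, (LW (ADDI base, 8), 4) becomes (LW base, 12).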
bool RISCVDAGToDAGISel::doPeepholeLoadStoreADDI(SDNode *N) {
  int OffsetOpIdx;
  int BaseOpIdx;

  // Only attempt this optimisation for I-type loads and S-type stores.
  switch (N->getMachineOpcode()) {
  default:
    return false;
  case RISCV::LB:
  case RISCV::LH:
  case RISCV::LW:
  case RISCV::LBU:
  case RISCV::LHU:
  case RISCV::LWU:
  case RISCV::LD:
  case RISCV::FLH:
  case RISCV::FLW:
  case RISCV::FLD:
    BaseOpIdx = 0;
    OffsetOpIdx = 1;
    break;
  case RISCV::SB:
  case RISCV::SH:
  case RISCV::SW:
  case RISCV::SD:
  case RISCV::FSH:
  case RISCV::FSW:
  case RISCV::FSD:
    BaseOpIdx = 1;
    OffsetOpIdx = 2;
    break;
  }

  if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
    return false;

  SDValue Base = N->getOperand(BaseOpIdx);

  // If the base is an ADDI, we can merge it into the load/store.
  if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
    return false;

  SDValue ImmOperand = Base.getOperand(1);
  uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);

  if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
    int64_t Offset1 = Const->getSExtValue();
    int64_t CombinedOffset = Offset1 + Offset2;
    if (!isInt<12>(CombinedOffset))
      return false;
    ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
                                           ImmOperand.getValueType());
  } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
    // If the off1 in (addi base, off1) is a global variable's address (its
    // low part, really), then we can rely on the alignment of that variable
    // to provide a margin of safety before off1 can overflow the 12 bits.
    // Check if off2 falls within that margin; if so off1+off2 can't overflow.
    const DataLayout &DL = CurDAG->getDataLayout();
    Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
    if (Offset2 != 0 && Alignment <= Offset2)
      return false;
    int64_t Offset1 = GA->getOffset();
    int64_t CombinedOffset = Offset1 + Offset2;
    ImmOperand = CurDAG->getTargetGlobalAddress(
        GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
        CombinedOffset, GA->getTargetFlags());
  } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
    // Ditto for constant pool addresses.
    Align Alignment = CP->getAlign();
    if (Offset2 != 0 && Alignment <= Offset2)
      return false;
    int64_t Offset1 = CP->getOffset();
    int64_t CombinedOffset = Offset1 + Offset2;
    ImmOperand = CurDAG->getTargetConstantPool(
        CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
        CombinedOffset, CP->getTargetFlags());
  } else {
    return false;
  }

  LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");
  LLVM_DEBUG(Base->dump(CurDAG));
  LLVM_DEBUG(dbgs() << "\nN: ");
  LLVM_DEBUG(N->dump(CurDAG));
  LLVM_DEBUG(dbgs() << "\n");

  // Modify the offset operand of the load/store.
  if (BaseOpIdx == 0) // Load
    CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
                               N->getOperand(2));
  else // Store
    CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
                               ImmOperand, N->getOperand(3));

  return true;
}

// Try to remove sext.w if the input is a W instruction or can be made into
// a W instruction cheaply.
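// For example, (ADDIW (ADD X, Y), 0) becomes (ADDW X, Y), and the ADDIW is
// removed entirely when its input is already a W instruction such as ADDW.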
bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
  // Look for the sext.w pattern, addiw rd, rs1, 0.
  if (N->getMachineOpcode() != RISCV::ADDIW ||
      !isNullConstant(N->getOperand(1)))
    return false;

  SDValue N0 = N->getOperand(0);
  if (!N0.isMachineOpcode())
    return false;

  switch (N0.getMachineOpcode()) {
  default:
    break;
  case RISCV::ADD:
  case RISCV::ADDI:
  case RISCV::SUB:
  case RISCV::MUL:
  case RISCV::SLLI: {
    // Convert sext.w+add/addi/sub/mul/slli to their W instructions. This will
    // create a new independent instruction. This improves latency.
    unsigned Opc;
    switch (N0.getMachineOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode!");
    case RISCV::ADD:  Opc = RISCV::ADDW;  break;
    case RISCV::ADDI: Opc = RISCV::ADDIW; break;
    case RISCV::SUB:  Opc = RISCV::SUBW;  break;
    case RISCV::MUL:  Opc = RISCV::MULW;  break;
    case RISCV::SLLI: Opc = RISCV::SLLIW; break;
    }

    SDValue N00 = N0.getOperand(0);
    SDValue N01 = N0.getOperand(1);

    // Shift amount needs to be uimm5.
    if (N0.getMachineOpcode() == RISCV::SLLI &&
        !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
      break;

    SDNode *Result =
        CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
                               N00, N01);
    ReplaceUses(N, Result);
    return true;
  }
  case RISCV::ADDW:
  case RISCV::ADDIW:
  case RISCV::SUBW:
  case RISCV::MULW:
  case RISCV::SLLIW:
    // The result is already sign extended; just remove the sext.w.
    // NOTE: We only handle the nodes that are selected with hasAllWUsers.
    ReplaceUses(N, N0.getNode());
    return true;
  }

  return false;
}

// This pass converts a legalized DAG into a RISCV-specific DAG, ready
// for instruction scheduling.
FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
  return new RISCVDAGToDAGISel(TM);
}