// Source: llvm-project, llvm/lib/Target/Hexagon/HexagonISelLowering.h
// (blob d518c036f1250fe55083d0fb63001bab3a604a7f)
//===-- HexagonISelLowering.h - Hexagon DAG Lowering Interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Hexagon uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H

#include "Hexagon.h"
#include "MCTargetDesc/HexagonMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/MachineValueType.h"
#include <cstdint>
#include <utility>
30 namespace llvm {
32 namespace HexagonISD {
34 enum NodeType : unsigned {
35 OP_BEGIN = ISD::BUILTIN_OP_END,
37 CONST32 = OP_BEGIN,
38 CONST32_GP, // For marking data present in GP.
39 ADDC, // Add with carry: (X, Y, Cin) -> (X+Y, Cout).
40 SUBC, // Sub with carry: (X, Y, Cin) -> (X+~Y+Cin, Cout).
41 ALLOCA,
43 AT_GOT, // Index in GOT.
44 AT_PCREL, // Offset relative to PC.
46 CALL, // Function call.
47 CALLnr, // Function call that does not return.
48 CALLR,
50 RET_FLAG, // Return with a flag operand.
51 BARRIER, // Memory barrier.
52 JT, // Jump table.
53 CP, // Constant pool.
55 COMBINE,
56 VASL,
57 VASR,
58 VLSR,
60 TSTBIT,
61 INSERT,
62 EXTRACTU,
63 VEXTRACTW,
64 VINSERTW0,
65 VROR,
66 TC_RETURN,
67 EH_RETURN,
68 DCFETCH,
69 READCYCLE,
70 PTRUE,
71 PFALSE,
72 D2P, // Convert 8-byte value to 8-bit predicate register. [*]
73 P2D, // Convert 8-bit predicate register to 8-byte value. [*]
74 V2Q, // Convert HVX vector to a vector predicate reg. [*]
75 Q2V, // Convert vector predicate to an HVX vector. [*]
76 // [*] The equivalence is defined as "Q <=> (V != 0)",
77 // where the != operation compares bytes.
78 // Note: V != 0 is implemented as V >u 0.
79 QCAT,
80 QTRUE,
81 QFALSE,
82 TYPECAST, // No-op that's used to convert between different legal
83 // types in a register.
84 VALIGN, // Align two vectors (in Op0, Op1) to one that would have
85 // been loaded from address in Op2.
86 VALIGNADDR, // Align vector address: Op0 & -Op1, except when it is
87 // an address in a vector load, then it's a no-op.
88 VPACKL, // Pack low parts of the input vector to the front of the
89 // output. For example v64i16 VPACKL(v32i32) will pick
90 // the low halfwords and pack them into the first 32
91 // halfwords of the output. The rest of the output is
92 // unspecified.
93 VUNPACK, // Unpacking into low elements with sign extension.
94 VUNPACKU, // Unpacking into low elements with zero extension.
95 ISEL, // Marker for nodes that were created during ISel, and
96 // which need explicit selection (would have been left
97 // unselected otherwise).
98 OP_END
101 } // end namespace HexagonISD
103 class HexagonSubtarget;
105 class HexagonTargetLowering : public TargetLowering {
106 int VarArgsFrameOffset; // Frame offset to start of varargs area.
107 const HexagonTargetMachine &HTM;
108 const HexagonSubtarget &Subtarget;
110 bool CanReturnSmallStruct(const Function* CalleeFn, unsigned& RetSize)
111 const;
113 public:
114 explicit HexagonTargetLowering(const TargetMachine &TM,
115 const HexagonSubtarget &ST);
117 bool isHVXVectorType(MVT Ty) const;
119 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
120 /// for tail call optimization. Targets which want to do tail call
121 /// optimization should implement this function.
122 bool IsEligibleForTailCallOptimization(SDValue Callee,
123 CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet,
124 bool isCallerStructRet, const SmallVectorImpl<ISD::OutputArg> &Outs,
125 const SmallVectorImpl<SDValue> &OutVals,
126 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG& DAG) const;
128 bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
129 MachineFunction &MF,
130 unsigned Intrinsic) const override;
132 bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
133 bool isTruncateFree(EVT VT1, EVT VT2) const override;
135 bool isCheapToSpeculateCttz() const override { return true; }
136 bool isCheapToSpeculateCtlz() const override { return true; }
137 bool isCtlzFast() const override { return true; }
139 bool hasBitTest(SDValue X, SDValue Y) const override;
141 bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
143 /// Return true if an FMA operation is faster than a pair of mul and add
144 /// instructions. fmuladd intrinsics will be expanded to FMAs when this
145 /// method returns true (and FMAs are legal), otherwise fmuladd is
146 /// expanded to mul + add.
147 bool isFMAFasterThanFMulAndFAdd(const MachineFunction &,
148 EVT) const override;
150 // Should we expand the build vector with shuffles?
151 bool shouldExpandBuildVectorWithShuffles(EVT VT,
152 unsigned DefinedValues) const override;
154 bool isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
155 TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
156 const override;
158 SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
159 void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
160 SelectionDAG &DAG) const override;
161 void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
162 SelectionDAG &DAG) const override;
164 const char *getTargetNodeName(unsigned Opcode) const override;
166 SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
167 SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
168 SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
169 SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
170 SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
171 SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
172 SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
173 SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const;
174 SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;
175 SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
176 SDValue LowerANY_EXTEND(SDValue Op, SelectionDAG &DAG) const;
177 SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
178 SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
179 SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const;
180 SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const;
181 SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const;
182 SDValue LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const;
183 SDValue LowerAddSubCarry(SDValue Op, SelectionDAG &DAG) const;
185 SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
186 SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
187 SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
188 SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
189 SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const;
190 SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
191 SDValue
192 LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
193 const SmallVectorImpl<ISD::InputArg> &Ins,
194 const SDLoc &dl, SelectionDAG &DAG,
195 SmallVectorImpl<SDValue> &InVals) const override;
196 SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
197 SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
198 SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
199 SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
200 SelectionDAG &DAG) const;
201 SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
202 SelectionDAG &DAG) const;
203 SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
204 SelectionDAG &DAG) const;
205 SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
206 GlobalAddressSDNode *GA, SDValue InFlag, EVT PtrVT,
207 unsigned ReturnReg, unsigned char OperandFlags) const;
208 SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
210 SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
211 SmallVectorImpl<SDValue> &InVals) const override;
212 SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
213 CallingConv::ID CallConv, bool isVarArg,
214 const SmallVectorImpl<ISD::InputArg> &Ins,
215 const SDLoc &dl, SelectionDAG &DAG,
216 SmallVectorImpl<SDValue> &InVals,
217 const SmallVectorImpl<SDValue> &OutVals,
218 SDValue Callee) const;
220 SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
221 SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
222 SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
223 SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
224 SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
226 bool CanLowerReturn(CallingConv::ID CallConv,
227 MachineFunction &MF, bool isVarArg,
228 const SmallVectorImpl<ISD::OutputArg> &Outs,
229 LLVMContext &Context) const override;
231 SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
232 const SmallVectorImpl<ISD::OutputArg> &Outs,
233 const SmallVectorImpl<SDValue> &OutVals,
234 const SDLoc &dl, SelectionDAG &DAG) const override;
236 SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
238 bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
240 Register getRegisterByName(const char* RegName, LLT VT,
241 const MachineFunction &MF) const override;
243 /// If a physical register, this returns the register that receives the
244 /// exception address on entry to an EH pad.
245 Register
246 getExceptionPointerRegister(const Constant *PersonalityFn) const override {
247 return Hexagon::R0;
250 /// If a physical register, this returns the register that receives the
251 /// exception typeid on entry to a landing pad.
252 Register
253 getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
254 return Hexagon::R1;
257 SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
258 SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
259 SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
260 SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
262 EVT getSetCCResultType(const DataLayout &, LLVMContext &C,
263 EVT VT) const override {
264 if (!VT.isVector())
265 return MVT::i1;
266 else
267 return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
270 bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
271 SDValue &Base, SDValue &Offset,
272 ISD::MemIndexedMode &AM,
273 SelectionDAG &DAG) const override;
275 ConstraintType getConstraintType(StringRef Constraint) const override;
277 std::pair<unsigned, const TargetRegisterClass *>
278 getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
279 StringRef Constraint, MVT VT) const override;
281 // Intrinsics
282 SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
283 SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
284 /// isLegalAddressingMode - Return true if the addressing mode represented
285 /// by AM is legal for this target, for a load/store of the specified type.
286 /// The type may be VoidTy, in which case only return true if the addressing
287 /// mode is legal for a load/store of any legal type.
288 /// TODO: Handle pre/postinc as well.
289 bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
290 Type *Ty, unsigned AS,
291 Instruction *I = nullptr) const override;
292 /// Return true if folding a constant offset with the given GlobalAddress
293 /// is legal. It is frequently not legal in PIC relocation models.
294 bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
296 bool isFPImmLegal(const APFloat &Imm, EVT VT,
297 bool ForCodeSize) const override;
299 /// isLegalICmpImmediate - Return true if the specified immediate is legal
300 /// icmp immediate, that is the target has icmp instructions which can
301 /// compare a register against the immediate without having to materialize
302 /// the immediate into a register.
303 bool isLegalICmpImmediate(int64_t Imm) const override;
305 EVT getOptimalMemOpType(const MemOp &Op,
306 const AttributeList &FuncAttributes) const override;
308 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
309 unsigned AddrSpace, Align Alignment,
310 MachineMemOperand::Flags Flags,
311 bool *Fast) const override;
313 bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
314 Align Alignment,
315 MachineMemOperand::Flags Flags,
316 bool *Fast) const override;
318 /// Returns relocation base for the given PIC jumptable.
319 SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
320 const override;
322 bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
323 EVT NewVT) const override;
325 // Handling of atomic RMW instructions.
326 Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
327 AtomicOrdering Ord) const override;
328 Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
329 AtomicOrdering Ord) const override;
330 AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
331 bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
332 AtomicExpansionKind
333 shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
335 AtomicExpansionKind
336 shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {
337 return AtomicExpansionKind::LLSC;
340 private:
341 void initializeHVXLowering();
342 unsigned getPreferredHvxVectorAction(MVT VecTy) const;
344 bool validateConstPtrAlignment(SDValue Ptr, Align NeedAlign, const SDLoc &dl,
345 SelectionDAG &DAG) const;
346 SDValue replaceMemWithUndef(SDValue Op, SelectionDAG &DAG) const;
348 std::pair<SDValue,int> getBaseAndOffset(SDValue Addr) const;
350 bool getBuildVectorConstInts(ArrayRef<SDValue> Values, MVT VecTy,
351 SelectionDAG &DAG,
352 MutableArrayRef<ConstantInt*> Consts) const;
353 SDValue buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
354 SelectionDAG &DAG) const;
355 SDValue buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
356 SelectionDAG &DAG) const;
357 SDValue extractVector(SDValue VecV, SDValue IdxV, const SDLoc &dl,
358 MVT ValTy, MVT ResTy, SelectionDAG &DAG) const;
359 SDValue insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
360 const SDLoc &dl, MVT ValTy, SelectionDAG &DAG) const;
361 SDValue expandPredicate(SDValue Vec32, const SDLoc &dl,
362 SelectionDAG &DAG) const;
363 SDValue contractPredicate(SDValue Vec64, const SDLoc &dl,
364 SelectionDAG &DAG) const;
365 SDValue getVectorShiftByInt(SDValue Op, SelectionDAG &DAG) const;
366 SDValue appendUndef(SDValue Val, MVT ResTy, SelectionDAG &DAG) const;
368 bool isUndef(SDValue Op) const {
369 if (Op.isMachineOpcode())
370 return Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
371 return Op.getOpcode() == ISD::UNDEF;
373 SDValue getInstr(unsigned MachineOpc, const SDLoc &dl, MVT Ty,
374 ArrayRef<SDValue> Ops, SelectionDAG &DAG) const {
375 SDNode *N = DAG.getMachineNode(MachineOpc, dl, Ty, Ops);
376 return SDValue(N, 0);
378 SDValue getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG) const;
380 using VectorPair = std::pair<SDValue, SDValue>;
381 using TypePair = std::pair<MVT, MVT>;
383 SDValue getInt(unsigned IntId, MVT ResTy, ArrayRef<SDValue> Ops,
384 const SDLoc &dl, SelectionDAG &DAG) const;
386 MVT ty(SDValue Op) const {
387 return Op.getValueType().getSimpleVT();
389 TypePair ty(const VectorPair &Ops) const {
390 return { Ops.first.getValueType().getSimpleVT(),
391 Ops.second.getValueType().getSimpleVT() };
393 MVT tyScalar(MVT Ty) const {
394 if (!Ty.isVector())
395 return Ty;
396 return MVT::getIntegerVT(Ty.getSizeInBits());
398 MVT tyVector(MVT Ty, MVT ElemTy) const {
399 if (Ty.isVector() && Ty.getVectorElementType() == ElemTy)
400 return Ty;
401 unsigned TyWidth = Ty.getSizeInBits();
402 unsigned ElemWidth = ElemTy.getSizeInBits();
403 assert((TyWidth % ElemWidth) == 0);
404 return MVT::getVectorVT(ElemTy, TyWidth/ElemWidth);
407 MVT typeJoin(const TypePair &Tys) const;
408 TypePair typeSplit(MVT Ty) const;
409 MVT typeExtElem(MVT VecTy, unsigned Factor) const;
410 MVT typeTruncElem(MVT VecTy, unsigned Factor) const;
412 SDValue opJoin(const VectorPair &Ops, const SDLoc &dl,
413 SelectionDAG &DAG) const;
414 VectorPair opSplit(SDValue Vec, const SDLoc &dl, SelectionDAG &DAG) const;
415 SDValue opCastElem(SDValue Vec, MVT ElemTy, SelectionDAG &DAG) const;
417 bool allowsHvxMemoryAccess(MVT VecTy, MachineMemOperand::Flags Flags,
418 bool *Fast) const;
419 bool allowsHvxMisalignedMemoryAccesses(MVT VecTy,
420 MachineMemOperand::Flags Flags,
421 bool *Fast) const;
423 bool isHvxSingleTy(MVT Ty) const;
424 bool isHvxPairTy(MVT Ty) const;
425 bool isHvxBoolTy(MVT Ty) const;
426 SDValue convertToByteIndex(SDValue ElemIdx, MVT ElemTy,
427 SelectionDAG &DAG) const;
428 SDValue getIndexInWord32(SDValue Idx, MVT ElemTy, SelectionDAG &DAG) const;
429 SDValue getByteShuffle(const SDLoc &dl, SDValue Op0, SDValue Op1,
430 ArrayRef<int> Mask, SelectionDAG &DAG) const;
432 SDValue buildHvxVectorReg(ArrayRef<SDValue> Values, const SDLoc &dl,
433 MVT VecTy, SelectionDAG &DAG) const;
434 SDValue buildHvxVectorPred(ArrayRef<SDValue> Values, const SDLoc &dl,
435 MVT VecTy, SelectionDAG &DAG) const;
436 SDValue createHvxPrefixPred(SDValue PredV, const SDLoc &dl,
437 unsigned BitBytes, bool ZeroFill,
438 SelectionDAG &DAG) const;
439 SDValue extractHvxElementReg(SDValue VecV, SDValue IdxV, const SDLoc &dl,
440 MVT ResTy, SelectionDAG &DAG) const;
441 SDValue extractHvxElementPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
442 MVT ResTy, SelectionDAG &DAG) const;
443 SDValue insertHvxElementReg(SDValue VecV, SDValue IdxV, SDValue ValV,
444 const SDLoc &dl, SelectionDAG &DAG) const;
445 SDValue insertHvxElementPred(SDValue VecV, SDValue IdxV, SDValue ValV,
446 const SDLoc &dl, SelectionDAG &DAG) const;
447 SDValue extractHvxSubvectorReg(SDValue VecV, SDValue IdxV, const SDLoc &dl,
448 MVT ResTy, SelectionDAG &DAG) const;
449 SDValue extractHvxSubvectorPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
450 MVT ResTy, SelectionDAG &DAG) const;
451 SDValue insertHvxSubvectorReg(SDValue VecV, SDValue SubV, SDValue IdxV,
452 const SDLoc &dl, SelectionDAG &DAG) const;
453 SDValue insertHvxSubvectorPred(SDValue VecV, SDValue SubV, SDValue IdxV,
454 const SDLoc &dl, SelectionDAG &DAG) const;
455 SDValue extendHvxVectorPred(SDValue VecV, const SDLoc &dl, MVT ResTy,
456 bool ZeroExt, SelectionDAG &DAG) const;
457 SDValue compressHvxPred(SDValue VecQ, const SDLoc &dl, MVT ResTy,
458 SelectionDAG &DAG) const;
460 SDValue LowerHvxBuildVector(SDValue Op, SelectionDAG &DAG) const;
461 SDValue LowerHvxConcatVectors(SDValue Op, SelectionDAG &DAG) const;
462 SDValue LowerHvxExtractElement(SDValue Op, SelectionDAG &DAG) const;
463 SDValue LowerHvxInsertElement(SDValue Op, SelectionDAG &DAG) const;
464 SDValue LowerHvxExtractSubvector(SDValue Op, SelectionDAG &DAG) const;
465 SDValue LowerHvxInsertSubvector(SDValue Op, SelectionDAG &DAG) const;
466 SDValue LowerHvxBitcast(SDValue Op, SelectionDAG &DAG) const;
467 SDValue LowerHvxAnyExt(SDValue Op, SelectionDAG &DAG) const;
468 SDValue LowerHvxSignExt(SDValue Op, SelectionDAG &DAG) const;
469 SDValue LowerHvxZeroExt(SDValue Op, SelectionDAG &DAG) const;
470 SDValue LowerHvxCttz(SDValue Op, SelectionDAG &DAG) const;
471 SDValue LowerHvxMul(SDValue Op, SelectionDAG &DAG) const;
472 SDValue LowerHvxMulh(SDValue Op, SelectionDAG &DAG) const;
473 SDValue LowerHvxSetCC(SDValue Op, SelectionDAG &DAG) const;
474 SDValue LowerHvxExtend(SDValue Op, SelectionDAG &DAG) const;
475 SDValue LowerHvxSelect(SDValue Op, SelectionDAG &DAG) const;
476 SDValue LowerHvxShift(SDValue Op, SelectionDAG &DAG) const;
477 SDValue LowerHvxIntrinsic(SDValue Op, SelectionDAG &DAG) const;
478 SDValue LowerHvxMaskedOp(SDValue Op, SelectionDAG &DAG) const;
480 SDValue SplitHvxPairOp(SDValue Op, SelectionDAG &DAG) const;
481 SDValue SplitHvxMemOp(SDValue Op, SelectionDAG &DAG) const;
482 SDValue WidenHvxLoad(SDValue Op, SelectionDAG &DAG) const;
483 SDValue WidenHvxStore(SDValue Op, SelectionDAG &DAG) const;
484 SDValue WidenHvxSetCC(SDValue Op, SelectionDAG &DAG) const;
485 SDValue WidenHvxExtend(SDValue Op, SelectionDAG &DAG) const;
486 SDValue WidenHvxTruncate(SDValue Op, SelectionDAG &DAG) const;
488 std::pair<const TargetRegisterClass*, uint8_t>
489 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT)
490 const override;
492 bool shouldWidenToHvx(MVT Ty, SelectionDAG &DAG) const;
493 bool isHvxOperation(SDNode *N, SelectionDAG &DAG) const;
494 SDValue LowerHvxOperation(SDValue Op, SelectionDAG &DAG) const;
495 void LowerHvxOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
496 SelectionDAG &DAG) const;
497 void ReplaceHvxNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
498 SelectionDAG &DAG) const;
499 SDValue PerformHvxDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
} // end namespace llvm

#endif // LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H