//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADR,      // ADR
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,
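  // Bit by bit, the result is (mask & lhs) | (~mask & rhs), as in the NEON
  // BSL instruction.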

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t vectors; which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,
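
  // Widening vector multiplies, e.g. multiplying two v8i8 operands to produce
  // a v8i16 result, as the SMULL/UMULL instructions do.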
  SMULL,
  UMULL,

  // Reciprocal estimates and steps.
  FRECPE, FRECPS,
  FRSQRTE, FRSQRTS,
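
  // Unpack the low (LO) or high (HI) half of a vector, sign- or zero-extending
  // each element to twice its original width.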
  SUNPKHI,
  SUNPKLO,
  UUNPKHI,
  UUNPKLO,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost,
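
  // Memory Tagging Extension (MTE) tag-store operations (STG/STZG and their
  // two-granule forms).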
  STG,
  STZG,
  ST2G,
  STZ2G

};

} // end namespace AArch64ISD

namespace {

// Any instruction that defines a 32-bit result zeros out the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. But any other 32-bit operation will zero-extend
// up to 64 bits.
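// For example, "add w0, w1, w2" also clears bits [63:32] of x0, so a
// subsequent zero-extension of the result to 64 bits needs no extra
// instruction.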
// FIXME: X86 also checks for CMOV here. Do we need something similar?
static inline bool isDef32(const SDNode &N) {
  unsigned Opc = N.getOpcode();
  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
         Opc != ISD::CopyFromReg;
}

} // end anonymous namespace

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
    // Returning i64 unconditionally here (i.e. even for ILP32) means that the
    // *DAG* representation of pointers will always be 64-bits. They will be
    // truncated and extended when transferred to memory, but the 64-bit DAG
    // allows us to use AArch64's addressing modes much more easily.
    return MVT::getIntegerVT(64);
  }

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override;
  /// LLT variant.
  bool allowsMisalignedMemoryAccesses(
      LLT Ty, unsigned AddrSpace, unsigned Align,
      MachineMemOperand::Flags Flags, bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;

  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
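
  // Interleave factors 2-4 map onto the LD2/LD3/LD4 and ST2/ST3/ST4 structure
  // load/store instructions used by lowerInterleavedLoad/lowerInterleavedStore
  // below.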
  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          const AttributeList &FuncAttributes) const override;

  LLT getOptimalMemOpLLT(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                         bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                         const AttributeList &FuncAttributes) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const SelectionDAG &DAG) const override {
    // Do not merge to a float value size (128 bits) if no implicit
    // float attribute is set.

    bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
        Attribute::NoImplicitFloat);

    if (NoFloat)
      return (MemVT.getSizeInBits() <= 64);
    return true;
  }
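
  // For example, when the function has the noimplicitfloat attribute,
  // canMergeStoresTo limits merged stores to at most 64 bits rather than
  // allowing a single 128-bit (FP/SIMD register) store.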

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    return VT.getSizeInBits() >= 64; // vector 'bic'
  }
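
  // hasAndNot() returning true encourages DAG combines to keep
  // (and x, (xor y, -1)) in its and-not form, which selects to BIC for both
  // scalars and 64/128-bit vectors.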

  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
    // XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;
  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                         SDValue &Size,
                                         SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  Register getRegisterByName(const char* RegName, EVT VT,
                             const MachineFunction &MF) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    // followed by llvm_unreachable so we'll leave them unimplemented in
    // the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif