//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.

  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".

  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on

  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.

  // Conditional compares. Operands: left,right,falsecc,cc,flags

  // Floating point comparison

  // Scalar-to-vector duplication

  // Vector immediate moves

  // Vector immediate ops

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.

  // Vector arithmetic negation

  // Vector shift by scalar

  // Vector shift by scalar (again)

  // Vector comparisons

  // Vector zero comparisons

  // Vector across-lanes addition
  // Only the lower result lane is defined.

  // Vector across-lanes min/max
  // Only the lower result lane is defined.

  // Vector bitwise negation

  // Vector bitwise selection

  // Compare-and-branch

  // Custom prefetch handling

  // {s|u}int to FP within a FP register.

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t vectors; which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.

  // Reciprocal estimates and steps.

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
};

} // end namespace AArch64ISD

namespace {

// Any instruction that defines a 32-bit result zeros out the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. But any other 32-bit operation will zero-extend
// up to 64 bits.
// FIXME: X86 also checks for CMOV here. Do we need something similar?
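// For example, "ADD Wd, Wn, Wm" or "LDR Wd, [Xn]" leave bits [63:32] of Xd as
// zero, so the result can be reused as a 64-bit value without an explicit
// zero-extension; isDef32 conservatively excludes the few node kinds that do
// not give that guarantee (illustrative note).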
static inline bool isDef32(const SDNode &N) {
  unsigned Opc = N.getOpcode();
  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
         Opc != ISD::CopyFromReg;
}

} // end anonymous namespace

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;
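
  // Callers typically hand the returned CCAssignFn straight to a CCState,
  // e.g. (sketch; the local names are illustrative):
  //   CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Context);
  //   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));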

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the Known bitset.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override;
  bool allowsMisalignedMemoryAccesses(
      LLT Ty, unsigned AddrSpace, unsigned Align,
      MachineMemOperand::Flags Flags,
      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;

  bool hasPairedLoad(EVT LoadedType,
                     unsigned &RequiredAlignment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;
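
  // For example, a wide load followed by even/odd shufflevector extracts
  // (stride 2) can be lowered to a single "ld2 { v0.4s, v1.4s }, [x0]", and
  // re-interleaving shuffles feeding a store map onto st2 (illustrative
  // sketch of what these hooks enable).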

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          const AttributeList &FuncAttributes) const override;

  LLT getOptimalMemOpLLT(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                         bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                         const AttributeList &FuncAttributes) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                             Type *Ty, unsigned AS,
                             Instruction *I = nullptr) const override;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this
  /// method returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
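
  // When a cmpxchg is expanded to LL/SC (rather than to an LSE CAS), the
  // emitted sequence is roughly (illustrative sketch):
  //   loop:
  //     ldaxr  x8, [x0]       // load-acquire exclusive of the current value
  //     cmp    x8, x1         // compare against the expected value
  //     b.ne   done
  //     stlxr  w9, x2, [x0]   // store-release exclusive of the new value
  //     cbnz   w9, loop       // retry if the exclusive monitor was lost
  //   done: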

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const SelectionDAG &DAG) const override {
    // Do not merge to float value size (128 bits) if no implicit
    // float attribute is set.
    bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
        Attribute::NoImplicitFloat);
    if (NoFloat)
      return (MemVT.getSizeInBits() <= 64);
    return true;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    return VT.getSizeInBits() >= 64; // vector 'bic'
  }
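
  // Both hooks correspond to the BIC/BICS instructions: (and x, (not y)) maps
  // to "bic", and comparing that result against zero maps to "bics", whose
  // flag result makes the separate compare unnecessary (illustrative note).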

  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
    // XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }

  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;
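
  // For example, the AAPCS64 va_list is a 32-byte structure, whereas Darwin
  // and Windows on AArch64 use a simple pointer (8 bytes).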

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;

  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
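
  // The helpers below materialize a symbol's address for the different code
  // models: getGOT loads the address from the GOT, getAddrLarge emits the
  // four-instruction MOVZ/MOVK (WrapperLarge) sequence, getAddr uses
  // ADRP+ADDlow for the small code model, and getAddrTiny uses a single ADR
  // (descriptive summary).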
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                         SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
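  // Both estimates map onto the FRSQRTE/FRECPE instructions and are refined
  // with FRSQRTS/FRECPS Newton-Raphson steps, e.g. x_{n+1} = x_n * (2 - d * x_n)
  // for the reciprocal of d (illustrative note).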
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char *RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    //        followed by llvm_unreachable so we'll leave them unimplemented in
    //        the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }
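
  // The "Q" constraint above denotes a memory operand addressed by a single
  // base register with no offset, e.g. the addressing form required by
  // LDXR/STXR (illustrative note).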

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;
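
  // For example, a load whose address is incremented afterwards can be
  // selected as the post-indexed "ldr x0, [x1], #8", and one preceded by the
  // increment as the pre-indexed "ldr x0, [x1, #8]!" (illustrative note).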

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif