//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"
namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  FCSEL,    // Conditional move instruction.
  CSINV,    // Conditional select invert.
  CSNEG,    // Conditional select negate.
  CSINC,    // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS, SUBS, ADCS, SBCS, ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP, CCMN, FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar-to-vector duplication
  DUP, DUPLANE8, DUPLANE16, DUPLANE32, DUPLANE64,

  // Vector immediate moves
  MOVI, MOVIshift, MOVIedit, MOVImsl, FMOV, MVNIshift, MVNImsl,

  // Vector immediate ops
  BICi, ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shift by scalar
  VSHL, VLSHR, VASHR,

  // Vector shift by scalar (again)
  SQSHL_I, UQSHL_I, SQSHLU_I, SRSHR_I, URSHR_I,

  // Vector comparisons
  CMEQ, CMGE, CMGT, CMHI, CMHS, FCMEQ, FCMGE, FCMGT,

  // Vector zero comparisons
  CMEQz, CMGEz, CMGTz, CMLEz, CMLTz, FCMEQz, FCMGEz, FCMGTz, FCMLEz, FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV, UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV, UMINV, SMAXV, UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ, CBNZ, TBZ, TBNZ,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF, UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t vectors, which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
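  /// For example, on a big-endian target a bitcast from v4i32 to v8i16 has to
  /// honour the in-memory element order and therefore needs REV instructions,
  /// whereas the corresponding NVCAST simply reinterprets the register lanes
  /// in place. (Illustrative sketch of the distinction, not a specific
  /// selection pattern.)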
  NVCAST,

  // Reciprocal estimates and steps.
  FRECPE, FRECPS, FRSQRTE, FRSQRTS,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE
};

} // end namespace AArch64ISD
namespace {

// Any instruction that defines a 32-bit result zeros out the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. But any other 32-bit operation will zero-extend
// up to 64 bits.
// FIXME: X86 also checks for CMOV here. Do we need something similar?
static inline bool isDef32(const SDNode &N) {
  unsigned Opc = N.getOpcode();
  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
         Opc != ISD::CopyFromReg;
}
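// For example, in (i64 (zext (i32 (add ...)))) the 32-bit ADD already zeroes
// bits [63:32] of the destination X register, so the zero-extension can
// usually be folded into a SUBREG_TO_REG rather than a separate UBFM/AND;
// TRUNCATE, EXTRACT_SUBREG and CopyFromReg are excluded above because they may
// merely reinterpret a wider value whose high bits are unknown. (Illustrative
// note; the actual folds live in the instruction-selection patterns.)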
} // end anonymous namespace
class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);
  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
    // Returning i64 unconditionally here (i.e. even for ILP32) means that the
    // *DAG* representation of pointers will always be 64-bits. They will be
    // truncated and extended when transferred to memory, but the 64-bit DAG
    // allows us to use AArch64's addressing modes much more easily.
    return MVT::getIntegerVT(64);
  }

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;
  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override;

  bool allowsMisalignedMemoryAccesses(
      LLT Ty, unsigned AddrSpace, unsigned Align,
      MachineMemOperand::Flags Flags,
      bool *Fast = nullptr) const override;
  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }
  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;
  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;
  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;
  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAlignment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
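  // For example (a sketch of the intent), a factor-2 de-interleaving load such
  // as
  //   %wide = load <8 x i32>, <8 x i32>* %ptr
  //   %v0 = shufflevector <8 x i32> %wide, <8 x i32> undef,
  //                       <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  //   %v1 = shufflevector <8 x i32> %wide, <8 x i32> undef,
  //                       <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  // can be matched by lowerInterleavedLoad below and emitted as a single
  //   ld2 { v0.4s, v1.4s }, [x0]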
  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;
  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          const AttributeList &FuncAttributes) const override;

  LLT getOptimalMemOpLLT(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                         bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                         const AttributeList &FuncAttributes) const override;
  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;
  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;
  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;
  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;
  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const SelectionDAG &DAG) const override {
    // Do not merge to float value size (128 bits) if no implicit
    // float attribute is set.
    bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
        Attribute::NoImplicitFloat);

    if (NoFloat)
      return (MemVT.getSizeInBits() <= 64);
    return true;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }
  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
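  // For example (illustrative), (and x, (xor y, -1)) can be selected as
  //   bic  w0, w1, w2              // scalar: w1 & ~w2
  //   bics w0, w1, w2              // same, but also setting NZCV for a compare
  //   bic  v0.16b, v1.16b, v2.16b  // vector form
  // which is what the two hooks below report.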
  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    return VT.getSizeInBits() >= 64; // vector 'bic'
  }
  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;
  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
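    // Roughly, this is queried when a combine can rewrite a "does X fit in
    // KeptBits signed bits" test. For a 32-bit X and KeptBits == 8 that is
    //   add w8, w0, #128 ; cmp w8, #256 ; b.lo <fits>
    // versus
    //   sxtb w8, w0      ; cmp w8, w0   ; b.eq <fits>
    // and returning true asks for the sign-extend form (a sketch, assuming the
    // usual AArch64 lowering of the two patterns).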
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
    // XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }
  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }
  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }
  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
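  /// (For reference: the AAPCS64 va_list is a 32-byte structure -- __stack,
  /// __gr_top, __vr_top, __gr_offs and __vr_offs -- while Darwin and Windows
  /// use a plain pointer, i.e. 64 bits.)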
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;
  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;

  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;
private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;
  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                         SDValue &Size,
                                         SelectionDAG &DAG) const;
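  // A rough sketch of the kind of code BuildSDIVPow2 produces for
  // e.g. "sdiv i32 %x, 4" (register choices are illustrative):
  //   add  w8, w0, #3        // bias negative dividends
  //   cmp  w0, #0
  //   csel w8, w8, w0, lt    // use the biased value only when x < 0
  //   asr  w0, w8, #2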
  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
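  // getSqrtEstimate/getRecipEstimate below build on the FRSQRTE/FRECPE
  // hardware estimate instructions, refined by FRSQRTS/FRECPS Newton-Raphson
  // steps (ExtraSteps of them) when reciprocal estimates are enabled. (A
  // high-level note; the exact sequences are in the implementation.)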
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  unsigned combineRepeatedFPDivisors() const override;
  ConstraintType getConstraintType(StringRef Constraint) const override;
  Register getRegisterByName(const char* RegName, EVT VT,
                             const MachineFunction &MF) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;
  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
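    // For example (illustrative), a single-base-register memory operand such
    // as
    //   asm volatile("ldaxr %w0, %1" : "=r"(V) : "Q"(*Ptr));
    // matches "Q": an address with no offset or writeback, as required by the
    // exclusive load/store instructions.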
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    // followed by llvm_unreachable so we'll leave them unimplemented in
    // the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;
};
namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H