//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code. This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class BranchProbability;
class FunctionLoweringInfo;
class MachineBasicBlock;
class MachineFunction;
class MachineJumpTableInfo;
class MachineRegisterInfo;
class TargetRegisterClass;
class TargetLibraryInfo;
class TargetRegisterInfo;
namespace Sched {

  enum Preference {
    None,        // No preference
    Source,      // Follow source order.
    RegPressure, // Scheduling for lowest register pressure.
    Hybrid,      // Scheduling for both latency and register pressure.
    ILP,         // Scheduling for ILP in low register pressure mode.
    VLIW         // Scheduling for VLIW targets.
  };

} // end namespace Sched
/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,   // The target natively supports this operation.
    Promote, // This operation should be executed in a larger type.
    Expand,  // Try to expand this to other ops, otherwise use a libcall.
    LibCall, // Don't try to expand this to other ops, always use a libcall.
    Custom   // Use the LowerOperation hook to implement custom lowering.
  };
  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type,
                         // if an operation is not supported in target HW.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat     // Replace this float with a larger one.
  };
  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;
  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };
  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,     // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal, // The target supports selects with a scalar condition
                         // and vector values (ex: cmov).
    VectorMaskSelect     // The target supports vector selects with a vector
                         // mask (ex: x86 blends).
  };
  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,    // Don't expand the instruction.
    LLSC,    // Expand the instruction into loadlinked/storeconditional; used
             // by ARM/AArch64.
    LLOnly,  // Expand the (load) instruction into just a load-linked, which has
             // greater atomic guarantees than a normal load.
    CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
    MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
  };
  /// Enum that specifies when a multiplication should be expanded.
  enum class MulExpansionKind {
    Always,            // Always expand the instruction.
    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                       // or custom.
  };
  struct ArgListEntry {
    Value *Val = nullptr;
    SDValue Node = SDValue();
    Type *Ty = nullptr;
    bool IsSExt : 1;
    bool IsZExt : 1;
    bool IsInReg : 1;
    bool IsSRet : 1;
    bool IsNest : 1;
    bool IsByVal : 1;
    bool IsInAlloca : 1;
    bool IsReturned : 1;
    bool IsSwiftSelf : 1;
    bool IsSwiftError : 1;
    uint16_t Alignment = 0;
    Type *ByValType = nullptr;

    ArgListEntry()
        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
          IsNest(false), IsByVal(false), IsInAlloca(false), IsReturned(false),
          IsSwiftSelf(false), IsSwiftError(false) {}

    void setAttributes(const CallBase *Call, unsigned ArgIdx);

    void setAttributes(ImmutableCallSite *CS, unsigned ArgIdx) {
      return setAttributes(cast<CallBase>(CS->getInstruction()), ArgIdx);
    }
  };
  using ArgListTy = std::vector<ArgListEntry>;
  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                     ArgListTy &Args) const {}
  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }
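  // Illustrative usage sketch (an editorial addition, not part of the API):
  // widening a boolean SETCC result with the extension opcode that matches the
  // target's boolean content. `TLI`, `DAG`, `DL`, `Cond`, and `WideVT` are
  // assumed to be in scope in the caller.
  //
  //   ISD::NodeType ExtOp = TargetLoweringBase::getExtendForContent(
  //       TLI.getBooleanContents(Cond.getValueType()));
  //   SDValue Widened = DAG.getNode(ExtOp, DL, WideVT, Cond);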
  /// NOTE: The TargetMachine owns TLOF.
  explicit TargetLoweringBase(const TargetMachine &TM);
  TargetLoweringBase(const TargetLoweringBase &) = delete;
  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
  virtual ~TargetLoweringBase() = default;

protected:
  /// Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }

  virtual bool useSoftFloat() const { return false; }
  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }
  /// Return the in-memory pointer type for the given address space, defaults to
  /// the pointer type from the data layout. FIXME: The default needs to be
  /// removed once all the code is updated.
  MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }
  /// Return the type for frame index, which is determined by
  /// the alloca address space specified through the data layout.
  MVT getFrameIndexTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getAllocaAddrSpace());
  }
  /// Return the type for operands of fence.
  /// TODO: Let fence operands be of i32 type and remove this.
  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }
  /// EVT is not used in-tree, but is used by out-of-tree targets.
  /// Documentation for this function would be nice...
  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;

  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                       bool LegalTypes = true) const;
  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }
  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if it is profitable to convert a select of FP constants into
  /// a constant pool load whose address depends on the select condition. The
  /// parameter may be used to differentiate a select with FP compare from
  /// integer compare.
  virtual bool reduceSelectOfFPConstantLoads(bool IsFPSetCC) const {
    return true;
  }
  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const {
    // The default action for one element vectors is to scalarize.
    if (VT.getVectorNumElements() == 1)
      return TypeScalarizeVector;
    // The default action for an odd-width vector is to widen.
    if (!VT.isPow2VectorType())
      return TypeWidenVector;
    // The default action for other vectors is to promote.
    return TypePromoteInteger;
  }
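  // Illustrative sketch of the defaults above (an editorial addition; assumes
  // a TargetLoweringBase &TLI in scope): one-element vectors scalarize,
  // odd-width vectors widen, and remaining vectors fall back to integer
  // promotion unless a target overrides this hook.
  //
  //   auto A1 = TLI.getPreferredVectorAction(MVT::v1i32); // TypeScalarizeVector
  //   auto A2 = TLI.getPreferredVectorAction(MVT::v3i32); // TypeWidenVector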
  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }
  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }

  /// Return true if the target can handle a standalone remainder operation.
  virtual bool hasStandaloneRem(EVT VT) const {
    return true;
  }
  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
    return false;
  }

  /// Reciprocal estimate status values used by the functions below.
  enum ReciprocalEstimate : int {
    Unspecified = -1,
    Disabled = 0,
    Enabled = 1
  };
  /// Return a ReciprocalEstimate enum value for a square root of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;

  /// Return a ReciprocalEstimate enum value for a division of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a square root of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a division of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
  /// Returns true if target has indicated at least one type should be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }
  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  /// If a branch or a select condition is skewed in one direction by more than
  /// this factor, it is very likely to be predicted correctly.
  virtual BranchProbability getPredictableBranchThreshold() const;
  /// Return true if the following transform is beneficial:
  /// fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient, however, this can be undone by optimizations in
  /// dag combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                       const SelectionDAG &DAG,
                                       const MachineMemOperand &MMO) const {
    // Don't do if we could do an indexed load on the original type, but not on
    // the new one.
    if (!LoadVT.isSimple() || !BitcastVT.isSimple())
      return true;

    MVT LoadMVT = LoadVT.getSimpleVT();

    // Don't bother doing this if it's just going to be promoted again later, as
    // doing so might interfere with other combines.
    if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
        getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
      return false;

    bool Fast = false;
    return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT,
                              MMO, &Fast) && Fast;
  }
  /// Return true if the following transform is beneficial:
  /// (store (y (conv x)), y*)) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
                                        const SelectionDAG &DAG,
                                        const MachineMemOperand &MMO) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
  }
  /// Return true if it is expected to be cheaper to do a store of a non-zero
  /// vector constant with the given size and type for the address space than to
  /// store the individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return false;
  }

  /// Allow store merging for the specified type after legalization in addition
  /// to before legalization. This may transform stores that do not exist
  /// earlier (for example, stores created from intrinsics).
  virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
    return true;
  }
  /// Returns if it's reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const SelectionDAG &DAG) const {
    return true;
  }

  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz() const {
    return false;
  }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz() const {
    return false;
  }

  /// Return true if ctlz instruction is fast.
  virtual bool isCtlzFast() const {
    return false;
  }
  /// Return true if it is safe to transform an integer-domain bitwise operation
  /// into the equivalent floating-point operation. This should be set to true
  /// if the target has IEEE-754-compliant fabs/fneg operations for the input
  /// type.
  virtual bool hasBitPreservingFPLogic(EVT VT) const {
    return false;
  }

  /// Return true if it is cheaper to split the store of a merged int val
  /// from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }
  /// Return if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #mask
  ///   %icmpResult = icmp %andResult, 0
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   cc = test %register, #mask
  /// \endcode
  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
    return false;
  }
  /// Use bitwise logic to make pairs of compares more efficient. For example:
  /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
  /// This should be true when it takes more than one instruction to lower
  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
    return false;
  }
  /// Return the preferred operand type if the target has a quick way to compare
  /// integer values of the given size. Assume that any legal integer type can
  /// be compared efficiently. Targets may override this to allow illegal wide
  /// types to return a vector type if there is support to compare that type.
  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
    MVT VT = MVT::getIntegerVT(NumBits);
    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
  }
  /// Return true if the target should transform:
  /// (X & Y) == Y ---> (~X & Y) == 0
  /// (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation that
  /// sets comparison flags. A target may want to limit the transformation based
  /// on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting the
  /// predicate, for example:
  /// (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }

  /// Return true if the target has a bitwise and-not operation:
  /// X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume that
    // it has this operation too.
    return hasAndNotCompare(X);
  }
  /// Return true if the target has a bit-test instruction:
  ///   (X & (1 << Y)) ==/!= 0
  /// This knowledge can be used to prevent breaking the pattern,
  /// or creating it if it could be recognized.
  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }

  /// There are two ways to clear extreme bits (either low or high):
  ///   Mask:    x & (-1 << y)  (the instcombine canonical form)
  ///   Shifts:  x >> y << y
  /// Return true if the variant with 2 variable shifts is preferred.
  /// Return false if there is no preference.
  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }
  /// Return true if it is profitable to fold a pair of shifts into a mask.
  /// This is usually true on most targets. But some targets, like Thumb1,
  /// have immediate shift instructions, but no immediate "and" instruction;
  /// this makes the fold unprofitable.
  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                                 CombineLevel Level) const {
    return true;
  }

  /// Should we transform the IR-optimal check for whether given truncation
  /// down into KeptBits would be truncating or not:
  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// Into its more traditional form:
  ///   ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }
  /// Given the pattern
  ///   (X & (C l>>/<< Y)) ==/!= 0
  /// return true if it should be transformed into:
  ///   ((X <</l>> Y) & C) ==/!= 0
  /// WARNING: if 'X' is a constant, the fold may deadlock!
  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
  ///        here because it can end up being not linked in.
  virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const {
    if (hasBitTest(X, Y)) {
      // One interesting pattern that we'd want to form is 'bit test':
      //   ((1 << Y) & C) ==/!= 0
      // But we also need to be careful not to try to reverse that fold.

      // Is this '1 << Y' ?
      if (OldShiftOpcode == ISD::SHL && CC->isOne())
        return false; // Keep the 'bit test' pattern.

      // Will it be '1 << Y' after the transform ?
      if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
        return true; // Do form the 'bit test' pattern.
    }

    // If 'X' is a constant, and we transform, then we will immediately
    // try to undo the fold, thus causing endless combine loop.
    // So by default, let's assume everyone prefers the fold
    // iff 'X' is not a constant.
    return !XC;
  }
  /// These two forms are equivalent:
  ///   sub %y, (xor %x, -1)
  ///   add (add %x, 1), %y
  /// The variant with two add's is IR-canonical.
  /// Some targets may prefer one to the other.
  virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
    // By default, let's assume that everyone prefers the form with two add's.
    return true;
  }

  /// Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }
  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  ///
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if inserting a scalar into a variable element of an undef
  /// vector is more efficiently handled by splatting the scalar instead.
  virtual bool shouldSplatInsEltVarIndex(EVT) const {
    return false;
  }

  /// Return true if the target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const {
    return false;
  }
  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual MVT::SimpleValueType getCmpLibcallReturnType() const;
  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1. Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds. For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some cpus also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }
  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some scheduler, e.g. hybrid, can switch to different scheduling heuristics
  /// for different nodes. This function returns the preference (or none) for
  /// the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }
  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *getRegClassFor(MVT VT,
                                                    bool isDivergent = false) const {
    (void)isDivergent;
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Allows target to decide about the register class of the
  /// specific value that is live outside the defining block.
  /// Returns true if the value needs uniform register class.
  virtual bool requiresUniformRegister(MachineFunction &MF,
                                       const Value *) const {
    return false;
  }
  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type. For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }
  /// Return true if SHIFT instructions should be expanded to SHIFT_PARTS
  /// instructions, and false if a library call is preferred (e.g for code-size
  /// reasons).
  virtual bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
    return true;
  }

  /// Return true if the target has native support for the specified value type.
  /// This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }
  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }
  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }
  /// For types supported by the target, this is an identity function. For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to. For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }
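  // Illustrative sketch (an editorial addition; assumes a TargetLoweringBase
  // &TLI and an LLVMContext &Ctx in scope): on a typical 32-bit target, i64
  // reports TypeExpandInteger, and getTypeToExpandTo walks the expansion chain
  // down to the largest legal integer type.
  //
  //   EVT I64 = EVT::getIntegerVT(Ctx, 64);
  //   if (TLI.getTypeAction(Ctx, I64) ==
  //       TargetLoweringBase::TypeExpandInteger) {
  //     EVT LegalVT = TLI.getTypeToExpandTo(Ctx, I64); // e.g. i32
  //   }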
  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register. It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;
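  // Illustrative sketch (an editorial addition; assumes a TargetLoweringBase
  // &TLI and an LLVMContext &Ctx in scope): for the v8f32-on-SSE1 example
  // above, the breakdown would report two v4f32 intermediates carried in
  // v4f32 registers.
  //
  //   EVT IntermediateVT;
  //   MVT RegisterVT;
  //   unsigned NumIntermediates;
  //   unsigned NumRegs = TLI.getVectorTypeBreakdown(
  //       Ctx, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
  //   // Hypothetically: NumRegs == 2, IntermediateVT == MVT::v4f32.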
  /// Certain targets such as MIPS require that some types such as vectors are
  /// always broken down into scalars in some contexts. This occurs even if the
  /// vector type is legal.
  virtual unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const {
    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
                                  RegisterVT);
  }
  struct IntrinsicInfo {
    unsigned opc = 0;            // target opcode
    EVT memVT;                   // memory VT

    // value representing memory location
    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;

    int offset = 0;              // offset off of ptrVal
    unsigned size = 0;           // the size of the memory location
                                 // (taken from memVT if zero)
    MaybeAlign align = Align(1); // alignment

    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
    IntrinsicInfo() = default;
  };
  /// Given an intrinsic, checks if on the target the intrinsic will need to map
  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
  /// true and stores the intrinsic information into the IntrinsicInfo that was
  /// passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  MachineFunction &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }
  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
                            bool ForCodeSize = false) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
    return true;
  }
  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for integer divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }
  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= array_lengthof(OpActions[0])) return Custom;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }
  /// Custom method defined by each target to indicate if an operation which
  /// may require a scale is supported natively by the target.
  /// If not, the operation is illegal.
  virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    return false;
  }

  /// Some fixed point operations may be natively supported by the target but
  /// only for specific scales. This method allows for checking
  /// if the width is supported by the target for a given operation that may
  /// depend on scale.
  LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    auto Action = getOperationAction(Op, VT);
    if (Action != Legal)
      return Action;

    // This operation is supported in this type but may only work on specific
    // scales.
    bool Supported;
    switch (Op) {
    default:
      llvm_unreachable("Unexpected fixed point operation.");
    case ISD::SMULFIX:
    case ISD::SMULFIXSAT:
    case ISD::UMULFIX:
    case ISD::UMULFIXSAT:
      Supported = isSupportedFixedPointOperation(Op, VT, Scale);
      break;
    }

    return Supported ? Action : Expand;
  }
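  // Illustrative sketch (an editorial addition; assumes a TargetLoweringBase
  // &TLI in scope): querying whether a fixed point multiply with a scale of 15
  // must be expanded. A target that supports ISD::SMULFIX only for certain
  // scales steers this to Expand through isSupportedFixedPointOperation.
  //
  //   auto A = TLI.getFixedPointOperationAction(ISD::SMULFIX, MVT::i32,
  //                                             /*Scale=*/15);
  //   bool MustExpand = (A == TargetLoweringBase::Expand);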
  // If Op is a strict floating-point operation, return the result
  // of getOperationAction for the equivalent non-strict operation.
  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
    unsigned EqOpc;
    switch (Op) {
      default: llvm_unreachable("Unexpected FP pseudo-opcode");
      case ISD::STRICT_FADD: EqOpc = ISD::FADD; break;
      case ISD::STRICT_FSUB: EqOpc = ISD::FSUB; break;
      case ISD::STRICT_FMUL: EqOpc = ISD::FMUL; break;
      case ISD::STRICT_FDIV: EqOpc = ISD::FDIV; break;
      case ISD::STRICT_FREM: EqOpc = ISD::FREM; break;
      case ISD::STRICT_FSQRT: EqOpc = ISD::FSQRT; break;
      case ISD::STRICT_FPOW: EqOpc = ISD::FPOW; break;
      case ISD::STRICT_FPOWI: EqOpc = ISD::FPOWI; break;
      case ISD::STRICT_FMA: EqOpc = ISD::FMA; break;
      case ISD::STRICT_FSIN: EqOpc = ISD::FSIN; break;
      case ISD::STRICT_FCOS: EqOpc = ISD::FCOS; break;
      case ISD::STRICT_FEXP: EqOpc = ISD::FEXP; break;
      case ISD::STRICT_FEXP2: EqOpc = ISD::FEXP2; break;
      case ISD::STRICT_FLOG: EqOpc = ISD::FLOG; break;
      case ISD::STRICT_FLOG10: EqOpc = ISD::FLOG10; break;
      case ISD::STRICT_FLOG2: EqOpc = ISD::FLOG2; break;
      case ISD::STRICT_FRINT: EqOpc = ISD::FRINT; break;
      case ISD::STRICT_FNEARBYINT: EqOpc = ISD::FNEARBYINT; break;
      case ISD::STRICT_FMAXNUM: EqOpc = ISD::FMAXNUM; break;
      case ISD::STRICT_FMINNUM: EqOpc = ISD::FMINNUM; break;
      case ISD::STRICT_FCEIL: EqOpc = ISD::FCEIL; break;
      case ISD::STRICT_FFLOOR: EqOpc = ISD::FFLOOR; break;
      case ISD::STRICT_FROUND: EqOpc = ISD::FROUND; break;
      case ISD::STRICT_FTRUNC: EqOpc = ISD::FTRUNC; break;
      case ISD::STRICT_FP_TO_SINT: EqOpc = ISD::FP_TO_SINT; break;
      case ISD::STRICT_FP_TO_UINT: EqOpc = ISD::FP_TO_UINT; break;
      case ISD::STRICT_FP_ROUND: EqOpc = ISD::FP_ROUND; break;
      case ISD::STRICT_FP_EXTEND: EqOpc = ISD::FP_EXTEND; break;
    }

    return getOperationAction(EqOpc, VT);
  }
  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }
  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Promote);
  }
  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering or using promotion. This is used to help
  /// guide high-level lowering decisions.
  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom ||
       getOperationAction(Op, VT) == Promote);
  }
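  // Illustrative sketch (an editorial addition; assumes a TargetLoweringBase
  // &TLI and an EVT VT in scope): the usual guard in a DAG combine before
  // forming a node that the legalizer might not be able to handle afterwards.
  //
  //   if (!TLI.isOperationLegalOrCustom(ISD::FMA, VT))
  //     return SDValue(); // Bail out instead of creating an unlowerable node.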
  /// Return true if the operation uses custom lowering, regardless of whether
  /// the type is legal or not.
  bool isOperationCustom(unsigned Op, EVT VT) const {
    return getOperationAction(Op, VT) == Custom;
  }

  /// Return true if lowering to a jump table is allowed.
  virtual bool areJTsAllowed(const Function *Fn) const {
    if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
      return false;

    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }
  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High,
                       const DataLayout &DL) const {
    // FIXME: Using the pointer type doesn't seem ideal.
    uint64_t BW = DL.getIndexSizeInBits(0u);
    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
    return Range <= BW;
  }
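  // Worked example for the check above (an editorial addition): with a 64-bit
  // index type, Low = 10 and High = 40 give Range = (40 - 10) + 1 = 31, which
  // fits in the 64-bit machine word, so the range qualifies.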
  /// Return true if lowering to a jump table is suitable for a set of case
  /// clusters which may contain \p NumCases cases, \p Range range of values.
  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
                                      uint64_t Range) const {
    // FIXME: This function checks the maximum table size and density, but the
    // minimum size is not checked. It would be nice if the minimum size check
    // were also combined within this function. Currently, the minimum size
    // check is performed in findJumpTable() in SelectionDAGBuilder and
    // getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
    const bool OptForSize = SI->getParent()->getParent()->hasOptSize();
    const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
    const unsigned MaxJumpTableSize = getMaximumJumpTableSize();

    // Check whether the number of cases is small enough and
    // the range is dense enough for a jump table.
    if ((OptForSize || Range <= MaxJumpTableSize) &&
        (NumCases * 100 >= Range * MinDensity)) {
      return true;
    }
    return false;
  }
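  // Worked example of the density check above (an editorial addition): if
  // MinDensity is 40 and a switch has NumCases = 60 spread over Range = 100,
  // then NumCases * 100 = 6000 >= Range * MinDensity = 4000, so a jump table
  // is considered suitable (provided Range also respects the maximum table
  // size).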
  /// Return true if lowering to a bit test is suitable for a set of case
  /// clusters which contains \p NumDests unique destinations, \p Low and
  /// \p High as its lowest and highest case values, and expects \p NumCmps
  /// case value comparisons. Check if the number of destinations, comparison
  /// metric, and range are all suitable.
  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
                             const APInt &Low, const APInt &High,
                             const DataLayout &DL) const {
    // FIXME: I don't think NumCmps is the correct metric: a single case and a
    // range of cases both require only one branch to lower. Just looking at the
    // number of clusters and destinations should be enough to decide whether to
    // build bit tests.

    // To lower a range with bit tests, the range must fit the bitwidth of a
    // machine word.
    if (!rangeFitsInWord(Low, High, DL))
      return false;

    // Decide whether it's profitable to lower this range with bit tests. Each
    // destination requires a bit test and branch, and there is an overall range
    // check branch. For a small number of clusters, separate comparisons might
    // be cheaper, and for many destinations, splitting the range might be
    // better.
    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
  }
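  // Worked example of the profitability check above (an editorial addition):
  // NumDests = 2 with NumCmps = 5 (and a range that fits in a machine word)
  // satisfies (NumDests == 2 && NumCmps >= 5) and is lowered with bit tests,
  // whereas the same two destinations with only 4 comparisons are not.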
  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }
  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
           MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }
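  // Illustrative note on the encoding above (an editorial addition):
  // LoadExtActions packs one 4-bit LegalizeAction per extension type, so
  // ExtType selects a nibble. With the current ISD::LoadExtType ordering, a
  // sign-extending load query (assumes a TargetLoweringBase &TLI in scope)
  // reads bits [8,12):
  //
  //   bool SExtLegal = TLI.getLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8)
  //                    == TargetLoweringBase::Legal;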
  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }
  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to some
  /// other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return TruncStoreActions[ValI][MemI];
  }
  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
  }

  /// Return true if the specified store with truncation is legal or custom on
  /// this target.
  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) &&
      (getTruncStoreAction(ValVT, MemVT) == Legal ||
       getTruncStoreAction(ValVT, MemVT) == Custom);
  }
  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
  }

  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }
  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
  }

  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }
  /// Return how the condition code should be treated: either it is legal, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }
  /// Return true if the specified condition code is legal on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal;
  }

  /// Return true if the specified condition code is legal or custom on this
  /// target.
  bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal ||
           getCondCodeAction(CC, VT) == Custom;
  }
  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
             getOperationAction(Op, NVT) == Promote);
    return NVT;
  }
  /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
  /// operations except for the pointer size. If AllowUnknown is true, this
  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
  /// otherwise it will assert.
  EVT getValueType(const DataLayout &DL, Type *Ty,
                   bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (auto *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(DL, PTy->getAddressSpace());

    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      Type *EltTy = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
        EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
        EltTy = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
                              VTy->getElementCount());
    }

    return EVT::getEVT(Ty, AllowUnknown);
  }
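  // Illustrative sketch (an editorial addition; assumes a TargetLoweringBase
  // &TLI, a DataLayout &DL, and an IR Type *Ty in scope): pointers lower to
  // the native pointer MVT and pointer vectors lower element-wise, so the
  // resulting EVT is usually simple.
  //
  //   EVT VT = TLI.getValueType(DL, Ty);
  //   if (VT.isSimple()) {
  //     MVT SimpleVT = VT.getSimpleVT();
  //   }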
  EVT getMemValueType(const DataLayout &DL, Type *Ty,
                      bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
      return getPointerMemTy(DL, PTy->getAddressSpace());
    else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
      Type *Elm = VTy->getElementType();
      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
        EVT PointerTy(getPointerMemTy(DL, PT->getAddressSpace()));
        Elm = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
                              VTy->getNumElements());
    }

    return getValueType(DL, Ty, AllowUnknown);
  }

  /// Return the MVT corresponding to this LLVM type. See getValueType.
  MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
                         bool AllowUnknown = false) const {
    return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
  }
  /// Return the desired alignment for ByVal or InAlloca aggregate function
  /// arguments in the caller parameter area. This is the actual alignment, not
  /// its logarithm.
  virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }
  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1,
                                   NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    }
    llvm_unreachable("Unsupported extended type!");
  }
  /// Return the number of registers that this ValueType will eventually
  /// require.
  ///
  /// This is one for any types promoted to live in larger registers, but may be
  /// more than one for types (like i64) that are split into pieces. For types
  /// like i140, which are first promoted then expanded, it is the number of
  /// registers needed to hold all the bits of the original type. For an i140
  /// on a 32 bit machine this means 5 registers.
  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }
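  // Worked example for the i140 case above (an editorial addition): with
  // 32-bit registers the register type is i32, so getNumRegisters returns
  // (140 + 32 - 1) / 32 = 5, matching the five registers quoted in the
  // comment.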
  /// Certain combinations of ABIs, Targets and features require that types
  /// are legal for some operations and not for other operations.
  /// For MIPS all vector types must be passed through the integer register set.
  virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
                                            CallingConv::ID CC, EVT VT) const {
    return getRegisterType(Context, VT);
  }

  /// Certain targets require unusual breakdowns of certain types. For MIPS,
  /// this occurs when a vector type is used, as vectors are passed through the
  /// integer register set.
  virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                                 CallingConv::ID CC,
                                                 EVT VT) const {
    return getNumRegisters(Context, VT);
  }
  /// Certain targets have context sensitive alignment requirements, where one
  /// type has the alignment requirement of another type.
  virtual unsigned getABIAlignmentForCallingConv(Type *ArgTy,
                                                 DataLayout DL) const {
    return DL.getABITypeAlignment(ArgTy);
  }

  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
  /// Return true if it is profitable to reduce a load to a smaller type.
  /// Example: (i16 (trunc (i32 (load x))) -> i16 load x
  virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                                     EVT NewVT) const {
    // By default, assume that it is cheaper to extract a subvector from a wide
    // vector load rather than creating multiple narrow vector loads.
    if (NewVT.isVector() && !Load->hasOneUse())
      return false;

    return true;
  }
  /// When splitting a value of the specified type into parts, does the Lo
  /// or Hi part come first? This usually follows the endianness, except
  /// for ppcf128, where the Hi part always comes first.
  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
    return DL.isBigEndian() || VT == MVT::ppcf128;
  }

  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }

  unsigned getGatherAllAliasesMaxDepth() const {
    return GatherAllAliasesMaxDepth;
  }
  /// Returns the size of the platform's va_list object.
  virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
    return getPointerTy(DL).getSizeInBits();
  }
  /// Get maximum # of store operations permitted for llvm.memset
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
  }

  /// Get maximum # of store operations permitted for llvm.memcpy
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
  }

  /// \brief Get maximum # of store operations to be glued together
  ///
  /// This function returns the maximum number of store operations permitted
  /// to glue together during lowering of llvm.memcpy. The value is set by
  /// the target at the performance threshold for such a replacement.
  virtual unsigned getMaxGluedStoresPerMemcpy() const {
    return MaxGluedStoresPerMemcpy;
  }

  /// Get maximum # of load operations permitted for memcmp
  ///
  /// This function returns the maximum number of load operations permitted
  /// to replace a call to memcmp. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
    return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
  }

  /// Get maximum # of store operations permitted for llvm.memmove
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
  }
1449 /// Determine if the target supports unaligned memory accesses.
1451 /// This function returns true if the target allows unaligned memory accesses
1452 /// of the specified type in the given address space. If true, it also returns
1453 /// whether the unaligned memory access is "fast" in the last argument by
1454 /// reference. This is used, for example, in situations where an array
1455 /// copy/move/set is converted to a sequence of store operations. Its use
1456 /// helps to ensure that such replacements don't generate code that causes an
1457 /// alignment error (trap) on the target machine.
1458 virtual bool allowsMisalignedMemoryAccesses(
1459 EVT
, unsigned AddrSpace
= 0, unsigned Align
= 1,
1460 MachineMemOperand::Flags Flags
= MachineMemOperand::MONone
,
1461 bool * /*Fast*/ = nullptr) const {
1465 /// LLT handling variant.
1466 virtual bool allowsMisalignedMemoryAccesses(
1467 LLT
, unsigned AddrSpace
= 0, unsigned Align
= 1,
1468 MachineMemOperand::Flags Flags
= MachineMemOperand::MONone
,
1469 bool * /*Fast*/ = nullptr) const {
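  // A minimal override sketch (hypothetical target): unaligned scalar
  // accesses are fast everywhere except address space 3, and unaligned
  // vector accesses are rejected outright.
  //
  //   bool MyTargetLowering::allowsMisalignedMemoryAccesses(
  //       EVT VT, unsigned AddrSpace, unsigned Align,
  //       MachineMemOperand::Flags Flags, bool *Fast) const {
  //     if (VT.isVector() || AddrSpace == 3)
  //       return false;
  //     if (Fast)
  //       *Fast = true;
  //     return true;
  //   }
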
  /// Return true if the target supports a memory access of this type for the
  /// given address space and alignment. If the access is allowed, the optional
  /// final parameter returns if the access is also fast (as defined by the
  /// target).
  bool
  allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                     unsigned AddrSpace = 0, unsigned Alignment = 1,
                     MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
                     bool *Fast = nullptr) const;

  /// Return true if the target supports a memory access of this type for the
  /// given MachineMemOperand. If the access is allowed, the optional
  /// final parameter returns if the access is also fast (as defined by the
  /// target).
  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                          const MachineMemOperand &MMO,
                          bool *Fast = nullptr) const;

  /// Returns the target specific optimal type for load and store operations as
  /// a result of memset, memcpy, and memmove lowering.
  ///
  /// If DstAlign is zero, the destination can satisfy any alignment
  /// constraint. Similarly, if SrcAlign is zero, there isn't a need to check
  /// it against the alignment requirement, probably because the source does
  /// not need to be loaded. If 'IsMemset' is true, that means it's expanding a
  /// memset. If 'ZeroMemset' is true, that means it's a memset of zero.
  /// 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
  /// does not need to be loaded. It returns EVT::Other if the type should be
  /// determined using generic target-independent logic.
  virtual EVT
  getOptimalMemOpType(uint64_t /*Size*/, unsigned /*DstAlign*/,
                      unsigned /*SrcAlign*/, bool /*IsMemset*/,
                      bool /*ZeroMemset*/, bool /*MemcpyStrSrc*/,
                      const AttributeList & /*FuncAttributes*/) const {
    return MVT::Other;
  }

  /// LLT returning variant.
  virtual LLT
  getOptimalMemOpLLT(uint64_t /*Size*/, unsigned /*DstAlign*/,
                     unsigned /*SrcAlign*/, bool /*IsMemset*/,
                     bool /*ZeroMemset*/, bool /*MemcpyStrSrc*/,
                     const AttributeList & /*FuncAttributes*/) const {
    return LLT();
  }

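  // Illustrative override sketch (hypothetical target with 128-bit vector
  // stores); the cutoffs are made up:
  //
  //   EVT MyTargetLowering::getOptimalMemOpType(
  //       uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
  //       bool ZeroMemset, bool MemcpyStrSrc,
  //       const AttributeList &FuncAttributes) const {
  //     if (Size >= 16 && (DstAlign == 0 || DstAlign >= 16))
  //       return MVT::v4i32; // 128-bit ops when the destination permits
  //     return MVT::Other;   // otherwise defer to the generic logic
  //   }
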
  /// Returns true if it's safe to use load / store of the specified type to
  /// expand memcpy / memset inline.
  ///
  /// This is mostly true for all types except for some special cases. For
  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
  /// fstpl which also does type conversion. Note the specified type doesn't
  /// have to be legal as the hook is used before type legalization.
  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }

  /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// Return lower limit for number of blocks in a jump table.
  virtual unsigned getMinimumJumpTableEntries() const;

  /// Return lower limit of the density in a jump table.
  unsigned getMinimumJumpTableDensity(bool OptForSize) const;

  /// Return upper limit for number of entries in a jump table.
  /// Zero if no limit.
  unsigned getMaximumJumpTableSize() const;

  virtual bool isJumpTableRelative() const {
    return TM.isPositionIndependent();
  }

  /// If a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  virtual unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const {
    // 0 is guaranteed to be the NoRegister value on all targets
    return 0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  virtual unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const {
    // 0 is guaranteed to be the NoRegister value on all targets
    return 0;
  }

  virtual bool needsFixedCatchObjects() const {
    report_fatal_error("Funclet EH is not implemented for this target");
  }

  /// Return the minimum stack alignment of an argument.
  unsigned getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment.value();
  }

  /// Return the minimum function alignment.
  unsigned getMinFunctionLogAlignment() const {
    return Log2(MinFunctionAlignment);
  }

  /// Return the preferred function alignment.
  unsigned getPrefFunctionLogAlignment() const {
    return Log2(PrefFunctionAlignment);
  }

  /// Return the preferred loop alignment.
  virtual unsigned getPrefLoopLogAlignment(MachineLoop *ML = nullptr) const {
    return Log2(PrefLoopAlignment);
  }

  /// Should loops be aligned even when the function is marked OptSize (but not
  /// MinSize).
  virtual bool alignLoopsWithOptSize() const {
    return false;
  }

  /// If the target has a standard location for the stack protector guard,
  /// returns the address of that location. Otherwise, returns nullptr.
  /// DEPRECATED: please override useLoadStackGuardNode and customize
  /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
  virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;

  /// Inserts necessary declarations for SSP (stack protection) purpose.
  /// Should be used only when getIRStackGuard returns nullptr.
  virtual void insertSSPDeclarations(Module &M) const;

  /// Return the variable that's previously inserted by insertSSPDeclarations,
  /// if any, otherwise return nullptr. Should be used only when
  /// getIRStackGuard returns nullptr.
  virtual Value *getSDagStackGuard(const Module &M) const;

  /// If this function returns true, stack protection checks should XOR the
  /// frame pointer (or whichever pointer is used to address locals) into the
  /// stack guard value before checking it. getIRStackGuard must return nullptr
  /// if this returns true.
  virtual bool useStackGuardXorFP() const { return false; }

  /// If the target has a standard stack protection check function that
  /// performs validation and error handling, returns the function. Otherwise,
  /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
  /// Should be used only when getIRStackGuard returns nullptr.
  virtual Function *getSSPStackGuardCheck(const Module &M) const;

protected:
  Value *getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
                                            bool UseTLS) const;

public:
  /// Returns the target-specific address of the unsafe stack pointer.
  virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;

  /// Returns the name of the symbol used to emit stack probes or the empty
  /// string if not applicable.
  virtual StringRef getStackProbeSymbolName(MachineFunction &MF) const {
    return "";
  }

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
    return false;
  }

  /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
  /// are happy to sink it into basic blocks. A cast may be free, but not
  /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
  virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
    return isNoopAddrSpaceCast(SrcAS, DestAS);
  }

  /// Return true if the pointer arguments to CI should be aligned by aligning
  /// the object whose address is being passed. If so then MinSize is set to the
  /// minimum size the object must be to be aligned and PrefAlign is set to the
  /// preferred alignment.
  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
                                      unsigned & /*PrefAlign*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// \name Helpers for TargetTransformInfo implementations
  /// @{

  /// Get the ISD node that corresponds to the Instruction class opcode.
  int InstructionOpcodeToISD(unsigned Opcode) const;

  /// Estimate the cost of type-legalization and the legalized type.
  std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
                                              Type *Ty) const;

  /// @}

  //===--------------------------------------------------------------------===//
  /// \name Helpers for atomic expansion.
  /// @{

  /// Returns the maximum atomic operation size (in bits) supported by
  /// the backend. Atomic operations greater than this size (as well
  /// as ones that are not naturally aligned), will be expanded by
  /// AtomicExpandPass into an __atomic_* library call.
  unsigned getMaxAtomicSizeInBitsSupported() const {
    return MaxAtomicSizeInBitsSupported;
  }

  /// Returns the size of the smallest cmpxchg or ll/sc instruction
  /// the backend supports. Any smaller operations are widened in
  /// AtomicExpandPass.
  ///
  /// Note that *unlike* operations above the maximum size, atomic ops
  /// are still natively supported below the minimum; they just
  /// require a more complex expansion.
  unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }

  /// Whether the target supports unaligned atomic operations.
  bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }

  /// Whether AtomicExpandPass should automatically insert fences and reduce
  /// ordering for this atomic. This should be true for most architectures with
  /// weak memory ordering. Defaults to false.
  virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
    return false;
  }

  /// Perform a load-linked operation on Addr, returning a "Value *" with the
  /// corresponding pointee type. This may entail some non-trivial operations to
  /// truncate or reconstruct types that will be illegal in the backend. See
  /// ARMISelLowering for an example implementation.
  virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                AtomicOrdering Ord) const {
    llvm_unreachable("Load linked unimplemented on this target");
  }

  /// Perform a store-conditional operation to Addr. Return the status of the
  /// store. This should be 0 if the store succeeded, non-zero otherwise.
  virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                      Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Store conditional unimplemented on this target");
  }

  /// Perform a masked atomicrmw using a target-specific intrinsic. This
  /// represents the core LL/SC loop which will be lowered at a late stage by
  /// the backend.
  virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder,
                                              AtomicRMWInst *AI,
                                              Value *AlignedAddr, Value *Incr,
                                              Value *Mask, Value *ShiftAmt,
                                              AtomicOrdering Ord) const {
    llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
  }

  /// Perform a masked cmpxchg using a target-specific intrinsic. This
  /// represents the core LL/SC loop which will be lowered at a late stage by
  /// the backend.
  virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
      IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
      Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
    llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
  }

  /// Inserts in the IR a target-specific intrinsic specifying a fence.
  /// It is called by AtomicExpandPass before expanding an
  ///   AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
  /// if shouldInsertFencesForAtomic returns true.
  ///
  /// Inst is the original atomic instruction, prior to other expansions that
  /// may be performed.
  ///
  /// This function should either return a nullptr, or a pointer to an IR-level
  ///   Instruction*. Even complex fence sequences can be represented by a
  ///   single Instruction* through an intrinsic to be lowered later.
  /// Backends should override this method to produce target-specific intrinsic
  ///   for their fences.
  /// FIXME: Please note that the default implementation here in terms of
  ///   IR-level fences exists for historical/compatibility reasons and is
  ///   *unsound* ! Fences cannot, in general, be used to restore sequential
  ///   consistency. For example, consider the following example:
  /// atomic<int> x = y = 0;
  /// int r1, r2, r3, r4;
  /// Thread 0:
  ///   x.store(1);
  /// Thread 1:
  ///   y.store(1);
  /// Thread 2:
  ///   r1 = x.load();
  ///   r2 = y.load();
  /// Thread 3:
  ///   r3 = y.load();
  ///   r4 = x.load();
  ///  r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
  ///  seq_cst. But if they are lowered to monotonic accesses, no amount of
  ///  IR-level fences can prevent it.
  /// @{
  virtual Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
                                        AtomicOrdering Ord) const {
    if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
      return Builder.CreateFence(Ord);
    else
      return nullptr;
  }

  virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
                                         Instruction *Inst,
                                         AtomicOrdering Ord) const {
    if (isAcquireOrStronger(Ord))
      return Builder.CreateFence(Ord);
    else
      return nullptr;
  }
  /// @}

  // Emits code that executes when the comparison result in the ll/sc
  // expansion of a cmpxchg instruction is such that the store-conditional will
  // not execute. This makes it possible to balance out the load-linked with
  // a dedicated instruction, if desired.
  // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
  // be unnecessarily held, except if clrex, inserted by this hook, is executed.
  virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}

  /// Returns true if the given (atomic) store should be expanded by the
  /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
  virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
    return false;
  }

  /// Returns true if arguments should be sign-extended in lib calls.
  virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
    return IsSigned;
  }

  /// Returns true if arguments should be extended in lib calls.
  virtual bool shouldExtendTypeInLibCall(EVT Type) const {
    return true;
  }

  /// Returns how the given (atomic) load should be expanded by the
  /// IR-level AtomicExpand pass.
  virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
    return AtomicExpansionKind::None;
  }

  /// Returns how the given atomic cmpxchg should be expanded by the IR-level
  /// AtomicExpand pass.
  virtual AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
    return AtomicExpansionKind::None;
  }

  /// Returns how the IR-level AtomicExpand pass should expand the given
  /// AtomicRMW, if at all. Default is to never expand.
  virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
    return RMW->isFloatingPointOperation() ?
      AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
  }

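  // Illustrative override sketch (hypothetical LL/SC target): expand every
  // RMW up to the native 64-bit width into a load-linked/store-conditional
  // loop, and leave anything wider alone.
  //
  //   TargetLoweringBase::AtomicExpansionKind
  //   MyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  //     unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  //     return Size <= 64 ? AtomicExpansionKind::LLSC
  //                       : AtomicExpansionKind::None;
  //   }
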
  /// On some platforms, an AtomicRMW that never actually modifies the value
  /// (such as fetch_add of 0) can be turned into a fence followed by an
  /// atomic load. This may sound useless, but it makes it possible for the
  /// processor to keep the cacheline shared, dramatically improving
  /// performance. And such idempotent RMWs are useful for implementing some
  /// kinds of locks, see for example (justification + benchmarks):
  /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
  /// This method tries doing that transformation, returning the atomic load if
  /// it succeeds, and nullptr otherwise.
  /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
  /// another round of expansion.
  virtual LoadInst *
  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
    return nullptr;
  }

  /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
  /// SIGN_EXTEND, or ANY_EXTEND).
  virtual ISD::NodeType getExtendForAtomicOps() const {
    return ISD::ZERO_EXTEND;
  }

  /// @}

  /// Returns true if we should normalize
  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
  /// that it saves us from materializing N0 and N1 in an integer register.
  /// Targets that are able to perform and/or on flags should return false here.
  virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
                                               EVT VT) const {
    // If a target has multiple condition registers, then it likely has logical
    // operations on those registers.
    if (hasMultipleConditionRegisters())
      return false;
    // Only do the transform if the value won't be split into multiple
    // registers.
    LegalizeTypeAction Action = getTypeAction(Context, VT);
    return Action != TypeExpandInteger && Action != TypeExpandFloat &&
           Action != TypeSplitVector;
  }

  virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }

  /// Return true if a select of constants (select Cond, C1, C2) should be
  /// transformed into simple math ops with the condition value. For example:
  /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
  virtual bool convertSelectOfConstantsToMath(EVT VT) const {
    return false;
  }

  /// Return true if it is profitable to transform an integer
  /// multiplication-by-constant into simpler operations like shifts and adds.
  /// This may be true if the target does not directly support the
  /// multiplication operation for the specified type or the sequence of simpler
  /// ops is faster than the multiply.
  virtual bool decomposeMulByConstant(LLVMContext &Context,
                                      EVT VT, SDValue C) const {
    return false;
  }

  /// Return true if it is more correct/profitable to use strict FP_TO_INT
  /// conversion operations - canonicalizing the FP source value instead of
  /// converting all cases and then selecting based on value.
  /// This may be true if the target throws exceptions for out of bounds
  /// conversions or has fast FP CMOV.
  virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
                                        bool IsSigned) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //===--------------------------------------------------------------------===//
protected:
  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type. See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) {
    BooleanContents = Ty;
    BooleanFloatContents = Ty;
  }

  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type. See getBooleanContents.
  void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
    BooleanContents = IntTy;
    BooleanFloatContents = FloatTy;
  }

  /// Specify how the target extends the result of a vector boolean value from a
  /// vector of i1 to a wider type. See getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }

  /// Specify the target scheduling preference.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// Indicate whether this target prefers to use _setjmp to implement
  /// llvm.setjmp or the version without _. Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }

  /// Indicate whether this target prefers to use _longjmp to implement
  /// llvm.longjmp or the version without _. Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }

  /// Indicate the minimum number of blocks to generate jump tables.
  void setMinimumJumpTableEntries(unsigned Val);

  /// Indicate the maximum number of entries in jump tables.
  /// Set to zero to generate unlimited jump tables.
  void setMaximumJumpTableSize(unsigned);

  /// If set to a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// Tells the code generator that the target has multiple (allocatable)
  /// condition registers that can be used to store the results of comparisons
  /// for use by selects and conditional branches. With multiple condition
  /// registers, the code generator will not aggressively sink comparisons into
  /// the blocks of their users.
  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
    HasMultipleConditionRegisters = hasManyRegs;
  }

  /// Tells the code generator that the target has BitExtract instructions.
  /// The code generator will aggressively sink "shift"s into the blocks of
  /// their users if the users will generate "and" instructions which can be
  /// combined with "shift" to BitExtract instructions.
  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
    HasExtractBitsInsn = hasExtractInsn;
  }

  /// Tells the code generator not to expand logic operations on comparison
  /// predicates into separate sequences that increase the amount of flow
  /// control.
  void setJumpIsExpensive(bool isExpensive = true);

  /// Tells the code generator which bitwidths to bypass.
  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }

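  // For example, a target whose 64-bit divider is microcoded and slow can
  // request a 32-bit bypass in its constructor:
  //
  //   addBypassSlowDiv(64, 32); // use 32-bit div/rem when the operands fit
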
  /// Add the specified register class as an available regclass for the
  /// specified value type. This indicates the selector can handle values of
  /// that class natively.
  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
    RegClassForVT[VT.SimpleTy] = RC;
  }

  /// Return the largest legal super-reg register class of the register class
  /// for the specified type and its associated "cost".
  virtual std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;

  /// Once all of the register classes are added, this allows us to compute
  /// derived properties we expose.
  void computeRegisterProperties(const TargetRegisterInfo *TRI);

  /// Indicate that the specified operation does not work with the specified
  /// type and indicate what to do about it. Note that VT may refer to either
  /// the type of a result or that of an operand of Op.
  void setOperationAction(unsigned Op, MVT VT,
                          LegalizeAction Action) {
    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = Action;
  }

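  // Typical constructor usage (illustrative):
  //
  //   setOperationAction(ISD::SDIV,   MVT::i32, Expand); // no native divide
  //   setOperationAction(ISD::FSIN,   MVT::f64, Expand); // ends up a libcall
  //   setOperationAction(ISD::SELECT, MVT::i32, Custom); // see LowerOperation
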
  /// Indicate that the specified load with extension does not work with the
  /// specified type and indicate what to do about it.
  void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
           MemVT.isValid() && "Table isn't big enough!");
    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
    unsigned Shift = 4 * ExtType;
    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
  }

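  // For example, a target without native extending loads from i1 can promote
  // them for every integer result type (illustrative):
  //
  //   for (MVT VT : MVT::integer_valuetypes()) {
  //     setLoadExtAction(ISD::EXTLOAD,  VT, MVT::i1, Promote);
  //     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
  //     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
  //   }
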
  /// Indicate that the specified truncating store does not work with the
  /// specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT,
                           LegalizeAction Action) {
    assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
    TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
  }

  /// Indicate that the specified indexed load does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
                            LegalizeAction Action) {
    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Load actions are kept in the upper half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) << 4;
  }

  /// Indicate that the specified indexed store does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
                             LegalizeAction Action) {
    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Store actions are kept in the lower half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
  }

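  // For instance, a target with pre-increment addressing on 32-bit integers
  // would mark both directions legal for that type (illustrative):
  //
  //   setIndexedLoadAction(ISD::PRE_INC,  MVT::i32, Legal);
  //   setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
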
  /// Indicate that the specified condition code is or isn't supported on the
  /// target and indicate what to do about it.
  void setCondCodeAction(ISD::CondCode CC, MVT VT,
                         LegalizeAction Action) {
    assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
           "Table isn't big enough!");
    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
    // The lower 3 bits of the SimpleTy index into the Nth 4-bit set from the
    // 32-bit value and the upper 29 bits index into the second dimension of
    // the array to select what 32-bit value to use.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
    CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
  }

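  // For example, a target lacking an unordered floating-point compare can ask
  // for the SETUO predicate to be expanded in terms of other predicates
  // (illustrative):
  //
  //   setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
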
  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
  /// to trying a larger integer/fp until it can find one that works. If that
  /// default is insufficient, this method can be used by the target to override
  /// the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
  }

  /// Convenience method to set an operation to Promote and specify the type
  /// in a single call.
  void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    setOperationAction(Opc, OrigVT, Promote);
    AddPromotedToType(Opc, OrigVT, DestVT);
  }

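  // For example, a target that implements vector bitwise operations only on
  // 64-bit lanes can promote the narrower-lane forms (illustrative):
  //
  //   setOperationPromotedToType(ISD::AND, MVT::v4i32, MVT::v2i64);
  //   setOperationPromotedToType(ISD::OR,  MVT::v4i32, MVT::v2i64);
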
  /// Targets should invoke this method for each target independent node that
  /// they want to provide a custom DAG combiner for by implementing the
  /// PerformDAGCombine virtual method.
  void setTargetDAGCombine(ISD::NodeType NT) {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7);
  }

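  // Typical usage (illustrative), paired with a PerformDAGCombine override
  // in the derived TargetLowering that handles the requested nodes:
  //
  //   setTargetDAGCombine(ISD::SIGN_EXTEND);
  //   setTargetDAGCombine(ISD::SHL);
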
  /// Set the target's minimum function alignment.
  void setMinFunctionAlignment(llvm::Align Align) {
    MinFunctionAlignment = Align;
  }

  /// Set the target's preferred function alignment. This should be set if
  /// there is a performance benefit to higher-than-minimum alignment (in
  /// log2(bytes)).
  void setPrefFunctionLogAlignment(unsigned LogAlign) {
    PrefFunctionAlignment = llvm::Align(1ULL << LogAlign);
  }

  /// Set the target's preferred loop alignment. The default alignment is zero,
  /// which means the target does not care about loop alignment. The alignment
  /// is specified in log2(bytes). The target may also override
  /// getPrefLoopLogAlignment to provide per-loop values.
  void setPrefLoopLogAlignment(unsigned LogAlign) {
    PrefLoopAlignment = llvm::Align(1ULL << LogAlign);
  }

  /// Set the minimum stack alignment of an argument.
  void setMinStackArgumentAlignment(unsigned Align) {
    MinStackArgumentAlignment = llvm::Align(Align);
  }

  /// Set the maximum atomic operation size supported by the
  /// backend. Atomic operations greater than this size (as well as
  /// ones that are not naturally aligned), will be expanded by
  /// AtomicExpandPass into an __atomic_* library call.
  void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
    MaxAtomicSizeInBitsSupported = SizeInBits;
  }

  /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
  void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
    MinCmpXchgSizeInBits = SizeInBits;
  }

  /// Sets whether unaligned atomic operations are supported.
  void setSupportsUnalignedAtomics(bool UnalignedSupported) {
    SupportsUnalignedAtomics = UnalignedSupported;
  }

  //===--------------------------------------------------------------------===//
  // Addressing mode description hooks (used by LSR etc).
  //

  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
  /// instructions reading the address. This allows as much computation as
  /// possible to be done in the address mode for that operand. This hook lets
  /// targets also pass back when this should be done on intrinsics which
  /// load/store.
  virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
                                    SmallVectorImpl<Value *> &/*Ops*/,
                                    Type *&/*AccessTy*/) const {
    return false;
  }

  /// This represents an addressing mode of:
  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  /// If BaseGV is null, there is no BaseGV.
  /// If BaseOffs is zero, there is no base offset.
  /// If HasBaseReg is false, there is no base register.
  /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
  /// no scale.
  struct AddrMode {
    GlobalValue *BaseGV = nullptr;
    int64_t BaseOffs = 0;
    bool HasBaseReg = false;
    int64_t Scale = 0;
    AddrMode() = default;
  };

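  // For example, the x86 memory operand "4(%rsi,%rcx,8)", i.e.
  // base + 4 + index*8, corresponds to:
  //
  //   AddrMode AM;
  //   AM.BaseGV = nullptr;  // no global base
  //   AM.BaseOffs = 4;
  //   AM.HasBaseReg = true; // %rsi
  //   AM.Scale = 8;         // %rcx scaled by 8
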
  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  ///
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type. TODO: Handle
  /// pre/postinc as well.
  ///
  /// If the address space cannot be determined, it will be -1.
  ///
  /// TODO: Remove default argument
  virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                                     Type *Ty, unsigned AddrSpace,
                                     Instruction *I = nullptr) const;

  /// Return the cost of the scaling factor used in the addressing mode
  /// represented by AM for this target, for a load/store of the specified type.
  ///
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  /// TODO: Handle pre/postinc as well.
  /// TODO: Remove default argument
  virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
                                   Type *Ty, unsigned AS = 0) const {
    // Default: assume that any scaling factor used in a legal AM is free.
    if (isLegalAddressingMode(DL, AM, Ty, AS))
      return 0;
    return -1;
  }

  /// Return true if the specified immediate is legal icmp immediate, that is
  /// the target has icmp instructions which can compare a register against the
  /// immediate without having to materialize the immediate into a register.
  virtual bool isLegalICmpImmediate(int64_t) const {
    return true;
  }

  /// Return true if the specified immediate is legal add immediate, that is the
  /// target has add instructions which can add a register with the immediate
  /// without having to materialize the immediate into a register.
  virtual bool isLegalAddImmediate(int64_t) const {
    return true;
  }

  /// Return true if the specified immediate is legal for the value input of a
  /// store instruction.
  virtual bool isLegalStoreImmediate(int64_t Value) const {
    // Default implementation assumes that at least 0 works since it is likely
    // that a zero register exists or a zero immediate is allowed.
    return Value == 0;
  }

  /// Return true if it's significantly cheaper to shift a vector by a uniform
  /// scalar than by an amount which will vary across each lane. On x86, for
  /// example, there is a "psllw" instruction for the former case, but no simple
  /// instruction for a general "a << b" operation on vectors.
  virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
    return false;
  }

  /// Returns true if the opcode is a commutative binary operation.
  virtual bool isCommutativeBinOp(unsigned Opcode) const {
    // FIXME: This should get its info from the td file.
    switch (Opcode) {
    case ISD::ADD:
    case ISD::SMIN:
    case ISD::SMAX:
    case ISD::UMIN:
    case ISD::UMAX:
    case ISD::MUL:
    case ISD::MULHU:
    case ISD::MULHS:
    case ISD::SMUL_LOHI:
    case ISD::UMUL_LOHI:
    case ISD::FADD:
    case ISD::FMUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SADDO:
    case ISD::UADDO:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::SADDSAT:
    case ISD::UADDSAT:
    case ISD::FMINNUM:
    case ISD::FMAXNUM:
    case ISD::FMINNUM_IEEE:
    case ISD::FMAXNUM_IEEE:
    case ISD::FMAXIMUM:
    case ISD::FMINIMUM:
      return true;
    default: return false;
    }
  }

  /// Return true if the node is a math/logic binary operator.
  virtual bool isBinOp(unsigned Opcode) const {
    // A commutative binop must be a binop.
    if (isCommutativeBinOp(Opcode))
      return true;
    // These are non-commutative binops.
    switch (Opcode) {
    case ISD::SUB:
    case ISD::SHL:
    case ISD::SRL:
    case ISD::SRA:
    case ISD::SDIV:
    case ISD::UDIV:
    case ISD::SREM:
    case ISD::UREM:
    case ISD::FSUB:
    case ISD::FDIV:
    case ISD::FREM:
      return true;
    default:
      return false;
    }
  }

  /// Return true if it's free to truncate a value of type FromTy to type
  /// ToTy. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
  /// by referencing its sub-register AX.
  /// Targets must return false when FromTy <= ToTy.
  virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
    return false;
  }

  /// Return true if a truncation from FromTy to ToTy is permitted when deciding
  /// whether a call is in tail position. Typically this means that both results
  /// would be assigned to the same register or stack slot, but it could mean
  /// the target performs adequate checks of its own before proceeding with the
  /// tail call. Targets must return false when FromTy <= ToTy.
  virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
    return false;
  }

  virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
    return false;
  }

  virtual bool isProfitableToHoist(Instruction *I) const { return true; }

  /// Return true if the extension represented by \p I is free.
  /// Unlike the is[Z|FP]ExtFree family, which is based on types,
  /// this method can use the context provided by \p I to decide
  /// whether or not \p I is free.
  /// This method extends the behavior of the is[Z|FP]ExtFree family.
  /// In other words, if is[Z|FP]ExtFree returns true, then this method
  /// returns true as well. The converse is not true.
  /// The target can perform the adequate checks by overriding isExtFreeImpl.
  /// \pre \p I must be a sign, zero, or fp extension.
  bool isExtFree(const Instruction *I) const {
    switch (I->getOpcode()) {
    case Instruction::FPExt:
      if (isFPExtFree(EVT::getEVT(I->getType()),
                      EVT::getEVT(I->getOperand(0)->getType())))
        return true;
      break;
    case Instruction::ZExt:
      if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
        return true;
      break;
    case Instruction::SExt:
      break;
    default:
      llvm_unreachable("Instruction is not an extension");
    }
    return isExtFreeImpl(I);
  }

  /// Return true if \p Load and \p Ext can form an ExtLoad.
  /// For example, in AArch64
  ///   %L = load i8, i8* %ptr
  ///   %E = zext i8 %L to i32
  /// can be lowered into one load instruction
  ///   ldrb w0, [x0]
  bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
                 const DataLayout &DL) const {
    EVT VT = getValueType(DL, Ext->getType());
    EVT LoadVT = getValueType(DL, Load->getType());

    // If the load has other users and the truncate is not free, the ext
    // probably isn't free.
    if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
        !isTruncateFree(Ext->getType(), Load->getType()))
      return false;

    // Check whether the target supports casts folded into loads.
    unsigned LType;
    if (isa<ZExtInst>(Ext))
      LType = ISD::ZEXTLOAD;
    else {
      assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
      LType = ISD::SEXTLOAD;
    }

    return isLoadExtLegal(LType, VT, LoadVT);
  }

  /// Return true if any actual instruction that defines a value of type FromTy
  /// implicitly zero-extends the value to ToTy in the result register.
  ///
  /// The function should return true when it is likely that the truncate can
  /// be freely folded with an instruction defining a value of FromTy. If
  /// the defining instruction is unknown (because you're looking at a
  /// function argument, PHI, etc.) then the target may require an
  /// explicit truncate, which is not necessarily free, but this function
  /// does not deal with those cases.
  /// Targets must return false when FromTy >= ToTy.
  virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
    return false;
  }

  virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
    return false;
  }

  /// Return true if sign-extension from FromTy to ToTy is cheaper than
  /// zero-extension.
  virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
    return false;
  }

  /// Return true if sinking I's operands to the same basic block as I is
  /// profitable, e.g. because the operands can be folded into a target
  /// instruction during instruction selection. After calling the function
  /// \p Ops contains the Uses to sink ordered by dominance (dominating users
  /// first).
  virtual bool shouldSinkOperands(Instruction *I,
                                  SmallVectorImpl<Use *> &Ops) const {
    return false;
  }

  /// Return true if the target supplies and combines to a paired load
  /// two loaded values of type LoadedType next to each other in memory.
  /// RequiredAlignment gives the minimal alignment constraints that must be met
  /// to be able to select this paired load.
  ///
  /// This information is *not* used to generate actual paired loads, but it is
  /// used to generate a sequence of loads that is easier to combine into a
  /// paired load.
  /// For instance, something like this:
  ///  a = load i64* addr
  ///  b = trunc i64 a to i32
  ///  c = lshr i64 a, 32
  ///  d = trunc i64 c to i32
  /// will be optimized into:
  ///  b = load i32* addr1
  ///  d = load i32* addr2
  /// Where addr1 = addr2 +/- sizeof(i32).
  ///
  /// In other words, unless the target performs a post-isel load combining,
  /// this information should not be provided because it will generate more
  /// loads.
  virtual bool hasPairedLoad(EVT /*LoadedType*/,
                             unsigned & /*RequiredAlignment*/) const {
    return false;
  }

  /// Return true if the target has a vector blend instruction.
  virtual bool hasVectorBlend() const { return false; }

  /// Get the maximum supported factor for interleaved memory accesses.
  /// Default to be the minimum interleave factor: 2.
  virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }

  /// Lower an interleaved load to target specific intrinsics. Return
  /// true on success.
  ///
  /// \p LI is the vector load instruction.
  /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
  /// \p Indices is the corresponding indices for each shufflevector.
  /// \p Factor is the interleave factor.
  virtual bool lowerInterleavedLoad(LoadInst *LI,
                                    ArrayRef<ShuffleVectorInst *> Shuffles,
                                    ArrayRef<unsigned> Indices,
                                    unsigned Factor) const {
    return false;
  }

  /// Lower an interleaved store to target specific intrinsics. Return
  /// true on success.
  ///
  /// \p SI is the vector store instruction.
  /// \p SVI is the shufflevector to RE-interleave the stored vector.
  /// \p Factor is the interleave factor.
  virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                                     unsigned Factor) const {
    return false;
  }

  /// Return true if zero-extending the specific node Val to type VT2 is free
  /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
  /// because it's folded such as X86 zero-extending loads).
  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
    return isZExtFree(Val.getValueType(), VT2);
  }

  /// Return true if an fpext operation is free (for instance, because
  /// single-precision floating-point numbers are implicitly extended to
  /// double-precision).
  virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
    assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
           "invalid fpext types");
    return false;
  }

  /// Return true if an fpext operation input to an \p Opcode operation is free
  /// (for instance, because half-precision floating-point numbers are
  /// implicitly extended to float-precision) for an FMA instruction.
  virtual bool isFPExtFoldable(unsigned Opcode, EVT DestVT, EVT SrcVT) const {
    assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
           "invalid fpext types");
    return isFPExtFree(DestVT, SrcVT);
  }

  /// Return true if folding a vector load into ExtVal (a sign, zero, or any
  /// extend node) is profitable.
  virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }

  /// Return true if an fneg operation is free to the point where it is never
  /// worthwhile to replace it with a bitwise operation.
  virtual bool isFNegFree(EVT VT) const {
    assert(VT.isFloatingPoint());
    return false;
  }

  /// Return true if an fabs operation is free to the point where it is never
  /// worthwhile to replace it with a bitwise operation.
  virtual bool isFAbsFree(EVT VT) const {
    assert(VT.isFloatingPoint());
    return false;
  }

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  ///
  /// NOTE: This may be called before legalization on types for which FMAs are
  /// not legal, but should return true if those types will eventually legalize
  /// to types that support FMAs. After legalization, it will only be called on
  /// types that support FMAs (via Legal or Custom actions)
  virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
    return false;
  }

  /// Return true if it's profitable to narrow operations of type VT1 to
  /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
  /// i32 to i16.
  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }

  /// Return true if it is beneficial to convert a load of a constant to
  /// just the constant itself.
  /// On some targets it might be more efficient to use a combination of
  /// arithmetic instructions to materialize the constant instead of loading it
  /// from a constant pool.
  virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                 Type *Ty) const {
    return false;
  }

  /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
  /// from this source type with this index. This is needed because
  /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
  /// the first element, and only the target knows which lowering is cheap.
  virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                       unsigned Index) const {
    return false;
  }

  /// Try to convert an extract element of a vector binary operation into an
  /// extract element followed by a scalar operation.
  virtual bool shouldScalarizeBinop(SDValue VecOp) const {
    return false;
  }

  /// Return true if extraction of a scalar element from the given vector type
  /// at the given index is cheap. For example, if scalar operations occur on
  /// the same register file as vector operations, then an extract element may
  /// be a sub-register rename rather than an actual instruction.
  virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
    return false;
  }

  /// Try to convert math with an overflow comparison into the corresponding DAG
  /// node operation. Targets may want to override this independently of whether
  /// the operation is legal/custom for the given type because it may obscure
  /// matching of other patterns.
  virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT) const {
    // TODO: The default logic is inherited from code in CodeGenPrepare.
    // The opcode should not make a difference by default?
    if (Opcode != ISD::UADDO)
      return false;

    // Allow the transform as long as we have an integer type that is not
    // obviously illegal and unsupported.
    if (VT.isVector())
      return false;
    return VT.isSimple() || !isOperationExpand(Opcode, VT);
  }

  // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
  // even if the vector itself has multiple uses.
  virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
    return false;
  }

  // Return true if CodeGenPrepare should consider splitting large offset of a
  // GEP to make the GEP fit into the addressing mode and can be sunk into the
  // same blocks of its users.
  virtual bool shouldConsiderGEPOffsetSplit() const { return false; }

  //===--------------------------------------------------------------------===//
  // Runtime Library hooks
  //

  /// Rename the default libcall routine name for the specified libcall.
  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
    LibcallRoutineNames[Call] = Name;
  }

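  // For example, a platform whose runtime uses a nonstandard name for the
  // 128-bit signed-divide helper could install it here (the replacement name
  // is hypothetical):
  //
  //   setLibcallName(RTLIB::SDIV_I128, "__mytarget_divti3");
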
  /// Get the libcall routine name for the specified libcall.
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return LibcallRoutineNames[Call];
  }

  /// Override the default CondCode to be used to test the result of the
  /// comparison libcall against zero.
  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
    CmpLibcallCCs[Call] = CC;
  }

  /// Get the CondCode that's to be used to test the result of the comparison
  /// libcall against zero.
  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
    return CmpLibcallCCs[Call];
  }

  /// Set the CallingConv that should be used for the specified libcall.
  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
    LibcallCallingConvs[Call] = CC;
  }

  /// Get the CallingConv that should be used for the specified libcall.
  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
    return LibcallCallingConvs[Call];
  }

  /// Execute target specific actions to finalize target lowering.
  /// This is used to set extra flags in MachineFrameInformation and freezing
  /// the set of reserved registers.
  /// The default implementation just freezes the set of reserved registers.
  virtual void finalizeLowering(MachineFunction &MF) const;

private:
  const TargetMachine &TM;

  /// Tells the code generator that the target has multiple (allocatable)
  /// condition registers that can be used to store the results of comparisons
  /// for use by selects and conditional branches. With multiple condition
  /// registers, the code generator will not aggressively sink comparisons into
  /// the blocks of their users.
  bool HasMultipleConditionRegisters;

  /// Tells the code generator that the target has BitExtract instructions.
  /// The code generator will aggressively sink "shift"s into the blocks of
  /// their users if the users will generate "and" instructions which can be
  /// combined with "shift" to BitExtract instructions.
  bool HasExtractBitsInsn;

  /// Tells the code generator to bypass slow divide or remainder
  /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
  /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
  /// div/rem when the operands are positive and less than 256.
  DenseMap<unsigned int, unsigned int> BypassSlowDivWidths;

  /// Tells the code generator that it shouldn't generate extra flow control
  /// instructions and should attempt to combine flow control instructions via
  /// predication.
  bool JumpIsExpensive;

  /// This target prefers to use _setjmp to implement llvm.setjmp.
  ///
  /// Defaults to false.
  bool UseUnderscoreSetJmp;

  /// This target prefers to use _longjmp to implement llvm.longjmp.
  ///
  /// Defaults to false.
  bool UseUnderscoreLongJmp;

  /// Information about the contents of the high-bits in boolean values held in
  /// a type wider than i1. See getBooleanContents.
  BooleanContent BooleanContents;

  /// Information about the contents of the high-bits in boolean values held in
  /// a type wider than i1. See getBooleanContents.
  BooleanContent BooleanFloatContents;

  /// Information about the contents of the high-bits in boolean vector values
  /// when the element type is wider than i1. See getBooleanContents.
  BooleanContent BooleanVectorContents;

  /// The target scheduling preference: shortest possible total cycles or lowest
  /// register usage.
  Sched::Preference SchedPreferenceInfo;

  /// The minimum alignment that any argument on the stack needs to have.
  llvm::Align MinStackArgumentAlignment;

  /// The minimum function alignment (used when optimizing for size, and to
  /// prevent explicitly provided alignment from leading to incorrect code).
  llvm::Align MinFunctionAlignment;

  /// The preferred function alignment (used when alignment unspecified and
  /// optimizing for speed).
  llvm::Align PrefFunctionAlignment;

  /// The preferred loop alignment.
  llvm::Align PrefLoopAlignment;

  /// Size in bits of the maximum atomics size the backend supports.
  /// Accesses larger than this will be expanded by AtomicExpandPass.
  unsigned MaxAtomicSizeInBitsSupported;

  /// Size in bits of the minimum cmpxchg or ll/sc operation the
  /// backend supports.
  unsigned MinCmpXchgSizeInBits;

  /// This indicates if the target supports unaligned atomic operations.
  bool SupportsUnalignedAtomics;

  /// If set to a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  unsigned StackPointerRegisterToSaveRestore;

  /// This indicates the default register class to use for each ValueType the
  /// target supports natively.
  const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
  MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];

  /// This indicates the "representative" register class to use for each
  /// ValueType the target supports natively. This information is used by the
  /// scheduler to track register pressure. By default, the representative
  /// register class is the largest legal super-reg register class of the
  /// register class of the specified type. e.g. On x86, i8, i16, and i32's
  /// representative class would be GR32.
  const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];

  /// This indicates the "cost" of the "representative" register class for each
  /// ValueType. The cost is used by the scheduler to approximate register
  /// pressure.
  uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];

  /// For any value types we are promoting or expanding, this contains the value
  /// type that we are changing to. For Expanded types, this contains one step
  /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
  /// (e.g. i64 -> i16). For types natively supported by the system, this holds
  /// the same type (e.g. i32 -> i32).
  MVT TransformToType[MVT::LAST_VALUETYPE];

  /// For each operation and each value type, keep a LegalizeAction that
  /// indicates how instruction selection should deal with the operation. Most
  /// operations are Legal (aka, supported natively by the target), but
  /// operations that are not should be described. Note that operations on
  /// non-legal value types are not described here.
  LegalizeAction OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];

  /// For each load extension type and each value type, keep a LegalizeAction
  /// that indicates how instruction selection should deal with a load of a
  /// specific value type and extension type. Uses 4-bits to store the action
  /// for each of the 4 load ext types.
  uint16_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];

  /// For each value type pair keep a LegalizeAction that indicates whether a
  /// truncating store of a specific value type and truncating type is legal.
  LegalizeAction TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];

  /// For each indexed mode and each value type, keep a pair of LegalizeAction
  /// that indicates how instruction selection should deal with the load /
  /// store.
  ///
  /// The first dimension is the value_type for the reference. The second
  /// dimension represents the various modes for load store.
  uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];

  /// For each condition code (ISD::CondCode) keep a LegalizeAction that
  /// indicates how instruction selection should deal with the condition code.
  ///
  /// Because each CC action takes up 4 bits, we need to have the array size be
  /// large enough to fit all of the value types. This can be done by rounding
  /// up the MVT::LAST_VALUETYPE value to the next multiple of 8.
  uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 7) / 8];

protected:
  ValueTypeActionImpl ValueTypeActions;

private:
  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;

  /// Targets can specify ISD nodes that they would like PerformDAGCombine
  /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
  /// array.
  unsigned char
      TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];

  /// For operations that must be promoted to a specific type, this holds the
  /// destination type. This map should be sparse, so don't hold it as an
  /// array.
  ///
  /// Targets add entries to this map with AddPromotedToType(..), clients access
  /// this with getTypeToPromoteTo(..).
  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
      PromoteToType;

  /// Stores the name of each libcall.
  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];

  /// The ISD::CondCode that should be used to test the result of each of the
  /// comparison libcall against zero.
  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];

  /// Stores the CallingConv that should be used for each libcall.
  CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];

  /// Set default libcall names and calling conventions.
  void InitLibcalls(const Triple &TT);

protected:
2813 /// Return true if the extension represented by \p I is free.
2814 /// \pre \p I is a sign, zero, or fp extension and
2815 /// is[Z|FP]ExtFree of the related types is not true.
2816 virtual bool isExtFreeImpl(const Instruction
*I
) const { return false; }
2818 /// Depth that GatherAllAliases should should continue looking for chain
2819 /// dependencies when trying to find a more preferable chain. As an
2820 /// approximation, this should be more than the number of consecutive stores
2821 /// expected to be merged.
2822 unsigned GatherAllAliasesMaxDepth
;
2824 /// \brief Specify maximum number of store instructions per memset call.
2826 /// When lowering \@llvm.memset this field specifies the maximum number of
2827 /// store operations that may be substituted for the call to memset. Targets
2828 /// must set this value based on the cost threshold for that target. Targets
2829 /// should assume that the memset will be done using as many of the largest
2830 /// store operations first, followed by smaller ones, if necessary, per
2831 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
2832 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
2833 /// store. This only applies to setting a constant array of a constant size.
2834 unsigned MaxStoresPerMemset
;
2835 /// Likewise for functions with the OptSize attribute.
2836 unsigned MaxStoresPerMemsetOptSize
;
  /// \brief Specify maximum number of store instructions per memcpy call.
  ///
  /// When lowering \@llvm.memcpy this field specifies the maximum number of
  /// store operations that may be substituted for a call to memcpy. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memcpy will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
  /// and one 1-byte store. This only applies to copying a constant array of
  /// constant size.
  unsigned MaxStoresPerMemcpy;

  /// Likewise for functions with the OptSize attribute.
  unsigned MaxStoresPerMemcpyOptSize;
  /// \brief Specify max number of store instructions to glue in inlined memcpy.
  ///
  /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
  /// of store instructions to keep together. This helps in pairing and
  /// vectorization later on.
  unsigned MaxGluedStoresPerMemcpy = 0;
  /// \brief Specify maximum number of load instructions per memcmp call.
  ///
  /// When lowering \@llvm.memcmp this field specifies the maximum number of
  /// pairs of load operations that may be substituted for a call to memcmp.
  /// Targets must set this value based on the cost threshold for that target.
  /// Targets should assume that the memcmp will be done using as many of the
  /// largest load operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine
  /// with 32-bit alignment would result in one 4-byte load, one 2-byte load,
  /// and one 1-byte load. This only applies to comparing a constant array of
  /// constant size.
  unsigned MaxLoadsPerMemcmp;

  /// Likewise for functions with the OptSize attribute.
  unsigned MaxLoadsPerMemcmpOptSize;
  /// \brief Specify maximum number of store instructions per memmove call.
  ///
  /// When lowering \@llvm.memmove this field specifies the maximum number of
  /// store instructions that may be substituted for a call to memmove. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memmove will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
  /// with 8-bit alignment would result in nine 1-byte stores. This only
  /// applies to copying a constant array of constant size.
  unsigned MaxStoresPerMemmove;

  /// Likewise for functions with the OptSize attribute.
  unsigned MaxStoresPerMemmoveOptSize;
  /// Tells the code generator that select is more expensive than a branch if
  /// the branch is usually predicted right.
  bool PredictableSelectIsExpensive;

  /// \see enableExtLdPromotion.
  bool EnableExtLdPromotion;
  /// Return true if the value types that can be represented by the specified
  /// register class are all legal.
  bool isLegalRC(const TargetRegisterInfo &TRI,
                 const TargetRegisterClass &RC) const;
  /// Replace/modify any TargetFrameIndex operands with a target-dependent
  /// sequence of memory operands that is recognized by PrologEpilogInserter.
  MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const;
  /// Replace/modify the XRay custom event operands with target-dependent
  /// details.
  MachineBasicBlock *emitXRayCustomEvent(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

  /// Replace/modify the XRay typed event operands with target-dependent
  /// details.
  MachineBasicBlock *emitXRayTypedEvent(MachineInstr &MI,
                                        MachineBasicBlock *MBB) const;
};

/// This class defines information used to lower LLVM code to legal SelectionDAG
/// operators that the target instruction selector can accept natively.
///
/// This class also defines callbacks that targets must implement to lower
/// target-specific constructs to SelectionDAG operators.
class TargetLowering : public TargetLoweringBase {
public:
  struct DAGCombinerInfo;
  struct MakeLibCallOptions;

  TargetLowering(const TargetLowering &) = delete;
  TargetLowering &operator=(const TargetLowering &) = delete;

  /// NOTE: The TargetMachine owns TLOF.
  explicit TargetLowering(const TargetMachine &TM);

  bool isPositionIndependent() const;
  virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
                                          FunctionLoweringInfo *FLI,
                                          LegacyDivergenceAnalysis *DA) const {
    return false;
  }

  virtual bool isSDNodeAlwaysUniform(const SDNode *N) const {
    return false;
  }
  /// Returns true by value, base pointer and offset pointer and addressing mode
  /// by reference if the node's address can be legally represented as
  /// pre-indexed load / store address.
  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
                                         SDValue &/*Offset*/,
                                         ISD::MemIndexedMode &/*AM*/,
                                         SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// Returns true by value, base pointer and offset pointer and addressing mode
  /// by reference if this node can be combined with a load / store to form a
  /// post-indexed load / store.
  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
                                          SDValue &/*Base*/,
                                          SDValue &/*Offset*/,
                                          ISD::MemIndexedMode &/*AM*/,
                                          SelectionDAG &/*DAG*/) const {
    return false;
  }
  /// Return the entry encoding for a jump table in the current function. The
  /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
  virtual unsigned getJumpTableEncoding() const;

  virtual const MCExpr *
  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
                            MCContext &/*Ctx*/) const {
    llvm_unreachable("Need to implement this hook if target has custom JTIs");
  }
  /// Returns relocation base for the given PIC jumptable.
  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                           SelectionDAG &DAG) const;

  /// This returns the relocation base for the given PIC jumptable, the same as
  /// getPICJumpTableRelocBase, but as an MCExpr.
  virtual const MCExpr *
  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                               unsigned JTI, MCContext &Ctx) const;
  /// Return true if folding a constant offset with the given GlobalAddress is
  /// legal. It is frequently not legal in PIC relocation models.
  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

  bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                            SDValue &Chain) const;
  void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
                           SDValue &NewRHS, ISD::CondCode &CCCode,
                           const SDLoc &DL, const SDValue OldLHS,
                           const SDValue OldRHS) const;
  /// Returns a pair of (return value, chain).
  /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
  std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
                                          EVT RetVT, ArrayRef<SDValue> Ops,
                                          MakeLibCallOptions CallOptions,
                                          const SDLoc &dl) const;
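
  // Minimal usage sketch (operands and locals assumed): softening an f32 add
  // into a call to the ADD_F32 libcall.
  //   MakeLibCallOptions CallOptions;
  //   SDValue Ops[2] = {LHS, RHS};
  //   std::pair<SDValue, SDValue> Res =
  //       makeLibCall(DAG, RTLIB::ADD_F32, MVT::f32, Ops, CallOptions, dl);
  //   // Res.first is the return value, Res.second the output chain.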
  /// Check whether parameters to a call that are passed in callee saved
  /// registers are the same as from the calling function. This needs to be
  /// checked for tail call eligibility.
  bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
                            const uint32_t *CallerPreservedMask,
                            const SmallVectorImpl<CCValAssign> &ArgLocs,
                            const SmallVectorImpl<SDValue> &OutVals) const;
  //===--------------------------------------------------------------------===//
  // TargetLowering Optimization Methods
  //

  /// A convenience struct that encapsulates a DAG, and two SDValues for
  /// returning information from TargetLowering to its clients that want to
  /// combine.
  struct TargetLoweringOpt {
    SelectionDAG &DAG;
    bool LegalTys;
    bool LegalOps;
    SDValue Old;
    SDValue New;

    explicit TargetLoweringOpt(SelectionDAG &InDAG, bool LT, bool LO) :
      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}

    bool LegalTypes() const { return LegalTys; }
    bool LegalOperations() const { return LegalOps; }

    bool CombineTo(SDValue O, SDValue N) {
      Old = O;
      New = N;
      return true;
    }
  };
  /// Determines the optimal series of memory ops to replace the memset / memcpy.
  /// Return true if the number of memory ops is below the threshold (Limit).
  /// It returns the types of the sequence of memory ops to perform
  /// memset / memcpy by reference.
  bool findOptimalMemOpLowering(std::vector<EVT> &MemOps,
                                unsigned Limit, uint64_t Size,
                                unsigned DstAlign, unsigned SrcAlign,
                                bool IsMemset, bool ZeroMemset,
                                bool MemcpyStrSrc, bool AllowOverlap,
                                unsigned DstAS, unsigned SrcAS,
                                const AttributeList &FuncAttributes) const;
  /// Check to see if the specified operand of the specified instruction is a
  /// constant integer. If so, check to see if there are any bits set in the
  /// constant that are not demanded. If so, shrink the constant and return
  /// true.
  bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                              TargetLoweringOpt &TLO) const;
  // Target hook to do target-specific const optimization, which is called by
  // ShrinkDemandedConstant. This function should return true if the target
  // doesn't want ShrinkDemandedConstant to further optimize the constant.
  virtual bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                            TargetLoweringOpt &TLO) const {
    return false;
  }
  /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
  /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
  /// generalized for targets with other types of implicit widening casts.
  bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
                        TargetLoweringOpt &TLO) const;
  /// Look at Op. At this point, we know that only the DemandedBits bits of the
  /// result of Op are ever used downstream. If we can use this information to
  /// simplify Op, create a new simplified DAG node and return true, returning
  /// the original and new nodes in Old and New. Otherwise, analyze the
  /// expression and return a mask of KnownOne and KnownZero bits for the
  /// expression (used to simplify the caller). The KnownZero/One bits may only
  /// be accurate for those bits in the Demanded masks.
  /// \p AssumeSingleUse When this parameter is true, this function will
  /// attempt to simplify \p Op even if there are multiple uses.
  /// Callers are responsible for correctly updating the DAG based on the
  /// results of this function, because simply replacing TLO.Old
  /// with TLO.New will be incorrect when this parameter is true and TLO.Old
  /// has multiple uses.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                            const APInt &DemandedElts, KnownBits &Known,
                            TargetLoweringOpt &TLO, unsigned Depth = 0,
                            bool AssumeSingleUse = false) const;
  /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                            KnownBits &Known, TargetLoweringOpt &TLO,
                            unsigned Depth = 0,
                            bool AssumeSingleUse = false) const;
  /// Helper wrapper around SimplifyDemandedBits.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                            DAGCombinerInfo &DCI) const;
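
  // Illustrative use from a target combine (N and DCI assumed): if only the
  // low 16 bits of a 32-bit operand are consumed, try to simplify it in place.
  //   APInt DemandedMask = APInt::getLowBitsSet(32, 16);
  //   if (SimplifyDemandedBits(Op, DemandedMask, DCI))
  //     return SDValue(N, 0); // The DAG was updated through DCI.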
  /// More limited version of SimplifyDemandedBits that can be used to "look
  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
  /// bitwise ops etc.
  SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          const APInt &DemandedElts,
                                          SelectionDAG &DAG,
                                          unsigned Depth) const;
  /// Look at Vector Op. At this point, we know that only the DemandedElts
  /// elements of the result of Op are ever used downstream. If we can use
  /// this information to simplify Op, create a new simplified DAG node and
  /// return true, storing the original and new nodes in TLO.
  /// Otherwise, analyze the expression and return a mask of KnownUndef and
  /// KnownZero elements for the expression (used to simplify the caller).
  /// The KnownUndef/Zero elements may only be accurate for those bits
  /// in the DemandedMask.
  /// \p AssumeSingleUse When this parameter is true, this function will
  /// attempt to simplify \p Op even if there are multiple uses.
  /// Callers are responsible for correctly updating the DAG based on the
  /// results of this function, because simply replacing TLO.Old
  /// with TLO.New will be incorrect when this parameter is true and TLO.Old
  /// has multiple uses.
  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
                                  APInt &KnownUndef, APInt &KnownZero,
                                  TargetLoweringOpt &TLO, unsigned Depth = 0,
                                  bool AssumeSingleUse = false) const;
  /// Helper wrapper around SimplifyDemandedVectorElts.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
                                  APInt &KnownUndef, APInt &KnownZero,
                                  DAGCombinerInfo &DCI) const;
  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
  /// argument allows us to only collect the known bits that are shared by the
  /// requested vector elements.
  virtual void computeKnownBitsForTargetNode(const SDValue Op,
                                             KnownBits &Known,
                                             const APInt &DemandedElts,
                                             const SelectionDAG &DAG,
                                             unsigned Depth = 0) const;
  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
  /// argument allows us to only collect the known bits that are shared by the
  /// requested vector elements. This is for GISel.
  virtual void computeKnownBitsForTargetInstr(Register R, KnownBits &Known,
                                              const APInt &DemandedElts,
                                              const MachineRegisterInfo &MRI,
                                              unsigned Depth = 0) const;
  /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
  /// Default implementation computes low bits based on alignment
  /// information. This should preserve known bits passed into it.
  virtual void computeKnownBitsForFrameIndex(const SDValue FIOp,
                                             KnownBits &Known,
                                             const APInt &DemandedElts,
                                             const SelectionDAG &DAG,
                                             unsigned Depth = 0) const;
  /// This method can be implemented by targets that want to expose additional
  /// information about sign bits to the DAG Combiner. The DemandedElts
  /// argument allows us to only collect the minimum sign bits that are shared
  /// by the requested vector elements.
  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   const APInt &DemandedElts,
                                                   const SelectionDAG &DAG,
                                                   unsigned Depth = 0) const;
  /// Attempt to simplify any target nodes based on the demanded vector
  /// elements, returning true on success. Otherwise, analyze the expression and
  /// return a mask of KnownUndef and KnownZero elements for the expression
  /// (used to simplify the caller). The KnownUndef/Zero elements may only be
  /// accurate for those bits in the DemandedMask.
  virtual bool SimplifyDemandedVectorEltsForTargetNode(
      SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
      APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
  /// Attempt to simplify any target nodes based on the demanded bits/elts,
  /// returning true on success. Otherwise, analyze the
  /// expression and return a mask of KnownOne and KnownZero bits for the
  /// expression (used to simplify the caller). The KnownZero/One bits may only
  /// be accurate for those bits in the Demanded masks.
  virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                                 const APInt &DemandedBits,
                                                 const APInt &DemandedElts,
                                                 KnownBits &Known,
                                                 TargetLoweringOpt &TLO,
                                                 unsigned Depth = 0) const;
  /// More limited version of SimplifyDemandedBits that can be used to "look
  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
  /// bitwise ops etc.
  virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
      SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
      SelectionDAG &DAG, unsigned Depth) const;
  /// Tries to build a legal vector shuffle using the provided parameters
  /// or equivalent variations. The Mask argument may be modified as the
  /// function tries different variations.
  /// Returns an empty SDValue if the operation fails.
  SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
                                  SDValue N1, MutableArrayRef<int> Mask,
                                  SelectionDAG &DAG) const;
  /// This method returns the constant pool value that will be loaded by LD.
  /// NOTE: You must check for implicit extensions of the constant by LD.
  virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;
  /// If \p SNaN is false, \returns true if \p Op is known to never be any
  /// NaN. If \p SNaN is true, returns if \p Op is known to never be a signaling
  /// NaN.
  virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
                                            const SelectionDAG &DAG,
                                            bool SNaN = false,
                                            unsigned Depth = 0) const;
  struct DAGCombinerInfo {
    void *DC;  // The DAG Combiner object.
    CombineLevel Level;
    bool CalledByLegalizer;

  public:
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
        : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
    bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
    bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
    bool isAfterLegalizeDAG() const {
      return Level == AfterLegalizeDAG;
    }
    CombineLevel getDAGCombineLevel() { return Level; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }
    void AddToWorklist(SDNode *N);
    SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);

    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
  };
  /// Return if the N is a constant or constant vector equal to the true value
  /// from getBooleanContents().
  bool isConstTrueVal(const SDNode *N) const;

  /// Return if the N is a constant or constant vector equal to the false value
  /// from getBooleanContents().
  bool isConstFalseVal(const SDNode *N) const;

  /// Return if \p N is a True value when extended to \p VT.
  bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;
  /// Try to simplify a setcc built with the specified operands and cc. If it is
  /// unable to simplify it, return a null SDValue.
  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                        bool foldBooleans, DAGCombinerInfo &DCI,
                        const SDLoc &dl) const;
  // For targets which wrap address, unwrap for analysis.
  virtual SDValue unwrapAddress(SDValue N) const { return N; }

  /// Returns true (and the GlobalValue and the offset) if the node is a
  /// GlobalAddress + offset.
  virtual bool
  isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const;
  /// This method will be invoked for all target nodes and for any
  /// target-independent nodes that the target has registered with invoke it
  /// for.
  ///
  /// The semantics are as follows:
  /// Return Value:
  ///   SDValue.Val == 0 - No change was made
  ///   SDValue.Val == N - N was replaced, is dead, and is already handled.
  ///   otherwise        - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  /// Return true if it is profitable to move this shift by a constant amount
  /// through its operand, adjusting any immediate operands as necessary to
  /// preserve semantics. This transformation may not be desirable if it
  /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
  /// extraction in AArch64). By default, it returns true.
  ///
  /// @param N the shift node
  /// @param Level the current DAGCombine legalization level.
  virtual bool isDesirableToCommuteWithShift(const SDNode *N,
                                             CombineLevel Level) const {
    return true;
  }
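
  // The transform being gated, sketched for constants C1 and C2:
  //   (shl (add X, C1), C2)  -->  (add (shl X, C2), C1 << C2)
  // Targets override this hook to keep patterns such as shifted bitfield
  // extracts from being broken apart.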
  // Return true if it is profitable to combine a BUILD_VECTOR with a
  // stride-pattern to a shuffle and a truncate.
  // Example of such a combine:
  // v4i32 build_vector((extract_elt V, 1),
  //                    (extract_elt V, 3),
  //                    (extract_elt V, 5),
  //                    (extract_elt V, 7))
  //  -->
  // v4i32 truncate (bitcast (shuffle<1,u,3,u,5,u,7,u> V, u) to v4i64)
  virtual bool isDesirableToCombineBuildVectorToShuffleTruncate(
      ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
    return false;
  }
  /// Return true if the target has native support for the specified value type
  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
  /// i16 is legal, but undesirable since i16 instruction encodings are longer
  /// and some i16 instructions are slow.
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume all legal types are desirable.
    return isTypeLegal(VT);
  }
  /// Return true if it is profitable for dag combiner to transform a floating
  /// point op of specified opcode to an equivalent op of an integer
  /// type. e.g. f32 load -> i32 load can be profitable on ARM.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }
  /// This method queries the target whether it is beneficial for dag combiner
  /// to promote the specified node. If true, it should return the desired
  /// promotion type by reference.
  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
    return false;
  }
  /// Return true if the target supports swifterror attribute. It optimizes
  /// loads and stores to reading and writing a specific register.
  virtual bool supportSwiftError() const {
    return false;
  }

  /// Return true if the target supports that a subset of CSRs for the given
  /// machine function is handled explicitly via copies.
  virtual bool supportSplitCSR(MachineFunction *MF) const {
    return false;
  }
  /// Perform necessary initialization to handle a subset of CSRs explicitly
  /// via copies. This function is called at the beginning of instruction
  /// selection.
  virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
    llvm_unreachable("Not Implemented");
  }

  /// Insert explicit copies in entry and exit blocks. We copy a subset of
  /// CSRs to virtual registers in the entry block, and copy them back to
  /// physical registers in the exit blocks. This function is called at the end
  /// of instruction selection.
  virtual void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
    llvm_unreachable("Not Implemented");
  }
  //===--------------------------------------------------------------------===//
  // Lowering methods - These methods must be implemented by targets so that
  // the SelectionDAGBuilder code knows how to lower these.
  //

  /// This hook must be implemented to lower the incoming (formal) arguments,
  /// described by the Ins array, into the specified DAG. The implementation
  /// should fill in the InVals array with legal-type argument values, and
  /// return the resulting token chain value.
  virtual SDValue LowerFormalArguments(
      SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
      const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
      SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }
  /// This structure contains all information that is necessary for lowering
  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
  /// needs to lower a call, and targets will see this struct in their LowerCall
  /// implementation.
  struct CallLoweringInfo {
    SDValue Chain;
    Type *RetTy = nullptr;
    bool RetSExt           : 1;
    bool RetZExt           : 1;
    bool IsVarArg          : 1;
    bool IsInReg           : 1;
    bool DoesNotReturn     : 1;
    bool IsReturnValueUsed : 1;
    bool IsConvergent      : 1;
    bool IsPatchPoint      : 1;

    // IsTailCall should be modified by implementations of
    // TargetLowering::LowerCall that perform tail call conversions.
    bool IsTailCall = false;

    // Is Call lowering done post SelectionDAG type legalization.
    bool IsPostTypeLegalization = false;

    unsigned NumFixedArgs = -1;
    CallingConv::ID CallConv = CallingConv::C;
    SDValue Callee;
    ArgListTy Args;
    SelectionDAG &DAG;
    SDLoc DL;
    ImmutableCallSite CS;
    SmallVector<ISD::OutputArg, 32> Outs;
    SmallVector<SDValue, 32> OutVals;
    SmallVector<ISD::InputArg, 32> Ins;
    SmallVector<SDValue, 4> InVals;
    CallLoweringInfo(SelectionDAG &DAG)
        : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
          DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
          IsPatchPoint(false), DAG(DAG) {}

    CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
      DL = dl;
      return *this;
    }

    CallLoweringInfo &setChain(SDValue InChain) {
      Chain = InChain;
      return *this;
    }
    // setCallee with target/module-specific attributes
    CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
                                   SDValue Target, ArgListTy &&ArgsList) {
      RetTy = ResultType;
      Callee = Target;
      CallConv = CC;
      NumFixedArgs = ArgsList.size();
      Args = std::move(ArgsList);

      DAG.getTargetLoweringInfo().markLibCallAttributes(
          &(DAG.getMachineFunction()), CC, Args);
      return *this;
    }

    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
                                SDValue Target, ArgListTy &&ArgsList) {
      RetTy = ResultType;
      Callee = Target;
      CallConv = CC;
      NumFixedArgs = ArgsList.size();
      Args = std::move(ArgsList);
      return *this;
    }
    CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
                                SDValue Target, ArgListTy &&ArgsList,
                                ImmutableCallSite Call) {
      RetTy = ResultType;

      IsInReg = Call.hasRetAttr(Attribute::InReg);
      DoesNotReturn =
          Call.doesNotReturn() ||
          (!Call.isInvoke() &&
           isa<UnreachableInst>(Call.getInstruction()->getNextNode()));
      IsVarArg = FTy->isVarArg();
      IsReturnValueUsed = !Call.getInstruction()->use_empty();
      RetSExt = Call.hasRetAttr(Attribute::SExt);
      RetZExt = Call.hasRetAttr(Attribute::ZExt);

      Callee = Target;

      CallConv = Call.getCallingConv();
      NumFixedArgs = FTy->getNumParams();
      Args = std::move(ArgsList);

      CS = Call;

      return *this;
    }
    CallLoweringInfo &setInRegister(bool Value = true) {
      IsInReg = Value;
      return *this;
    }

    CallLoweringInfo &setNoReturn(bool Value = true) {
      DoesNotReturn = Value;
      return *this;
    }

    CallLoweringInfo &setVarArg(bool Value = true) {
      IsVarArg = Value;
      return *this;
    }

    CallLoweringInfo &setTailCall(bool Value = true) {
      IsTailCall = Value;
      return *this;
    }

    CallLoweringInfo &setDiscardResult(bool Value = true) {
      IsReturnValueUsed = !Value;
      return *this;
    }

    CallLoweringInfo &setConvergent(bool Value = true) {
      IsConvergent = Value;
      return *this;
    }

    CallLoweringInfo &setSExtResult(bool Value = true) {
      RetSExt = Value;
      return *this;
    }

    CallLoweringInfo &setZExtResult(bool Value = true) {
      RetZExt = Value;
      return *this;
    }

    CallLoweringInfo &setIsPatchPoint(bool Value = true) {
      IsPatchPoint = Value;
      return *this;
    }

    CallLoweringInfo &setIsPostTypeLegalization(bool Value = true) {
      IsPostTypeLegalization = Value;
      return *this;
    }

    ArgListTy &getArgs() {
      return Args;
    }
  };
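
  // Typical construction sketch (Chain, Callee, RetTy, Args, and dl assumed),
  // chaining the setters above before handing the struct to LowerCallTo:
  //   TargetLowering::CallLoweringInfo CLI(DAG);
  //   CLI.setDebugLoc(dl)
  //       .setChain(Chain)
  //       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args))
  //       .setNoReturn(true);
  //   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);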
  /// This structure is used to pass arguments to makeLibCall function.
  struct MakeLibCallOptions {
    // By passing type list before soften to makeLibCall, the target hook
    // shouldExtendTypeInLibCall can get the original type before soften.
    ArrayRef<EVT> OpsVTBeforeSoften;
    EVT RetVTBeforeSoften;

    bool IsSExt : 1;
    bool DoesNotReturn : 1;
    bool IsReturnValueUsed : 1;
    bool IsPostTypeLegalization : 1;
    bool IsSoften : 1;

    MakeLibCallOptions()
        : IsSExt(false), DoesNotReturn(false), IsReturnValueUsed(true),
          IsPostTypeLegalization(false), IsSoften(false) {}

    MakeLibCallOptions &setSExt(bool Value = true) {
      IsSExt = Value;
      return *this;
    }

    MakeLibCallOptions &setNoReturn(bool Value = true) {
      DoesNotReturn = Value;
      return *this;
    }

    MakeLibCallOptions &setDiscardResult(bool Value = true) {
      IsReturnValueUsed = !Value;
      return *this;
    }

    MakeLibCallOptions &setIsPostTypeLegalization(bool Value = true) {
      IsPostTypeLegalization = Value;
      return *this;
    }

    MakeLibCallOptions &setTypeListBeforeSoften(ArrayRef<EVT> OpsVT, EVT RetVT,
                                                bool Value = true) {
      OpsVTBeforeSoften = OpsVT;
      RetVTBeforeSoften = RetVT;
      IsSoften = Value;
      return *this;
    }
  };
  /// This function lowers an abstract call to a function into an actual call.
  /// This returns a pair of operands. The first element is the return value
  /// for the function (if RetTy is not VoidTy). The second element is the
  /// outgoing token chain. It calls LowerCall to do the actual lowering.
  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
  /// This hook must be implemented to lower calls into the specified
  /// DAG. The outgoing arguments to the call are described by the Outs array,
  /// and the values to be returned by the call are described by the Ins
  /// array. The implementation should fill in the InVals array with legal-type
  /// return values from the call, and return the resulting token chain value.
  virtual SDValue
  LowerCall(CallLoweringInfo &/*CLI*/,
            SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }
  /// Target-specific cleanup for formal ByVal parameters.
  virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}

  /// This hook should be implemented to check whether the return values
  /// described by the Outs array can fit into the return registers. If false
  /// is returned, an sret-demotion is performed.
  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
                              MachineFunction &/*MF*/, bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                              LLVMContext &/*Context*/) const {
    // Return true by default to get preexisting behavior.
    return true;
  }
  /// This hook must be implemented to lower outgoing return values, described
  /// by the Outs array, into the specified DAG. The implementation should
  /// return the resulting token chain value.
  virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                              bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
                              const SmallVectorImpl<SDValue> & /*OutVals*/,
                              const SDLoc & /*dl*/,
                              SelectionDAG & /*DAG*/) const {
    llvm_unreachable("Not Implemented");
  }
  /// Return true if result of the specified node is used by a return node
  /// only. It also computes and returns the input chain for the tail call.
  ///
  /// This is used to determine whether it is possible to codegen a libcall as
  /// tail call at legalization time.
  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
    return false;
  }
  /// Return true if the target may be able to emit the call instruction as a
  /// tail call. This is used by optimization passes to determine if it's
  /// profitable to duplicate return instructions to enable tailcall
  /// optimization.
  virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
    return false;
  }
  /// Return the builtin name for the __builtin___clear_cache intrinsic.
  /// Default is to invoke the clear cache library call.
  virtual const char *getClearCacheBuiltinName() const {
    return "__clear_cache";
  }
  /// Return the register ID of the name passed in. Used by named register
  /// global variables extension. There is no target-independent behaviour
  /// so the default action is to bail.
  virtual unsigned getRegisterByName(const char *RegName, EVT VT,
                                     SelectionDAG &DAG) const {
    report_fatal_error("Named registers not implemented for this target");
  }
  /// Return the type that should be used to zero or sign extend a
  /// zeroext/signext integer return value. FIXME: Some C calling conventions
  /// require the return type to be promoted, but this is not true all the time,
  /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
  /// conventions. The frontend should handle this and include all of the
  /// necessary information.
  virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                  ISD::NodeType /*ExtendKind*/) const {
    EVT MinVT = getRegisterType(Context, MVT::i32);
    return VT.bitsLT(MinVT) ? MinVT : VT;
  }
  /// For some targets, an LLVM struct type must be broken down into multiple
  /// simple types, but the calling convention specifies that the entire struct
  /// must be passed in a block of consecutive registers.
  virtual bool
  functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
                                            bool isVarArg) const {
    return false;
  }
  /// For most targets, an LLVM type must be broken down into multiple
  /// smaller types. Usually the halves are ordered according to the endianness
  /// but for some platforms that would break. So this method will default to
  /// matching the endianness but can be overridden.
  virtual bool
  shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const {
    return DL.isLittleEndian();
  }
  /// Returns a 0 terminated array of registers that can be safely used as
  /// scratch registers.
  virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
    return nullptr;
  }
  /// This callback is used to prepare for a volatile or atomic load.
  /// It takes a chain node as input and returns the chain for the load itself.
  ///
  /// Having a callback like this is necessary for targets like SystemZ,
  /// which allows a CPU to reuse the result of a previous load indefinitely,
  /// even if a cache-coherent store is performed by another CPU. The default
  /// implementation does nothing.
  virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
                                              SelectionDAG &DAG) const {
    return Chain;
  }
  /// This callback is used to inspect load/store instructions and add
  /// target-specific MachineMemOperand flags to them. The default
  /// implementation does nothing.
  virtual MachineMemOperand::Flags getMMOFlags(const Instruction &I) const {
    return MachineMemOperand::MONone;
  }
  /// This callback is invoked by the type legalizer to legalize nodes with an
  /// illegal operand type but legal result types. It replaces the
  /// LowerOperation callback in the type Legalizer. The reason we can not do
  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
  /// use this callback.
  ///
  /// TODO: Consider merging with ReplaceNodeResults.
  ///
  /// The target places new result values for the node in Results (their number
  /// and types must exactly match those of the original return values of
  /// the node), or leaves Results empty, which indicates that the node is not
  /// to be custom lowered after all.
  /// The default implementation calls LowerOperation.
  virtual void LowerOperationWrapper(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG) const;
  /// This callback is invoked for operations that are unsupported by the
  /// target, which are registered to use 'custom' lowering, and whose defined
  /// values are all legal. If the target has no operations that require custom
  /// lowering, it need not implement this. The default implementation of this
  /// aborts.
  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
  /// This callback is invoked when a node result type is illegal for the
  /// target, and the operation was registered to use 'custom' lowering for that
  /// result type. The target places new result values for the node in Results
  /// (their number and types must exactly match those of the original return
  /// values of the node), or leaves Results empty, which indicates that the
  /// node is not to be custom lowered after all.
  ///
  /// If the target has no operations that require custom lowering, it need not
  /// implement this. The default implementation aborts.
  virtual void ReplaceNodeResults(SDNode * /*N*/,
                                  SmallVectorImpl<SDValue> &/*Results*/,
                                  SelectionDAG &/*DAG*/) const {
    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
  }
  /// This method returns the name of a target specific DAG node.
  virtual const char *getTargetNodeName(unsigned Opcode) const;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  virtual FastISel *createFastISel(FunctionLoweringInfo &,
                                   const TargetLibraryInfo *) const {
    return nullptr;
  }

  bool verifyReturnAddressArgumentIsConstant(SDValue Op,
                                             SelectionDAG &DAG) const;
  //===--------------------------------------------------------------------===//
  // Inline Asm Support hooks
  //

  /// This hook allows the target to expand an inline asm call to be explicit
  /// llvm code if it wants to. This is useful for turning simple inline asms
  /// into LLVM intrinsics, which gives the compiler more information about the
  /// behavior of the code.
  virtual bool ExpandInlineAsm(CallInst *) const {
    return false;
  }
  enum ConstraintType {
    C_Register,            // Constraint represents specific register(s).
    C_RegisterClass,       // Constraint represents any of register(s) in class.
    C_Memory,              // Memory constraint.
    C_Immediate,           // Requires an immediate.
    C_Other,               // Something else.
    C_Unknown              // Unsupported constraint.
  };
  enum ConstraintWeight {
    // Generic weights.
    CW_Invalid  = -1,     // No match.
    CW_Okay     = 0,      // Acceptable.
    CW_Good     = 1,      // Good weight.
    CW_Better   = 2,      // Better weight.
    CW_Best     = 3,      // Best weight.

    // Well-known weights.
    CW_SpecificReg  = CW_Okay,    // Specific register operands.
    CW_Register     = CW_Good,    // Register operands.
    CW_Memory       = CW_Better,  // Memory operands.
    CW_Constant     = CW_Best,    // Constant operand.
    CW_Default      = CW_Okay     // Default or don't know type.
  };
  /// This contains information for each constraint that we are lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// This contains the actual string for the code, like "m". TargetLowering
    /// picks the 'best' code from ConstraintInfo::Codes that most closely
    /// matches the operand.
    std::string ConstraintCode;

    /// Information about the constraint code, e.g. Register, RegisterClass,
    /// Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;

    /// If this is the result output operand or a clobber, this is null,
    /// otherwise it is the incoming operand to the CallInst. This gets
    /// modified as the asm is processed.
    Value *CallOperandVal = nullptr;

    /// The ValueType for the operand value.
    MVT ConstraintVT = MVT::Other;

    /// Copy constructor for copying from a ConstraintInfo.
    AsmOperandInfo(InlineAsm::ConstraintInfo Info)
        : InlineAsm::ConstraintInfo(std::move(Info)) {}

    /// Return true if this is an input operand that is a matching constraint
    /// like "4".
    bool isMatchingInputConstraint() const;

    /// If this is an input matching constraint, this method returns the output
    /// operand it matches.
    unsigned getMatchedOperand() const;
  };
  using AsmOperandInfoVector = std::vector<AsmOperandInfo>;

  /// Split up the constraint string from the inline assembly value into the
  /// specific constraints and their prefixes, and also tie in the associated
  /// operand values. If this returns an empty vector, and if the constraint
  /// string itself isn't empty, there was an error parsing.
  virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
                                                const TargetRegisterInfo *TRI,
                                                ImmutableCallSite CS) const;
  /// Examine constraint type and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getMultipleConstraintMatchWeight(
      AsmOperandInfo &info, int maIndex) const;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;
  /// Determines the constraint code and constraint type to use for the specific
  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
  /// If the actual operand being passed in is available, it can be passed in as
  /// Op, otherwise an empty SDValue can be passed.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDValue Op,
                                      SelectionDAG *DAG = nullptr) const;
  /// Given a constraint, return the type of constraint it is for this target.
  virtual ConstraintType getConstraintType(StringRef Constraint) const;
  /// Given a physical register constraint (e.g. {edx}), return the register
  /// number and the register class for the register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register of 0 and the register class
  /// pointer.
  ///
  /// This should only be used for C_Register constraints. On error, this
  /// returns a register number of 0 and a null register class pointer.
  virtual std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const;
  virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
    if (ConstraintCode == "i")
      return InlineAsm::Constraint_i;
    else if (ConstraintCode == "m")
      return InlineAsm::Constraint_m;
    return InlineAsm::Constraint_Unknown;
  }
  /// Try to replace an X constraint, which matches anything, with another that
  /// has more specific requirements based on the type of the corresponding
  /// operand. This returns null if there is no replacement to make.
  virtual const char *LowerXConstraint(EVT ConstraintVT) const;

  /// Lower the specified operand into the Ops vector. If it is invalid, don't
  /// add anything to Ops.
  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                            std::vector<SDValue> &Ops,
                                            SelectionDAG &DAG) const;
  // Lower custom output constraints. If invalid, return SDValue().
  virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
                                              SDLoc DL,
                                              const AsmOperandInfo &OpInfo,
                                              SelectionDAG &DAG) const;
  //===--------------------------------------------------------------------===//
  // Div utility functions
  //

  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    SmallVectorImpl<SDNode *> &Created) const;
  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    SmallVectorImpl<SDNode *> &Created) const;
  /// Targets may override this function to provide custom SDIV lowering for
  /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
  /// assumes SDIV is expensive and replaces it with a series of other integer
  /// operations.
  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                SmallVectorImpl<SDNode *> &Created) const;
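
  // For reference, the generic expansion for X sdiv (1 << K), with W the bit
  // width, that this hook can replace is, sketched:
  //   Sgn = sra(X, W - 1);           // all ones if X is negative
  //   Add = add(X, srl(Sgn, W - K)); // bias X by 2^K - 1 when negative
  //   Res = sra(Add, K);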
  /// Indicate whether this target prefers to combine FDIVs with the same
  /// divisor. If the transform should never be done, return zero. If the
  /// transform should be done, return the minimum number of divisor uses
  /// that must exist.
  virtual unsigned combineRepeatedFPDivisors() const {
    return 0;
  }
  /// Hooks for building estimates in place of slower divisions and square
  /// roots.

  /// Return either a square root or its reciprocal estimate value for the input
  /// operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
  /// 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
  /// algorithm implementation that uses either one or two constants.
  /// The boolean Reciprocal is used to select whether the estimate is for the
  /// square root of the input operand or the reciprocal of its square root.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                  int Enabled, int &RefinementSteps,
                                  bool &UseOneConstNR, bool Reciprocal) const {
    return SDValue();
  }
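
  // For reference, one Newton-Raphson refinement step for the reciprocal
  // square root estimate X of operand A is
  //   X' = X * (1.5 - 0.5 * A * X * X)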
  /// Return a reciprocal estimate value for the input operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
  /// 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                   int Enabled, int &RefinementSteps) const {
    return SDValue();
  }
  //===--------------------------------------------------------------------===//
  // Legalization utility functions
  //

  /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
  /// respectively, each computing an n/2-bit part of the result.
  /// \param Result A vector that will be filled with the parts of the result
  ///        in little-endian order.
  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl, SDValue LHS,
                      SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
                      SelectionDAG &DAG, MulExpansionKind Kind,
                      SDValue LL = SDValue(), SDValue LH = SDValue(),
                      SDValue RL = SDValue(), SDValue RH = SDValue()) const;
  /// Expand a MUL into two nodes. One that computes the high bits of
  /// the result and one that computes the low bits.
  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
                 SelectionDAG &DAG, MulExpansionKind Kind,
                 SDValue LL = SDValue(), SDValue LH = SDValue(),
                 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
  /// Expand funnel shift.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandFunnelShift(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
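
  // For reference, with W the bit width and C = Z % W, the expansion follows
  // the ISD::FSHL/FSHR semantics (modulo the C == 0 special case):
  //   fshl(X, Y, Z) == (X << C) | (Y >> (W - C))
  //   fshr(X, Y, Z) == (X << (W - C)) | (Y >> C)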
  /// Expand rotations.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandROT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
  /// Expand float(f32) to SINT(i64) conversion.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Expand float to UINT conversion.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain,
                        SelectionDAG &DAG) const;
  /// Expand UINT(i64) to double(f64) conversion.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
  SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;
  /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandCTPOP(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
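
  // The scalar expansion follows the classic parallel bit-count; sketched for
  // i32 (the generic code derives the masks from the bit width):
  //   V = V - ((V >> 1) & 0x55555555);
  //   V = (V & 0x33333333) + ((V >> 2) & 0x33333333);
  //   V = (V + (V >> 4)) & 0x0F0F0F0F;
  //   Result = (V * 0x01010101) >> 24;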
  /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandCTLZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandCTTZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
  /// Expand ABS nodes. Expands vector/scalar ABS nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// (ABS x) -> (XOR (ADD x, (SRA x, type_size)), (SRA x, type_size))
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandABS(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
  /// Turn load of vector type into a load of the individual elements.
  /// \param LD load to expand
  /// \returns MERGE_VALUEs of the scalar loads with their chains.
  SDValue scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const;

  /// Turn a store of a vector type into stores of the individual elements.
  /// \param ST Store with a vector value type
  /// \returns MERGE_VALUEs of the individual store chains.
  SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;
  /// Expands an unaligned load to 2 half-size loads for an integer, and
  /// possibly more for vectors.
  std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Expands an unaligned store to 2 half-size stores for integer values, and
  /// possibly more for vectors.
  SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;
  /// Increments memory address \p Addr according to the type of the value
  /// \p DataVT that should be stored. If the data is stored in compressed
  /// form, the memory address should be incremented according to the number of
  /// the stored elements. This number is equal to the number of '1's bits
  /// in the \p Mask.
  /// \p DataVT is a vector type. \p Mask is a vector value.
  /// \p DataVT and \p Mask have the same number of vector elements.
  SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
                                 EVT DataVT, SelectionDAG &DAG,
                                 bool IsCompressedMemory) const;
  /// Get a pointer to vector element \p Idx located in memory for a vector of
  /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
  /// bounds the returned pointer is unspecified, but will be within the vector
  /// bounds.
  SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                  SDValue Index) const;
  /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
  /// method accepts integers as its arguments.
  SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::SMULFIX. This method accepts
  /// integers as its arguments.
  SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const;
  /// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion
  /// always succeeds and populates the Result and Overflow arguments.
  void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                      SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::S(ADD|SUB)O. Expansion
  /// always succeeds and populates the Result and Overflow arguments.
  void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                      SelectionDAG &DAG) const;
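
  // E.g. for UADDO the generic lowering amounts to, sketched:
  //   Result   = add(LHS, RHS);
  //   Overflow = setcc(Result, LHS, ISD::SETULT); // wrapped iff sum < LHS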
  /// Method for building the DAG expansion of ISD::[US]MULO. Returns whether
  /// expansion was successful and populates the Result and Overflow arguments.
  bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                  SelectionDAG &DAG) const;

  /// Expand a VECREDUCE_* into an explicit calculation. If Count is specified,
  /// only the first Count elements of the vector are used.
  SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const;
  //===--------------------------------------------------------------------===//
  // Instruction Emitting Hooks
  //

  /// This method should be implemented by targets that mark instructions with
  /// the 'usesCustomInserter' flag. These instructions are special in various
  /// ways, which require special support to insert. The specified MachineInstr
  /// is created but not inserted into any basic blocks, and this method is
  /// called to expand it into a sequence of instructions, potentially also
  /// creating new basic blocks and control flow.
  /// As long as the returned basic block is different (i.e., we created a new
  /// one), the custom inserter is free to modify the rest of \p MBB.
  virtual MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
  /// This method should be implemented by targets that mark instructions with
  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
  /// instruction selection by target hooks. e.g. To fill in optional defs for
  /// ARM 's' setting instructions.
  virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                             SDNode *Node) const;
  /// If this function returns true, SelectionDAGBuilder emits a
  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
  virtual bool useLoadStackGuardNode() const {
    return false;
  }

  virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                      const SDLoc &DL) const {
    llvm_unreachable("not implemented for this target");
  }
  /// Lower TLS global address SDNode for target independent emulated TLS model.
  virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;

  /// Expands target specific indirect branch for the case of JumpTable
  /// expansion.
  virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
                                         SDValue Addr,
                                         SelectionDAG &DAG) const {
    return DAG.getNode(ISD::BRIND, dl, MVT::Other, Value, Addr);
  }
  // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
  // If we're comparing for equality to zero and isCtlzFast is true, expose the
  // fact that this can be implemented as a ctlz/srl pair, so that the dag
  // combiner can fold the new nodes.
  SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
  SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                           const SDLoc &DL, DAGCombinerInfo &DCI) const;
  SDValue foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                             const SDLoc &DL, DAGCombinerInfo &DCI) const;

  SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0,
                                               SDValue N1, ISD::CondCode Cond,
                                               DAGCombinerInfo &DCI,
                                               const SDLoc &DL) const;
  // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0
  SDValue optimizeSetCCByHoistingAndByConstFromLogicalShift(
      EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
      DAGCombinerInfo &DCI, const SDLoc &DL) const;
  SDValue prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
                            SDValue CompTargetNode, ISD::CondCode Cond,
                            DAGCombinerInfo &DCI, const SDLoc &DL,
                            SmallVectorImpl<SDNode *> &Created) const;
  SDValue buildUREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
                          const SDLoc &DL) const;

  SDValue prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
                            SDValue CompTargetNode, ISD::CondCode Cond,
                            DAGCombinerInfo &DCI, const SDLoc &DL,
                            SmallVectorImpl<SDNode *> &Created) const;
  SDValue buildSREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
                          const SDLoc &DL) const;
};
/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI, const DataLayout &DL);
} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETLOWERING_H