//===- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;
/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}
// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}

void SelectionDAG::DAGNodeDeletedListener::anchor() {}
#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));
static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}
//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}
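
// For example, -0.0 and +0.0 compare equal under double operator==, but
// their bit patterns differ in the sign bit, so isExactlyValue(APFloat(-0.0))
// on a ConstantFPSDNode holding +0.0 is false.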
bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}
//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//
bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}
// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // bits are all ones.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // bits are all zeros.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}
bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}
bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match,
                              bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}
bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}
ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
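
// Worked example: SETLT is encoded with the N and L bits set (binary 10100).
// Swapping the L and G bits yields 10010, which is SETGT -- reflecting the
// identity (setlt X, Y) == (setgt Y, X).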
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
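
// Worked example: for the integer code SETLT (10100), flipping the E, G and
// L bits gives 10011 = SETGE; for the FP code SETOLT (00100), flipping all
// four condition bits gives 01011 = SETUGE.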
/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if the result is an unsigned comparison. Return zero if the operation
/// does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;     // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
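
// Worked example: getSetCCOrOperation(SETGT, SETEQ, true) ORs 10010 with
// 10001 to get 10011 = SETGE, and getSetCCOrOperation(SETLT, SETGT, true)
// gives 10110 = SETNE.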
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
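
// Worked example: getSetCCAndOperation(SETGE, SETLE, true) ANDs 10011 with
// 10101 to get 10001 = SETEQ, i.e. (X >= Y) && (X <= Y) implies X == Y.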
//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)  {
  ID.AddInteger(OpC);
}
/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}
/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
      ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
    }
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}
/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leafs with special info.
  AddNodeIDCustom(ID, N);
}
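
// Illustration: two getNode(ISD::ADD, ...) calls with the same value types
// and the same operands produce identical FoldingSetNodeIDs here, so the
// CSE map hands back the first node instead of allocating a duplicate.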
//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}
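
// For example, an ISD::ADDC node's second result has type MVT::Glue, so the
// loop above keeps it out of CSE; glue edges encode scheduling constraints
// that must not be merged away.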
/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}
/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This could
    // happen if replacing a node causes a node previously added to the node to
    // be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}
void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted.  (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}
void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}
void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}
void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}
void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}
#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode.  Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG
/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}
/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
        ESN->getSymbol(), ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return true;
}
/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it.  Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}
// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}
void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        LegacyDivergenceAnalysis * Divergence) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
}
SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}
void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}
SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}
SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}
void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  SDCallSiteDbgInfo.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}
SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}
SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}
SelectionDAG::getBoolExtOrTrunc(SDValue Op
, const SDLoc
&SL
, EVT VT
,
1124 if (VT
.bitsLE(Op
.getValueType()))
1125 return getNode(ISD::TRUNCATE
, SL
, VT
, Op
);
1127 TargetLowering::BooleanContent BType
= TLI
->getBooleanContents(OpVT
);
1128 return getNode(TLI
->getExtendForContent(BType
), SL
, VT
, Op
);
1131 SDValue
SelectionDAG::getZeroExtendInReg(SDValue Op
, const SDLoc
&DL
, EVT VT
) {
1132 assert(!VT
.isVector() &&
1133 "getZeroExtendInReg should use the vector element type instead of "
1134 "the vector type!");
1135 if (Op
.getValueType().getScalarType() == VT
) return Op
;
1136 unsigned BitWidth
= Op
.getScalarValueSizeInBits();
1137 APInt Imm
= APInt::getLowBitsSet(BitWidth
,
1138 VT
.getSizeInBits());
1139 return getNode(ISD::AND
, DL
, Op
.getValueType(), Op
,
1140 getConstant(Imm
, DL
, Op
.getValueType()));
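
// For example, with Op of type i32 and VT = i8 this emits
// (and Op, 0x000000FF), clearing every bit above the low 8.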
SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future this
  // might delegate to TLI to check pointer signedness.
  return getZExtOrTrunc(Op, DL, VT);
}

SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future this
  // might delegate to TLI to check pointer signedness.
  return getZeroExtendInReg(Op, DL, VT);
}
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}
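
// For example, getNOT(DL, Val, MVT::i32) builds (xor Val, 0xFFFFFFFF),
// the all-ones constant of the value type.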
SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}
SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}
SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}
SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}
SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits),
                                     DL, ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    NewSDValueDbgMsg(V, "Creating constant: ", this);
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  return Result;
}
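
// Illustration of the expansion path above: on a 32-bit little-endian target
// where i64 must be expanded, a v2i64 splat of 0x0000000100000002 is built as
// the v4i32 vector <2, 1, 2, 1> and then bitcast back to v2i64.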
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}
SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
                                             const SDLoc &DL, bool LegalTypes) {
  EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
  return getConstant(Val, DL, ShiftVT);
}
SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}
SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}
SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = MF->getFunction().hasOptSize()
                    ? getDataLayout().getABITypeAlignment(C->getType())
                    : getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned TargetFlags) {
  SDNode *&N =
      TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}
/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
/// point at N1 to point at N2 and indices that point at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}
SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(llvm::all_of(Mask,
                      [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);

  if (TLI->hasVectorBlend()) {
    // If shuffling a splat, try to blend the splat instead. We do this here so
    // that even when this arises during lowering we don't have to re-handle it.
    auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      if (!Splat)
        return;

      for (int i = 0; i < NElts; ++i) {
        if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
          continue;

        // If this input comes from undef, mark it as such.
        if (UndefElements[MaskVec[i] - Offset]) {
          MaskVec[i] = -1;
          continue;
        }

        // If we can blend a non-undef lane, use that instead.
        if (!UndefElements[i])
          MaskVec[i] = i + Offset;
      }
    };
    if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
      BlendSplat(N1BV, 0);
    if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
      BlendSplat(N2BV, NElts);
  }

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.isUndef();
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.isUndef();
  // Re-check whether both sides ended up undef.
  if (N1.isUndef() && N2Undef)
    return getUNDEF(VT);

  // If Identity shuffle return that node.
  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
  if (N2Undef) {
    SDValue V = N1;

    // Look through any bitcasts. We check that these don't change the number
    // (and size) of elements and just changes their types.
    while (V.getOpcode() == ISD::BITCAST)
      V = V->getOperand(0);

    // A splat should always show up as a build vector node.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      // If this is a splat of an undef, shuffling it is also undef.
      if (Splat && Splat.isUndef())
        return getUNDEF(VT);

      bool SameNumElts =
          V.getValueType().getVectorNumElements() == VT.getVectorNumElements();

      // We only have a splat which can skip shuffles if there is a splatted
      // value and no undef lanes rearranged by the shuffle.
      if (Splat && UndefElements.none()) {
        // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
        // number of elements match or the value splatted is a zero constant.
        if (SameNumElts)
          return N1;
        if (auto *C = dyn_cast<ConstantSDNode>(Splat))
          if (C->isNullValue())
            return N1;
      }

      // If the shuffle itself creates a splat, build the vector directly.
      if (AllSame && SameNumElts) {
        EVT BuildVT = BV->getValueType(0);
        const SDValue &Splatted = BV->getOperand(MaskVec[0]);
        SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);

        // We may have jumped through bitcasts, so the type of the
        // BUILD_VECTOR may not match the type of the shuffle.
        if (BuildVT != VT)
          NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
        return NewBV;
      }
    }
  }

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
  for (int i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it.  This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  llvm::copy(MaskVec, MaskAlloc);

  auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
                                           dl.getDebugLoc(), MaskAlloc);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
  EVT VT = SV.getValueType(0);
  SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
  ShuffleVectorSDNode::commuteMask(MaskVec);

  SDValue Op0 = SV.getOperand(0);
  SDValue Op1 = SV.getOperand(1);
  return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
  ID.AddInteger(RegNo);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
  N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
  ID.AddPointer(RegMask);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
                                 MCSymbol *Label) {
  return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
}

SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
                                   SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
  ID.AddPointer(Label);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N =
      newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset, bool isTarget,
                                      unsigned TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
  ID.AddPointer(V);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<SrcValueSDNode>(V);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
  ID.AddPointer(MD);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<MDNodeSDNode>(MD);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
  if (VT == V.getValueType())
    return V;

  return getNode(ISD::BITCAST, SDLoc(V), VT, V);
}

SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
                                       unsigned SrcAS, unsigned DestAS) {
  SDValue Ops[] = {Ptr};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VT, SrcAS, DestAS);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
}
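
// Usage sketch (illustrative; `DAG` and `Amt` are hypothetical names): if
// the target's shift-amount type for i64 is i32 and Amt is an i8 value,
//   DAG.getShiftAmountOperand(MVT::i64, Amt)
// returns Amt zero-extended (or truncated, if it was wider) to i32.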

SDValue SelectionDAG::expandVAArg(SDNode *Node) {
  SDLoc dl(Node);
  const TargetLowering &TLI = getTargetLoweringInfo();
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue Tmp1 = Node->getOperand(0);
  SDValue Tmp2 = Node->getOperand(1);
  unsigned Align = Node->getConstantOperandVal(3);

  SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
                               Tmp2, MachinePointerInfo(V));
  SDValue VAList = VAListLoad;

  if (Align > TLI.getMinStackArgumentAlignment()) {
    assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");

    VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
                     getConstant(Align - 1, dl, VAList.getValueType()));

    VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList,
                     getConstant(-(int64_t)Align, dl, VAList.getValueType()));
  }

  // Increment the pointer, VAList, to the next vaarg
  Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
                 getConstant(getDataLayout().getTypeAllocSize(
                                 VT.getTypeForEVT(*getContext())),
                             dl, VAList.getValueType()));
  // Store the incremented VAList to the legalized pointer
  Tmp1 =
      getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
  // Load the actual argument out of the pointer VAList
  return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
}
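
// The expansion above is roughly, in pseudo-IR (alignment fixup omitted):
//   list = load(ap); next = list + sizeof(T); store(next, ap); load(list)
// where `ap` and `T` stand in for the va_list pointer operand and the
// result type.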

SDValue SelectionDAG::expandVACopy(SDNode *Node) {
  SDLoc dl(Node);
  const TargetLowering &TLI = getTargetLoweringInfo();
  // This defaults to loading a pointer from the input and storing it to the
  // output, returning the chain.
  const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
  const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
  SDValue Tmp1 =
      getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
              Node->getOperand(2), MachinePointerInfo(VS));
  return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
                  MachinePointerInfo(VD));
}

SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
  unsigned ByteSize = VT.getStoreSize();
  Type *Ty = VT.getTypeForEVT(*getContext());
  unsigned StackAlign =
      std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);

  int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
  return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
}

SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize());
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const DataLayout &DL = getDataLayout();
  unsigned Align =
      std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));

  MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(Bytes, Align, false);
  return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
}

SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
                                ISD::CondCode Cond, const SDLoc &dl) {
  EVT OpVT = N1.getValueType();

  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2:  return getBoolConstant(true, dl, VT, OpVT);

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!OpVT.isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (OpVT.isInteger()) {
    // For EQ and NE, we can always pick a value for the undef to make the
    // predicate pass or fail, so we can return undef.
    // Matches behavior in llvm::ConstantFoldCompareInstruction.
    // icmp eq/ne X, undef -> undef.
    if ((N1.isUndef() || N2.isUndef()) &&
        (Cond == ISD::SETEQ || Cond == ISD::SETNE))
      return getUNDEF(VT);

    // If both operands are undef, we can return undef for int comparison.
    // icmp undef, undef -> undef.
    if (N1.isUndef() && N2.isUndef())
      return getUNDEF(VT);

    // icmp X, X -> true/false
    // icmp X, undef -> true/false because undef could be X.
    if (N1 == N2)
      return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
      const APInt &C1 = N1C->getAPIntValue();

      switch (Cond) {
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getBoolConstant(C1 == C2, dl, VT, OpVT);
      case ISD::SETNE:  return getBoolConstant(C1 != C2, dl, VT, OpVT);
      case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT);
      case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT);
      case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT);
      case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT);
      case ISD::SETLT:  return getBoolConstant(C1.slt(C2), dl, VT, OpVT);
      case ISD::SETGT:  return getBoolConstant(C1.sgt(C2), dl, VT, OpVT);
      case ISD::SETLE:  return getBoolConstant(C1.sle(C2), dl, VT, OpVT);
      case ISD::SETGE:  return getBoolConstant(C1.sge(C2), dl, VT, OpVT);
      }
    }
  }

  auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);

  if (N1CFP && N2CFP) {
    APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
    switch (Cond) {
    default: break;
    case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
                                             VT, OpVT);
    case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
                                             R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETO:   return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETUO:  return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
                                             R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
                                             R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
                                             VT, OpVT);
    case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    }
  } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
    // Ensure that the constant occurs on the RHS.
    ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
    if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
      return SDValue();
    return getSetCC(dl, VT, N2, N1, SwappedCond);
  } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
             (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
    // If an operand is known to be a nan (or undef that could be a nan), we
    // can fold it.
    // Choosing NaN for the undef will always make unordered comparisons
    // succeed and ordered comparisons fail.
    // Matches behavior in llvm::ConstantFoldCompareInstruction.
    switch (ISD::getUnorderedFlavor(Cond)) {
    default:
      llvm_unreachable("Unknown flavor!");
    case 0: // Known false.
      return getBoolConstant(false, dl, VT, OpVT);
    case 1: // Known true.
      return getBoolConstant(true, dl, VT, OpVT);
    case 2: // Undefined.
      return getUNDEF(VT);
    }
  }

  // Could not fold it.
  return SDValue();
}
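
// Worked example (illustrative constants): folding a seteq of two equal
// integer constants yields a true boolean constant, while against a
// known-NaN operand an ordered compare such as setolt folds to false and an
// unordered one such as setult folds to true, via getUnorderedFlavor above.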

/// See if the specified operand can be simplified with the knowledge that
/// only the bits specified by DemandedBits are used.
/// TODO: really we should be making this into the DAG equivalent of
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
  EVT VT = V.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return GetDemandedBits(V, DemandedBits, DemandedElts);
}

/// See if the specified operand can be simplified with the knowledge that
/// only the bits specified by DemandedBits are used in the elements specified
/// by DemandedElts.
/// TODO: really we should be making this into the DAG equivalent of
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
                                      const APInt &DemandedElts) {
  switch (V.getOpcode()) {
  default:
    break;
  case ISD::Constant: {
    auto *CV = cast<ConstantSDNode>(V.getNode());
    assert(CV && "Const value should be ConstSDNode.");
    const APInt &CVal = CV->getAPIntValue();
    APInt NewVal = CVal & DemandedBits;
    if (NewVal != CVal)
      return getConstant(NewVal, SDLoc(V), V.getValueType());
    break;
  }
  case ISD::OR:
  case ISD::XOR:
  case ISD::SIGN_EXTEND_INREG:
    return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
                                                *this, 0);
  case ISD::SRL:
    // Only look at single-use SRLs.
    if (!V.getNode()->hasOneUse())
      break;
    if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
      // See if we can recursively simplify the LHS.
      unsigned Amt = RHSC->getZExtValue();

      // Watch out for shift count overflow though.
      if (Amt >= DemandedBits.getBitWidth())
        break;
      APInt SrcDemandedBits = DemandedBits << Amt;
      if (SDValue SimplifyLHS =
              GetDemandedBits(V.getOperand(0), SrcDemandedBits))
        return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
                       V.getOperand(1));
    }
    break;
  case ISD::AND: {
    // X & -1 -> X (ignoring bits which aren't demanded).
    // Also handle the case where masked out bits in X are known to be zero.
    if (ConstantSDNode *RHSC = isConstOrConstSplat(V.getOperand(1))) {
      const APInt &AndVal = RHSC->getAPIntValue();
      if (DemandedBits.isSubsetOf(AndVal) ||
          DemandedBits.isSubsetOf(computeKnownBits(V.getOperand(0)).Zero |
                                  AndVal))
        return V.getOperand(0);
    }
    break;
  }
  case ISD::ANY_EXTEND: {
    SDValue Src = V.getOperand(0);
    unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
    // Being conservative here - only peek through if we only demand bits in
    // the non-extended source (even though the extended bits are technically
    // undef).
    if (DemandedBits.getActiveBits() > SrcBitWidth)
      break;
    APInt SrcDemandedBits = DemandedBits.trunc(SrcBitWidth);
    if (SDValue DemandedSrc = GetDemandedBits(Src, SrcDemandedBits))
      return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc);
    break;
  }
  }
  return SDValue();
}
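
// Worked example (illustrative constants): with DemandedBits = 0xFF,
// GetDemandedBits of (and x, 0xFFFF) returns x, because every demanded bit
// is already covered by the AND mask.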

/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
/// use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
                                     unsigned Depth) const {
  EVT VT = V.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return MaskedValueIsZero(V, Mask, DemandedElts, Depth);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
/// DemandedElts. We use this predicate to simplify operations downstream.
/// Mask is known to be zero for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
                                     const APInt &DemandedElts,
                                     unsigned Depth) const {
  return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
}

/// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
                                        unsigned Depth) const {
  return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
}
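
// Usage sketch (illustrative; `DAG` and `N` are hypothetical names): proving
// the low byte of N is zero before a narrowing transform:
//   DAG.MaskedValueIsZero(N, APInt::getLowBitsSet(BitWidth, 8))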

/// isSplatValue - Return true if the vector V has the same value
/// across all DemandedElts.
bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
                                APInt &UndefElts) {
  if (!DemandedElts)
    return false; // No demanded elts, better to assume we don't know anything.

  EVT VT = V.getValueType();
  assert(VT.isVector() && "Vector type expected");

  unsigned NumElts = VT.getVectorNumElements();
  assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
  UndefElts = APInt::getNullValue(NumElts);

  switch (V.getOpcode()) {
  case ISD::BUILD_VECTOR: {
    SDValue Scl;
    for (unsigned i = 0; i != NumElts; ++i) {
      SDValue Op = V.getOperand(i);
      if (Op.isUndef()) {
        UndefElts.setBit(i);
        continue;
      }
      if (!DemandedElts[i])
        continue;
      if (Scl && Scl != Op)
        return false;
      Scl = Op;
    }
    return true;
  }
  case ISD::VECTOR_SHUFFLE: {
    // Check if this is a shuffle node doing a splat.
    // TODO: Do we need to handle shuffle(splat, undef, mask)?
    int SplatIndex = -1;
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
    for (int i = 0; i != (int)NumElts; ++i) {
      int M = Mask[i];
      if (M < 0) {
        UndefElts.setBit(i);
        continue;
      }
      if (!DemandedElts[i])
        continue;
      if (0 <= SplatIndex && SplatIndex != M)
        return false;
      SplatIndex = M;
    }
    return true;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    SDValue Src = V.getOperand(0);
    ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(V.getOperand(1));
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
      // Offset the demanded elts by the subvector index.
      uint64_t Idx = SubIdx->getZExtValue();
      APInt UndefSrcElts;
      APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
      if (isSplatValue(Src, DemandedSrc, UndefSrcElts)) {
        UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
        return true;
      }
    }
    break;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND: {
    APInt UndefLHS, UndefRHS;
    SDValue LHS = V.getOperand(0);
    SDValue RHS = V.getOperand(1);
    if (isSplatValue(LHS, DemandedElts, UndefLHS) &&
        isSplatValue(RHS, DemandedElts, UndefRHS)) {
      UndefElts = UndefLHS | UndefRHS;
      return true;
    }
    break;
  }
  }

  return false;
}
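
// Worked example (illustrative): build_vector <x, undef, x, x> is reported
// as a splat of x with UndefElts = 0b0010 when all four lanes are demanded.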

/// Helper wrapper to main isSplatValue function.
bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) {
  EVT VT = V.getValueType();
  assert(VT.isVector() && "Vector type expected");
  unsigned NumElts = VT.getVectorNumElements();

  APInt UndefElts;
  APInt DemandedElts = APInt::getAllOnesValue(NumElts);
  return isSplatValue(V, DemandedElts, UndefElts) &&
         (AllowUndefs || !UndefElts);
}

SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
  V = peekThroughExtractSubvectors(V);

  EVT VT = V.getValueType();
  unsigned Opcode = V.getOpcode();
  switch (Opcode) {
  default: {
    APInt UndefElts;
    APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
    if (isSplatValue(V, DemandedElts, UndefElts)) {
      // Handle case where all demanded elements are UNDEF.
      if (DemandedElts.isSubsetOf(UndefElts)) {
        SplatIdx = 0;
        return getUNDEF(VT);
      }
      SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
      return V;
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    // Check if this is a shuffle node doing a splat.
    // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
    // getTargetVShiftNode currently struggles without the splat source.
    auto *SVN = cast<ShuffleVectorSDNode>(V);
    if (!SVN->isSplat())
      break;
    int Idx = SVN->getSplatIndex();
    int NumElts = V.getValueType().getVectorNumElements();
    SplatIdx = Idx % NumElts;
    return V.getOperand(Idx / NumElts);
  }
  }

  return SDValue();
}

SDValue SelectionDAG::getSplatValue(SDValue V) {
  int SplatIdx;
  if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx))
    return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V),
                   SrcVector.getValueType().getScalarType(), SrcVector,
                   getIntPtrConstant(SplatIdx, SDLoc(V)));
  return SDValue();
}
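
// Worked example (illustrative mask): getSplatValue of
// shuffle(v, undef, <1,1,1,1>) returns (extract_vector_elt v, 1).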

/// If a SHL/SRA/SRL node has a constant or splat constant shift amount that
/// is less than the element bit-width of the shift node, return it.
static const APInt *getValidShiftAmountConstant(SDValue V) {
  if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) {
    // Shifting more than the bitwidth is not valid.
    const APInt &ShAmt = SA->getAPIntValue();
    if (ShAmt.ult(V.getScalarValueSizeInBits()))
      return &ShAmt;
  }
  return nullptr;
}
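
// e.g. (illustrative constants) for (srl v4i32:x, splat(5)) this returns the
// APInt 5; for a splat shift amount of 37 it returns nullptr, since 37 >= 32.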

/// Determine which bits of Op are known to be either zero or one and return
/// them in Known. For vectors, the known bits are those that are shared by
/// every vector element.
KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return computeKnownBits(Op, DemandedElts, Depth);
}

/// Determine which bits of Op are known to be either zero or one and return
/// them in Known. The DemandedElts argument allows us to only collect the
/// known bits that are shared by the requested vector elements.
KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
                                         unsigned Depth) const {
  unsigned BitWidth = Op.getScalarValueSizeInBits();

  KnownBits Known(BitWidth);   // Don't know anything.

  if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
    // We know all of the bits for a constant!
    Known.One = C->getAPIntValue();
    Known.Zero = ~Known.One;
    return Known;
  }
  if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
    // We know all of the bits for a constant fp!
    Known.One = C->getValueAPF().bitcastToAPInt();
    Known.Zero = ~Known.One;
    return Known;
  }

  if (Depth >= MaxRecursionDepth)
    return Known; // Limit search depth.

  KnownBits Known2;
  unsigned NumElts = DemandedElts.getBitWidth();
  assert((!Op.getValueType().isVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  if (!DemandedElts)
    return Known; // No demanded elts, better to assume we don't know anything.

  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded vector element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      if (!DemandedElts[i])
        continue;

      SDValue SrcOp = Op.getOperand(i);
      Known2 = computeKnownBits(SrcOp, Depth + 1);

      // BUILD_VECTOR can implicitly truncate sources, we must handle this.
      if (SrcOp.getValueSizeInBits() != BitWidth) {
        assert(SrcOp.getValueSizeInBits() > BitWidth &&
               "Expected BUILD_VECTOR implicit truncation");
        Known2 = Known2.trunc(BitWidth);
      }

      // Known bits are the values that are shared by every demanded element.
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;

      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    break;
  case ISD::VECTOR_SHUFFLE: {
    // Collect the known bits that are shared by every vector element
    // referenced by the shuffle.
    APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
    Known.Zero.setAllBits(); Known.One.setAllBits();
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
    assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;

      int M = SVN->getMaskElt(i);
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state
        // of the shuffle result.
        Known.resetAll();
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }

      if ((unsigned)M < NumElts)
        DemandedLHS.setBit((unsigned)M % NumElts);
      else
        DemandedRHS.setBit((unsigned)M % NumElts);
    }
    // Known bits are the values that are shared by every demanded element.
    if (!!DemandedLHS) {
      SDValue LHS = Op.getOperand(0);
      Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    if (!!DemandedRHS) {
      SDValue RHS = Op.getOperand(1);
      Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    // Split DemandedElts and test each of the demanded subvectors.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    EVT SubVectorVT = Op.getOperand(0).getValueType();
    unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
    unsigned NumSubVectors = Op.getNumOperands();
    for (unsigned i = 0; i != NumSubVectors; ++i) {
      APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
      DemandedSub = DemandedSub.trunc(NumSubVectorElts);
      if (!!DemandedSub) {
        SDValue Sub = Op.getOperand(i);
        Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // If we know the element index, demand any elements from the subvector
    // and the remainder from the src it's inserted into, otherwise demand
    // them all.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
      Known.One.setAllBits();
      Known.Zero.setAllBits();
      uint64_t Idx = SubIdx->getZExtValue();
      APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
      if (!!DemandedSubElts) {
        Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
        if (Known.isUnknown())
          break; // early-out.
      }
      APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
      APInt DemandedSrcElts = DemandedElts & ~SubMask;
      if (!!DemandedSrcElts) {
        Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
    } else {
      Known = computeKnownBits(Sub, Depth + 1);
      if (Known.isUnknown())
        break; // early-out.
      Known2 = computeKnownBits(Src, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // If we know the element index, just demand that subvector elements,
    // otherwise demand them all.
    SDValue Src = Op.getOperand(0);
    ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
      // Offset the demanded elts by the subvector index.
      uint64_t Idx = SubIdx->getZExtValue();
      APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
      Known = computeKnownBits(Src, DemandedSrc, Depth + 1);
    } else {
      Known = computeKnownBits(Src, Depth + 1);
    }
    break;
  }
  case ISD::SCALAR_TO_VECTOR: {
    // We know about scalar_to_vector as much as we know about its source,
    // which becomes the first element of an otherwise unknown vector.
    if (DemandedElts != 1)
      break;

    SDValue N0 = Op.getOperand(0);
    Known = computeKnownBits(N0, Depth + 1);
    if (N0.getValueSizeInBits() != BitWidth)
      Known = Known.trunc(BitWidth);

    break;
  }
  case ISD::BITCAST: {
    SDValue N0 = Op.getOperand(0);
    EVT SubVT = N0.getValueType();
    unsigned SubBitWidth = SubVT.getScalarSizeInBits();

    // Ignore bitcasts from unsupported types.
    if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
      break;

    // Fast handling of 'identity' bitcasts.
    if (BitWidth == SubBitWidth) {
      Known = computeKnownBits(N0, DemandedElts, Depth + 1);
      break;
    }

    bool IsLE = getDataLayout().isLittleEndian();

    // Bitcast 'small element' vector to 'large element' scalar/vector.
    if ((BitWidth % SubBitWidth) == 0) {
      assert(N0.getValueType().isVector() && "Expected bitcast from vector");

      // Collect known bits for the (larger) output by collecting the known
      // bits from each set of sub elements and shift these into place.
      // We need to separately call computeKnownBits for each set of
      // sub elements as the knownbits for each is likely to be different.
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts(NumElts * SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);

      for (unsigned i = 0; i != SubScale; ++i) {
        Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
                                  Depth + 1);
        unsigned Shifts = IsLE ? i : SubScale - 1 - i;
        Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
        Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
      }
    }

    // Bitcast 'large element' scalar/vector to 'small element' vector.
    if ((SubBitWidth % BitWidth) == 0) {
      assert(Op.getValueType().isVector() && "Expected bitcast to vector");

      // Collect known bits for the (smaller) output by collecting the known
      // bits from the overlapping larger input elements and extracting the
      // sub sections we actually care about.
      unsigned SubScale = SubBitWidth / BitWidth;
      APInt SubDemandedElts(NumElts / SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i / SubScale);

      Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);

      Known.Zero.setAllBits(); Known.One.setAllBits();
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Shifts = IsLE ? i : NumElts - 1 - i;
          unsigned Offset = (Shifts % SubScale) * BitWidth;
          Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
          Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
          // If we don't know any bits, early out.
          if (Known.isUnknown())
            break;
        }
    }
    break;
  }
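
  // Worked example for the bitcast case (illustrative): casting v2i8 -> i16
  // on a little-endian target places the known bits of element 0 in bits
  // [7:0] of the result and those of element 1 in bits [15:8].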
  case ISD::AND:
    // If either the LHS or the RHS are Zero, the result is zero.
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;
    break;
  case ISD::OR:
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case ISD::XOR: {
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = KnownZeroOut;
    break;
  }
  case ISD::MUL: {
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
    unsigned TrailZ = Known.countMinTrailingZeros() +
                      Known2.countMinTrailingZeros();
    unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                              Known2.countMinLeadingZeros(),
                              BitWidth) - BitWidth;

    Known.resetAll();
    Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
    Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
    break;
  }
  case ISD::UDIV: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case ISD::SELECT:
  case ISD::VSELECT:
    Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SELECT_CC:
    Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SMULO:
  case ISD::UMULO:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents.
    // If we know the result of a setcc has the top bits zero, use this info.
    // We know that we have an integer-based boolean since these operations
    // are only available for integer.
    if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  case ISD::SHL:
    if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
      Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
      unsigned Shift = ShAmt->getZExtValue();
      Known.Zero <<= Shift;
      Known.One <<= Shift;
      // Low bits are known zero.
      Known.Zero.setLowBits(Shift);
    }
    break;
  case ISD::SRL:
    if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
      Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
      unsigned Shift = ShAmt->getZExtValue();
      Known.Zero.lshrInPlace(Shift);
      Known.One.lshrInPlace(Shift);
      // High bits are known zero.
      Known.Zero.setHighBits(Shift);
    } else if (auto *BV = dyn_cast<BuildVectorSDNode>(Op.getOperand(1))) {
      // If the shift amount is a vector of constants see if we can bound
      // the number of upper zero bits.
      unsigned ShiftAmountMin = BitWidth;
      for (unsigned i = 0; i != BV->getNumOperands(); ++i) {
        if (auto *C = dyn_cast<ConstantSDNode>(BV->getOperand(i))) {
          const APInt &ShAmt = C->getAPIntValue();
          if (ShAmt.ult(BitWidth)) {
            ShiftAmountMin = std::min<unsigned>(ShiftAmountMin,
                                                ShAmt.getZExtValue());
            continue;
          }
        }
        // Don't know anything.
        ShiftAmountMin = 0;
        break;
      }

      Known.Zero.setHighBits(ShiftAmountMin);
    }
    break;
  case ISD::SRA:
    if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
      Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
      unsigned Shift = ShAmt->getZExtValue();
      // Sign extend known zero/one bit (else is unknown).
      Known.Zero.ashrInPlace(Shift);
      Known.One.ashrInPlace(Shift);
    }
    break;
  case ISD::FSHL:
  case ISD::FSHR:
    if (ConstantSDNode *C =
            isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
      unsigned Amt = C->getAPIntValue().urem(BitWidth);

      // For fshl, 0-shift returns the 1st arg.
      // For fshr, 0-shift returns the 2nd arg.
      if (Amt == 0) {
        Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
                                 DemandedElts, Depth + 1);
        break;
      }

      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
      Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
      Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
      if (Opcode == ISD::FSHL) {
        Known.One <<= Amt;
        Known.Zero <<= Amt;
        Known2.One.lshrInPlace(BitWidth - Amt);
        Known2.Zero.lshrInPlace(BitWidth - Amt);
      } else {
        Known.One <<= BitWidth - Amt;
        Known.Zero <<= BitWidth - Amt;
        Known2.One.lshrInPlace(Amt);
        Known2.Zero.lshrInPlace(Amt);
      }
      Known.One |= Known2.One;
      Known.Zero |= Known2.Zero;
    }
    break;
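
  // Worked example for the funnel shifts (illustrative constants): for
  // fshl(X, Y, 8) on i32, X's known bits shift left by 8 and Y's top 8 known
  // bits land in the low byte of the result.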
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned EBits = EVT.getScalarSizeInBits();

    // Sign extension. Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);

    APInt InSignMask = APInt::getSignMask(EBits);
    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);

    // If the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InSignMask = InSignMask.zext(BitWidth);
    if (NewBits.getBoolValue())
      InputDemandedBits |= InSignMask;

    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known.One &= InputDemandedBits;
    Known.Zero &= InputDemandedBits;

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear
      Known.Zero |= NewBits;
      Known.One &= ~NewBits;
    } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set
      Known.One |= NewBits;
      Known.Zero &= ~NewBits;
    } else { // Input sign bit unknown
      Known.Zero &= ~NewBits;
      Known.One &= ~NewBits;
    }
    break;
  }
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleTZ = Known2.countMaxTrailingZeros();
    unsigned LowBits = Log2_32(PossibleTZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleLZ = Known2.countMaxLeadingZeros();
    unsigned LowBits = Log2_32(PossibleLZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case ISD::CTPOP: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we know some of the bits are zero, they can't be one.
    unsigned PossibleOnes = Known2.countMaxPopulation();
    Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
    if (ISD::isNON_EXTLoad(LD) && Cst) {
      // Determine any common known bits from the loaded constant pool value.
      Type *CstTy = Cst->getType();
      if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
        // If it's a vector splat, then we can (quickly) reuse the scalar path.
        // NOTE: We assume all elements match and none are UNDEF.
        if (CstTy->isVectorTy()) {
          if (const Constant *Splat = Cst->getSplatValue()) {
            Cst = Splat;
            CstTy = Cst->getType();
          }
        }
        // TODO - do we need to handle different bitwidths?
        if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
          // Iterate across all vector elements finding common known bits.
          Known.One.setAllBits();
          Known.Zero.setAllBits();
          for (unsigned i = 0; i != NumElts; ++i) {
            if (!DemandedElts[i])
              continue;
            if (Constant *Elt = Cst->getAggregateElement(i)) {
              if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
                const APInt &Value = CInt->getValue();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
              if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
                APInt Value = CFP->getValueAPF().bitcastToAPInt();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
            }
            Known.One.clearAllBits();
            Known.Zero.clearAllBits();
            break;
          }
        } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
          if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
            const APInt &Value = CInt->getValue();
            Known.One = Value;
            Known.Zero = ~Value;
          } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
            APInt Value = CFP->getValueAPF().bitcastToAPInt();
            Known.One = Value;
            Known.Zero = ~Value;
          }
        }
      }
    } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
      EVT VT = LD->getMemoryVT();
      unsigned MemBits = VT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
    } else if (const MDNode *Ranges = LD->getRanges()) {
      if (LD->getExtensionType() == ISD::NON_EXTLOAD)
        computeKnownBitsFromRangeMetadata(*Ranges, Known);
    }
    break;
  }
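
  // Worked example for the load case (illustrative constant pool value): a
  // non-extending load of <2 x i32> <3, 3> reports bits 0-1 known one and
  // all higher bits known zero.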
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */);
    break;
  }
  case ISD::ZERO_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */);
    break;
  }
  case ISD::SIGN_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    // If the sign bit is known to be zero or one, then sext will extend
    // it to the top bits, else it will just zext.
    Known = Known.sext(BitWidth);
    break;
  }
  case ISD::SIGN_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If the sign bit is known to be zero or one, then sext will extend
    // it to the top bits, else it will just zext.
    Known = Known.sext(BitWidth);
    break;
  }
  case ISD::ANY_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), Depth+1);
    Known = Known.zext(BitWidth, false /* ExtendedBitsAreKnownZero */);
    break;
  }
  case ISD::TRUNCATE: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.trunc(BitWidth);
    break;
  }
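
  // e.g. for (zero_extend i8 x to i32), bits 8-31 of the result are known
  // zero regardless of x; any_extend instead leaves them unknown.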
  case ISD::AssertZext: {
    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
    Known = computeKnownBits(Op.getOperand(0), Depth+1);
    Known.Zero |= (~InMask);
    Known.One &= (~Known.Zero);
    break;
  }
  case ISD::FGETSIGN:
    // All bits are zero except the low bit.
    Known.Zero.setBitsFrom(1);
    break;
  case ISD::USUBO:
  case ISD::SSUBO:
    if (Op.getResNo() == 1) {
      // If we know the result of a setcc has the top bits zero, use this info.
      if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
              TargetLowering::ZeroOrOneBooleanContent &&
          BitWidth > 1)
        Known.Zero.setBitsFrom(1);
      break;
    }
    LLVM_FALLTHROUGH;
  case ISD::SUB:
  case ISD::SUBC: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false,
                                        Known, Known2);
    break;
  }
  case ISD::UADDO:
  case ISD::SADDO:
  case ISD::ADDCARRY:
    if (Op.getResNo() == 1) {
      // If we know the result of a setcc has the top bits zero, use this info.
      if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
              TargetLowering::ZeroOrOneBooleanContent &&
          BitWidth > 1)
        Known.Zero.setBitsFrom(1);
      break;
    }
    LLVM_FALLTHROUGH;
  case ISD::ADD:
  case ISD::ADDC:
  case ISD::ADDE: {
    assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");

    // With ADDE and ADDCARRY, a carry bit may be added in.
    KnownBits Carry(1);
    if (Opcode == ISD::ADDE)
      // Can't track carry from glue, set carry to unknown.
      Carry.resetAll();
    else if (Opcode == ISD::ADDCARRY)
      // TODO: Compute known bits for the carry operand. Not sure if it is
      // worth the trouble (how often will we find a known carry bit). And I
      // haven't tested this very much yet, but something like this might work:
      //   Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
      //   Carry = Carry.zextOrTrunc(1, false);
      Carry.resetAll();
    else
      Carry.setAllZero();

    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
    break;
  }
  case ISD::SREM:
    if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero,
        // then the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;
        assert((Known.Zero & Known.One) == 0 &&
               "Bits known to be one AND zero?");
      }
    }
    break;
  case ISD::UREM: {
    if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

        // The upper bits are all zero, the lower ones are unchanged.
        Known.Zero = Known2.Zero | ~LowBits;
        Known.One = Known2.One & LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    uint32_t Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }
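
  // Worked example (illustrative constant): for (urem x, 8) the result
  // equals x & 7, so every bit above bit 2 is known zero.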
  case ISD::EXTRACT_ELEMENT: {
    Known = computeKnownBits(Op.getOperand(0), Depth+1);
    const unsigned Index = Op.getConstantOperandVal(1);
    const unsigned EltBitWidth = Op.getValueSizeInBits();

    // Remove low part of known bits mask
    Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
    Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);

    // Remove high part of known bit mask
    Known = Known.trunc(EltBitWidth);
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue InVec = Op.getOperand(0);
    SDValue EltNo = Op.getOperand(1);
    EVT VecVT = InVec.getValueType();
    const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
    const unsigned NumSrcElts = VecVT.getVectorNumElements();
    // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
    // anything about the extended bits.
    if (BitWidth > EltBitWidth)
      Known = Known.trunc(EltBitWidth);
    ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
      // If we know the element index, just demand that vector element.
      unsigned Idx = ConstEltNo->getZExtValue();
      APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
      Known = computeKnownBits(InVec, DemandedElt, Depth + 1);
    } else {
      // Unknown element index, so ignore DemandedElts and demand them all.
      Known = computeKnownBits(InVec, Depth + 1);
    }
    if (BitWidth > EltBitWidth)
      Known = Known.zext(BitWidth, false /* => any extend */);
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue InVec = Op.getOperand(0);
    SDValue InVal = Op.getOperand(1);
    SDValue EltNo = Op.getOperand(2);

    ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
      // If we know the element index, split the demand between the
      // source vector and the inserted element.
      Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth);
      unsigned EltIdx = CEltNo->getZExtValue();

      // If we demand the inserted element then add its common known bits.
      if (DemandedElts[EltIdx]) {
        Known2 = computeKnownBits(InVal, Depth + 1);
        Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
        Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
      }

      // If we demand the source vector then add its common known bits,
      // ensuring that we don't demand the inserted element.
      APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
      if (!!VectorElts) {
        Known2 = computeKnownBits(InVec, VectorElts, Depth + 1);
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
    } else {
      // Unknown element index, so ignore DemandedElts and demand them all.
      Known = computeKnownBits(InVec, Depth + 1);
      Known2 = computeKnownBits(InVal, Depth + 1);
      Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
      Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
    }
    break;
  }
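
  // Worked example (illustrative): demanding only lane 2 of
  // (insert_vector_elt v, 0, 2) makes every bit known zero; demanding other
  // lanes falls back to the common known bits of v.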
  case ISD::BITREVERSE: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known.Zero = Known2.Zero.reverseBits();
    Known.One = Known2.One.reverseBits();
    break;
  }
  case ISD::BSWAP: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known.Zero = Known2.Zero.byteSwap();
    Known.One = Known2.One.byteSwap();
    break;
  }
  case ISD::ABS: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // If the source's MSB is zero then we know the rest of the bits already.
    if (Known2.isNonNegative()) {
      Known.Zero = Known2.Zero;
      Known.One = Known2.One;
      break;
    }

    // We only know that the absolute value's MSB will be zero iff there is
    // a set bit that isn't the sign bit (otherwise it could be INT_MIN).
    Known2.One.clearSignBit();
    if (Known2.One.getBoolValue()) {
      Known.Zero = APInt::getSignMask(BitWidth);
      break;
    }
    break;
  }
  case ISD::UMIN: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // UMIN - we know that the result will have the maximum of the
    // known zero leading bits of the inputs.
    unsigned LeadZero = Known.countMinLeadingZeros();
    LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros());

    Known.Zero &= Known2.Zero;
    Known.One &= Known2.One;
    Known.Zero.setHighBits(LeadZero);
    break;
  }
  case ISD::UMAX: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // UMAX - we know that the result will have the maximum of the
    // known one leading bits of the inputs.
    unsigned LeadOne = Known.countMinLeadingOnes();
    LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes());

    Known.Zero &= Known2.Zero;
    Known.One &= Known2.One;
    Known.One.setHighBits(LeadOne);
    break;
  }
  case ISD::SMIN:
  case ISD::SMAX: {
    // If we have a clamp pattern, we know that the number of sign bits will
    // be the minimum of the clamp min/max range.
    bool IsMax = (Opcode == ISD::SMAX);
    ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
    if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
      if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
        CstHigh =
            isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
    if (CstLow && CstHigh) {
      if (!IsMax)
        std::swap(CstLow, CstHigh);

      const APInt &ValueLow = CstLow->getAPIntValue();
      const APInt &ValueHigh = CstHigh->getAPIntValue();
      if (ValueLow.sle(ValueHigh)) {
        unsigned LowSignBits = ValueLow.getNumSignBits();
        unsigned HighSignBits = ValueHigh.getNumSignBits();
        unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
        if (ValueLow.isNegative() && ValueHigh.isNegative()) {
          Known.One.setHighBits(MinSignBits);
          break;
        }
        if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
          Known.Zero.setHighBits(MinSignBits);
          break;
        }
      }
    }

    // Fallback - just get the shared known bits of the operands.
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Known.isUnknown()) break; // Early-out
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known.Zero &= Known2.Zero;
    Known.One &= Known2.One;
    break;
  }
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth);
    break;

  default:
    if (Opcode < ISD::BUILTIN_OP_END)
      break;
    LLVM_FALLTHROUGH;
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    // Allow the target to implement this method for its nodes.
    TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
    break;
  }

  assert(!Known.hasConflict() && "Bits known to be one AND zero?");
  return Known;
}

SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
                                                             SDValue N1) const {
  // X + 0 never overflows
  if (isNullConstant(N1))
    return OFK_Never;

  KnownBits N1Known = computeKnownBits(N1);
  if (N1Known.Zero.getBoolValue()) {
    KnownBits N0Known = computeKnownBits(N0);

    bool overflow;
    (void)(~N0Known.Zero).uadd_ov(~N1Known.Zero, overflow);
    if (!overflow)
      return OFK_Never;
  }

  // mulhi + 1 never overflows
  if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
      (~N1Known.Zero & 0x01) == ~N1Known.Zero)
    return OFK_Never;

  if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
    KnownBits N0Known = computeKnownBits(N0);

    if ((~N0Known.Zero & 0x01) == ~N0Known.Zero)
      return OFK_Never;
  }

  return OFK_Sometime;
}
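
// Worked examples (illustrative): computeOverflowKind(x, 0) is OFK_Never,
// and adding a value known to be 0 or 1 to the high half of a umul_lohi is
// also OFK_Never (the "mulhi + 1" case above).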

bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
  EVT OpVT = Val.getValueType();
  unsigned BitWidth = OpVT.getScalarSizeInBits();

  // Is the constant a known power of 2?
  if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
    return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();

  // A left-shift of a constant one will have exactly one bit set because
  // shifting the bit off the end is undefined.
  if (Val.getOpcode() == ISD::SHL) {
    auto *C = isConstOrConstSplat(Val.getOperand(0));
    if (C && C->getAPIntValue() == 1)
      return true;
  }

  // Similarly, a logical right-shift of a constant sign-bit will have exactly
  // one bit set.
  if (Val.getOpcode() == ISD::SRL) {
    auto *C = isConstOrConstSplat(Val.getOperand(0));
    if (C && C->getAPIntValue().isSignMask())
      return true;
  }

  // Are all operands of a build vector constant powers of two?
  if (Val.getOpcode() == ISD::BUILD_VECTOR)
    if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
          if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
            return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
          return false;
        }))
      return true;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = computeKnownBits(Val);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}
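
// Worked examples (illustrative): (shl 1, x) always passes this test, as
// does a build_vector whose operands are all constant powers of two, such as
// <8, 8, 8, 8>.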

unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return ComputeNumSignBits(Op, DemandedElts, Depth);
}
unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
                                          unsigned Depth) const {
  EVT VT = Op.getValueType();
  assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
  unsigned VTBits = VT.getScalarSizeInBits();
  unsigned NumElts = DemandedElts.getBitWidth();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
    const APInt &Val = C->getAPIntValue();
    return Val.getNumSignBits();
  }

  if (Depth >= MaxRecursionDepth)
    return 1; // Limit search depth.

  if (!DemandedElts)
    return 1; // No demanded elts, better to assume we don't know anything.

  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  default: break;
  case ISD::AssertSext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits-Tmp+1;
  case ISD::AssertZext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits-Tmp;

  case ISD::BUILD_VECTOR:
    Tmp = VTBits;
    for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
      if (!DemandedElts[i])
        continue;

      SDValue SrcOp = Op.getOperand(i);
      Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1);

      // BUILD_VECTOR can implicitly truncate sources, we must handle this.
      if (SrcOp.getValueSizeInBits() != VTBits) {
        assert(SrcOp.getValueSizeInBits() > VTBits &&
               "Expected BUILD_VECTOR implicit truncation");
        unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
        Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
      }
      Tmp = std::min(Tmp, Tmp2);
    }
    return Tmp;
  case ISD::VECTOR_SHUFFLE: {
    // Collect the minimum number of sign bits that are shared by every vector
    // element referenced by the shuffle.
    APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
    assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = SVN->getMaskElt(i);
      if (!DemandedElts[i])
        continue;
      // For UNDEF elements, we don't know anything about the common state of
      // the shuffle result.
      if (M < 0)
        return 1;
      if ((unsigned)M < NumElts)
        DemandedLHS.setBit((unsigned)M % NumElts);
      else
        DemandedRHS.setBit((unsigned)M % NumElts);
    }
    Tmp = std::numeric_limits<unsigned>::max();
    if (!!DemandedLHS)
      Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
    if (!!DemandedRHS) {
      Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
      Tmp = std::min(Tmp, Tmp2);
    }
    // If we don't know anything, early out and try computeKnownBits fall-back.
    if (Tmp == 1)
      break;
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
    return Tmp;
  }

  case ISD::BITCAST: {
    SDValue N0 = Op.getOperand(0);
    EVT SrcVT = N0.getValueType();
    unsigned SrcBits = SrcVT.getScalarSizeInBits();

    // Ignore bitcasts from unsupported types.
    if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
      break;

    // Fast handling of 'identity' bitcasts.
    if (VTBits == SrcBits)
      return ComputeNumSignBits(N0, DemandedElts, Depth + 1);

    bool IsLE = getDataLayout().isLittleEndian();

    // Bitcast 'large element' scalar/vector to 'small element' vector.
    if ((SrcBits % VTBits) == 0) {
      assert(VT.isVector() && "Expected bitcast to vector");

      unsigned Scale = SrcBits / VTBits;
      APInt SrcDemandedElts(NumElts / Scale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SrcDemandedElts.setBit(i / Scale);

      // Fast case - sign splat can be simply split across the small elements.
      Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
      if (Tmp == SrcBits)
        return VTBits;

      // Slow case - determine how far the sign extends into each sub-element.
      Tmp2 = VTBits;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned SubOffset = i % Scale;
          SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
          SubOffset = SubOffset * VTBits;
          if (Tmp <= SubOffset)
            return 1;
          Tmp2 = std::min(Tmp2, Tmp - SubOffset);
        }
      return Tmp2;
    }
    break;
  }
  case ISD::SIGN_EXTEND:
    Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
    return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
  case ISD::SIGN_EXTEND_INREG:
    // Max of the input and what this extends.
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
    Tmp = VTBits-Tmp+1;
    Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
    return std::max(Tmp, Tmp2);
  case ISD::SIGN_EXTEND_VECTOR_INREG: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
    Tmp = VTBits - SrcVT.getScalarSizeInBits();
    return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
  }
  case ISD::SRA:
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
    // SRA X, C -> adds C sign bits.
    if (ConstantSDNode *C =
            isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
      APInt ShiftVal = C->getAPIntValue();
      ShiftVal += Tmp;
      Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
    }
    return Tmp;
  case ISD::SHL:
    if (ConstantSDNode *C =
            isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
      if (C->getAPIntValue().uge(VTBits) ||    // Bad shift.
          C->getAPIntValue().uge(Tmp)) break;  // Shifted all sign bits out.
      return Tmp - C->getZExtValue();
    }
    break;
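    // Worked example for the SRA/SHL cases above (editor's note):
    //   ashr (sext i8 %x to i32), 4  --> 25 + 4 = 29 sign bits
    //   shl  (sext i8 %x to i32), 4  --> 25 - 4 = 21 sign bits
    // since a sext from i8 to i32 yields 25 sign bits.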
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:  // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // computeKnownBits, and pick whichever answer is better.
    }
    break;

  case ISD::SELECT:
  case ISD::VSELECT:
    Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
    if (Tmp == 1) return 1; // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
    return std::min(Tmp, Tmp2);
  case ISD::SELECT_CC:
    Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
    if (Tmp == 1) return 1; // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
    return std::min(Tmp, Tmp2);

  case ISD::SMIN:
  case ISD::SMAX: {
    // If we have a clamp pattern, we know that the number of sign bits will be
    // the minimum of the clamp min/max range.
    bool IsMax = (Opcode == ISD::SMAX);
    ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
    if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
      if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
        CstHigh =
            isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
    if (CstLow && CstHigh) {
      if (!IsMax)
        std::swap(CstLow, CstHigh);
      if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
        Tmp = CstLow->getAPIntValue().getNumSignBits();
        Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
        return std::min(Tmp, Tmp2);
      }
    }

    // Fallback - just get the minimum number of sign bits of the operands.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    if (Tmp == 1)
      return 1; // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
    return std::min(Tmp, Tmp2);
  }
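  // Worked example for the SMIN/SMAX clamp pattern above (editor's note):
  //   smax(smin(%x, 100), -100) clamps %x to [-100, 100]. In i32, both 100
  //   and -100 have 25 sign bits, so the clamped result is known to have at
  //   least 25.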
  case ISD::UMIN:
  case ISD::UMAX:
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    if (Tmp == 1)
      return 1; // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
    return std::min(Tmp, Tmp2);
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents. Fall through.
    // If setcc returns 0/-1, all bits are sign bits.
    // We know that we have an integer-based boolean since these operations
    // are only available for integer.
    if (TLI->getBooleanContents(VT.isVector(), false) ==
        TargetLowering::ZeroOrNegativeOneBooleanContent)
      return VTBits;
    break;
  case ISD::SETCC:
    // If setcc returns 0/-1, all bits are sign bits.
    if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
        TargetLowering::ZeroOrNegativeOneBooleanContent)
      return VTBits;
    break;
  case ISD::ROTL:
  case ISD::ROTR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned RotAmt = C->getAPIntValue().urem(VTBits);

      // Handle rotate right by N like a rotate left by 32-N.
      if (Opcode == ISD::ROTR)
        RotAmt = (VTBits - RotAmt) % VTBits;

      // If we aren't rotating out all of the known-in sign bits, return the
      // number that are left. This handles rotl(sext(x), 1) for example.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
    }
    break;
  case ISD::ADD:
  case ISD::ADDC:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1; // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        KnownBits Known = computeKnownBits(Op.getOperand(0), Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((Known.Zero | 1).isAllOnesValue())
          return VTBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (Known.isNonNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;

  case ISD::SUB:
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
      if (CLHS->isNullValue()) {
        KnownBits Known = computeKnownBits(Op.getOperand(1), Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((Known.Zero | 1).isAllOnesValue())
          return VTBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (Known.isNonNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1; // Early out.
    return std::min(Tmp, Tmp2)-1;
  case ISD::MUL: {
    // The output of the Mul can be at most twice the valid bits in the inputs.
    unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    if (SignBitsOp0 == 1)
      break;
    unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
    if (SignBitsOp1 == 1)
      break;
    unsigned OutValidBits =
        (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
    return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
  }
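  // Worked example for the MUL case above (editor's note): if both i32
  // operands have 25 sign bits (each fits in 8 bits), OutValidBits =
  // (32-25+1) + (32-25+1) = 16, so the product has at least 32-16+1 = 17
  // sign bits, i.e. it fits in 16 bits.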
  case ISD::TRUNCATE: {
    // Check if the sign bits of source go down as far as the truncated value.
    unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
    unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    if (NumSrcSignBits > (NumSrcBits - VTBits))
      return NumSrcSignBits - (NumSrcBits - VTBits);
    break;
  }
  case ISD::EXTRACT_ELEMENT: {
    const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    const int BitWidth = Op.getValueSizeInBits();
    const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;

    // Get reverse index (starting from 1), Op1 value indexes elements from
    // little end. Sign starts at big end.
    const int rIndex = Items - 1 - Op.getConstantOperandVal(1);

    // If the sign portion ends in our element the subtraction gives the
    // correct result. Otherwise it gives either a negative or a > bitwidth
    // result.
    return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
  }
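  // Worked example for the EXTRACT_ELEMENT case above (editor's note):
  // extracting the low i32 (operand 1 == 0) of an i64 with 40 sign bits
  // gives Items = 2, rIndex = 1, and min(40 - 1*32, 32) = 8 sign bits.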
  case ISD::INSERT_VECTOR_ELT: {
    SDValue InVec = Op.getOperand(0);
    SDValue InVal = Op.getOperand(1);
    SDValue EltNo = Op.getOperand(2);

    ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
      // If we know the element index, split the demand between the
      // source vector and the inserted element.
      unsigned EltIdx = CEltNo->getZExtValue();

      // If we demand the inserted element then get its sign bits.
      Tmp = std::numeric_limits<unsigned>::max();
      if (DemandedElts[EltIdx]) {
        // TODO - handle implicit truncation of inserted elements.
        if (InVal.getScalarValueSizeInBits() != VTBits)
          break;
        Tmp = ComputeNumSignBits(InVal, Depth + 1);
      }

      // If we demand the source vector then get its sign bits, and determine
      // the minimum.
      APInt VectorElts = DemandedElts;
      VectorElts.clearBit(EltIdx);
      if (!!VectorElts) {
        Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1);
        Tmp = std::min(Tmp, Tmp2);
      }
    } else {
      // Unknown element index, so ignore DemandedElts and demand them all.
      Tmp = ComputeNumSignBits(InVec, Depth + 1);
      Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
      Tmp = std::min(Tmp, Tmp2);
    }
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
    return Tmp;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue InVec = Op.getOperand(0);
    SDValue EltNo = Op.getOperand(1);
    EVT VecVT = InVec.getValueType();
    const unsigned BitWidth = Op.getValueSizeInBits();
    const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
    const unsigned NumSrcElts = VecVT.getVectorNumElements();

    // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know
    // anything about sign bits. But if the sizes match we can derive knowledge
    // about sign bits from the vector operand.
    if (BitWidth != EltBitWidth)
      break;

    // If we know the element index, just demand that vector element, else for
    // an unknown element index, ignore DemandedElts and demand them all.
    APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
    ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
      DemandedSrcElts =
          APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());

    return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // If we know the element index, just demand that subvector elements,
    // otherwise demand them all.
    SDValue Src = Op.getOperand(0);
    ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
      // Offset the demanded elts by the subvector index.
      uint64_t Idx = SubIdx->getZExtValue();
      APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
      return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
    }
    return ComputeNumSignBits(Src, Depth + 1);
  }
  case ISD::CONCAT_VECTORS: {
    // Determine the minimum number of sign bits across all demanded
    // elts of the input vectors. Early out if the result is already 1.
    Tmp = std::numeric_limits<unsigned>::max();
    EVT SubVectorVT = Op.getOperand(0).getValueType();
    unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
    unsigned NumSubVectors = Op.getNumOperands();
    for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
      APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
      DemandedSub = DemandedSub.trunc(NumSubVectorElts);
      if (!DemandedSub)
        continue;
      Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
      Tmp = std::min(Tmp, Tmp2);
    }
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
    return Tmp;
  }
  case ISD::INSERT_SUBVECTOR: {
    // If we know the element index, demand any elements from the subvector and
    // the remainder from the src its inserted into, otherwise demand them all.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    auto *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
      Tmp = std::numeric_limits<unsigned>::max();
      uint64_t Idx = SubIdx->getZExtValue();
      APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
      if (!!DemandedSubElts) {
        Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
        if (Tmp == 1) return 1; // early-out
      }
      APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
      APInt DemandedSrcElts = DemandedElts & ~SubMask;
      if (!!DemandedSrcElts) {
        Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
        Tmp = std::min(Tmp, Tmp2);
      }
      assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
      return Tmp;
    }

    // Not able to determine the index so just assume worst case.
    Tmp = ComputeNumSignBits(Sub, Depth + 1);
    if (Tmp == 1) return 1; // early-out
    Tmp2 = ComputeNumSignBits(Src, Depth + 1);
    Tmp = std::min(Tmp, Tmp2);
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
    return Tmp;
  }
  }
  // If we are looking at the loaded value of the SDNode.
  if (Op.getResNo() == 0) {
    // Handle LOADX separately here. EXTLOAD case will fall through.
    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
      unsigned ExtType = LD->getExtensionType();
      switch (ExtType) {
      default: break;
      case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
        Tmp = LD->getMemoryVT().getScalarSizeInBits();
        return VTBits - Tmp + 1;
      case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
        Tmp = LD->getMemoryVT().getScalarSizeInBits();
        return VTBits - Tmp;
      case ISD::NON_EXTLOAD:
        if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
          // We only need to handle vectors - computeKnownBits should handle
          // scalar cases.
          Type *CstTy = Cst->getType();
          if (CstTy->isVectorTy() &&
              (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) {
            Tmp = VTBits;
            for (unsigned i = 0; i != NumElts; ++i) {
              if (!DemandedElts[i])
                continue;
              if (Constant *Elt = Cst->getAggregateElement(i)) {
                if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
                  const APInt &Value = CInt->getValue();
                  Tmp = std::min(Tmp, Value.getNumSignBits());
                  continue;
                }
                if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
                  APInt Value = CFP->getValueAPF().bitcastToAPInt();
                  Tmp = std::min(Tmp, Value.getNumSignBits());
                  continue;
                }
              }
              // Unknown type. Conservatively assume no bits match sign bit.
              return 1;
            }
            return Tmp;
          }
        }
        break;
      }
    }
  }

  // Allow the target to implement this method for its nodes.
  if (Opcode >= ISD::BUILTIN_OP_END ||
      Opcode == ISD::INTRINSIC_WO_CHAIN ||
      Opcode == ISD::INTRINSIC_W_CHAIN ||
      Opcode == ISD::INTRINSIC_VOID) {
    unsigned NumBits =
        TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
    FirstAnswer = std::max(FirstAnswer, NumBits);
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
  APInt Mask;
  if (Known.isNonNegative()) {      // sign bit is 0
    Mask = Known.Zero;
  } else if (Known.isNegative()) {  // sign bit is 1
    Mask = Known.One;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= Mask.getBitWidth()-VTBits;
  // Return # leading zeros. We use 'min' here in case Val was zero before
  // shifting. We don't want to return '64' as for an i32 "0".
  return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
}
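
// NOTE (editor's addition, not upstream code): a sketch of a typical consumer
// of ComputeNumSignBits. A SIGN_EXTEND_INREG that only re-copies bits which
// are already sign copies is a no-op; this mirrors the kind of fold
// DAGCombiner performs. foldRedundantSextInReg is a hypothetical name.
static SDValue foldRedundantSextInReg(SelectionDAG &DAG, SDValue N) {
  if (N.getOpcode() != ISD::SIGN_EXTEND_INREG)
    return SDValue();
  SDValue Src = N.getOperand(0);
  EVT ExtVT = cast<VTSDNode>(N.getOperand(1))->getVT();
  unsigned VTBits = N.getScalarValueSizeInBits();
  unsigned ExtBits = ExtVT.getScalarSizeInBits();
  // The node copies bit ExtBits-1 into the top VTBits-ExtBits+1 bits; if the
  // input already has that many sign bits, nothing changes.
  if (DAG.ComputeNumSignBits(Src) >= VTBits - ExtBits + 1)
    return Src;
  return SDValue();
}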
bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
  if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
      !isa<ConstantSDNode>(Op.getOperand(1)))
    return false;

  if (Op.getOpcode() == ISD::OR &&
      !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1)))
    return false;

  return true;
}
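
// Example (editor's note): both of the following are a base plus constant
// offset of 16:
//   (add %base, 16)
//   (or  %base, 16)   ; accepted only when MaskedValueIsZero proves the low
//                     ; bits of %base are clear, e.g. %base = (shl %x, 5)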
bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
  // If we're told that NaNs won't happen, assume they won't.
  if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
    return true;

  if (Depth >= MaxRecursionDepth)
    return false; // Limit search depth.

  // TODO: Handle vectors.
  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
    return !C->getValueAPF().isNaN() ||
           (SNaN && !C->getValueAPF().isSignaling());
  }

  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FSIN:
  case ISD::FCOS: {
    if (SNaN)
      return true;
    // TODO: Need isKnownNeverInfinity
    return false;
  }
  case ISD::FCANONICALIZE:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FTRUNC:
  case ISD::FFLOOR:
  case ISD::FCEIL:
  case ISD::FROUND:
  case ISD::FRINT:
  case ISD::FNEARBYINT: {
    if (SNaN)
      return true;
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  case ISD::FABS:
  case ISD::FNEG:
  case ISD::FCOPYSIGN: {
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  case ISD::SELECT:
    return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
           isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
  case ISD::FP_EXTEND:
  case ISD::FP_ROUND: {
    if (SNaN)
      return true;
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return true;
  case ISD::FMA:
  case ISD::FMAD: {
    if (SNaN)
      return true;
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
           isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
           isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
  }
  case ISD::FSQRT: // Needs to know the operand is non-negative.
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FPOWI:
  case ISD::FPOW: {
    if (SNaN)
      return true;
    // TODO: Refine on operand
    return false;
  }
  case ISD::FMINNUM:
  case ISD::FMAXNUM: {
    // Only one needs to be known not-nan, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
           isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
  }
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
            isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
           (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
            isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
  }
  case ISD::FMINIMUM:
  case ISD::FMAXIMUM: {
    // TODO: Does this quiet or return the original NaN as-is?
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
           isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  default:
    if (Opcode >= ISD::BUILTIN_OP_END ||
        Opcode == ISD::INTRINSIC_WO_CHAIN ||
        Opcode == ISD::INTRINSIC_W_CHAIN ||
        Opcode == ISD::INTRINSIC_VOID) {
      return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
    }

    return false;
  }
}
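
// NOTE (editor's addition, not upstream code): a sketch of how a combine can
// use isKnownNeverNaN to strengthen flags on a binary FP node, unlocking
// later NoNaNs-guarded folds. refineNoNaNs is a hypothetical helper and
// assumes N has at least two FP operands.
static void refineNoNaNs(SelectionDAG &DAG, SDNode *N) {
  SDNodeFlags Flags = N->getFlags();
  if (!Flags.hasNoNaNs() && DAG.isKnownNeverNaN(N->getOperand(0)) &&
      DAG.isKnownNeverNaN(N->getOperand(1))) {
    Flags.setNoNaNs(true);
    N->setFlags(Flags);
  }
}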
bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
  assert(Op.getValueType().isFloatingPoint() &&
         "Floating point type expected");

  // If the value is a constant, we can obviously see if it is a zero or not.
  // TODO: Add BuildVector support.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return !C->isZero();
  return false;
}
bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
  assert(!Op.getValueType().isFloatingPoint() &&
         "Floating point types unsupported - use isKnownNeverZeroFloat");

  // If the value is a constant, we can obviously see if it is a zero or not.
  if (ISD::matchUnaryPredicate(
          Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
    return true;

  // TODO: Recognize more cases here.
  switch (Op.getOpcode()) {
  default: break;
  case ISD::OR:
    if (isKnownNeverZero(Op.getOperand(1)) ||
        isKnownNeverZero(Op.getOperand(0)))
      return true;
    break;
  }

  return false;
}
bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
  // Check the obvious case.
  if (A == B) return true;

  // Treat positive and negative zero as equal.
  if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
    if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
      if (CA->isZero() && CB->isZero()) return true;

  // Otherwise they may not be equal.
  return false;
}
// FIXME: unify with llvm::haveNoCommonBitsSet.
// FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
  assert(A.getValueType() == B.getValueType() &&
         "Values must have the same type");
  return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
}
static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
                                ArrayRef<SDValue> Ops,
                                SelectionDAG &DAG) {
  int NumOps = Ops.size();
  assert(NumOps != 0 && "Can't build an empty vector!");
  assert(VT.getVectorNumElements() == (unsigned)NumOps &&
         "Incorrect element count in BUILD_VECTOR!");

  // BUILD_VECTOR of UNDEFs is UNDEF.
  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
    return DAG.getUNDEF(VT);

  // A BUILD_VECTOR of sequential extracts from the same vector of the same
  // type is an identity.
  SDValue IdentitySrc;
  bool IsIdentity = true;
  for (int i = 0; i != NumOps; ++i) {
    if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Ops[i].getOperand(0).getValueType() != VT ||
        (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
        !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
        cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
      IsIdentity = false;
      break;
    }
    IdentitySrc = Ops[i].getOperand(0);
  }
  if (IsIdentity)
    return IdentitySrc;

  return SDValue();
}
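
// Example of the identity fold above (editor's note), for a v4i32 result:
//   (build_vector (extract_vector_elt V, 0), (extract_vector_elt V, 1),
//                 (extract_vector_elt V, 2), (extract_vector_elt V, 3)) -> V
// when V is itself v4i32.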
/// Try to simplify vector concatenation to an input value, undef, or build
/// vector.
static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
                                  ArrayRef<SDValue> Ops,
                                  SelectionDAG &DAG) {
  assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
  assert(llvm::all_of(Ops,
                      [Ops](SDValue Op) {
                        return Ops[0].getValueType() == Op.getValueType();
                      }) &&
         "Concatenation of vectors with inconsistent value types!");
  assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
             VT.getVectorNumElements() &&
         "Incorrect element count in vector concatenation!");

  if (Ops.size() == 1)
    return Ops[0];

  // Concat of UNDEFs is UNDEF.
  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
    return DAG.getUNDEF(VT);

  // Scan the operands and look for extract operations from a single source
  // that correspond to insertion at the same location via this concatenation:
  // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
  SDValue IdentitySrc;
  bool IsIdentity = true;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    SDValue Op = Ops[i];
    unsigned IdentityIndex = i * Op.getValueType().getVectorNumElements();
    if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
        Op.getOperand(0).getValueType() != VT ||
        (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
        !isa<ConstantSDNode>(Op.getOperand(1)) ||
        Op.getConstantOperandVal(1) != IdentityIndex) {
      IsIdentity = false;
      break;
    }
    assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
           "Unexpected identity source vector for concat of extracts");
    IdentitySrc = Op.getOperand(0);
  }
  if (IsIdentity) {
    assert(IdentitySrc && "Failed to set source vector of extracts");
    return IdentitySrc;
  }

  // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be
  // simplified to one big BUILD_VECTOR.
  // FIXME: Add support for SCALAR_TO_VECTOR as well.
  EVT SVT = VT.getScalarType();
  SmallVector<SDValue, 16> Elts;
  for (SDValue Op : Ops) {
    EVT OpVT = Op.getValueType();
    if (Op.isUndef())
      Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
    else if (Op.getOpcode() == ISD::BUILD_VECTOR)
      Elts.append(Op->op_begin(), Op->op_end());
    else
      return SDValue();
  }

  // BUILD_VECTOR requires all inputs to be of the same type, find the
  // maximum type and extend them all.
  for (SDValue Op : Elts)
    SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);

  if (SVT.bitsGT(VT.getScalarType()))
    for (SDValue &Op : Elts)
      Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
               ? DAG.getZExtOrTrunc(Op, DL, SVT)
               : DAG.getSExtOrTrunc(Op, DL, SVT);

  SDValue V = DAG.getBuildVector(VT, DL, Elts);
  NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
  return V;
}
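
// Example of the identity fold above (editor's note), for a v4i32 result:
//   (concat_vectors (extract_subvector V, 0), (extract_subvector V, 2)) -> V
// when V is v4i32 and each extracted half is v2i32.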
/// Gets or creates the specified node.
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, getVTList(VT), None);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
                              getVTList(VT));
  CSEMap.InsertNode(N, IP);

  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue Operand, const SDNodeFlags Flags) {
  // Constant fold unary operations with an integer constant operand. Even
  // opaque constant will be folded, because the folding of unary operations
  // doesn't create new constants with different values. Nevertheless, the
  // opaque flag is preserved during folding to prevent future folding with
  // it.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
    const APInt &Val = C->getAPIntValue();
    switch (Opcode) {
    default: break;
    case ISD::SIGN_EXTEND:
      return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
                         C->isTargetOpcode(), C->isOpaque());
    case ISD::TRUNCATE:
      if (C->isOpaque())
        break;
      LLVM_FALLTHROUGH;
    case ISD::ANY_EXTEND:
    case ISD::ZERO_EXTEND:
      return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
                         C->isTargetOpcode(), C->isOpaque());
    case ISD::UINT_TO_FP:
    case ISD::SINT_TO_FP: {
      APFloat apf(EVTToAPFloatSemantics(VT),
                  APInt::getNullValue(VT.getSizeInBits()));
      (void)apf.convertFromAPInt(Val,
                                 Opcode==ISD::SINT_TO_FP,
                                 APFloat::rmNearestTiesToEven);
      return getConstantFP(apf, DL, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
        return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
      if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
        return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
      if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
        return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
      if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
        return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
      break;
    case ISD::ABS:
      return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::BITREVERSE:
      return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::BSWAP:
      return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTPOP:
      return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTLZ:
    case ISD::CTLZ_ZERO_UNDEF:
      return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTTZ:
    case ISD::CTTZ_ZERO_UNDEF:
      return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::FP16_TO_FP: {
      bool Ignored;
      APFloat FPV(APFloat::IEEEhalf(),
                  (Val.getBitWidth() == 16) ? Val : Val.trunc(16));

      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME need to be more flexible about rounding mode.
      (void)FPV.convert(EVTToAPFloatSemantics(VT),
                        APFloat::rmNearestTiesToEven, &Ignored);
      return getConstantFP(FPV, DL, VT);
    }
    }
  }

  // Constant fold unary operations with a floating point constant operand.
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
    APFloat V = C->getValueAPF();    // make copy
    switch (Opcode) {
    case ISD::FNEG:
      V.changeSign();
      return getConstantFP(V, DL, VT);
    case ISD::FABS:
      V.clearSign();
      return getConstantFP(V, DL, VT);
    case ISD::FCEIL: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, DL, VT);
      break;
    }
    case ISD::FTRUNC: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, DL, VT);
      break;
    }
    case ISD::FFLOOR: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, DL, VT);
      break;
    }
    case ISD::FP_EXTEND: {
      bool ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME need to be more flexible about rounding mode.
      (void)V.convert(EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven, &ignored);
      return getConstantFP(V, DL, VT);
    }
    case ISD::FP_TO_SINT:
    case ISD::FP_TO_UINT: {
      bool ignored;
      APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
      // FIXME need to be more flexible about rounding mode.
      APFloat::opStatus s =
          V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
      if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
        break;
      return getConstant(IntVal, DL, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
        return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
      else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
        return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
      else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
        return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
      break;
    case ISD::FP_TO_FP16: {
      bool Ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME need to be more flexible about rounding mode.
      (void)V.convert(APFloat::IEEEhalf(),
                      APFloat::rmNearestTiesToEven, &Ignored);
      return getConstant(V.bitcastToAPInt(), DL, VT);
    }
    }
  }

  // Constant fold unary operations with a vector integer or float operand.
  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
    if (BV->isConstant()) {
      switch (Opcode) {
      default:
        // FIXME: Entirely reasonable to perform folding of other unary
        // operations here as the need arises.
        break;
      case ISD::FNEG:
      case ISD::FABS:
      case ISD::FCEIL:
      case ISD::FTRUNC:
      case ISD::FFLOOR:
      case ISD::FP_EXTEND:
      case ISD::FP_TO_SINT:
      case ISD::FP_TO_UINT:
      case ISD::TRUNCATE:
      case ISD::ANY_EXTEND:
      case ISD::ZERO_EXTEND:
      case ISD::SIGN_EXTEND:
      case ISD::UINT_TO_FP:
      case ISD::SINT_TO_FP:
      case ISD::ABS:
      case ISD::BITREVERSE:
      case ISD::BSWAP:
      case ISD::CTLZ:
      case ISD::CTLZ_ZERO_UNDEF:
      case ISD::CTTZ:
      case ISD::CTTZ_ZERO_UNDEF:
      case ISD::CTPOP: {
        SDValue Ops = { Operand };
        if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
          return Fold;
      }
      }
    }
  }

  unsigned OpOpcode = Operand.getNode()->getOpcode();
  switch (Opcode) {
  case ISD::TokenFactor:
  case ISD::MERGE_VALUES:
  case ISD::CONCAT_VECTORS:
    return Operand;  // Factor, merge or concat of one node? No need.
  case ISD::BUILD_VECTOR: {
    // Attempt to simplify BUILD_VECTOR.
    SDValue Ops[] = {Operand};
    if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
      return V;
    break;
  }
  case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
  case ISD::FP_EXTEND:
    assert(VT.isFloatingPoint() &&
           Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
    if (Operand.getValueType() == VT) return Operand;  // noop conversion.
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(Operand.getValueType().bitsLT(VT) &&
           "Invalid fpext node, dst < src!");
    if (Operand.isUndef())
      return getUNDEF(VT);
    break;
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    if (Operand.isUndef())
      return getUNDEF(VT);
    break;
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // [us]itofp(undef) = 0, because the result value is bounded.
    if (Operand.isUndef())
      return getConstantFP(0.0, DL, VT);
    break;
  case ISD::SIGN_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid SIGN_EXTEND!");
    assert(VT.isVector() == Operand.getValueType().isVector() &&
           "SIGN_EXTEND result type should be vector iff the operand "
           "type is vector!");
    if (Operand.getValueType() == VT) return Operand;  // noop extension
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(Operand.getValueType().bitsLT(VT) &&
           "Invalid sext node, dst < src!");
    if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
      return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // sext(undef) = 0, because the top bits will all be the same.
      return getConstant(0, DL, VT);
    break;
  case ISD::ZERO_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ZERO_EXTEND!");
    assert(VT.isVector() == Operand.getValueType().isVector() &&
           "ZERO_EXTEND result type should be vector iff the operand "
           "type is vector!");
    if (Operand.getValueType() == VT) return Operand;  // noop extension
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(Operand.getValueType().bitsLT(VT) &&
           "Invalid zext node, dst < src!");
    if (OpOpcode == ISD::ZERO_EXTEND)  // (zext (zext x)) -> (zext x)
      return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // zext(undef) = 0, because the top bits will be zero.
      return getConstant(0, DL, VT);
    break;
  case ISD::ANY_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ANY_EXTEND!");
    assert(VT.isVector() == Operand.getValueType().isVector() &&
           "ANY_EXTEND result type should be vector iff the operand "
           "type is vector!");
    if (Operand.getValueType() == VT) return Operand;  // noop extension
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(Operand.getValueType().bitsLT(VT) &&
           "Invalid anyext node, dst < src!");

    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND)
      // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
      return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);

    // (ext (trunc x)) -> x
    if (OpOpcode == ISD::TRUNCATE) {
      SDValue OpOp = Operand.getOperand(0);
      if (OpOp.getValueType() == VT) {
        transferDbgValues(Operand, OpOp);
        return OpOp;
      }
    }
    break;
  case ISD::TRUNCATE:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid TRUNCATE!");
    assert(VT.isVector() == Operand.getValueType().isVector() &&
           "TRUNCATE result type should be vector iff the operand "
           "type is vector!");
    if (Operand.getValueType() == VT) return Operand;  // noop truncate
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(Operand.getValueType().bitsGT(VT) &&
           "Invalid truncate node, src < dst!");
    if (OpOpcode == ISD::TRUNCATE)
      return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND) {
      // If the source is smaller than the dest, we still need an extend.
      if (Operand.getOperand(0).getValueType().getScalarType()
            .bitsLT(VT.getScalarType()))
        return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
      if (Operand.getOperand(0).getValueType().bitsGT(VT))
        return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
      return Operand.getOperand(0);
    }
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    assert(VT.isVector() && "This DAG node is restricted to vector types.");
    assert(Operand.getValueType().bitsLE(VT) &&
           "The input must be the same size or smaller than the result.");
    assert(VT.getVectorNumElements() <
             Operand.getValueType().getVectorNumElements() &&
           "The destination vector type must have fewer lanes than the input.");
    break;
  case ISD::ABS:
    assert(VT.isInteger() && VT == Operand.getValueType() &&
           "Invalid ABS!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::BSWAP:
    assert(VT.isInteger() && VT == Operand.getValueType() &&
           "Invalid BSWAP!");
    assert((VT.getScalarSizeInBits() % 16 == 0) &&
           "BSWAP types must be a multiple of 16 bits!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::BITREVERSE:
    assert(VT.isInteger() && VT == Operand.getValueType() &&
           "Invalid BITREVERSE!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::BITCAST:
    // Basic sanity checking.
    assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
           "Cannot BITCAST between types of different sizes!");
    if (VT == Operand.getValueType()) return Operand;  // noop conversion.
    if (OpOpcode == ISD::BITCAST)  // bitconv(bitconv(x)) -> bitconv(x)
      return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SCALAR_TO_VECTOR:
    assert(VT.isVector() && !Operand.getValueType().isVector() &&
           (VT.getVectorElementType() == Operand.getValueType() ||
            (VT.getVectorElementType().isInteger() &&
             Operand.getValueType().isInteger() &&
             VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
           "Illegal SCALAR_TO_VECTOR node!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
    if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
        isa<ConstantSDNode>(Operand.getOperand(1)) &&
        Operand.getConstantOperandVal(1) == 0 &&
        Operand.getOperand(0).getValueType() == VT)
      return Operand.getOperand(0);
    break;
  case ISD::FNEG:
    // Negation of an unknown bag of bits is still completely undefined.
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);

    // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
    if ((getTarget().Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros()) &&
        OpOpcode == ISD::FSUB)
      return getNode(ISD::FSUB, DL, VT, Operand.getOperand(1),
                     Operand.getOperand(0), Flags);
    if (OpOpcode == ISD::FNEG)  // --X -> X
      return Operand.getOperand(0);
    break;
  case ISD::FABS:
    if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
      return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
    break;
  }

  SDNode *N;
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {Operand};
  if (VT != MVT::Glue) { // Don't CSE flag producing nodes
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      E->intersectFlagsWith(Flags);
      return SDValue(E, 0);
    }

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    N->setFlags(Flags);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1,
                                        const APInt &C2) {
  switch (Opcode) {
  case ISD::ADD:  return std::make_pair(C1 + C2, true);
  case ISD::SUB:  return std::make_pair(C1 - C2, true);
  case ISD::MUL:  return std::make_pair(C1 * C2, true);
  case ISD::AND:  return std::make_pair(C1 & C2, true);
  case ISD::OR:   return std::make_pair(C1 | C2, true);
  case ISD::XOR:  return std::make_pair(C1 ^ C2, true);
  case ISD::SHL:  return std::make_pair(C1 << C2, true);
  case ISD::SRL:  return std::make_pair(C1.lshr(C2), true);
  case ISD::SRA:  return std::make_pair(C1.ashr(C2), true);
  case ISD::ROTL: return std::make_pair(C1.rotl(C2), true);
  case ISD::ROTR: return std::make_pair(C1.rotr(C2), true);
  case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true);
  case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true);
  case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true);
  case ISD::UMAX: return std::make_pair(C1.uge(C2) ? C1 : C2, true);
  case ISD::SADDSAT: return std::make_pair(C1.sadd_sat(C2), true);
  case ISD::UADDSAT: return std::make_pair(C1.uadd_sat(C2), true);
  case ISD::SSUBSAT: return std::make_pair(C1.ssub_sat(C2), true);
  case ISD::USUBSAT: return std::make_pair(C1.usub_sat(C2), true);
  case ISD::UDIV:
    if (!C2.getBoolValue())
      break;
    return std::make_pair(C1.udiv(C2), true);
  case ISD::UREM:
    if (!C2.getBoolValue())
      break;
    return std::make_pair(C1.urem(C2), true);
  case ISD::SDIV:
    if (!C2.getBoolValue())
      break;
    return std::make_pair(C1.sdiv(C2), true);
  case ISD::SREM:
    if (!C2.getBoolValue())
      break;
    return std::make_pair(C1.srem(C2), true);
  }
  return std::make_pair(APInt(1, 0), false);
}
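
// Worked example for FoldValue (editor's note), with i8 operands 100 and 100:
//   ISD::ADD     -> 200, which wraps to -56 in two's complement
//   ISD::SADDSAT -> clamps to the i8 maximum, 127
// UDIV/UREM/SDIV/SREM report failure (second member false) for a zero
// divisor, so the caller creates no node and the division is kept as-is.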
SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
                                             EVT VT, const ConstantSDNode *C1,
                                             const ConstantSDNode *C2) {
  if (C1->isOpaque() || C2->isOpaque())
    return SDValue();

  std::pair<APInt, bool> Folded = FoldValue(Opcode, C1->getAPIntValue(),
                                            C2->getAPIntValue());
  if (!Folded.second)
    return SDValue();
  return getConstant(Folded.first, DL, VT);
}
SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
                                       const GlobalAddressSDNode *GA,
                                       const SDNode *N2) {
  if (GA->getOpcode() != ISD::GlobalAddress)
    return SDValue();
  if (!TLI->isOffsetFoldingLegal(GA))
    return SDValue();
  auto *C2 = dyn_cast<ConstantSDNode>(N2);
  if (!C2)
    return SDValue();
  int64_t Offset = C2->getSExtValue();
  switch (Opcode) {
  case ISD::ADD: break;
  case ISD::SUB: Offset = -uint64_t(Offset); break;
  default: return SDValue();
  }
  return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
                          GA->getOffset() + uint64_t(Offset));
}
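
// Example (editor's note): with offset folding legal for @g,
//   (add (GlobalAddress @g, 8), 4)  folds to  (GlobalAddress @g, 12)
//   (sub (GlobalAddress @g, 8), 4)  folds to  (GlobalAddress @g, 4)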
bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
  switch (Opcode) {
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM: {
    // If a divisor is zero/undef or any element of a divisor vector is
    // zero/undef, the whole op is undef.
    assert(Ops.size() == 2 && "Div/rem should have 2 operands");
    SDValue Divisor = Ops[1];
    if (Divisor.isUndef() || isNullConstant(Divisor))
      return true;

    return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
           llvm::any_of(Divisor->op_values(),
                        [](SDValue V) { return V.isUndef() ||
                                        isNullConstant(V); });
    // TODO: Handle signed overflow.
  }
  // TODO: Handle oversized shifts.
  default:
    return false;
  }
}
SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
                                             EVT VT, SDNode *N1, SDNode *N2) {
  // If the opcode is a target-specific ISD node, there's nothing we can
  // do here and the operand rules may not line up with the below, so
  // bail early.
  if (Opcode >= ISD::BUILTIN_OP_END)
    return SDValue();

  if (isUndef(Opcode, {SDValue(N1, 0), SDValue(N2, 0)}))
    return getUNDEF(VT);

  // Handle the case of two scalars.
  if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) {
    if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) {
      SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, C1, C2);
      assert((!Folded || !VT.isVector()) &&
             "Can't fold vectors ops with scalar operands");
      return Folded;
    }
  }

  // fold (add Sym, c) -> Sym+c
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1))
    return FoldSymbolOffset(Opcode, VT, GA, N2);
  if (TLI->isCommutativeBinOp(Opcode))
    if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2))
      return FoldSymbolOffset(Opcode, VT, GA, N1);

  // For vectors, extract each constant element and fold them individually.
  // Either input may be an undef value.
  auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
  if (!BV1 && !N1->isUndef())
    return SDValue();
  auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
  if (!BV2 && !N2->isUndef())
    return SDValue();

  // If both operands are undef, that's handled the same way as scalars.
  if (!BV1 && !BV2)
    return SDValue();

  assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) &&
         "Vector binop with different number of elements in operands?");

  EVT SVT = VT.getScalarType();
  EVT LegalSVT = SVT;
  if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
    LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
    if (LegalSVT.bitsLT(SVT))
      return SDValue();
  }
  SmallVector<SDValue, 4> Outputs;
  unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands();
  for (unsigned I = 0; I != NumOps; ++I) {
    SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT);
    SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT);
    if (SVT.isInteger()) {
      if (V1->getValueType(0).bitsGT(SVT))
        V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
      if (V2->getValueType(0).bitsGT(SVT))
        V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
    }

    if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
      return SDValue();

    // Fold one vector element.
    SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
    if (LegalSVT != SVT)
      ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);

    // Scalar folding only succeeded if the result is a constant or UNDEF.
    if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
        ScalarResult.getOpcode() != ISD::ConstantFP)
      return SDValue();
    Outputs.push_back(ScalarResult);
  }

  assert(VT.getVectorNumElements() == Outputs.size() &&
         "Vector size mismatch!");

  // We may have a vector type but a scalar result. Create a splat.
  Outputs.resize(VT.getVectorNumElements(), Outputs.back());

  // Build a big vector out of the scalar elements we generated.
  return getBuildVector(VT, SDLoc(), Outputs);
}
// TODO: Merge with FoldConstantArithmetic
SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
                                                   const SDLoc &DL, EVT VT,
                                                   ArrayRef<SDValue> Ops,
                                                   const SDNodeFlags Flags) {
  // If the opcode is a target-specific ISD node, there's nothing we can
  // do here and the operand rules may not line up with the below, so
  // bail early.
  if (Opcode >= ISD::BUILTIN_OP_END)
    return SDValue();

  if (isUndef(Opcode, Ops))
    return getUNDEF(VT);

  // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
  if (!VT.isVector())
    return SDValue();

  unsigned NumElts = VT.getVectorNumElements();

  auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
    return !Op.getValueType().isVector() ||
           Op.getValueType().getVectorNumElements() == NumElts;
  };

  auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
    BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
    return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
           (BV && BV->isConstant());
  };

  // All operands must be vector types with the same number of elements as
  // the result type and must be either UNDEF or a build vector of constant
  // or UNDEF scalars.
  if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
      !llvm::all_of(Ops, IsScalarOrSameVectorSize))
    return SDValue();

  // If we are comparing vectors, then the result needs to be an i1 boolean
  // that is then sign-extended back to the legal result type.
  EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());

  // Find legal integer scalar type for constant promotion and
  // ensure that its scalar size is at least as large as source.
  EVT LegalSVT = VT.getScalarType();
  if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
    LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
    if (LegalSVT.bitsLT(VT.getScalarType()))
      return SDValue();
  }

  // Constant fold each scalar lane separately.
  SmallVector<SDValue, 4> ScalarResults;
  for (unsigned i = 0; i != NumElts; i++) {
    SmallVector<SDValue, 4> ScalarOps;
    for (SDValue Op : Ops) {
      EVT InSVT = Op.getValueType().getScalarType();
      BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
      if (!InBV) {
        // We've checked that this is UNDEF or a constant of some kind.
        if (Op.isUndef())
          ScalarOps.push_back(getUNDEF(InSVT));
        else
          ScalarOps.push_back(Op);
        continue;
      }

      SDValue ScalarOp = InBV->getOperand(i);
      EVT ScalarVT = ScalarOp.getValueType();

      // Build vector (integer) scalar operands may need implicit
      // truncation - do this before constant folding.
      if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
        ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);

      ScalarOps.push_back(ScalarOp);
    }

    // Constant fold the scalar operands.
    SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);

    // Legalize the (integer) scalar constant if necessary.
    if (LegalSVT != SVT)
      ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);

    // Scalar folding only succeeded if the result is a constant or UNDEF.
    if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
        ScalarResult.getOpcode() != ISD::ConstantFP)
      return SDValue();
    ScalarResults.push_back(ScalarResult);
  }

  SDValue V = getBuildVector(VT, DL, ScalarResults);
  NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
  return V;
}
SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
                                         EVT VT, SDValue N1, SDValue N2) {
  // TODO: We don't do any constant folding for strict FP opcodes here, but we
  //       should. That will require dealing with a potentially non-default
  //       rounding mode, checking the "opStatus" return value from the APFloat
  //       math calculations, and possibly other variations.
  auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
  auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
  if (N1CFP && N2CFP) {
    APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF();
    switch (Opcode) {
    case ISD::FADD:
      C1.add(C2, APFloat::rmNearestTiesToEven);
      return getConstantFP(C1, DL, VT);
    case ISD::FSUB:
      C1.subtract(C2, APFloat::rmNearestTiesToEven);
      return getConstantFP(C1, DL, VT);
    case ISD::FMUL:
      C1.multiply(C2, APFloat::rmNearestTiesToEven);
      return getConstantFP(C1, DL, VT);
    case ISD::FDIV:
      C1.divide(C2, APFloat::rmNearestTiesToEven);
      return getConstantFP(C1, DL, VT);
    case ISD::FREM:
      C1.mod(C2);
      return getConstantFP(C1, DL, VT);
    case ISD::FCOPYSIGN:
      C1.copySign(C2);
      return getConstantFP(C1, DL, VT);
    default: break;
    }
  }
  if (N1CFP && Opcode == ISD::FP_ROUND) {
    APFloat C1 = N1CFP->getValueAPF();    // make copy
    bool Unused;
    // This can return overflow, underflow, or inexact; we don't care.
    // FIXME need to be more flexible about rounding mode.
    (void)C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
                     &Unused);
    return getConstantFP(C1, DL, VT);
  }

  switch (Opcode) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
    // If both operands are undef, the result is undef. If 1 operand is undef,
    // the result is NaN. This should match the behavior of the IR optimizer.
    if (N1.isUndef() && N2.isUndef())
      return getUNDEF(VT);
    if (N1.isUndef() || N2.isUndef())
      return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
  }
  return SDValue();
}
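
// Examples (editor's note): fadd of constants 1.5 and 2.25 folds to 3.75
// under round-to-nearest-even. For FADD..FREM, undef op undef folds to
// undef, while a single undef operand folds to NaN, matching the IR-level
// constant folder.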
5028 SDValue
SelectionDAG::getNode(unsigned Opcode
, const SDLoc
&DL
, EVT VT
,
5029 SDValue N1
, SDValue N2
, const SDNodeFlags Flags
) {
5030 ConstantSDNode
*N1C
= dyn_cast
<ConstantSDNode
>(N1
);
5031 ConstantSDNode
*N2C
= dyn_cast
<ConstantSDNode
>(N2
);
5032 ConstantFPSDNode
*N1CFP
= dyn_cast
<ConstantFPSDNode
>(N1
);
5033 ConstantFPSDNode
*N2CFP
= dyn_cast
<ConstantFPSDNode
>(N2
);
5035 // Canonicalize constant to RHS if commutative.
5036 if (TLI
->isCommutativeBinOp(Opcode
)) {
5038 std::swap(N1C
, N2C
);
5040 } else if (N1CFP
&& !N2CFP
) {
5041 std::swap(N1CFP
, N2CFP
);
5048 case ISD::TokenFactor
:
5049 assert(VT
== MVT::Other
&& N1
.getValueType() == MVT::Other
&&
5050 N2
.getValueType() == MVT::Other
&& "Invalid token factor!");
5051 // Fold trivial token factors.
5052 if (N1
.getOpcode() == ISD::EntryToken
) return N2
;
5053 if (N2
.getOpcode() == ISD::EntryToken
) return N1
;
5054 if (N1
== N2
) return N1
;
5056 case ISD::BUILD_VECTOR
: {
5057 // Attempt to simplify BUILD_VECTOR.
5058 SDValue Ops
[] = {N1
, N2
};
5059 if (SDValue V
= FoldBUILD_VECTOR(DL
, VT
, Ops
, *this))
5063 case ISD::CONCAT_VECTORS
: {
5064 SDValue Ops
[] = {N1
, N2
};
5065 if (SDValue V
= foldCONCAT_VECTORS(DL
, VT
, Ops
, *this))
5070 assert(VT
.isInteger() && "This operator does not apply to FP types!");
5071 assert(N1
.getValueType() == N2
.getValueType() &&
5072 N1
.getValueType() == VT
&& "Binary operator types must match!");
5073 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
5074 // worth handling here.
5075 if (N2C
&& N2C
->isNullValue())
5077 if (N2C
&& N2C
->isAllOnesValue()) // X & -1 -> X
5084 assert(VT
.isInteger() && "This operator does not apply to FP types!");
5085 assert(N1
.getValueType() == N2
.getValueType() &&
5086 N1
.getValueType() == VT
&& "Binary operator types must match!");
5087 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
5088 // it's worth handling here.
5089 if (N2C
&& N2C
->isNullValue())
5107 assert(VT
.isInteger() && "This operator does not apply to FP types!");
5108 assert(N1
.getValueType() == N2
.getValueType() &&
5109 N1
.getValueType() == VT
&& "Binary operator types must match!");
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
    assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    if (SDValue V = simplifyFPBinop(Opcode, N1, N2))
      return V;
    break;
  case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
    assert(N1.getValueType() == VT &&
           N1.getValueType().isFloatingPoint() &&
           N2.getValueType().isFloatingPoint() &&
           "Invalid FCOPYSIGN!");
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    if (SDValue V = simplifyShift(N1, N2))
      return V;
    LLVM_FALLTHROUGH;
  case ISD::ROTL:
  case ISD::ROTR:
    assert(VT == N1.getValueType() &&
           "Shift operators return type must be the same as their first arg");
    assert(VT.isInteger() && N2.getValueType().isInteger() &&
           "Shifts only work on integers");
    assert((!VT.isVector() || VT == N2.getValueType()) &&
           "Vector shift amounts must be the same type as their first arg");
    // Verify that the shift amount VT is big enough to hold valid shift
    // amounts.  This catches things like trying to shift an i1024 value by an
    // i8, which is easy to fall into in generic code that uses
    // TLI.getShiftAmount().
    assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
           "Invalid use of small shift amount with oversized value!");

    // Always fold shifts of i1 values so the code generator doesn't need to
    // handle them.  Since we know the size of the shift has to be less than the
    // size of the value, the shift/rotate count is guaranteed to be zero.
    if (VT == MVT::i1)
      return N1;
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::FP_ROUND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg round!");
    assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
           "Cannot FP_ROUND_INREG integer types");
    assert(EVT.isVector() == VT.isVector() &&
           "FP_ROUND_INREG type should be vector iff the operand "
           "type is vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in FP_ROUND_INREG");
    assert(EVT.bitsLE(VT) && "Not rounding down!");
    (void)EVT;
    if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
    break;
  }
  case ISD::FP_ROUND:
    assert(VT.isFloatingPoint() &&
           N1.getValueType().isFloatingPoint() &&
           VT.bitsLE(N1.getValueType()) &&
           N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
           "Invalid FP_ROUND!");
    if (N1.getValueType() == VT) return N1;  // noop conversion.
    break;
  case ISD::AssertSext:
  case ISD::AssertZext: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(!EVT.isVector() &&
           "AssertSExt/AssertZExt type should be the vector element type "
           "rather than the vector type!");
    assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
    if (VT.getScalarType() == EVT) return N1; // noop assertion.
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(EVT.isVector() == VT.isVector() &&
           "SIGN_EXTEND_INREG type should be vector iff the operand "
           "type is vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in SIGN_EXTEND_INREG");
    assert(EVT.bitsLE(VT) && "Not extending!");
    if (EVT == VT) return N1;  // Not actually extending

    auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
      unsigned FromBits = EVT.getScalarSizeInBits();
      Val <<= Val.getBitWidth() - FromBits;
      Val.ashrInPlace(Val.getBitWidth() - FromBits);
      return getConstant(Val, DL, ConstantVT);
    };

    if (N1C) {
      const APInt &Val = N1C->getAPIntValue();
      return SignExtendInReg(Val, VT);
    }

    if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
      SmallVector<SDValue, 8> Ops;
      llvm::EVT OpVT = N1.getOperand(0).getValueType();
      for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
        SDValue Op = N1.getOperand(i);
        if (Op.isUndef()) {
          Ops.push_back(getUNDEF(OpVT));
          continue;
        }
        ConstantSDNode *C = cast<ConstantSDNode>(Op);
        APInt Val = C->getAPIntValue();
        Ops.push_back(SignExtendInReg(Val, OpVT));
      }
      return getBuildVector(VT, DL, Ops);
    }
    break;
  }
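  // Worked example for the SignExtendInReg fold above: extending the low
  // 8 bits of the i32 constant 0x000000FF shifts left by 24 (0xFF000000)
  // and then arithmetic-shifts right by 24, yielding 0xFFFFFFFF, i.e. -1.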
  case ISD::EXTRACT_VECTOR_ELT:
    assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
           "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
             element type of the vector.");

    // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
    if (N1.isUndef())
      return getUNDEF(VT);

    // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF
    if (N2C && N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
      return getUNDEF(VT);

    // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
    // expanding copies of large vectors from registers.
    if (N2C &&
        N1.getOpcode() == ISD::CONCAT_VECTORS &&
        N1.getNumOperands() > 0) {
      unsigned Factor =
          N1.getOperand(0).getValueType().getVectorNumElements();
      return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                     N1.getOperand(N2C->getZExtValue() / Factor),
                     getConstant(N2C->getZExtValue() % Factor, DL,
                                 N2.getValueType()));
    }

    // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
    // expanding large vector constants.
    if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
      SDValue Elt = N1.getOperand(N2C->getZExtValue());

      if (VT != Elt.getValueType())
        // If the vector element type is not legal, the BUILD_VECTOR operands
        // are promoted and implicitly truncated, and the result implicitly
        // extended. Make that explicit here.
        Elt = getAnyExtOrTrunc(Elt, DL, VT);

      return Elt;
    }

    // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
    // operations are lowered to scalars.
    if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
      // If the indices are the same, return the inserted element else
      // if the indices are known different, extract the element from
      // the original vector.
      SDValue N1Op2 = N1.getOperand(2);
      ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);

      if (N1Op2C && N2C) {
        if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
          if (VT == N1.getOperand(1).getValueType())
            return N1.getOperand(1);
          else
            return getSExtOrTrunc(N1.getOperand(1), DL, VT);
        }

        return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
      }
    }

    // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
    // when vector types are scalarized and v1iX is legal.
    // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx)
    if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        N1.getValueType().getVectorNumElements() == 1) {
      return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
                     N1.getOperand(1));
    }
    break;
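  // Worked example for the CONCAT_VECTORS fold above: extracting element 5
  // from a concat of two v4i32 vectors becomes an extract of element
  // 5 % 4 == 1 from operand 5 / 4 == 1, i.e. the second source vector.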
  case ISD::EXTRACT_ELEMENT:
    assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
    assert(!N1.getValueType().isVector() && !VT.isVector() &&
           (N1.getValueType().isInteger() == VT.isInteger()) &&
           N1.getValueType() != VT &&
           "Wrong types for EXTRACT_ELEMENT!");

    // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
    // 64-bit integers into 32-bit parts.  Instead of building the extract of
    // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
    if (N1.getOpcode() == ISD::BUILD_PAIR)
      return N1.getOperand(N2C->getZExtValue());

    // EXTRACT_ELEMENT of a constant int is also very common.
    if (N1C) {
      unsigned ElementSize = VT.getSizeInBits();
      unsigned Shift = ElementSize * N2C->getZExtValue();
      APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
      return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
    }
    break;
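  // Worked example for the constant fold above: splitting the i64 constant
  // 0x00000001FFFFFFFF into i32 halves yields 0xFFFFFFFF for element 0 and,
  // after lshr by 32 and truncation, 0x00000001 for element 1.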
  case ISD::EXTRACT_SUBVECTOR:
    if (VT.isSimple() && N1.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             "Extract subvector VTs must be vectors!");
      assert(VT.getVectorElementType() ==
             N1.getValueType().getVectorElementType() &&
             "Extract subvector VTs must have the same element type!");
      assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
             "Extract subvector must be from larger vector to smaller vector!");

      if (N2C) {
        assert((VT.getVectorNumElements() + N2C->getZExtValue()
                <= N1.getValueType().getVectorNumElements())
               && "Extract subvector overflow!");
      }

      // Trivial extraction.
      if (VT.getSimpleVT() == N1.getSimpleValueType())
        return N1;

      // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
      if (N1.isUndef())
        return getUNDEF(VT);

      // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
      // the concat have the same type as the extract.
      if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
          N1.getNumOperands() > 0 &&
          VT == N1.getOperand(0).getValueType()) {
        unsigned Factor = VT.getVectorNumElements();
        return N1.getOperand(N2C->getZExtValue() / Factor);
      }

      // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
      // during shuffle legalization.
      if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
          VT == N1.getOperand(1).getValueType())
        return N1.getOperand(1);
    }
    break;
  }
  // Perform trivial constant folding.
  if (SDValue SV =
          FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
    return SV;

  if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2))
    return V;

  // Canonicalize an UNDEF to the RHS, even over a constant.
  if (N1.isUndef()) {
    if (TLI->isCommutativeBinOp(Opcode)) {
      std::swap(N1, N2);
    } else {
      switch (Opcode) {
      case ISD::FP_ROUND_INREG:
      case ISD::SIGN_EXTEND_INREG:
      case ISD::SUB:
        return getUNDEF(VT);     // fold op(undef, arg2) -> undef
      case ISD::UDIV:
      case ISD::SDIV:
      case ISD::UREM:
      case ISD::SREM:
      case ISD::SSUBSAT:
      case ISD::USUBSAT:
        return getConstant(0, DL, VT);    // fold op(undef, arg2) -> 0
      }
    }
  }

  // Fold a bunch of operators when the RHS is undef.
  if (N2.isUndef()) {
    switch (Opcode) {
    case ISD::XOR:
      if (N1.isUndef())
        // Handle undef ^ undef -> 0 special case. This is a common
        // idiom (misuse).
        return getConstant(0, DL, VT);
      LLVM_FALLTHROUGH;
    case ISD::ADD:
    case ISD::SUB:
    case ISD::UDIV:
    case ISD::SDIV:
    case ISD::UREM:
    case ISD::SREM:
      return getUNDEF(VT);       // fold op(arg1, undef) -> undef
    case ISD::MUL:
    case ISD::AND:
    case ISD::SSUBSAT:
    case ISD::USUBSAT:
      return getConstant(0, DL, VT);  // fold op(arg1, undef) -> 0
    case ISD::OR:
    case ISD::SADDSAT:
    case ISD::UADDSAT:
      return getAllOnesConstant(DL, VT);
    }
  }
  // Memoize this node if possible.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {N1, N2};
  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      E->intersectFlagsWith(Flags);
      return SDValue(E, 0);
    }

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    N->setFlags(Flags);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3,
                              const SDNodeFlags Flags) {
  // Perform various simplifications.
  switch (Opcode) {
  case ISD::FMA: {
    assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
    assert(N1.getValueType() == VT && N2.getValueType() == VT &&
           N3.getValueType() == VT && "FMA types must match!");
    ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
    ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
    ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
    if (N1CFP && N2CFP && N3CFP) {
      APFloat V1 = N1CFP->getValueAPF();
      const APFloat &V2 = N2CFP->getValueAPF();
      const APFloat &V3 = N3CFP->getValueAPF();
      V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
      return getConstantFP(V1, DL, VT);
    }
    break;
  }
  case ISD::BUILD_VECTOR: {
    // Attempt to simplify BUILD_VECTOR.
    SDValue Ops[] = {N1, N2, N3};
    if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
      return V;
    break;
  }
  case ISD::CONCAT_VECTORS: {
    SDValue Ops[] = {N1, N2, N3};
    if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
      return V;
    break;
  }
  case ISD::SETCC: {
    assert(VT.isInteger() && "SETCC result type must be an integer!");
    assert(N1.getValueType() == N2.getValueType() &&
           "SETCC operands must have the same type!");
    assert(VT.isVector() == N1.getValueType().isVector() &&
           "SETCC type should be vector iff the operand type is vector!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() == N1.getValueType().getVectorNumElements()) &&
           "SETCC vector element counts must match!");
    // Use FoldSetCC to simplify SETCC's.
    if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
      return V;
    // Vector constant folding.
    SDValue Ops[] = {N1, N2, N3};
    if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) {
      NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
      return V;
    }
    break;
  }
  case ISD::SELECT:
  case ISD::VSELECT:
    if (SDValue V = simplifySelect(N1, N2, N3))
      return V;
    break;
  case ISD::VECTOR_SHUFFLE:
    llvm_unreachable("should use getVectorShuffle constructor!");
  case ISD::INSERT_VECTOR_ELT: {
    ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
    // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF
    if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
      return getUNDEF(VT);
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Inserting undef into undef is still undef.
    if (N1.isUndef() && N2.isUndef())
      return getUNDEF(VT);
    SDValue Index = N3;
    if (VT.isSimple() && N1.getValueType().isSimple()
        && N2.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             N2.getValueType().isVector() &&
             "Insert subvector VTs must be vectors");
      assert(VT == N1.getValueType() &&
             "Dest and insert subvector source types must match!");
      assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
             "Insert subvector must be from smaller vector to larger vector!");
      if (isa<ConstantSDNode>(Index)) {
        assert((N2.getValueType().getVectorNumElements() +
                cast<ConstantSDNode>(Index)->getZExtValue()
                <= VT.getVectorNumElements())
               && "Insert subvector overflow!");
      }

      // Trivial insertion.
      if (VT.getSimpleVT() == N2.getSimpleValueType())
        return N2;

      // If this is an insert of an extracted vector into an undef vector, we
      // can just use the input to the extract.
      if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
          N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
        return N2.getOperand(0);
    }
    break;
  }
  case ISD::BITCAST:
    // Fold bit_convert nodes from a type to themselves.
    if (N1.getValueType() == VT)
      return N1;
    break;
  }

  // Memoize node if it doesn't produce a flag.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {N1, N2, N3};
  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      E->intersectFlagsWith(Flags);
      return SDValue(E, 0);
    }

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    N->setFlags(Flags);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VT, Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4,
                              SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VT, Ops);
}
/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
/// the incoming stack arguments to be loaded from the stack.
SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
  SmallVector<SDValue, 8> ArgChains;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument.
  for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
       UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
        if (FI->getIndex() < 0)
          ArgChains.push_back(SDValue(L, 1));

  // Build a tokenfactor for all the chains.
  return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}
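// Note: incoming stack arguments live in fixed stack objects, which are given
// negative frame indices, so the FI->getIndex() < 0 check above is what
// selects loads of stack arguments rather than loads of locals.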
/// getMemsetValue - Vectorized representation of the memset value
/// operand.
static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
                              const SDLoc &dl) {
  assert(!Value.isUndef());

  unsigned NumBits = VT.getScalarSizeInBits();
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
    assert(C->getAPIntValue().getBitWidth() == 8);
    APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
    if (VT.isInteger()) {
      bool IsOpaque = VT.getSizeInBits() > 64 ||
          !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
      return DAG.getConstant(Val, dl, VT, false, IsOpaque);
    }
    return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
                             VT);
  }

  assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
  EVT IntVT = VT.getScalarType();
  if (!IntVT.isInteger())
    IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());

  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
  if (NumBits > 8) {
    // Use a multiplication with 0x010101... to extend the input to the
    // required length.
    APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
    Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
                        DAG.getConstant(Magic, dl, IntVT));
  }

  if (VT != Value.getValueType() && !VT.isInteger())
    Value = DAG.getBitcast(VT.getScalarType(), Value);
  if (VT != Value.getValueType())
    Value = DAG.getSplatBuildVector(VT, dl, Value);

  return Value;
}
/// getMemsetStringVal - Similar to getMemsetValue. Except this is only
/// used when a memcpy is turned into a memset when the source is a constant
/// string ptr.
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
                                  const TargetLowering &TLI,
                                  const ConstantDataArraySlice &Slice) {
  // Handle vector with all elements zero.
  if (Slice.Array == nullptr) {
    if (VT.isInteger())
      return DAG.getConstant(0, dl, VT);
    else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
      return DAG.getConstantFP(0.0, dl, VT);
    else if (VT.isVector()) {
      unsigned NumElts = VT.getVectorNumElements();
      MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
      return DAG.getNode(ISD::BITCAST, dl, VT,
                         DAG.getConstant(0, dl,
                                         EVT::getVectorVT(*DAG.getContext(),
                                                          EltVT, NumElts)));
    } else
      llvm_unreachable("Expected type!");
  }

  assert(!VT.isVector() && "Can't handle vector type here!");
  unsigned NumVTBits = VT.getSizeInBits();
  unsigned NumVTBytes = NumVTBits / 8;
  unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));

  APInt Val(NumVTBits, 0);
  if (DAG.getDataLayout().isLittleEndian()) {
    for (unsigned i = 0; i != NumBytes; ++i)
      Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
  } else {
    for (unsigned i = 0; i != NumBytes; ++i)
      Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
  }

  // If the "cost" of materializing the integer immediate is less than the cost
  // of a load, then it is cost effective to turn the load into the immediate.
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
    return DAG.getConstant(Val, dl, VT);
  return SDValue(nullptr, 0);
}
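// Worked example for the byte-packing loops above: packing "abcd" into an i32
// gives 0x64636261 on a little-endian target ('a' == 0x61 lands in the low
// byte) and 0x61626364 on a big-endian one.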
SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset,
                                           const SDLoc &DL) {
  EVT VT = Base.getValueType();
  return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT));
}
/// Returns true if memcpy source is constant data.
static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
  uint64_t SrcDelta = 0;
  GlobalAddressSDNode *G = nullptr;
  if (Src.getOpcode() == ISD::GlobalAddress)
    G = cast<GlobalAddressSDNode>(Src);
  else if (Src.getOpcode() == ISD::ADD &&
           Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
           Src.getOperand(1).getOpcode() == ISD::Constant) {
    G = cast<GlobalAddressSDNode>(Src.getOperand(0));
    SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
  }
  if (!G)
    return false;

  return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
                                  SrcDelta + G->getOffset());
}
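// Note: both a bare GlobalAddress and the (add GlobalAddress, Constant) form
// are matched above, so a copy from str + 3 of a constant string can still
// resolve to a slice of the global's initializer at offset 3.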
static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
  // On Darwin, -Os means optimize for size without hurting performance, so
  // only really optimize for size when -Oz (MinSize) is used.
  if (MF.getTarget().getTargetTriple().isOSDarwin())
    return MF.getFunction().hasMinSize();
  return MF.getFunction().hasOptSize();
}
static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
                          SmallVector<SDValue, 32> &OutChains, unsigned From,
                          unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
                          SmallVector<SDValue, 16> &OutStoreChains) {
  assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
  assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
  SmallVector<SDValue, 16> GluedLoadChains;
  for (unsigned i = From; i < To; ++i) {
    OutChains.push_back(OutLoadChains[i]);
    GluedLoadChains.push_back(OutLoadChains[i]);
  }

  // Chain for all loads.
  SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                  GluedLoadChains);

  for (unsigned i = From; i < To; ++i) {
    StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
    SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
                                         ST->getBasePtr(), ST->getMemoryVT(),
                                         ST->getMemOperand());
    OutChains.push_back(NewStore);
  }
}
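// Note: re-chaining each store onto the loads' TokenFactor above "gangs up"
// one batch of the inlined memcpy, e.g. four loads are sequenced before their
// four stores instead of interleaving load/store pairs.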
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                       SDValue Chain, SDValue Dst, SDValue Src,
                                       uint64_t Size, unsigned Align,
                                       bool isVol, bool AlwaysInline,
                                       MachinePointerInfo DstPtrInfo,
                                       MachinePointerInfo SrcPtrInfo) {
  // Turn a memcpy of undef to nop.
  // FIXME: We need to honor volatile even if Src is undef.
  if (Src.isUndef())
    return Chain;

  // Expand memcpy to a series of load and store ops if the size operand falls
  // below a certain threshold.
  // TODO: In the AlwaysInline case, if the size is big then generate a loop
  // rather than maybe a humongous number of loads and stores.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  LLVMContext &C = *DAG.getContext();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Align > SrcAlign)
    SrcAlign = Align;
  ConstantDataArraySlice Slice;
  bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
  bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);

  if (!TLI.findOptimalMemOpLowering(
          MemOps, Limit, Size, (DstAlignCanChange ? 0 : Align),
          (isZeroConstant ? 0 : SrcAlign), /*IsMemset=*/false,
          /*ZeroMemset=*/false, /*MemcpyStrSrc=*/CopyFromConstant,
          /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(),
          SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes()))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(C);
    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);

    // Don't promote to an alignment that would require dynamic stack
    // realignment.
    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
    if (!TRI->needsStackRealignment(MF))
      while (NewAlign > Align &&
             DL.exceedsNaturalStackAlignment(llvm::Align(NewAlign)))
        NewAlign /= 2;

    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  MachineMemOperand::Flags MMOFlags =
      isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
  SmallVector<SDValue, 16> OutLoadChains;
  SmallVector<SDValue, 16> OutStoreChains;
  SmallVector<SDValue, 32> OutChains;
  unsigned NumMemOps = MemOps.size();
  uint64_t SrcOff = 0, DstOff = 0;
  for (unsigned i = 0; i != NumMemOps; ++i) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value, Store;

    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      SrcOff -= VTSize - Size;
      DstOff -= VTSize - Size;
    }

    if (CopyFromConstant &&
        (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
      // It's unlikely a store of a vector immediate can be done in a single
      // instruction. It would require a load from a constantpool first.
      // We only handle zero vectors here.
      // FIXME: Handle other cases where store of vector immediate is done in
      // a single instruction.
      ConstantDataArraySlice SubSlice;
      if (SrcOff < Slice.Length) {
        SubSlice = Slice;
        SubSlice.move(SrcOff);
      } else {
        // This is an out-of-bounds access and hence UB. Pretend we read zero.
        SubSlice.Array = nullptr;
        SubSlice.Offset = 0;
        SubSlice.Length = VTSize;
      }
      Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
      if (Value.getNode()) {
        Store = DAG.getStore(Chain, dl, Value,
                             DAG.getMemBasePlusOffset(Dst, DstOff, dl),
                             DstPtrInfo.getWithOffset(DstOff), Align,
                             MMOFlags);
        OutChains.push_back(Store);
      }
    }

    if (!Store.getNode()) {
      // The type might not be legal for the target. This should only happen
      // if the type is smaller than a legal type, as on PPC, so the right
      // thing to do is generate a LoadExt/StoreTrunc pair.  These simplify
      // to Load/Store if NVT==VT.
      // FIXME does the case above also need this?
      EVT NVT = TLI.getTypeToTransformTo(C, VT);
      assert(NVT.bitsGE(VT));

      bool isDereferenceable =
          SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
      MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
      if (isDereferenceable)
        SrcMMOFlags |= MachineMemOperand::MODereferenceable;

      Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
                             DAG.getMemBasePlusOffset(Src, SrcOff, dl),
                             SrcPtrInfo.getWithOffset(SrcOff), VT,
                             MinAlign(SrcAlign, SrcOff), SrcMMOFlags);
      OutLoadChains.push_back(Value.getValue(1));

      Store = DAG.getTruncStore(
          Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
          DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags);
      OutStoreChains.push_back(Store);
    }
    SrcOff += VTSize;
    DstOff += VTSize;
    Size -= VTSize;
  }

  unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
                                TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
  unsigned NumLdStInMemcpy = OutStoreChains.size();

  if (NumLdStInMemcpy) {
    // It may be that memcpy might be converted to memset if it's memcpy
    // of constants. In such a case, we won't have loads and stores, but
    // just stores. In the absence of loads, there is nothing to gang up.
    if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
      // If the target does not care, just leave it as it is.
      for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
        OutChains.push_back(OutLoadChains[i]);
        OutChains.push_back(OutStoreChains[i]);
      }
    } else {
      // Ld/St less than/equal limit set by target.
      if (NumLdStInMemcpy <= GluedLdStLimit) {
        chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
                                     NumLdStInMemcpy, OutLoadChains,
                                     OutStoreChains);
      } else {
        unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
        unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
        unsigned GlueIter = 0;

        for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
          unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
          unsigned IndexTo   = NumLdStInMemcpy - GlueIter;

          chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
                                       OutLoadChains, OutStoreChains);
          GlueIter += GluedLdStLimit;
        }

        // Residual ld/st.
        if (RemainingLdStInMemcpy) {
          chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
                                       RemainingLdStInMemcpy, OutLoadChains,
                                       OutStoreChains);
        }
      }
    }
  }
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
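// Illustrative note (the exact split depends on the target's
// findOptimalMemOpLowering choices): with AllowOverlap in effect, a 7-byte
// copy may be emitted as two overlapping i32 load/store pairs, the second
// re-reading bytes 3..6, rather than an i32 + i16 + i8 sequence.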
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                        SDValue Chain, SDValue Dst, SDValue Src,
                                        uint64_t Size, unsigned Align,
                                        bool isVol, bool AlwaysInline,
                                        MachinePointerInfo DstPtrInfo,
                                        MachinePointerInfo SrcPtrInfo) {
  // Turn a memmove of undef to nop.
  // FIXME: We need to honor volatile even if Src is undef.
  if (Src.isUndef())
    return Chain;

  // Expand memmove to a series of load and store ops if the size operand falls
  // below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  LLVMContext &C = *DAG.getContext();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Align > SrcAlign)
    SrcAlign = Align;
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
  // FIXME: `AllowOverlap` should really be `!isVol` but there is a bug in
  // findOptimalMemOpLowering. Meanwhile, setting it to `false` produces the
  // correct code.
  bool AllowOverlap = false;
  if (!TLI.findOptimalMemOpLowering(
          MemOps, Limit, Size, (DstAlignCanChange ? 0 : Align), SrcAlign,
          /*IsMemset=*/false, /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
          AllowOverlap, DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
          MF.getFunction().getAttributes()))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(C);
    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  MachineMemOperand::Flags MMOFlags =
      isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
  uint64_t SrcOff = 0, DstOff = 0;
  SmallVector<SDValue, 8> LoadValues;
  SmallVector<SDValue, 8> LoadChains;
  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value;

    bool isDereferenceable =
        SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
    MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
    if (isDereferenceable)
      SrcMMOFlags |= MachineMemOperand::MODereferenceable;

    Value =
        DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
                    SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags);
    LoadValues.push_back(Value);
    LoadChains.push_back(Value.getValue(1));
    SrcOff += VTSize;
  }
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
  OutChains.clear();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Store;

    Store = DAG.getStore(Chain, dl, LoadValues[i],
                         DAG.getMemBasePlusOffset(Dst, DstOff, dl),
                         DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
    OutChains.push_back(Store);
    DstOff += VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
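// Note: unlike the memcpy expansion, the memmove expansion above issues all
// loads before any store and merges the load chains with a TokenFactor,
// which keeps the lowering correct when source and destination overlap.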
/// Lower the call to 'memset' intrinsic function into a series of store
/// operations.
///
/// \param DAG Selection DAG where lowered code is placed.
/// \param dl Link to corresponding IR location.
/// \param Chain Control flow dependency.
/// \param Dst Pointer to destination memory location.
/// \param Src Value of byte to write into the memory.
/// \param Size Number of bytes to write.
/// \param Align Alignment of the destination in bytes.
/// \param isVol True if destination is volatile.
/// \param DstPtrInfo IR information on the memory pointer.
/// \returns New head in the control flow, if lowering was successful, empty
/// SDValue otherwise.
///
/// The function tries to replace 'llvm.memset' intrinsic with several store
/// operations and value calculation code. This is usually profitable for small
/// memory size.
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
                               SDValue Chain, SDValue Dst, SDValue Src,
                               uint64_t Size, unsigned Align, bool isVol,
                               MachinePointerInfo DstPtrInfo) {
  // Turn a memset of undef to nop.
  // FIXME: We need to honor volatile even if Src is undef.
  if (Src.isUndef())
    return Chain;

  // Expand memset to a series of load/store ops if the size operand
  // falls below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  bool IsZeroVal =
      isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
  if (!TLI.findOptimalMemOpLowering(
          MemOps, TLI.getMaxStoresPerMemset(OptSize), Size,
          (DstAlignCanChange ? 0 : Align), 0, /*IsMemset=*/true,
          /*ZeroMemset=*/IsZeroVal, /*MemcpyStrSrc=*/false,
          /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(), ~0u,
          MF.getFunction().getAttributes()))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  SmallVector<SDValue, 8> OutChains;
  uint64_t DstOff = 0;
  unsigned NumMemOps = MemOps.size();

  // Find the largest store and generate the bit pattern for it.
  EVT LargestVT = MemOps[0];
  for (unsigned i = 1; i < NumMemOps; i++)
    if (MemOps[i].bitsGT(LargestVT))
      LargestVT = MemOps[i];
  SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);

  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      DstOff -= VTSize - Size;
    }

    // If this store is smaller than the largest store see whether we can get
    // the smaller value for free with a truncate.
    SDValue Value = MemSetValue;
    if (VT.bitsLT(LargestVT)) {
      if (!LargestVT.isVector() && !VT.isVector() &&
          TLI.isTruncateFree(LargestVT, VT))
        Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
      else
        Value = getMemsetValue(Src, VT, DAG, dl);
    }
    assert(Value.getValueType() == VT && "Value with wrong type.");
    SDValue Store = DAG.getStore(
        Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
        DstPtrInfo.getWithOffset(DstOff), Align,
        isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
    OutChains.push_back(Store);
    DstOff += VT.getSizeInBits() / 8;
    Size -= VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
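// Worked example for the truncation reuse above: an inlined 6-byte memset of
// 0xAB can emit an i32 store of 0xABABABAB followed by an i16 store of its
// truncation, 0xABAB, when truncation is free on the target.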
static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
                                            unsigned AS) {
  // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
  // pointer operands can be losslessly bitcasted to pointers of address space 0
  if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) {
    report_fatal_error("cannot lower memory intrinsic in address space " +
                       Twine(AS));
  }
}
SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                SDValue Src, SDValue Size, unsigned Align,
                                bool isVol, bool AlwaysInline, bool isTailCall,
                                MachinePointerInfo DstPtrInfo,
                                MachinePointerInfo SrcPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memcpy to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memcpy with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                             ConstantSize->getZExtValue(), Align,
                                             isVol, false, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memcpy with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemcpy(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
        DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // If we really need inline code and the target declined to provide it,
  // use a (potentially long) sequence of loads and stores.
  if (AlwaysInline) {
    assert(ConstantSize && "AlwaysInline requires a constant size!");
    return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                   ConstantSize->getZExtValue(), Align, isVol,
                                   true, DstPtrInfo, SrcPtrInfo);
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
  checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());

  // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
  // memcpy is not guaranteed to be safe. libc memcpys aren't required to
  // respect volatile, so they may do things like read or write memory
  // beyond the given memory regions. But fixing this isn't easy, and most
  // people don't care.

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = Type::getInt8PtrTy(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);

  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}
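// Note on the strategy order above: getMemcpy first tries inline loads and
// stores for small constant sizes, then the target's EmitTargetCodeForMemcpy
// hook, then a forced inline expansion for AlwaysInline, and only finally a
// libcall to memcpy.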
SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl,
                                      SDValue Dst, unsigned DstAlign,
                                      SDValue Src, unsigned SrcAlign,
                                      SDValue Size, Type *SizeTy,
                                      unsigned ElemSz, bool isTailCall,
                                      MachinePointerInfo DstPtrInfo,
                                      MachinePointerInfo SrcPtrInfo) {
  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst;
  Args.push_back(Entry);

  Entry.Node = Src;
  Args.push_back(Entry);

  Entry.Ty = SizeTy;
  Entry.Node = Size;
  Args.push_back(Entry);

  RTLIB::Libcall LibraryCall =
      RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz);
  if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported element size");

  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
                    Type::getVoidTy(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(LibraryCall),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}
SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                 SDValue Src, SDValue Size, unsigned Align,
                                 bool isVol, bool isTailCall,
                                 MachinePointerInfo DstPtrInfo,
                                 MachinePointerInfo SrcPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memmove to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memmove with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
        getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
                                 ConstantSize->getZExtValue(), Align, isVol,
                                 false, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memmove with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemmove(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
  checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());

  // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
  // not be safe. See memcpy above for more details.

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = Type::getInt8PtrTy(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);

  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}
SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
                                       SDValue Dst, unsigned DstAlign,
                                       SDValue Src, unsigned SrcAlign,
                                       SDValue Size, Type *SizeTy,
                                       unsigned ElemSz, bool isTailCall,
                                       MachinePointerInfo DstPtrInfo,
                                       MachinePointerInfo SrcPtrInfo) {
  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst;
  Args.push_back(Entry);

  Entry.Node = Src;
  Args.push_back(Entry);

  Entry.Ty = SizeTy;
  Entry.Node = Size;
  Args.push_back(Entry);

  RTLIB::Libcall LibraryCall =
      RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz);
  if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported element size");

  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
                    Type::getVoidTy(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(LibraryCall),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}
SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                SDValue Src, SDValue Size, unsigned Align,
                                bool isVol, bool isTailCall,
                                MachinePointerInfo DstPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memset to stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memset with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
        getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
                        Align, isVol, DstPtrInfo);

    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memset with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemset(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
    if (Result.getNode())
      return Result;
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext());
  Args.push_back(Entry);
  Entry.Node = Src;
  Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
  Args.push_back(Entry);
  Entry.Node = Size;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Args.push_back(Entry);

  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}
SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl,
                                      SDValue Dst, unsigned DstAlign,
                                      SDValue Value, SDValue Size, Type *SizeTy,
                                      unsigned ElemSz, bool isTailCall,
                                      MachinePointerInfo DstPtrInfo) {
  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst;
  Args.push_back(Entry);

  Entry.Ty = Type::getInt8Ty(*getContext());
  Entry.Node = Value;
  Args.push_back(Entry);

  Entry.Ty = SizeTy;
  Entry.Node = Size;
  Args.push_back(Entry);

  RTLIB::Libcall LibraryCall =
      RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz);
  if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported element size");

  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
                    Type::getVoidTy(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(LibraryCall),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                SDVTList VTList, ArrayRef<SDValue> Ops,
                                MachineMemOperand *MMO) {
  FoldingSetNodeID ID;
  ID.AddInteger(MemVT.getRawBits());
  AddNodeIDNode(ID, Opcode, VTList, Ops);
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<AtomicSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
                                    VTList, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
                                       EVT MemVT, SDVTList VTs, SDValue Chain,
                                       SDValue Ptr, SDValue Cmp, SDValue Swp,
                                       MachineMemOperand *MMO) {
  assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
         Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");

  SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                SDValue Chain, SDValue Ptr, SDValue Val,
                                MachineMemOperand *MMO) {
  assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
          Opcode == ISD::ATOMIC_LOAD_SUB ||
          Opcode == ISD::ATOMIC_LOAD_AND ||
          Opcode == ISD::ATOMIC_LOAD_CLR ||
          Opcode == ISD::ATOMIC_LOAD_OR ||
          Opcode == ISD::ATOMIC_LOAD_XOR ||
          Opcode == ISD::ATOMIC_LOAD_NAND ||
          Opcode == ISD::ATOMIC_LOAD_MIN ||
          Opcode == ISD::ATOMIC_LOAD_MAX ||
          Opcode == ISD::ATOMIC_LOAD_UMIN ||
          Opcode == ISD::ATOMIC_LOAD_UMAX ||
          Opcode == ISD::ATOMIC_LOAD_FADD ||
          Opcode == ISD::ATOMIC_LOAD_FSUB ||
          Opcode == ISD::ATOMIC_SWAP ||
          Opcode == ISD::ATOMIC_STORE) &&
         "Invalid Atomic Op");

  EVT VT = Val.getValueType();

  SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
                                               getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr, Val};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                EVT VT, SDValue Chain, SDValue Ptr,
                                MachineMemOperand *MMO) {
  assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");

  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}
/// getMergeValues - Create a MERGE_VALUES node from the given operands.
SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
  if (Ops.size() == 1)
    return Ops[0];

  SmallVector<EVT, 4> VTs;
  VTs.reserve(Ops.size());
  for (unsigned i = 0; i < Ops.size(); ++i)
    VTs.push_back(Ops[i].getValueType());
  return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
}
SDValue SelectionDAG::getMemIntrinsicNode(
    unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
    EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align,
    MachineMemOperand::Flags Flags, unsigned Size, const AAMDNodes &AAInfo) {
  if (Align == 0)  // Ensure that codegen never sees alignment 0
    Align = getEVTAlignment(MemVT);

  if (!Size)
    Size = MemVT.getStoreSize();

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, Flags, Size, Align, AAInfo);

  return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
}
SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
                                          SDVTList VTList,
                                          ArrayRef<SDValue> Ops, EVT MemVT,
                                          MachineMemOperand *MMO) {
  assert((Opcode == ISD::INTRINSIC_VOID ||
          Opcode == ISD::INTRINSIC_W_CHAIN ||
          Opcode == ISD::PREFETCH ||
          Opcode == ISD::LIFETIME_START ||
          Opcode == ISD::LIFETIME_END ||
          ((int)Opcode <= std::numeric_limits<int>::max() &&
           (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
         "Opcode is not a memory-accessing opcode!");

  // Memoize the node unless it returns a flag.
  MemIntrinsicSDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
        Opcode, dl.getIROrder(), VTList, MemVT, MMO));
    ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
      cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
      return SDValue(E, 0);
    }

    N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
                                      VTList, MemVT, MMO);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
                                      VTList, MemVT, MMO);
    createOperands(N, Ops);
  }
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl,
                                      SDValue Chain, int FrameIndex,
                                      int64_t Size, int64_t Offset) {
  const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
  const auto VTs = getVTList(MVT::Other);
  SDValue Ops[2] = {
      Chain,
      getFrameIndex(FrameIndex,
                    getTargetLoweringInfo().getFrameIndexTy(getDataLayout()),
                    true)};

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, VTs, Ops);
  ID.AddInteger(FrameIndex);
  ID.AddInteger(Size);
  ID.AddInteger(Offset);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  LifetimeSDNode *N = newSDNode<LifetimeSDNode>(
      Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset);
  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it.  This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
                                           SelectionDAG &DAG, SDValue Ptr,
                                           int64_t Offset = 0) {
  // If this is FI+Offset, we can model it.
  if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
    return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
                                             FI->getIndex(), Offset);

  // If this is (FI+Offset1)+Offset2, we can model it.
  if (Ptr.getOpcode() != ISD::ADD ||
      !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
      !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
    return Info;

  int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
  return MachinePointerInfo::getFixedStack(
      DAG.getMachineFunction(), FI,
      Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
}
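// Worked example for the inference above: a load whose address is
// (add FrameIndex:i64<1>, Constant:i64<8>) can be modeled as fixed-stack
// slot #1 at offset 8 even when the caller passed no MachinePointerInfo.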
/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it.  This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
                                           SelectionDAG &DAG, SDValue Ptr,
                                           SDValue OffsetOp) {
  // If the 'Offset' value isn't a constant, we can't handle this.
  if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
    return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
  if (OffsetOp.isUndef())
    return InferPointerInfo(Info, DAG, Ptr);
  return Info;
}
SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                              EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, SDValue Offset,
                              MachinePointerInfo PtrInfo, EVT MemVT,
                              unsigned Alignment,
                              MachineMemOperand::Flags MMOFlags,
                              const AAMDNodes &AAInfo, const MDNode *Ranges) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);
  // If we don't have a PtrInfo, infer the trivial frame index case to simplify
  // clients.
  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges);
  return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
}

SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                              EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, SDValue Offset, EVT MemVT,
                              MachineMemOperand *MMO) {
  if (VT == MemVT) {
    ExtType = ISD::NON_EXTLOAD;
  } else if (ExtType == ISD::NON_EXTLOAD) {
    assert(VT == MemVT && "Non-extending load from different memory type!");
  } else {
    // Extending load.
    assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
           "Should only be an extending load, not truncating!");
    assert(VT.isInteger() == MemVT.isInteger() &&
           "Cannot convert from FP to Int or Int -> FP!");
    assert(VT.isVector() == MemVT.isVector() &&
           "Cannot use an ext load to convert to or from a vector!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
           "Cannot use an ext load to change the number of vector elements!");
  }

  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");

  SDVTList VTs = Indexed ?
    getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
      dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<LoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                  ExtType, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
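
// Note that an indexed load (AM != ISD::UNINDEXED) produces an extra result
// of the pointer type carrying the updated address, which is why the VT list
// above gains Ptr.getValueType() in the Indexed case.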

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachinePointerInfo PtrInfo,
                              unsigned Alignment,
                              MachineMemOperand::Flags MMOFlags,
                              const AAMDNodes &AAInfo, const MDNode *Ranges) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
}

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 VT, MMO);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr,
                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                 unsigned Alignment,
                                 MachineMemOperand::Flags MMOFlags,
                                 const AAMDNodes &AAInfo) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
                 MemVT, Alignment, MMOFlags, AAInfo);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
                                 MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 MemVT, MMO);
}

SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
                                     SDValue Base, SDValue Offset,
                                     ISD::MemIndexedMode AM) {
  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
  assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
  // Don't propagate the invariant or dereferenceable flags.
  auto MMOFlags =
      LD->getMemOperand()->getFlags() &
      ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
  return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
                 LD->getChain(), Base, Offset, LD->getPointerInfo(),
                 LD->getMemoryVT(), LD->getAlignment(), MMOFlags,
                 LD->getAAInfo());
}

SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                               SDValue Ptr, MachinePointerInfo PtrInfo,
                               unsigned Alignment,
                               MachineMemOperand::Flags MMOFlags,
                               const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(Val.getValueType());

  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo);
  return getStore(Chain, dl, Val, Ptr, MMO);
}

SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                               SDValue Ptr, MachineMemOperand *MMO) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, false, VT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, MachinePointerInfo PtrInfo,
                                    EVT SVT, unsigned Alignment,
                                    MachineMemOperand::Flags MMOFlags,
                                    const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(SVT);

  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
  return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, EVT SVT,
                                    MachineMemOperand *MMO) {
  EVT VT = Val.getValueType();

  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (VT == SVT)
    return getStore(Chain, dl, Val, Ptr, MMO);

  assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
         "Should only be a truncating store, not extending!");
  assert(VT.isInteger() == SVT.isInteger() &&
         "Can't do FP-INT conversion!");
  assert(VT.isVector() == SVT.isVector() &&
         "Cannot use trunc store to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
         "Cannot use trunc store to change the number of vector elements!");

  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(SVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, true, SVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
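
// Example: storing an i32 value into an i16 memory slot is a truncating
// store with VT == i32 and SVT == i16; when VT == SVT the function above
// simply emits a normal store instead. (Illustrative types only.)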

SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
                                      SDValue Base, SDValue Offset,
                                      ISD::MemIndexedMode AM) {
  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
  assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(ST->getMemoryVT().getRawBits());
  ID.AddInteger(ST->getRawSubclassData());
  ID.AddInteger(ST->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                   ST->isTruncatingStore(), ST->getMemoryVT(),
                                   ST->getMemOperand());
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                                    SDValue Ptr, SDValue Mask, SDValue PassThru,
                                    EVT MemVT, MachineMemOperand *MMO,
                                    ISD::LoadExtType ExtTy, bool isExpanding) {
  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Mask, PassThru };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
      dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                        ExtTy, isExpanding, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
                                     SDValue Val, SDValue Ptr, SDValue Mask,
                                     EVT MemVT, MachineMemOperand *MMO,
                                     bool IsTruncating, bool IsCompressing) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Ops[] = { Chain, Val, Ptr, Mask };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
      dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                         VTs, IsTruncating, IsCompressing,
                                         MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
                                      ArrayRef<SDValue> Ops,
                                      MachineMemOperand *MMO,
                                      ISD::MemIndexType IndexType) {
  assert(Ops.size() == 6 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
      dl.getIROrder(), VTs, VT, MMO, IndexType));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                          VTs, VT, MMO, IndexType);
  createOperands(N, Ops);

  assert(N->getPassThru().getValueType() == N->getValueType(0) &&
         "Incompatible type of the PassThru value in MaskedGatherSDNode");
  assert(N->getMask().getValueType().getVectorNumElements() ==
             N->getValueType(0).getVectorNumElements() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorNumElements() >=
             N->getValueType(0).getVectorNumElements() &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
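
// Judging by the accessors used in the asserts above, the six gather
// operands are {Chain, PassThru, Mask, BasePtr, Index, Scale}; the scatter
// below replaces PassThru with the value being stored.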

SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
                                       ArrayRef<SDValue> Ops,
                                       MachineMemOperand *MMO,
                                       ISD::MemIndexType IndexType) {
  assert(Ops.size() == 6 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
      dl.getIROrder(), VTs, VT, MMO, IndexType));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VTs, VT, MMO, IndexType);
  createOperands(N, Ops);

  assert(N->getMask().getValueType().getVectorNumElements() ==
             N->getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorNumElements() >=
             N->getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
  // select undef, T, F --> T (if T is a constant), otherwise F
  // select ?, undef, F --> F
  // select ?, T, undef --> T
  if (Cond.isUndef())
    return isConstantValueOfAnyType(T) ? T : F;
  if (T.isUndef())
    return F;
  if (F.isUndef())
    return T;

  // select true, T, F --> T
  // select false, T, F --> F
  if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
    return CondC->isNullValue() ? F : T;

  // TODO: This should simplify VSELECT with constant condition using something
  // like this (but check boolean contents to be complete?):
  // if (ISD::isBuildVectorAllOnes(Cond.getNode()))
  //   return T;
  // if (ISD::isBuildVectorAllZeros(Cond.getNode()))
  //   return F;

  // select ?, T, T --> T
  if (T == F)
    return T;

  return SDValue();
}
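
// Example: simplifySelect(true, %a, %b) folds to %a; a select with an undef
// condition folds to T only when T is a constant (otherwise F); and a select
// of two identical arms folds to that arm. An empty SDValue means "no fold",
// in which case the caller is expected to build the node normally.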

SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
  // shift undef, Y --> 0 (can always assume that the undef value is 0)
  if (X.isUndef())
    return getConstant(0, SDLoc(X.getNode()), X.getValueType());
  // shift X, undef --> undef (because it may shift by the bitwidth)
  if (Y.isUndef())
    return getUNDEF(X.getValueType());

  // shift 0, Y --> 0
  // shift X, 0 --> X
  if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
    return X;

  // shift X, C >= bitwidth(X) --> undef
  // All vector elements must be too big (or undef) to avoid partial undefs.
  auto isShiftTooBig = [X](ConstantSDNode *Val) {
    return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
  };
  if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
    return getUNDEF(X.getValueType());

  return SDValue();
}
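
// Example: (shl i32 %x, 37) is folded to undef by the predicate above, since
// 37 >= 32; the 'true' passed to ISD::matchUnaryPredicate allows undef
// elements in a constant build-vector shift amount.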

// TODO: Use fast-math-flags to enable more simplifications.
SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y) {
  ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true);
  if (!YC)
    return SDValue();

  // X + -0.0 --> X
  if (Opcode == ISD::FADD)
    if (YC->getValueAPF().isNegZero())
      return X;

  // X - +0.0 --> X
  if (Opcode == ISD::FSUB)
    if (YC->getValueAPF().isPosZero())
      return X;

  // X * 1.0 --> X
  // X / 1.0 --> X
  if (Opcode == ISD::FMUL || Opcode == ISD::FDIV)
    if (YC->getValueAPF().isExactlyValue(1.0))
      return X;

  return SDValue();
}
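
// Note the sign conventions: only (fadd X, -0.0) and (fsub X, +0.0) are
// identities for all IEEE inputs without fast-math flags, because
// (fadd -0.0, +0.0) is +0.0 and (fsub -0.0, -0.0) is +0.0, so folding the
// other zero signs would flip the sign of a zero input.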

SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
                               SDValue Ptr, SDValue SV, unsigned Align) {
  SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDUse> Ops) {
  switch (Ops.size()) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  // Copy from an SDUse array into an SDValue array for use with
  // the regular getNode logic.
  SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
  return getNode(Opcode, DL, VT, NewOps);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
  unsigned NumOps = Ops.size();
  switch (NumOps) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
  default: break;
  }

  switch (Opcode) {
  default: break;
  case ISD::BUILD_VECTOR:
    // Attempt to simplify BUILD_VECTOR.
    if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::CONCAT_VECTORS:
    if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::SELECT_CC:
    assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
    assert(Ops[0].getValueType() == Ops[1].getValueType() &&
           "LHS and RHS of condition must have same type!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "True and False arms of SelectCC must have same type!");
    assert(Ops[2].getValueType() == VT &&
           "select_cc node must be of same type as true and false value!");
    break;
  case ISD::BR_CC:
    assert(NumOps == 5 && "BR_CC takes 5 operands!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "LHS/RHS of comparison should match types!");
    break;
  }

  // Memoize nodes.
  SDNode *N;
  SDVTList VTs = getVTList(VT);

  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;

    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
  return getNode(Opcode, DL, getVTList(ResultTys), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              ArrayRef<SDValue> Ops) {
  if (VTList.NumVTs == 1)
    return getNode(Opcode, DL, VTList.VTs[0], Ops);

#if 0
  switch (Opcode) {
  // FIXME: figure out how to safely handle things like
  // int foo(int x) { return 1 << (x & 255); }
  // int bar() { return foo(256); }
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:
  case ISD::SHL_PARTS:
    if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
        cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
      return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
    else if (N3.getOpcode() == ISD::AND)
      if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
        // If the and is only masking out bits that cannot affect the shift,
        // eliminate the and.
        unsigned NumBits = VT.getScalarSizeInBits()*2;
        if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
          return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
      }
    break;
  }
#endif

  // Memoize the node unless it returns a flag.
  SDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList) {
  return getNode(Opcode, DL, VTList, None);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1) {
  SDValue Ops[] = { N1 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2) {
  SDValue Ops[] = { N1, N2 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3) {
  SDValue Ops[] = { N1, N2, N3 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4,
                              SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDVTList SelectionDAG::getVTList(EVT VT) {
  return makeVTList(SDNode::getValueTypeList(VT), 1);
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
  FoldingSetNodeID ID;
  ID.AddInteger(2U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(2);
    Array[0] = VT1;
    Array[1] = VT2;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
  FoldingSetNodeID ID;
  ID.AddInteger(3U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(3);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
  FoldingSetNodeID ID;
  ID.AddInteger(4U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());
  ID.AddInteger(VT4.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(4);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Array[3] = VT4;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
  unsigned NumVTs = VTs.size();
  FoldingSetNodeID ID;
  ID.AddInteger(NumVTs);
  for (unsigned index = 0; index < NumVTs; index++) {
    ID.AddInteger(VTs[index].getRawBits());
  }

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(NumVTs);
    llvm::copy(VTs, Array);
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}
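
// The VT lists are uniqued in VTListMap and allocated once from the DAG's
// allocator, so two SDVTList values describing the same types share the
// same EVT array; this is what makes SDVTList cheap to copy and compare.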

/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
/// specified operands. If the resultant node already exists in the DAG,
/// this does not modify the specified node, instead it returns the node that
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
  assert(N->getNumOperands() == 1 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op == N->getOperand(0)) return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
    return Existing;

  // Nope it doesn't. Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  N->OperandList[0].set(Op);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
  assert(N->getNumOperands() == 2 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
    return N;   // No operands changed, just return the input node.

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
    return Existing;

  // Nope it doesn't. Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  if (N->OperandList[0] != Op1)
    N->OperandList[0].set(Op1);
  if (N->OperandList[1] != Op2)
    N->OperandList[1].set(Op2);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
  SDValue Ops[] = { Op1, Op2, Op3 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4, SDValue Op5) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
  unsigned NumOps = Ops.size();
  assert(N->getNumOperands() == NumOps &&
         "Update with wrong number of operands");

  // If no operands changed just return the input node.
  if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
    return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
    return Existing;

  // Nope it doesn't. Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  for (unsigned i = 0; i != NumOps; ++i)
    if (N->OperandList[i] != Ops[i])
      N->OperandList[i].set(Ops[i]);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

/// DropOperands - Release the operands and set this node to have
/// zero operands.
void SDNode::DropOperands() {
  // Unlike the code in MorphNodeTo that does this, we don't need to
  // watch for dead nodes here.
  for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
    SDUse &Use = *I++;
    Use.set(SDValue());
  }

  NumOperands = 0;
}

void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
                                  ArrayRef<MachineMemOperand *> NewMemRefs) {
  if (NewMemRefs.empty()) {
    N->clearMemRefs();
    return;
  }

  // Check if we can avoid allocating by storing a single reference directly.
  if (NewMemRefs.size() == 1) {
    N->MemRefs = NewMemRefs[0];
    N->NumMemRefs = 1;
    return;
  }

  MachineMemOperand **MemRefsBuffer =
      Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
  llvm::copy(NewMemRefs, MemRefsBuffer);
  N->MemRefs = MemRefsBuffer;
  N->NumMemRefs = static_cast<int>(NewMemRefs.size());
}

/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
/// machine opcode.
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3,
                                   ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
  SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
  // Reset the NodeID to -1.
  New->setNodeId(-1);
  if (New != N) {
    ReplaceAllUsesWith(N, New);
    RemoveDeadNode(N);
  }
  return New;
}

/// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
/// the line number information on the merged node since it is not possible to
/// preserve the information that operation is associated with multiple lines.
/// This will make the debugger work better at -O0, where there is a higher
/// probability of having other instructions associated with that line.
///
/// For IROrder, we keep the smaller of the two.
SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
  DebugLoc NLoc = N->getDebugLoc();
  if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
    N->setDebugLoc(DebugLoc());
  }
  unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
  N->setIROrder(Order);
  return N;
}

/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one. Note that the SDLoc need not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
///
/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
/// As a consequence it isn't appropriate to use from within the DAG combiner or
/// the legalizer which maintain worklists that would need to be updated when
/// deleting things.
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
                                  SDVTList VTs, ArrayRef<SDValue> Ops) {
  // If an identical node already exists, use it.
  void *IP = nullptr;
  if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, VTs, Ops);
    if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
      return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
  }

  if (!RemoveNodeFromCSEMaps(N))
    IP = nullptr;

  // Start the morphing.
  N->NodeType = Opc;
  N->ValueList = VTs.VTs;
  N->NumValues = VTs.NumVTs;

  // Clear the operands list, updating used nodes to remove this from their
  // use list. Keep track of any operands that become dead as a result.
  SmallPtrSet<SDNode*, 16> DeadNodeSet;
  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
    SDUse &Use = *I++;
    SDNode *Used = Use.getNode();
    Use.set(SDValue());
    if (Used->use_empty())
      DeadNodeSet.insert(Used);
  }

  // For MachineNode, initialize the memory references information.
  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
    MN->clearMemRefs();

  // Swap for an appropriately sized array from the recycler.
  removeOperands(N);
  createOperands(N, Ops);

  // Delete any nodes that are still dead after adding the uses for the
  // new operands.
  if (!DeadNodeSet.empty()) {
    SmallVector<SDNode *, 16> DeadNodes;
    for (SDNode *N : DeadNodeSet)
      if (N->use_empty())
        DeadNodes.push_back(N);
    RemoveDeadNodes(DeadNodes);
  }

  if (IP)
    CSEMap.InsertNode(N, IP);   // Memoize the new node.
  return N;
}
* SelectionDAG::mutateStrictFPToFP(SDNode
*Node
) {
7750 unsigned OrigOpc
= Node
->getOpcode();
7754 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
7755 case ISD::STRICT_FADD
: NewOpc
= ISD::FADD
; break;
7756 case ISD::STRICT_FSUB
: NewOpc
= ISD::FSUB
; break;
7757 case ISD::STRICT_FMUL
: NewOpc
= ISD::FMUL
; break;
7758 case ISD::STRICT_FDIV
: NewOpc
= ISD::FDIV
; break;
7759 case ISD::STRICT_FREM
: NewOpc
= ISD::FREM
; break;
7760 case ISD::STRICT_FMA
: NewOpc
= ISD::FMA
; break;
7761 case ISD::STRICT_FSQRT
: NewOpc
= ISD::FSQRT
; break;
7762 case ISD::STRICT_FPOW
: NewOpc
= ISD::FPOW
; break;
7763 case ISD::STRICT_FPOWI
: NewOpc
= ISD::FPOWI
; break;
7764 case ISD::STRICT_FSIN
: NewOpc
= ISD::FSIN
; break;
7765 case ISD::STRICT_FCOS
: NewOpc
= ISD::FCOS
; break;
7766 case ISD::STRICT_FEXP
: NewOpc
= ISD::FEXP
; break;
7767 case ISD::STRICT_FEXP2
: NewOpc
= ISD::FEXP2
; break;
7768 case ISD::STRICT_FLOG
: NewOpc
= ISD::FLOG
; break;
7769 case ISD::STRICT_FLOG10
: NewOpc
= ISD::FLOG10
; break;
7770 case ISD::STRICT_FLOG2
: NewOpc
= ISD::FLOG2
; break;
7771 case ISD::STRICT_FRINT
: NewOpc
= ISD::FRINT
; break;
7772 case ISD::STRICT_FNEARBYINT
: NewOpc
= ISD::FNEARBYINT
; break;
7773 case ISD::STRICT_FMAXNUM
: NewOpc
= ISD::FMAXNUM
; break;
7774 case ISD::STRICT_FMINNUM
: NewOpc
= ISD::FMINNUM
; break;
7775 case ISD::STRICT_FCEIL
: NewOpc
= ISD::FCEIL
; break;
7776 case ISD::STRICT_FFLOOR
: NewOpc
= ISD::FFLOOR
; break;
7777 case ISD::STRICT_FROUND
: NewOpc
= ISD::FROUND
; break;
7778 case ISD::STRICT_FTRUNC
: NewOpc
= ISD::FTRUNC
; break;
7779 case ISD::STRICT_FP_ROUND
: NewOpc
= ISD::FP_ROUND
; break;
7780 case ISD::STRICT_FP_EXTEND
: NewOpc
= ISD::FP_EXTEND
; break;
7781 case ISD::STRICT_FP_TO_SINT
: NewOpc
= ISD::FP_TO_SINT
; break;
7782 case ISD::STRICT_FP_TO_UINT
: NewOpc
= ISD::FP_TO_UINT
; break;
7785 assert(Node
->getNumValues() == 2 && "Unexpected number of results!");
7787 // We're taking this node out of the chain, so we need to re-link things.
7788 SDValue InputChain
= Node
->getOperand(0);
7789 SDValue OutputChain
= SDValue(Node
, 1);
7790 ReplaceAllUsesOfValueWith(OutputChain
, InputChain
);
7792 SmallVector
<SDValue
, 3> Ops
;
7793 for (unsigned i
= 1, e
= Node
->getNumOperands(); i
!= e
; ++i
)
7794 Ops
.push_back(Node
->getOperand(i
));
7796 SDVTList VTs
= getVTList(Node
->getValueType(0));
7797 SDNode
*Res
= MorphNodeTo(Node
, NewOpc
, VTs
, Ops
);
7799 // MorphNodeTo can operate in two ways: if an existing node with the
7800 // specified operands exists, it can just return it. Otherwise, it
7801 // updates the node in place to have the requested operands.
7803 // If we updated the node in place, reset the node ID. To the isel,
7804 // this should be just like a newly allocated machine node.
7807 ReplaceAllUsesWith(Node
, Res
);
7808 RemoveDeadNode(Node
);
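
// Example: a STRICT_FADD node (Chain, LHS, RHS) -> (Result, OutChain) is
// morphed above into a plain FADD (LHS, RHS) -> Result, with every user of
// the old output chain rewired to the incoming chain, taking the node out
// of the ordered side-effect sequence.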

/// getMachineNode - These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.
MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, None);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            ArrayRef<EVT> ResultTys,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(ResultTys);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
                                            SDVTList VTs,
                                            ArrayRef<SDValue> Ops) {
  bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
  MachineSDNode *N;
  void *IP = nullptr;

  if (DoCSE) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, ~Opcode, VTs, Ops);
    IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
    }
  }

  // Allocate a new MachineSDNode.
  N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);

  if (DoCSE)
    CSEMap.InsertNode(N, IP);

  InsertNode(N);
  NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this);
  return N;
}

/// getTargetExtractSubreg - A convenience function for creating
/// TargetOpcode::EXTRACT_SUBREG nodes.
SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                             SDValue Operand) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                  VT, Operand, SRIdxVal);
  return SDValue(Subreg, 0);
}

/// getTargetInsertSubreg - A convenience function for creating
/// TargetOpcode::INSERT_SUBREG nodes.
SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                            SDValue Operand, SDValue Subreg) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                  VT, Operand, Subreg, SRIdxVal);
  return SDValue(Result, 0);
}

/// getNodeIfExists - Get the specified node if it's already available, or
/// else return NULL.
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
                                      ArrayRef<SDValue> Ops,
                                      const SDNodeFlags Flags) {
  if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
      E->intersectFlagsWith(Flags);
      return E;
    }
  }
  return nullptr;
}

/// getDbgValue - Creates an SDDbgValue node.
///
/// SDNode
SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
                                      SDNode *N, unsigned R, bool IsIndirect,
                                      const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O);
}

/// Constant
SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
                                              DIExpression *Expr,
                                              const Value *C,
                                              const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O);
}

/// FrameIndex
SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
                                                DIExpression *Expr, unsigned FI,
                                                bool IsIndirect,
                                                const DebugLoc &DL,
                                                unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX);
}

/// VReg
SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var,
                                          DIExpression *Expr,
                                          unsigned VReg, bool IsIndirect,
                                          const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG);
}

void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
                                     unsigned OffsetInBits, unsigned SizeInBits,
                                     bool InvalidateDbg) {
  SDNode *FromNode = From.getNode();
  SDNode *ToNode = To.getNode();
  assert(FromNode && ToNode && "Can't modify dbg values");

  // TODO: assert(From != To && "Redundant dbg value transfer");
  // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
  if (From == To || FromNode == ToNode)
    return;

  if (!FromNode->getHasDebugValue())
    return;

  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
    if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated())
      continue;

    // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");

    // Just transfer the dbg value attached to From.
    if (Dbg->getResNo() != From.getResNo())
      continue;

    DIVariable *Var = Dbg->getVariable();
    auto *Expr = Dbg->getExpression();
    // If a fragment is requested, update the expression.
    if (SizeInBits) {
      // When splitting a larger (e.g., sign-extended) value whose
      // lower bits are described with an SDDbgValue, do not attempt
      // to transfer the SDDbgValue to the upper bits.
      if (auto FI = Expr->getFragmentInfo())
        if (OffsetInBits + SizeInBits > FI->SizeInBits)
          continue;
      auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
                                                             SizeInBits);
      if (!Fragment)
        continue;
      Expr = *Fragment;
    }
    // Clone the SDDbgValue and move it to To.
    SDDbgValue *Clone =
        getDbgValue(Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(),
                    Dbg->getDebugLoc(), Dbg->getOrder());
    ClonedDVs.push_back(Clone);

    if (InvalidateDbg) {
      // Invalidate value and indicate the SDDbgValue should not be emitted.
      Dbg->setIsInvalidated();
      Dbg->setIsEmitted();
    }
  }

  for (SDDbgValue *Dbg : ClonedDVs)
    AddDbgValue(Dbg, ToNode, false);
}

void SelectionDAG::salvageDebugInfo(SDNode &N) {
  if (!N.getHasDebugValue())
    return;

  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (auto DV : GetDbgValues(&N)) {
    if (DV->isInvalidated())
      continue;
    switch (N.getOpcode()) {
    default:
      break;
    case ISD::ADD:
      SDValue N0 = N.getOperand(0);
      SDValue N1 = N.getOperand(1);
      if (!isConstantIntBuildVectorOrConstantInt(N0) &&
          isConstantIntBuildVectorOrConstantInt(N1)) {
        uint64_t Offset = N.getConstantOperandVal(1);
        // Rewrite an ADD constant node into a DIExpression. Since we are
        // performing arithmetic to compute the variable's *value* in the
        // DIExpression, we need to mark the expression with a
        // DW_OP_stack_value.
        auto *DIExpr = DV->getExpression();
        DIExpr =
            DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset);
        SDDbgValue *Clone =
            getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(),
                        DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
        ClonedDVs.push_back(Clone);
        DV->setIsInvalidated();
        LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
                   N0.getNode()->dumprFull(this);
                   dbgs() << " into " << *DIExpr << '\n');
      }
    }
  }

  for (SDDbgValue *Dbg : ClonedDVs)
    AddDbgValue(Dbg, Dbg->getSDNode(), false);
}

/// Creates an SDDbgLabel node.
SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label,
                                      const DebugLoc &DL, unsigned O) {
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
}

namespace {

/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
/// pointed to by a use iterator is deleted, increment the use iterator
/// so that it doesn't dangle.
///
class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
  SDNode::use_iterator &UI;
  SDNode::use_iterator &UE;

  void NodeDeleted(SDNode *N, SDNode *E) override {
    // Increment the iterator as needed.
    while (UI != UE && N == *UI)
      ++UI;
  }

public:
  RAUWUpdateListener(SelectionDAG &d,
                     SDNode::use_iterator &ui,
                     SDNode::use_iterator &ue)
    : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};

} // end anonymous namespace

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes From has a single result value.
///
void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
  SDNode *From = FromN.getNode();
  assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
         "Cannot replace with this method!");
  assert(From != To.getNode() && "Cannot replace uses of with self");

  // Preserve Debug Values
  transferDbgValues(FromN, To);

  // Iterate over all the existing uses of From. New uses will be added
  // to the beginning of the use list, which we avoid visiting.
  // This specifically avoids visiting uses of From that arise while the
  // replacement is happening, because any such uses would be the result
  // of CSE: If an existing node looks like From after one of its operands
  // is replaced by To, we don't want to replace all of its users with To
  // too. See PR3018 for more info.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.set(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);
    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (FromN == getRoot())
    setRoot(To);
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes that for each value of From, there is a
/// corresponding value in To in the same position with the same type.
///
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
#ifndef NDEBUG
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    assert((!From->hasAnyUseOfValue(i) ||
            From->getValueType(i) == To->getValueType(i)) &&
           "Cannot use this version of ReplaceAllUsesWith!");
#endif

  // Handle the trivial case.
  if (From == To)
    return;

  // Preserve Debug Info. Only do this if there's a use.
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    if (From->hasAnyUseOfValue(i)) {
      assert((i < To->getNumValues()) && "Invalid To location");
      transferDbgValues(SDValue(From, i), SDValue(To, i));
    }

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.setNode(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To, getRoot().getResNo()));
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version can replace From with any result values. To must match the
/// number and types of values returned by From.
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
  if (From->getNumValues() == 1)  // Handle the simple case efficiently.
    return ReplaceAllUsesWith(SDValue(From, 0), To[0]);

  // Preserve Debug Info.
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    transferDbgValues(SDValue(From, i), To[i]);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this happens the
    // uses are usually next to each other in the list. To help reduce the
    // number of CSE and divergence recomputations, process all the uses of this
    // user that we can find this way.
    bool To_IsDivergent = false;
    do {
      SDUse &Use = UI.getUse();
      const SDValue &ToOp = To[Use.getResNo()];
      ++UI;
      Use.set(ToOp);
      To_IsDivergent |= ToOp->isDivergent();
    } while (UI != UE && *UI == User);

    if (To_IsDivergent != From->isDivergent())
      updateDivergence(User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To[getRoot().getResNo()]));
}

/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The Deleted
/// vector is handled the same way as for ReplaceAllUsesWith.
void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
  // Handle the really simple, really trivial case efficiently.
  if (From == To) return;

  // Handle the simple, trivial, case efficiently.
  if (From.getNode()->getNumValues() == 1) {
    ReplaceAllUsesWith(From, To);
    return;
  }

  // Preserve Debug Info.
  transferDbgValues(From, To);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From.getNode()->use_begin(),
                       UE = From.getNode()->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;
    bool UserRemovedFromCSEMaps = false;

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();

      // Skip uses of different values from the same node.
      if (Use.getResNo() != From.getResNo()) {
        ++UI;
        continue;
      }

      // If this node hasn't been modified yet, it's still in the CSE maps,
      // so remove its old self from the CSE maps.
      if (!UserRemovedFromCSEMaps) {
        RemoveNodeFromCSEMaps(User);
        UserRemovedFromCSEMaps = true;
      }

      ++UI;
      Use.set(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);
    // We are iterating over all uses of the From node, so if a use
    // doesn't use the specific value, no changes are made.
    if (!UserRemovedFromCSEMaps)
      continue;

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot())
    setRoot(To);
}

namespace {

  /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
  /// to record information about a use.
  struct UseMemo {
    SDNode *User;
    unsigned Index;
    SDUse *Use;
  };

  /// operator< - Sort Memos by User.
  bool operator<(const UseMemo &L, const UseMemo &R) {
    return (intptr_t)L.User < (intptr_t)R.User;
  }

} // end anonymous namespace
void SelectionDAG::updateDivergence(SDNode *N) {
  if (TLI->isSDNodeAlwaysUniform(N))
    return;
  bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
  for (auto &Op : N->ops()) {
    if (Op.Val.getValueType() != MVT::Other)
      IsDivergent |= Op.getNode()->isDivergent();
  }
  if (N->SDNodeBits.IsDivergent != IsDivergent) {
    N->SDNodeBits.IsDivergent = IsDivergent;
    for (auto U : N->uses()) {
      updateDivergence(U);
    }
  }
}
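// Divergence propagation sketch: on a target whose TLI reports a node as a
// source of divergence (e.g. a per-lane/thread-id style intrinsic on a GPU
// target - assumed here purely for illustration), the bit computed above is
// ORed transitively into every dependent node, while chain (MVT::Other)
// operands are skipped because they only order side effects and never carry
// data divergence.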
void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
  DenseMap<SDNode *, unsigned> Degree;
  Order.reserve(AllNodes.size());
  for (auto &N : allnodes()) {
    unsigned NOps = N.getNumOperands();
    Degree[&N] = NOps;
    if (0 == NOps)
      Order.push_back(&N);
  }
  for (size_t I = 0; I != Order.size(); ++I) {
    SDNode *N = Order[I];
    for (auto U : N->uses()) {
      unsigned &UnsortedOps = Degree[U];
      if (0 == --UnsortedOps)
        Order.push_back(U);
    }
  }
}
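// This is Kahn's topological sort: zero-operand nodes (e.g. EntryToken and
// constants) seed Order, and a user is appended once its last outstanding
// operand has been emitted, so every node appears after all of its operands.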
void SelectionDAG::VerifyDAGDiverence() {
  std::vector<SDNode *> TopoOrder;
  CreateTopologicalOrder(TopoOrder);
  const TargetLowering &TLI = getTargetLoweringInfo();
  DenseMap<const SDNode *, bool> DivergenceMap;
  for (auto &N : allnodes()) {
    DivergenceMap[&N] = false;
  }
  for (auto N : TopoOrder) {
    bool IsDivergent = DivergenceMap[N];
    bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA);
    for (auto &Op : N->ops()) {
      if (Op.Val.getValueType() != MVT::Other)
        IsSDNodeDivergent |= DivergenceMap[Op.getNode()];
    }
    if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N)) {
      DivergenceMap[N] = true;
    }
  }
  for (auto &N : allnodes()) {
    (void)N;
    assert(DivergenceMap[&N] == N.isDivergent() &&
           "Divergence bit inconsistency detected\n");
  }
}
/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.  The same value
/// may appear in both the From and To list.  The Deleted vector is
/// handled the same way as for ReplaceAllUsesWith.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
                                              const SDValue *To,
                                              unsigned Num) {
  // Handle the simple, trivial case efficiently.
  if (Num == 1)
    return ReplaceAllUsesOfValueWith(*From, *To);

  transferDbgValues(*From, *To);

  // Read up all the uses and make records of them. This helps
  // processing new uses that are introduced during the
  // replacement process.
  SmallVector<UseMemo, 4> Uses;
  for (unsigned i = 0; i != Num; ++i) {
    unsigned FromResNo = From[i].getResNo();
    SDNode *FromNode = From[i].getNode();
    for (SDNode::use_iterator UI = FromNode->use_begin(),
           E = FromNode->use_end(); UI != E; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == FromResNo) {
        UseMemo Memo = { *UI, i, &Use };
        Uses.push_back(Memo);
      }
    }
  }

  // Sort the uses, so that all the uses from a given User are together.
  llvm::sort(Uses);

  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
       UseIndex != UseIndexEnd; ) {
    // We know that this user uses some value of From.  If it is the right
    // value, update it.
    SDNode *User = Uses[UseIndex].User;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // The Uses array is sorted, so all the uses for a given User
    // are next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      unsigned i = Uses[UseIndex].Index;
      SDUse &Use = *Uses[UseIndex].Use;
      ++UseIndex;

      Use.set(To[i]);
    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }
}
/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order. It returns the maximum id and a vector
/// of the SDNodes* in assigned order by reference.
unsigned SelectionDAG::AssignTopologicalOrder() {
  unsigned DAGSize = 0;

  // SortedPos tracks the progress of the algorithm. Nodes before it are
  // sorted, nodes after it are unsorted. When the algorithm completes
  // it is at the end of the list.
  allnodes_iterator SortedPos = allnodes_begin();

  // Visit all the nodes. Move nodes with no operands to the front of
  // the list immediately. Annotate nodes that do have operands with their
  // operand count. Before we do this, the Node Id fields of the nodes
  // may contain arbitrary values. After, the Node Id fields for nodes
  // before SortedPos will contain the topological sort index, and the
  // Node Id fields for nodes at SortedPos and after will contain the
  // count of outstanding operands.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
    SDNode *N = &*I++;
    checkForCycles(N, this);
    unsigned Degree = N->getNumOperands();
    if (Degree == 0) {
      // A node with no operands, add it to the result array immediately.
      N->setNodeId(DAGSize++);
      allnodes_iterator Q(N);
      if (Q != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      ++SortedPos;
    } else {
      // Temporarily use the Node Id as scratch space for the degree count.
      N->setNodeId(Degree);
    }
  }

  // Visit all the nodes. As we iterate, move nodes into sorted order,
  // such that by the time the end is reached all nodes will be sorted.
  for (SDNode &Node : allnodes()) {
    SDNode *N = &Node;
    checkForCycles(N, this);
    // N is in sorted position, so all its uses have one less operand
    // that needs to be sorted.
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
         UI != UE; ++UI) {
      SDNode *P = *UI;
      unsigned Degree = P->getNodeId();
      assert(Degree != 0 && "Invalid node degree");
      --Degree;
      if (Degree == 0) {
        // All of P's operands are sorted, so P may be sorted now.
        P->setNodeId(DAGSize++);
        if (P->getIterator() != SortedPos)
          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
        assert(SortedPos != AllNodes.end() && "Overran node list");
        ++SortedPos;
      } else {
        // Update P's outstanding operand count.
        P->setNodeId(Degree);
      }
    }
    if (Node.getIterator() == SortedPos) {
#ifndef NDEBUG
      allnodes_iterator I(N);
      SDNode *S = &*++I;
      dbgs() << "Overran sorted position:\n";
      S->dumprFull(this); dbgs() << "\n";
      dbgs() << "Checking if this is due to cycles\n";
      checkForCycles(this, true);
#endif
      llvm_unreachable(nullptr);
    }
  }

  assert(SortedPos == AllNodes.end() &&
         "Topological sort incomplete!");
  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
         "First node in topological sort is not the entry token!");
  assert(AllNodes.front().getNodeId() == 0 &&
         "First node in topological sort has non-zero id!");
  assert(AllNodes.front().getNumOperands() == 0 &&
         "First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize - 1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
  assert(DAGSize == allnodes_size() && "Node count mismatch!");
  return DAGSize;
}
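// Illustrative consequence: after AssignTopologicalOrder(), a forward walk of
// AllNodes sees every operand before its users (EntryToken first, with id 0),
// and a backward walk sees users before operands; passes may rely on either
// ordering.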
/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
  if (SD) {
    assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
    SD->setHasDebugValue(true);
  }
  DbgInfo->add(DB, SD, isParameter);
}
void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
  DbgInfo->add(DB);
}
SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
                                                   SDValue NewMemOp) {
  assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");

  // The new memory operation must have the same position as the old load in
  // terms of memory dependency. Create a TokenFactor for the old load and new
  // memory operation and update uses of the old load's output chain to use
  // that TokenFactor.
  SDValue OldChain = SDValue(OldLoad, 1);
  SDValue NewChain = SDValue(NewMemOp.getNode(), 1);
  if (OldChain == NewChain || !OldLoad->hasAnyUseOfValue(1))
    return NewChain;

  SDValue TokenFactor =
      getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain);
  ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
  UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain);
  return TokenFactor;
}
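// Note on the ordering above: ReplaceAllUsesOfValueWith() also rewrites the
// brand-new TokenFactor's own use of OldChain, so UpdateNodeOperands() is
// used afterwards to restore OldChain as one of its operands; the TokenFactor
// must still depend on the old load itself.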
SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op,
                                                     Function **OutFunction) {
  assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");

  auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
  auto *Module = MF->getFunction().getParent();
  auto *Function = Module->getFunction(Symbol);

  if (OutFunction != nullptr)
    *OutFunction = Function;

  if (Function != nullptr) {
    auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
    return getGlobalAddress(Function, SDLoc(Op), PtrTy);
  }

  std::string ErrorStr;
  raw_string_ostream ErrorFormatter(ErrorStr);

  ErrorFormatter << "Undefined external symbol ";
  ErrorFormatter << '"' << Symbol << '"';
  ErrorFormatter.flush();

  report_fatal_error(ErrorStr);
}
//===----------------------------------------------------------------------===//
//                              SDNode Class
//===----------------------------------------------------------------------===//
bool llvm::isNullConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isNullValue();
}

bool llvm::isNullFPConstant(SDValue V) {
  ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
  return Const != nullptr && Const->isZero() && !Const->isNegative();
}

bool llvm::isAllOnesConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isAllOnesValue();
}

bool llvm::isOneConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isOne();
}
SDValue llvm::peekThroughBitcasts(SDValue V) {
  while (V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  return V;
}

SDValue llvm::peekThroughOneUseBitcasts(SDValue V) {
  while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
    V = V.getOperand(0);
  return V;
}

SDValue llvm::peekThroughExtractSubvectors(SDValue V) {
  while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
    V = V.getOperand(0);
  return V;
}
bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
  if (V.getOpcode() != ISD::XOR)
    return false;
  V = peekThroughBitcasts(V.getOperand(1));
  unsigned NumBits = V.getScalarValueSizeInBits();
  ConstantSDNode *C =
      isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true);
  return C && (C->getAPIntValue().countTrailingOnes() >= NumBits);
}
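// Illustrative matches: (xor X, -1) is the canonical "not" form; for vectors,
// a bitcast of an all-ones splat as operand 1 also qualifies because
// countTrailingOnes() of the splat constant still covers the scalar width.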
ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs,
                                          bool AllowTruncation) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);

    // BuildVectors can truncate their operands. Ignore that case here unless
    // AllowTruncation is set.
    if (CN && (UndefElements.none() || AllowUndefs)) {
      EVT CVT = CN->getValueType(0);
      EVT NSVT = N.getValueType().getScalarType();
      assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
      if (AllowTruncation || (CVT == NSVT))
        return CN;
    }
  }

  return nullptr;
}
ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
                                          bool AllowUndefs,
                                          bool AllowTruncation) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);

    // BuildVectors can truncate their operands. Ignore that case here unless
    // AllowTruncation is set.
    if (CN && (UndefElements.none() || AllowUndefs)) {
      EVT CVT = CN->getValueType(0);
      EVT NSVT = N.getValueType().getScalarType();
      assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
      if (AllowTruncation || (CVT == NSVT))
        return CN;
    }
  }

  return nullptr;
}
ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) {
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
    if (CN && (UndefElements.none() || AllowUndefs))
      return CN;
  }

  return nullptr;
}
ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N,
                                              const APInt &DemandedElts,
                                              bool AllowUndefs) {
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantFPSDNode *CN =
        BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
    if (CN && (UndefElements.none() || AllowUndefs))
      return CN;
  }

  return nullptr;
}
bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
  // TODO: may want to use peekThroughBitcast() here.
  ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
  return C && C->isNullValue();
}

bool llvm::isOneOrOneSplat(SDValue N) {
  // TODO: may want to use peekThroughBitcast() here.
  unsigned BitWidth = N.getScalarValueSizeInBits();
  ConstantSDNode *C = isConstOrConstSplat(N);
  return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth;
}

bool llvm::isAllOnesOrAllOnesSplat(SDValue N) {
  N = peekThroughBitcasts(N);
  unsigned BitWidth = N.getScalarValueSizeInBits();
  ConstantSDNode *C = isConstOrConstSplat(N);
  return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth;
}
HandleSDNode::~HandleSDNode() {
  DropOperands();
}
GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
                                         const DebugLoc &DL,
                                         const GlobalValue *GA, EVT VT,
                                         int64_t o, unsigned TF)
    : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  TheGlobal = GA;
}
AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
                                         EVT VT, unsigned SrcAS,
                                         unsigned DestAS)
    : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
      SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
                     SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
    : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  MemSDNodeBits.IsVolatile = MMO->isVolatile();
  MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
  MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
  MemSDNodeBits.IsInvariant = MMO->isInvariant();

  // We check here that the size of the memory operand fits within the size of
  // the MMO. This is because the MMO might indicate only a possible address
  // range instead of specifying the affected memory addresses precisely.
  assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}
/// Profile - Gather unique data for the node.
void SDNode::Profile(FoldingSetNodeID &ID) const {
  AddNodeIDNode(ID, this);
}
namespace {

struct EVTArray {
  std::vector<EVT> VTs;

  EVTArray() {
    VTs.reserve(MVT::LAST_VALUETYPE);
    for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
      VTs.push_back(MVT((MVT::SimpleValueType)i));
  }
};

} // end anonymous namespace
static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true>> VTMutex;
/// getValueTypeList - Return a pointer to the specified value type.
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  }
  assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
         "Value type out of range!");
  return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
}
/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value. This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}
/// hasAnyUseOfValue - Return true if there are any uses of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}
/// isOnlyUserOf - Return true if this node is the only use of N.
bool SDNode::isOnlyUserOf(const SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}
/// Return true if the only users of N are contained in Nodes.
bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (llvm::any_of(Nodes,
                     [&User](const SDNode *Node) { return User == Node; }))
      Seen = true;
    else
      return false;
  }

  return Seen;
}
/// isOperand - Return true if this node is an operand of N.
bool SDValue::isOperandOf(const SDNode *N) const {
  return any_of(N->op_values(), [this](SDValue Op) { return *this == Op; });
}

bool SDNode::isOperandOf(const SDNode *N) const {
  return any_of(N->op_values(),
                [this](SDValue Op) { return this == Op.getNode(); });
}
/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path. In practice, this looks
/// through token factors and non-volatile loads. In order to remain efficient,
/// this only looks a couple of nodes in, it does not do an exhaustive search.
///
/// Note that we only need to examine chains when we're searching for
/// side-effects; SelectionDAG requires that all side-effects are represented
/// by chains, even if another operand would force a specific ordering. This
/// constraint is necessary to allow transformations like splitting loads.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply, we just want to be able to see through
  // TokenFactor's etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel.
  if (getOpcode() == ISD::TokenFactor) {
    // First, try a shallow search.
    if (is_contained((*this)->ops(), Dest)) {
      // We found the chain we want as an operand of this TokenFactor.
      // Essentially, we reach the chain without side-effects if we could
      // serialize the TokenFactor into a simple chain of operations with
      // Dest as the last operation. This is automatically true if the
      // chain has one use: there are no other ordering constraints.
      // If the chain has more than one use, we give up: some other
      // use of Dest might force a side-effect between Dest and the current
      // node.
      if (Dest.hasOneUse())
        return true;
    }
    // Next, try a deep search: check whether every operand of the TokenFactor
    // reaches Dest.
    return llvm::all_of((*this)->ops(), [=](SDValue Op) {
      return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
    });
  }

  // Loads don't have side effects, look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (!Ld->isVolatile())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth - 1);
  }
  return false;
}
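// Illustrative: a path Dest -> (non-volatile load) -> this is looked through,
// while an intervening store (neither a TokenFactor nor a load) ends the
// search with false - precisely the side effect the caller must not reorder
// across.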
bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Worklist.push_back(this);
  return hasPredecessorHelper(N, Visited, Worklist);
}
void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
  this->Flags.intersectWith(Flags);
}
SDValue
SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
                                  ArrayRef<ISD::NodeType> CandidateBinOps,
                                  bool AllowPartials) {
  // The pattern must end in an extract from index 0.
  if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !isNullConstant(Extract->getOperand(1)))
    return SDValue();

  // Match against one of the candidate binary ops.
  SDValue Op = Extract->getOperand(0);
  if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
        return Op.getOpcode() == unsigned(BinOp);
      }))
    return SDValue();

  // Floating-point reductions may require relaxed constraints on the final
  // step of the reduction because they may reorder intermediate operations.
  unsigned CandidateBinOp = Op.getOpcode();
  if (Op.getValueType().isFloatingPoint()) {
    SDNodeFlags Flags = Op->getFlags();
    switch (CandidateBinOp) {
    case ISD::FADD:
      if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
        return SDValue();
      break;
    default:
      llvm_unreachable("Unhandled FP opcode for binop reduction");
    }
  }

  // Matching failed - attempt to see if we did enough stages that a partial
  // reduction from a subvector is possible.
  auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
    if (!AllowPartials || !Op)
      return SDValue();
    EVT OpVT = Op.getValueType();
    EVT OpSVT = OpVT.getScalarType();
    EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts);
    if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
      return SDValue();
    BinOp = (ISD::NodeType)CandidateBinOp;
    return getNode(
        ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op,
        getConstant(0, SDLoc(Op), TLI->getVectorIdxTy(getDataLayout())));
  };

  // At each stage, we're looking for something that looks like:
  // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
  //                    <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
  //                               i32 undef, i32 undef, i32 undef, i32 undef>
  // %a = binop <8 x i32> %op, %s
  // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
  // we expect something like:
  // <4,5,6,7,u,u,u,u>
  // <2,3,u,u,u,u,u,u>
  // <1,u,u,u,u,u,u,u>
  // While a partial reduction match would be:
  // <2,3,u,u,u,u,u,u>
  // <1,u,u,u,u,u,u,u>
  unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
  SDValue PrevOp;
  for (unsigned i = 0; i < Stages; ++i) {
    unsigned MaskEnd = (1 << i);

    if (Op.getOpcode() != CandidateBinOp)
      return PartialReduction(PrevOp, MaskEnd);

    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0);
    if (Shuffle) {
      Op = Op1;
    } else {
      Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
      Op = Op0;
    }

    // The first operand of the shuffle should be the same as the other operand
    // of the binop.
    if (!Shuffle || Shuffle->getOperand(0) != Op)
      return PartialReduction(PrevOp, MaskEnd);

    // Verify the shuffle has the expected (at this stage of the pyramid) mask.
    for (int Index = 0; Index < (int)MaskEnd; ++Index)
      if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
        return PartialReduction(PrevOp, MaskEnd);

    PrevOp = Op;
  }

  BinOp = (ISD::NodeType)CandidateBinOp;
  return Op;
}
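// Illustrative full match for a v8i32 integer add reduction (node names are
// hypothetical):
//   t1 = add t0, vector_shuffle<4,5,6,7,u,u,u,u>(t0, undef)
//   t2 = add t1, vector_shuffle<2,3,u,u,u,u,u,u>(t1, undef)
//   t3 = add t2, vector_shuffle<1,u,u,u,u,u,u,u>(t2, undef)
//   r  = extract_vector_elt t3, 0
// Matching r against {ISD::ADD} sets BinOp = ISD::ADD and returns t0.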
SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] =
            getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
                    getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
      } else {
        // A scalar operand; just use it as is.
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default: {
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
                                N->getFlags()));
      break;
    }
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                               getShiftAmountOperand(Operands[0].getValueType(),
                                                     Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG:
    case ISD::FP_ROUND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
  return getBuildVector(VecVT, dl, Scalars);
}
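// Illustrative: unrolling (add v4i32:x, v4i32:y) yields four scalar adds of
// extract_vector_elt(x, i) and extract_vector_elt(y, i), rebuilt with a
// build_vector; when ResNE exceeds NE, the extra lanes are filled with undef.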
std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
    SDNode *N, unsigned ResNE) {
  unsigned Opcode = N->getOpcode();
  assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO ||
          Opcode == ISD::USUBO || Opcode == ISD::SSUBO ||
          Opcode == ISD::UMULO || Opcode == ISD::SMULO) &&
         "Expected an overflow opcode");

  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  EVT ResEltVT = ResVT.getVectorElementType();
  EVT OvEltVT = OvVT.getVectorElementType();
  SDLoc dl(N);

  // If ResNE is 0, fully unroll the vector op.
  unsigned NE = ResVT.getVectorNumElements();
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  SmallVector<SDValue, 8> LHSScalars;
  SmallVector<SDValue, 8> RHSScalars;
  ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE);
  ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE);

  EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT);
  SDVTList VTs = getVTList(ResEltVT, SVT);
  SmallVector<SDValue, 8> ResScalars;
  SmallVector<SDValue, 8> OvScalars;
  for (unsigned i = 0; i < NE; ++i) {
    SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
    SDValue Ov =
        getSelect(dl, OvEltVT, Res.getValue(1),
                  getBoolConstant(true, dl, OvEltVT, ResVT),
                  getConstant(0, dl, OvEltVT));

    ResScalars.push_back(Res);
    OvScalars.push_back(Ov);
  }

  ResScalars.append(ResNE - NE, getUNDEF(ResEltVT));
  OvScalars.append(ResNE - NE, getUNDEF(OvEltVT));

  EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE);
  EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE);
  return std::make_pair(getBuildVector(NewResVT, dl, ResScalars),
                        getBuildVector(NewOvVT, dl, OvScalars));
}
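// The getSelect() above re-encodes each scalar overflow flag using the
// target's boolean contents for OvEltVT, so the rebuilt overflow vector is
// normalized even when the scalar setcc result type differs from the vector
// one.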
bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
                                                  LoadSDNode *Base,
                                                  unsigned Bytes,
                                                  int Dist) const {
  if (LD->isVolatile() || Base->isVolatile())
    return false;
  if (LD->isIndexed() || Base->isIndexed())
    return false;
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
  auto LocDecomp = BaseIndexOffset::match(LD, *this);

  int64_t Offset = 0;
  if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
    return (Dist * Bytes == Offset);
  return false;
}
/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned IdxWidth = getDataLayout().getIndexTypeSizeInBits(GV->getType());
    KnownBits Known(IdxWidth);
    llvm::computeKnownBits(GV, Known, getDataLayout());
    unsigned AlignBits = Known.countMinTrailingZeros();
    unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = INT_MIN;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst.
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != INT_MIN) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}
/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector())
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  else
    LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());

  return std::make_pair(LoVT, HiVT);
}
/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
         N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
               getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getConstant(LoVT.getVectorNumElements(), DL,
                           TLI->getVectorIdxTy(getDataLayout())));
  return std::make_pair(Lo, Hi);
}
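// Usage sketch (illustrative): for a v8i32 value N, GetSplitDestVTs() gives
// LoVT = HiVT = v4i32, and SplitVector() then extracts subvectors at element
// offsets 0 and 4 of N.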
/// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
  EVT VT = N.getValueType();
  EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                NextPowerOf2(VT.getVectorNumElements()));
  return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
                 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
}
void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, SL, IdxTy)));
  }
}
// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}
Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}
bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool IsBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned VecWidth = VT.getSizeInBits();
  if (MinSplatBits > VecWidth)
    return false;

  // FIXME: The widths are based on this node's type, but build vectors can
  // truncate their operands.
  SplatValue = APInt(VecWidth, 0);
  SplatUndef = APInt(VecWidth, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned int NumOps = getNumOperands();
  assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltWidth = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < NumOps; ++j) {
    unsigned i = IsBigEndian ? NumOps - 1 - j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltWidth;

    if (OpVal.isUndef())
      SplatUndef.setBits(BitPos, BitPos + EltWidth);
    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);

  // FIXME: This does not work for vectors with elements less than 8 bits.
  while (VecWidth > 8) {
    unsigned HalfSize = VecWidth / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    VecWidth = HalfSize;
  }

  SplatBitSize = VecWidth;
  return true;
}
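// Illustrative: the v4i32 build_vector {0x01010101, 0x01010101, 0x01010101,
// 0x01010101} halves successfully at every step (128 -> 64 -> 32 -> 16 -> 8),
// so it reports SplatBitSize == 8 with SplatValue == 0x01.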
SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
                                         BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  assert(getNumOperands() == DemandedElts.getBitWidth() &&
         "Unexpected vector size");
  if (!DemandedElts)
    return SDValue();
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
    assert(getOperand(FirstDemandedIdx).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(FirstDemandedIdx);
  }

  return Splatted;
}
SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
  return getSplatValue(DemandedElts, UndefElements);
}
ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
                                        BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}
ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}
ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
                                          BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}
ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}
int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}
bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}
bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  // If all elements are undefined, this shuffle can be considered a splat
  // (although it should eventually get simplified away completely).
  if (i == e)
    return true;

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}
// Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  return nullptr;
}
SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}
void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence.
      IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
  if (!TLI->isSDNodeAlwaysUniform(Node))
    Node->SDNodeBits.IsDivergent = IsDivergent;
  checkForCycles(Node);
}
SelectionDAG::getTokenFactor(const SDLoc
&DL
,
9580 SmallVectorImpl
<SDValue
> &Vals
) {
9581 size_t Limit
= SDNode::getMaxNumOperands();
9582 while (Vals
.size() > Limit
) {
9583 unsigned SliceIdx
= Vals
.size() - Limit
;
9584 auto ExtractedTFs
= ArrayRef
<SDValue
>(Vals
).slice(SliceIdx
, Limit
);
9585 SDValue NewTF
= getNode(ISD::TokenFactor
, DL
, MVT::Other
, ExtractedTFs
);
9586 Vals
.erase(Vals
.begin() + SliceIdx
, Vals
.end());
9587 Vals
.emplace_back(NewTF
);
9589 return getNode(ISD::TokenFactor
, DL
, MVT::Other
, Vals
);
9593 static void checkForCyclesHelper(const SDNode
*N
,
9594 SmallPtrSetImpl
<const SDNode
*> &Visited
,
9595 SmallPtrSetImpl
<const SDNode
*> &Checked
,
9596 const llvm::SelectionDAG
*DAG
) {
9597 // If this node has already been checked, don't check it again.
9598 if (Checked
.count(N
))
9601 // If a node has already been visited on this depth-first walk, reject it as
9603 if (!Visited
.insert(N
).second
) {
9604 errs() << "Detected cycle in SelectionDAG\n";
9605 dbgs() << "Offending node:\n";
9606 N
->dumprFull(DAG
); dbgs() << "\n";
9610 for (const SDValue
&Op
: N
->op_values())
9611 checkForCyclesHelper(Op
.getNode(), Visited
, Checked
, DAG
);
void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode *, 32> visited;
    SmallPtrSet<const SDNode *, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}
void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}
);