//===- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

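// Example: because the comparison above is bit-for-bit, a ConstantFPSDNode
// holding -0.0 does not isExactlyValue(+0.0), even though the IEEE-754
// comparison -0.0 == +0.0 is true.
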
bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

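// Example: the double 0.5 converts exactly to IEEE single precision, so
// isValueValidForType(MVT::f32, APFloat(0.5)) is true; the double 0.1 does
// not (its nearest float has a different value), so the same query reports
// lost information and returns false.
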
//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have
    // non-0 elements. We have to be a bit careful here, as the type of the
    // constant may not be the same as the type of the vector elements due to
    // type legalization (the elements are promoted to a legal type for the
    // target and a vector of a type may be legal when the base element type
    // is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the
    // individual constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (const SDValue &Op : N->op_values())
    if (!Op.isUndef())
      return false;

  return true;
}

bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match) {
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  if (ISD::BUILD_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}

bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match) {
  if (LHS.getValueType() != RHS.getValueType())
    return false;

  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHS.getOperand(i));
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHS.getOperand(i));
    if (!LHSCst || !RHSCst)
      return false;
    if (LHSCst->getValueType(0) != SVT ||
        LHSCst->getValueType(0) != RHSCst->getValueType(0))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

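// Example: SETOLT has only the L bit set, so swapping L and G yields SETOGT,
// matching the identity (a < b) == (b > a). Similarly SETULE (U, L, E bits)
// becomes SETUGE (U, G, E bits).
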
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;  // Flip L, G, E bits, but not U.
  else
    Operation ^= 15; // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8; // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

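// Example: for integers, inverting SETLT flips the L, G and E bits and
// produces SETGE. For floating point, inverting SETOLT also flips the U bit
// and produces SETUGE, since !(a < b) must also hold when the operands are
// unordered (NaN).
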
/// For an integer comparison, return 1 if the comparison is a signed
/// operation, 2 if it is an unsigned comparison, and 0 if the operation does
/// not depend on the sign of the input (seteq and setne).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

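// Example: SETGT | SETLT sets both the G and L bits, which (together with the
// integer N bit) encodes SETNE; i.e. (a > b) || (a < b) folds to (a != b).
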
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

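// Example: SETLE & SETGE leaves only the E bit (plus the integer N bit) set,
// which encodes SETEQ; i.e. (a <= b) && (a >= b) folds to (a == b).
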
//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target-specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

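// Note: a glue result models a fixed physical pairing with its consumer (for
// example the glue linking a call to the copies that set up its arguments),
// so two structurally identical glue-producing nodes must remain distinct
// and are never merged by CSE.
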
/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to the next node if we've already managed to delete this one. This
    // could happen if replacing a node causes a node previously added to the
    // worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        LegacyDivergenceAnalysis *Divergence) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase the debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses, as that
      // would make for a worse single-stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType().getScalarType() == VT) return Op;
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}

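// Example: zero-extending an i32 value in-reg from i8 builds the mask
// APInt::getLowBitsSet(32, 8) == 0xFF, so the result is (and x, 0xFF): the
// low 8 bits are preserved and everything above them is cleared.
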
SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                              EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

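// Note: getNOT always XORs with all-ones, whereas getLogicalNOT XORs with
// whatever the target calls "true". Under ZeroOrOneBooleanContent that value
// is 1, so the logical NOT of a boolean x is (xor x, 1); under
// ZeroOrNegativeOneBooleanContent true is already all-ones and the two
// helpers coincide.
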
SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue V =
        getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  return Result;
}

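// Example: getConstant(1, DL, MVT::v4i32) uniques the scalar i32 constant
// through the FoldingSet above and then wraps it in a splat BUILD_VECTOR via
// getSplatBuildVector(), so repeated requests share one ConstantSDNode.
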
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}

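// Note: since the CSE key is the ConstantFP pointer (and therefore the exact
// bit pattern), +0.0 and -0.0 deliberately receive distinct ConstantFPSDNodes
// even though they compare equal as IEEE-754 values.
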
SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = MF->getFunction().optForSize()
                    ? getDataLayout().getABITypeAlignment(C->getType())
                    : getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
/// point at N1 to point at N2 and indices that point at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}

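// Example: with 4-element vectors, commuting shuffle(A, B, <0,5,-1,7>)
// produces shuffle(B, A, <4,1,-1,3>): indices into the old LHS gain NElts,
// indices into the old RHS lose NElts, and -1 (undef) entries are unchanged.
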
SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(llvm::all_of(Mask,
                      [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

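  // Example: with 2-element vectors, shuffle(v, v, <0,3>) reads lane 1 of the
  // identical second operand, so it is canonicalized to shuffle(v, undef,
  // <0,1>).
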
1589 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
1590 if (N1.isUndef())
1591 commuteShuffle(N1, N2, MaskVec);
1593 if (TLI->hasVectorBlend()) {
1594 // If shuffling a splat, try to blend the splat instead. We do this here so
1595 // that even when this arises during lowering we don't have to re-handle it.
1596 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1597 BitVector UndefElements;
1598 SDValue Splat = BV->getSplatValue(&UndefElements);
1599 if (!Splat)
1600 return;
1602 for (int i = 0; i < NElts; ++i) {
1603 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1604 continue;
1606 // If this input comes from undef, mark it as such.
1607 if (UndefElements[MaskVec[i] - Offset]) {
1608 MaskVec[i] = -1;
1609 continue;
1612 // If we can blend a non-undef lane, use that instead.
1613 if (!UndefElements[i])
1614 MaskVec[i] = i + Offset;
1617 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1618 BlendSplat(N1BV, 0);
1619 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1620 BlendSplat(N2BV, NElts);
1623 // Canonicalize shuffles with all indices into lhs -> shuffle lhs, undef
1624 // Canonicalize shuffles with all indices into rhs -> shuffle rhs, undef
1625 bool AllLHS = true, AllRHS = true;
1626 bool N2Undef = N2.isUndef();
1627 for (int i = 0; i != NElts; ++i) {
1628 if (MaskVec[i] >= NElts) {
1629 if (N2Undef)
1630 MaskVec[i] = -1;
1631 else
1632 AllLHS = false;
1633 } else if (MaskVec[i] >= 0) {
1634 AllRHS = false;
1637 if (AllLHS && AllRHS)
1638 return getUNDEF(VT);
1639 if (AllLHS && !N2Undef)
1640 N2 = getUNDEF(VT);
1641 if (AllRHS) {
1642 N1 = getUNDEF(VT);
1643 commuteShuffle(N1, N2, MaskVec);
1645 // Reset our undef status after accounting for the mask.
1646 N2Undef = N2.isUndef();
1647 // Re-check whether both sides ended up undef.
1648 if (N1.isUndef() && N2Undef)
1649 return getUNDEF(VT);
1651 // If this is an identity shuffle, return the first operand.
1652 bool Identity = true, AllSame = true;
1653 for (int i = 0; i != NElts; ++i) {
1654 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1655 if (MaskVec[i] != MaskVec[0]) AllSame = false;
1657 if (Identity && NElts)
1658 return N1;
1660 // Shuffling a constant splat doesn't change the result.
1661 if (N2Undef) {
1662 SDValue V = N1;
1664 // Look through any bitcasts. We check that these don't change the number
1665 // (and size) of elements and just change their types.
1666 while (V.getOpcode() == ISD::BITCAST)
1667 V = V->getOperand(0);
1669 // A splat should always show up as a build vector node.
1670 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1671 BitVector UndefElements;
1672 SDValue Splat = BV->getSplatValue(&UndefElements);
1673 // If this is a splat of an undef, shuffling it is also undef.
1674 if (Splat && Splat.isUndef())
1675 return getUNDEF(VT);
1677 bool SameNumElts =
1678 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1680 // We can only skip the shuffle for a splat if there is a splatted value
1681 // and no undef lanes are rearranged by the shuffle.
1682 if (Splat && UndefElements.none()) {
1683 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1684 // number of elements matches or the splatted value is a zero constant.
1685 if (SameNumElts)
1686 return N1;
1687 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1688 if (C->isNullValue())
1689 return N1;
1692 // If the shuffle itself creates a splat, build the vector directly.
1693 if (AllSame && SameNumElts) {
1694 EVT BuildVT = BV->getValueType(0);
1695 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1696 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1698 // We may have jumped through bitcasts, so the type of the
1699 // BUILD_VECTOR may not match the type of the shuffle.
1700 if (BuildVT != VT)
1701 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1702 return NewBV;
1707 FoldingSetNodeID ID;
1708 SDValue Ops[2] = { N1, N2 };
1709 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1710 for (int i = 0; i != NElts; ++i)
1711 ID.AddInteger(MaskVec[i]);
1713 void* IP = nullptr;
1714 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1715 return SDValue(E, 0);
1717 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1718 // SDNode doesn't have access to it. This memory will be "leaked" when
1719 // the node is deallocated, but recovered when the NodeAllocator is released.
1720 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1721 std::copy(MaskVec.begin(), MaskVec.end(), MaskAlloc);
1723 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1724 dl.getDebugLoc(), MaskAlloc);
1725 createOperands(N, Ops);
1727 CSEMap.InsertNode(N, IP);
1728 InsertNode(N);
1729 SDValue V = SDValue(N, 0);
1730 NewSDValueDbgMsg(V, "Creating new node: ", this);
1731 return V;
1734 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1735 EVT VT = SV.getValueType(0);
1736 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1737 ShuffleVectorSDNode::commuteMask(MaskVec);
1739 SDValue Op0 = SV.getOperand(0);
1740 SDValue Op1 = SV.getOperand(1);
1741 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
1744 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1745 FoldingSetNodeID ID;
1746 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1747 ID.AddInteger(RegNo);
1748 void *IP = nullptr;
1749 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1750 return SDValue(E, 0);
1752 auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
1753 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
1754 CSEMap.InsertNode(N, IP);
1755 InsertNode(N);
1756 return SDValue(N, 0);
1759 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
1760 FoldingSetNodeID ID;
1761 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
1762 ID.AddPointer(RegMask);
1763 void *IP = nullptr;
1764 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1765 return SDValue(E, 0);
1767 auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
1768 CSEMap.InsertNode(N, IP);
1769 InsertNode(N);
1770 return SDValue(N, 0);
1773 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
1774 MCSymbol *Label) {
1775 return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
1778 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
1779 SDValue Root, MCSymbol *Label) {
1780 FoldingSetNodeID ID;
1781 SDValue Ops[] = { Root };
1782 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
1783 ID.AddPointer(Label);
1784 void *IP = nullptr;
1785 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1786 return SDValue(E, 0);
1788 auto *N = newSDNode<LabelSDNode>(dl.getIROrder(), dl.getDebugLoc(), Label);
1789 createOperands(N, Ops);
1791 CSEMap.InsertNode(N, IP);
1792 InsertNode(N);
1793 return SDValue(N, 0);
1796 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
1797 int64_t Offset,
1798 bool isTarget,
1799 unsigned char TargetFlags) {
1800 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1802 FoldingSetNodeID ID;
1803 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1804 ID.AddPointer(BA);
1805 ID.AddInteger(Offset);
1806 ID.AddInteger(TargetFlags);
1807 void *IP = nullptr;
1808 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1809 return SDValue(E, 0);
1811 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
1812 CSEMap.InsertNode(N, IP);
1813 InsertNode(N);
1814 return SDValue(N, 0);
1817 SDValue SelectionDAG::getSrcValue(const Value *V) {
1818 assert((!V || V->getType()->isPointerTy()) &&
1819 "SrcValue is not a pointer?");
1821 FoldingSetNodeID ID;
1822 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
1823 ID.AddPointer(V);
1825 void *IP = nullptr;
1826 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1827 return SDValue(E, 0);
1829 auto *N = newSDNode<SrcValueSDNode>(V);
1830 CSEMap.InsertNode(N, IP);
1831 InsertNode(N);
1832 return SDValue(N, 0);
1835 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
1836 FoldingSetNodeID ID;
1837 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
1838 ID.AddPointer(MD);
1840 void *IP = nullptr;
1841 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1842 return SDValue(E, 0);
1844 auto *N = newSDNode<MDNodeSDNode>(MD);
1845 CSEMap.InsertNode(N, IP);
1846 InsertNode(N);
1847 return SDValue(N, 0);
1850 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
1851 if (VT == V.getValueType())
1852 return V;
1854 return getNode(ISD::BITCAST, SDLoc(V), VT, V);
1857 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
1858 unsigned SrcAS, unsigned DestAS) {
1859 SDValue Ops[] = {Ptr};
1860 FoldingSetNodeID ID;
1861 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
1862 ID.AddInteger(SrcAS);
1863 ID.AddInteger(DestAS);
1865 void *IP = nullptr;
1866 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1867 return SDValue(E, 0);
1869 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
1870 VT, SrcAS, DestAS);
1871 createOperands(N, Ops);
1873 CSEMap.InsertNode(N, IP);
1874 InsertNode(N);
1875 return SDValue(N, 0);
1878 /// getShiftAmountOperand - Return the specified value, cast to the
1879 /// target's desired shift amount type.
1880 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
1881 EVT OpTy = Op.getValueType();
1882 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
1883 if (OpTy == ShTy || OpTy.isVector()) return Op;
1885 return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
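/// expandVAArg implements the default VAARG lowering: load the current
/// va_list pointer, round it up to the argument's alignment when that exceeds
/// the minimum stack argument alignment, advance it past the argument, store
/// the updated pointer back, and finally load the argument itself.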
1888 SDValue SelectionDAG::expandVAArg(SDNode *Node) {
1889 SDLoc dl(Node);
1890 const TargetLowering &TLI = getTargetLoweringInfo();
1891 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
1892 EVT VT = Node->getValueType(0);
1893 SDValue Tmp1 = Node->getOperand(0);
1894 SDValue Tmp2 = Node->getOperand(1);
1895 unsigned Align = Node->getConstantOperandVal(3);
1897 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
1898 Tmp2, MachinePointerInfo(V));
1899 SDValue VAList = VAListLoad;
1901 if (Align > TLI.getMinStackArgumentAlignment()) {
1902 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
1904 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1905 getConstant(Align - 1, dl, VAList.getValueType()));
1907 VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList,
1908 getConstant(-(int64_t)Align, dl, VAList.getValueType()));
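// Worked example: with Align = 16 this computes
// VAList = (VAList + 15) & ~15, rounding the pointer up to the next
// 16-byte boundary.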
1911 // Increment the pointer, VAList, to the next vaarg
1912 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1913 getConstant(getDataLayout().getTypeAllocSize(
1914 VT.getTypeForEVT(*getContext())),
1915 dl, VAList.getValueType()));
1916 // Store the incremented VAList to the legalized pointer
1917 Tmp1 =
1918 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
1919 // Load the actual argument out of the pointer VAList
1920 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
1923 SDValue SelectionDAG::expandVACopy(SDNode *Node) {
1924 SDLoc dl(Node);
1925 const TargetLowering &TLI = getTargetLoweringInfo();
1926 // This defaults to loading a pointer from the input and storing it to the
1927 // output, returning the chain.
1928 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
1929 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
1930 SDValue Tmp1 =
1931 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
1932 Node->getOperand(2), MachinePointerInfo(VS));
1933 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
1934 MachinePointerInfo(VD));
1937 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
1938 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
1939 unsigned ByteSize = VT.getStoreSize();
1940 Type *Ty = VT.getTypeForEVT(*getContext());
1941 unsigned StackAlign =
1942 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);
1944 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
1945 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
1948 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
1949 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize());
1950 Type *Ty1 = VT1.getTypeForEVT(*getContext());
1951 Type *Ty2 = VT2.getTypeForEVT(*getContext());
1952 const DataLayout &DL = getDataLayout();
1953 unsigned Align =
1954 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));
1956 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
1957 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false);
1958 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
1961 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
1962 ISD::CondCode Cond, const SDLoc &dl) {
1963 EVT OpVT = N1.getValueType();
1965 // These setcc operations always fold.
1966 switch (Cond) {
1967 default: break;
1968 case ISD::SETFALSE:
1969 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
1970 case ISD::SETTRUE:
1971 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);
1973 case ISD::SETOEQ:
1974 case ISD::SETOGT:
1975 case ISD::SETOGE:
1976 case ISD::SETOLT:
1977 case ISD::SETOLE:
1978 case ISD::SETONE:
1979 case ISD::SETO:
1980 case ISD::SETUO:
1981 case ISD::SETUEQ:
1982 case ISD::SETUNE:
1983 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
1984 break;
1987 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
1988 const APInt &C2 = N2C->getAPIntValue();
1989 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
1990 const APInt &C1 = N1C->getAPIntValue();
1992 switch (Cond) {
1993 default: llvm_unreachable("Unknown integer setcc!");
1994 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT);
1995 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT);
1996 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT);
1997 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT);
1998 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT);
1999 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT);
2000 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT);
2001 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT);
2002 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT);
2003 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT);
2007 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) {
2008 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) {
2009 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
2010 switch (Cond) {
2011 default: break;
2012 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
2013 return getUNDEF(VT);
2014 LLVM_FALLTHROUGH;
2015 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
2016 OpVT);
2017 case ISD::SETNE: if (R==APFloat::cmpUnordered)
2018 return getUNDEF(VT);
2019 LLVM_FALLTHROUGH;
2020 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2021 R==APFloat::cmpLessThan, dl, VT,
2022 OpVT);
2023 case ISD::SETLT: if (R==APFloat::cmpUnordered)
2024 return getUNDEF(VT);
2025 LLVM_FALLTHROUGH;
2026 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
2027 OpVT);
2028 case ISD::SETGT: if (R==APFloat::cmpUnordered)
2029 return getUNDEF(VT);
2030 LLVM_FALLTHROUGH;
2031 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
2032 VT, OpVT);
2033 case ISD::SETLE: if (R==APFloat::cmpUnordered)
2034 return getUNDEF(VT);
2035 LLVM_FALLTHROUGH;
2036 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
2037 R==APFloat::cmpEqual, dl, VT,
2038 OpVT);
2039 case ISD::SETGE: if (R==APFloat::cmpUnordered)
2040 return getUNDEF(VT);
2041 LLVM_FALLTHROUGH;
2042 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2043 R==APFloat::cmpEqual, dl, VT, OpVT);
2044 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
2045 OpVT);
2046 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
2047 OpVT);
2048 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
2049 R==APFloat::cmpEqual, dl, VT,
2050 OpVT);
2051 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
2052 OpVT);
2053 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
2054 R==APFloat::cmpLessThan, dl, VT,
2055 OpVT);
2056 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2057 R==APFloat::cmpUnordered, dl, VT,
2058 OpVT);
2059 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
2060 VT, OpVT);
2061 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
2062 OpVT);
2064 } else {
2065 // Ensure that the constant occurs on the RHS.
2066 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
2067 MVT CompVT = N1.getValueType().getSimpleVT();
2068 if (!TLI->isCondCodeLegal(SwappedCond, CompVT))
2069 return SDValue();
2071 return getSetCC(dl, VT, N2, N1, SwappedCond);
2075 // Could not fold it.
2076 return SDValue();
2079 /// See if the specified operand can be simplified with the knowledge that only
2080 /// the bits specified by Mask are used.
2081 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &Mask) {
2082 switch (V.getOpcode()) {
2083 default:
2084 break;
2085 case ISD::Constant: {
2086 const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode());
2087 assert(CV && "Const value should be ConstSDNode.");
2088 const APInt &CVal = CV->getAPIntValue();
2089 APInt NewVal = CVal & Mask;
2090 if (NewVal != CVal)
2091 return getConstant(NewVal, SDLoc(V), V.getValueType());
2092 break;
2094 case ISD::OR:
2095 case ISD::XOR:
2096 // If the LHS or RHS doesn't contribute demanded bits to the OR/XOR, drop it.
2097 if (MaskedValueIsZero(V.getOperand(0), Mask))
2098 return V.getOperand(1);
2099 if (MaskedValueIsZero(V.getOperand(1), Mask))
2100 return V.getOperand(0);
2101 break;
2102 case ISD::SRL:
2103 // Only look at single-use SRLs.
2104 if (!V.getNode()->hasOneUse())
2105 break;
2106 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
2107 // See if we can recursively simplify the LHS.
2108 unsigned Amt = RHSC->getZExtValue();
2110 // Watch out for shift count overflow though.
2111 if (Amt >= Mask.getBitWidth())
2112 break;
2113 APInt NewMask = Mask << Amt;
2114 if (SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask))
2115 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
2116 V.getOperand(1));
2118 break;
2119 case ISD::AND: {
2120 // X & -1 -> X (ignoring bits which aren't demanded).
2121 ConstantSDNode *AndVal = isConstOrConstSplat(V.getOperand(1));
2122 if (AndVal && Mask.isSubsetOf(AndVal->getAPIntValue()))
2123 return V.getOperand(0);
2124 break;
2126 case ISD::ANY_EXTEND: {
2127 SDValue Src = V.getOperand(0);
2128 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
2129 // Being conservative here - only peek through if we only demand bits in the
2130 // non-extended source (even though the extended bits are technically undef).
2131 if (Mask.getActiveBits() > SrcBitWidth)
2132 break;
2133 APInt SrcMask = Mask.trunc(SrcBitWidth);
2134 if (SDValue DemandedSrc = GetDemandedBits(Src, SrcMask))
2135 return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc);
2136 break;
2139 return SDValue();
2142 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
2143 /// use this predicate to simplify operations downstream.
2144 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
2145 unsigned BitWidth = Op.getScalarValueSizeInBits();
2146 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
2149 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
2150 /// this predicate to simplify operations downstream. Mask is known to be zero
2151 /// for bits that V cannot have.
2152 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
2153 unsigned Depth) const {
2154 return Mask.isSubsetOf(computeKnownBits(Op, Depth).Zero);
2157 /// Helper function that checks whether a node is a constant or a build
2158 /// vector whose demanded elements all splat the same constant.
2159 static ConstantSDNode *isConstOrDemandedConstSplat(SDValue N,
2160 const APInt &DemandedElts) {
2161 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
2162 return CN;
2163 if (N.getOpcode() != ISD::BUILD_VECTOR)
2164 return nullptr;
2165 EVT VT = N.getValueType();
2166 ConstantSDNode *Cst = nullptr;
2167 unsigned NumElts = VT.getVectorNumElements();
2168 assert(DemandedElts.getBitWidth() == NumElts && "Unexpected vector size");
2169 for (unsigned i = 0; i != NumElts; ++i) {
2170 if (!DemandedElts[i])
2171 continue;
2172 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(i));
2173 if (!C || (Cst && Cst->getAPIntValue() != C->getAPIntValue()) ||
2174 C->getValueType(0) != VT.getScalarType())
2175 return nullptr;
2176 Cst = C;
2178 return Cst;
2181 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that
2182 /// is less than the element bit-width of the shift node, return it.
2183 static const APInt *getValidShiftAmountConstant(SDValue V) {
2184 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) {
2185 // Shifting more than the bitwidth is not valid.
2186 const APInt &ShAmt = SA->getAPIntValue();
2187 if (ShAmt.ult(V.getScalarValueSizeInBits()))
2188 return &ShAmt;
2190 return nullptr;
2193 /// Determine which bits of Op are known to be either zero or one and return
2194 /// them in Known. For vectors, the known bits are those that are shared by
2195 /// every vector element.
2196 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
2197 EVT VT = Op.getValueType();
2198 APInt DemandedElts = VT.isVector()
2199 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2200 : APInt(1, 1);
2201 return computeKnownBits(Op, DemandedElts, Depth);
2204 /// Determine which bits of Op are known to be either zero or one and return
2205 /// them in Known. The DemandedElts argument allows us to only collect the known
2206 /// bits that are shared by the requested vector elements.
2207 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
2208 unsigned Depth) const {
2209 unsigned BitWidth = Op.getScalarValueSizeInBits();
2211 KnownBits Known(BitWidth); // Don't know anything.
2213 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2214 // We know all of the bits for a constant!
2215 Known.One = C->getAPIntValue();
2216 Known.Zero = ~Known.One;
2217 return Known;
2219 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
2220 // We know all of the bits for a constant fp!
2221 Known.One = C->getValueAPF().bitcastToAPInt();
2222 Known.Zero = ~Known.One;
2223 return Known;
2226 if (Depth == 6)
2227 return Known; // Limit search depth.
2229 KnownBits Known2;
2230 unsigned NumElts = DemandedElts.getBitWidth();
2232 if (!DemandedElts)
2233 return Known; // No demanded elts, better to assume we don't know anything.
2235 unsigned Opcode = Op.getOpcode();
2236 switch (Opcode) {
2237 case ISD::BUILD_VECTOR:
2238 // Collect the known bits that are shared by every demanded vector element.
2239 assert(NumElts == Op.getValueType().getVectorNumElements() &&
2240 "Unexpected vector size");
2241 Known.Zero.setAllBits(); Known.One.setAllBits();
2242 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2243 if (!DemandedElts[i])
2244 continue;
2246 SDValue SrcOp = Op.getOperand(i);
2247 Known2 = computeKnownBits(SrcOp, Depth + 1);
2249 // BUILD_VECTOR can implicitly truncate sources; we must handle this.
2250 if (SrcOp.getValueSizeInBits() != BitWidth) {
2251 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2252 "Expected BUILD_VECTOR implicit truncation");
2253 Known2 = Known2.trunc(BitWidth);
2256 // Known bits are the values that are shared by every demanded element.
2257 Known.One &= Known2.One;
2258 Known.Zero &= Known2.Zero;
2260 // If we don't know any bits, early out.
2261 if (Known.isUnknown())
2262 break;
2264 break;
2265 case ISD::VECTOR_SHUFFLE: {
2266 // Collect the known bits that are shared by every vector element referenced
2267 // by the shuffle.
2268 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2269 Known.Zero.setAllBits(); Known.One.setAllBits();
2270 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2271 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2272 for (unsigned i = 0; i != NumElts; ++i) {
2273 if (!DemandedElts[i])
2274 continue;
2276 int M = SVN->getMaskElt(i);
2277 if (M < 0) {
2278 // For UNDEF elements, we don't know anything about the common state of
2279 // the shuffle result.
2280 Known.resetAll();
2281 DemandedLHS.clearAllBits();
2282 DemandedRHS.clearAllBits();
2283 break;
2286 if ((unsigned)M < NumElts)
2287 DemandedLHS.setBit((unsigned)M % NumElts);
2288 else
2289 DemandedRHS.setBit((unsigned)M % NumElts);
2291 // Known bits are the values that are shared by every demanded element.
2292 if (!!DemandedLHS) {
2293 SDValue LHS = Op.getOperand(0);
2294 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
2295 Known.One &= Known2.One;
2296 Known.Zero &= Known2.Zero;
2298 // If we don't know any bits, early out.
2299 if (Known.isUnknown())
2300 break;
2301 if (!!DemandedRHS) {
2302 SDValue RHS = Op.getOperand(1);
2303 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
2304 Known.One &= Known2.One;
2305 Known.Zero &= Known2.Zero;
2307 break;
2309 case ISD::CONCAT_VECTORS: {
2310 // Split DemandedElts and test each of the demanded subvectors.
2311 Known.Zero.setAllBits(); Known.One.setAllBits();
2312 EVT SubVectorVT = Op.getOperand(0).getValueType();
2313 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2314 unsigned NumSubVectors = Op.getNumOperands();
2315 for (unsigned i = 0; i != NumSubVectors; ++i) {
2316 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2317 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
2318 if (!!DemandedSub) {
2319 SDValue Sub = Op.getOperand(i);
2320 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
2321 Known.One &= Known2.One;
2322 Known.Zero &= Known2.Zero;
2324 // If we don't know any bits, early out.
2325 if (Known.isUnknown())
2326 break;
2328 break;
2330 case ISD::INSERT_SUBVECTOR: {
2331 // If we know the element index, demand any elements from the subvector and
2332 // the remainder from the src it's inserted into; otherwise demand them all.
2333 SDValue Src = Op.getOperand(0);
2334 SDValue Sub = Op.getOperand(1);
2335 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2336 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2337 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
2338 Known.One.setAllBits();
2339 Known.Zero.setAllBits();
2340 uint64_t Idx = SubIdx->getZExtValue();
2341 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2342 if (!!DemandedSubElts) {
2343 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
2344 if (Known.isUnknown())
2345 break; // early-out.
2347 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
2348 APInt DemandedSrcElts = DemandedElts & ~SubMask;
2349 if (!!DemandedSrcElts) {
2350 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2351 Known.One &= Known2.One;
2352 Known.Zero &= Known2.Zero;
2354 } else {
2355 Known = computeKnownBits(Sub, Depth + 1);
2356 if (Known.isUnknown())
2357 break; // early-out.
2358 Known2 = computeKnownBits(Src, Depth + 1);
2359 Known.One &= Known2.One;
2360 Known.Zero &= Known2.Zero;
2362 break;
2364 case ISD::EXTRACT_SUBVECTOR: {
2365 // If we know the element index, just demand that subvector elements,
2366 // otherwise demand them all.
2367 SDValue Src = Op.getOperand(0);
2368 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2369 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2370 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
2371 // Offset the demanded elts by the subvector index.
2372 uint64_t Idx = SubIdx->getZExtValue();
2373 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2374 Known = computeKnownBits(Src, DemandedSrc, Depth + 1);
2375 } else {
2376 Known = computeKnownBits(Src, Depth + 1);
2378 break;
2380 case ISD::BITCAST: {
2381 SDValue N0 = Op.getOperand(0);
2382 EVT SubVT = N0.getValueType();
2383 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
2385 // Ignore bitcasts from unsupported types.
2386 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
2387 break;
2389 // Fast handling of 'identity' bitcasts.
2390 if (BitWidth == SubBitWidth) {
2391 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
2392 break;
2395 bool IsLE = getDataLayout().isLittleEndian();
2397 // Bitcast 'small element' vector to 'large element' scalar/vector.
2398 if ((BitWidth % SubBitWidth) == 0) {
2399 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
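// e.g. for a little-endian v2i32 -> i64 bitcast, source element 0 supplies
// bits [0, 32) of the result and source element 1 supplies bits [32, 64).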
2401 // Collect known bits for the (larger) output by collecting the known
2402 // bits from each set of sub elements and shift these into place.
2403 // We need to separately call computeKnownBits for each set of
2404 // sub elements as the knownbits for each is likely to be different.
2405 unsigned SubScale = BitWidth / SubBitWidth;
2406 APInt SubDemandedElts(NumElts * SubScale, 0);
2407 for (unsigned i = 0; i != NumElts; ++i)
2408 if (DemandedElts[i])
2409 SubDemandedElts.setBit(i * SubScale);
2411 for (unsigned i = 0; i != SubScale; ++i) {
2412 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
2413 Depth + 1);
2414 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
2415 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
2416 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
2420 // Bitcast 'large element' scalar/vector to 'small element' vector.
2421 if ((SubBitWidth % BitWidth) == 0) {
2422 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2424 // Collect known bits for the (smaller) output by collecting the known
2425 // bits from the overlapping larger input elements and extracting the
2426 // sub sections we actually care about.
2427 unsigned SubScale = SubBitWidth / BitWidth;
2428 APInt SubDemandedElts(NumElts / SubScale, 0);
2429 for (unsigned i = 0; i != NumElts; ++i)
2430 if (DemandedElts[i])
2431 SubDemandedElts.setBit(i / SubScale);
2433 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
2435 Known.Zero.setAllBits(); Known.One.setAllBits();
2436 for (unsigned i = 0; i != NumElts; ++i)
2437 if (DemandedElts[i]) {
2438 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
2439 unsigned Offset = (Shifts % SubScale) * BitWidth;
2440 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
2441 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
2442 // If we don't know any bits, early out.
2443 if (Known.isUnknown())
2444 break;
2447 break;
2449 case ISD::AND:
2450 // If either the LHS or the RHS is zero, the result is zero.
2451 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2452 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2454 // Output known-1 bits are only known if set in both the LHS & RHS.
2455 Known.One &= Known2.One;
2456 // Output known-0 are known to be clear if zero in either the LHS | RHS.
2457 Known.Zero |= Known2.Zero;
2458 break;
2459 case ISD::OR:
2460 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2461 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2463 // Output known-0 bits are only known if clear in both the LHS & RHS.
2464 Known.Zero &= Known2.Zero;
2465 // Output known-1 are known to be set if set in either the LHS | RHS.
2466 Known.One |= Known2.One;
2467 break;
2468 case ISD::XOR: {
2469 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2470 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2472 // Output known-0 bits are known if clear or set in both the LHS & RHS.
2473 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
2474 // Output known-1 are known to be set if set in only one of the LHS, RHS.
2475 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
2476 Known.Zero = KnownZeroOut;
2477 break;
2479 case ISD::MUL: {
2480 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2481 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2483 // If low bits are zero in either operand, output low known-0 bits.
2484 // Also compute a conservative estimate for high known-0 bits.
2485 // More trickiness is possible, but this is sufficient for the
2486 // interesting case of alignment computation.
2487 unsigned TrailZ = Known.countMinTrailingZeros() +
2488 Known2.countMinTrailingZeros();
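// e.g. a multiple of 8 (>= 3 trailing zeros) times a multiple of 4
// (>= 2 trailing zeros) is a multiple of 32, so at least 5 trailing
// zeros are known in the product.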
2489 unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
2490 Known2.countMinLeadingZeros(),
2491 BitWidth) - BitWidth;
2493 Known.resetAll();
2494 Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
2495 Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
2496 break;
2498 case ISD::UDIV: {
2499 // For the purposes of computing leading zeros we can conservatively
2500 // treat a udiv as a logical right shift by the power of 2 known to
2501 // be less than the denominator.
2502 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2503 unsigned LeadZ = Known2.countMinLeadingZeros();
2505 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2506 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
2507 if (RHSMaxLeadingZeros != BitWidth)
2508 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
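// e.g. for a 32-bit udiv whose denominator is known to be >= 2^20
// (at most 11 leading zeros), the quotient gains at least 20 more
// leading zeros than the numerator already had.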
2510 Known.Zero.setHighBits(LeadZ);
2511 break;
2513 case ISD::SELECT:
2514 case ISD::VSELECT:
2515 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2516 // If we don't know any bits, early out.
2517 if (Known.isUnknown())
2518 break;
2519 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
2521 // Only known if known in both the LHS and RHS.
2522 Known.One &= Known2.One;
2523 Known.Zero &= Known2.Zero;
2524 break;
2525 case ISD::SELECT_CC:
2526 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
2527 // If we don't know any bits, early out.
2528 if (Known.isUnknown())
2529 break;
2530 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2532 // Only known if known in both the LHS and RHS.
2533 Known.One &= Known2.One;
2534 Known.Zero &= Known2.Zero;
2535 break;
2536 case ISD::SMULO:
2537 case ISD::UMULO:
2538 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
2539 if (Op.getResNo() != 1)
2540 break;
2541 // The boolean result conforms to getBooleanContents.
2542 // If we know the result of a setcc has the top bits zero, use this info.
2543 // We know that we have an integer-based boolean since these operations
2544 // are only available for integers.
2545 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2546 TargetLowering::ZeroOrOneBooleanContent &&
2547 BitWidth > 1)
2548 Known.Zero.setBitsFrom(1);
2549 break;
2550 case ISD::SETCC:
2551 // If we know the result of a setcc has the top bits zero, use this info.
2552 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2553 TargetLowering::ZeroOrOneBooleanContent &&
2554 BitWidth > 1)
2555 Known.Zero.setBitsFrom(1);
2556 break;
2557 case ISD::SHL:
2558 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2559 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2560 unsigned Shift = ShAmt->getZExtValue();
2561 Known.Zero <<= Shift;
2562 Known.One <<= Shift;
2563 // Low bits are known zero.
2564 Known.Zero.setLowBits(Shift);
2566 break;
2567 case ISD::SRL:
2568 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2569 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2570 unsigned Shift = ShAmt->getZExtValue();
2571 Known.Zero.lshrInPlace(Shift);
2572 Known.One.lshrInPlace(Shift);
2573 // High bits are known zero.
2574 Known.Zero.setHighBits(Shift);
2575 } else if (auto *BV = dyn_cast<BuildVectorSDNode>(Op.getOperand(1))) {
2576 // If the shift amount is a vector of constants see if we can bound
2577 // the number of upper zero bits.
2578 unsigned ShiftAmountMin = BitWidth;
2579 for (unsigned i = 0; i != BV->getNumOperands(); ++i) {
2580 if (auto *C = dyn_cast<ConstantSDNode>(BV->getOperand(i))) {
2581 const APInt &ShAmt = C->getAPIntValue();
2582 if (ShAmt.ult(BitWidth)) {
2583 ShiftAmountMin = std::min<unsigned>(ShiftAmountMin,
2584 ShAmt.getZExtValue());
2585 continue;
2588 // Don't know anything.
2589 ShiftAmountMin = 0;
2590 break;
2593 Known.Zero.setHighBits(ShiftAmountMin);
2595 break;
2596 case ISD::SRA:
2597 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2598 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2599 unsigned Shift = ShAmt->getZExtValue();
2600 // Sign extend the known zero/one bits (anything else is unknown).
2601 Known.Zero.ashrInPlace(Shift);
2602 Known.One.ashrInPlace(Shift);
2604 break;
2605 case ISD::SIGN_EXTEND_INREG: {
2606 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2607 unsigned EBits = EVT.getScalarSizeInBits();
2609 // Sign extension. Compute the demanded bits in the result that are not
2610 // present in the input.
2611 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
2613 APInt InSignMask = APInt::getSignMask(EBits);
2614 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
2616 // If the sign extended bits are demanded, we know that the sign
2617 // bit is demanded.
2618 InSignMask = InSignMask.zext(BitWidth);
2619 if (NewBits.getBoolValue())
2620 InputDemandedBits |= InSignMask;
2622 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2623 Known.One &= InputDemandedBits;
2624 Known.Zero &= InputDemandedBits;
2626 // If the sign bit of the input is known set or clear, then we know the
2627 // top bits of the result.
2628 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear
2629 Known.Zero |= NewBits;
2630 Known.One &= ~NewBits;
2631 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set
2632 Known.One |= NewBits;
2633 Known.Zero &= ~NewBits;
2634 } else { // Input sign bit unknown
2635 Known.Zero &= ~NewBits;
2636 Known.One &= ~NewBits;
2638 break;
2640 case ISD::CTTZ:
2641 case ISD::CTTZ_ZERO_UNDEF: {
2642 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2643 // If we have a known 1, its position is our upper bound.
2644 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
2645 unsigned LowBits = Log2_32(PossibleTZ) + 1;
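// e.g. if at most 7 trailing zeros are possible, the cttz result fits in
// Log2_32(7) + 1 = 3 bits, so every bit from position 3 upward is zero.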
2646 Known.Zero.setBitsFrom(LowBits);
2647 break;
2649 case ISD::CTLZ:
2650 case ISD::CTLZ_ZERO_UNDEF: {
2651 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2652 // If we have a known 1, its position is our upper bound.
2653 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
2654 unsigned LowBits = Log2_32(PossibleLZ) + 1;
2655 Known.Zero.setBitsFrom(LowBits);
2656 break;
2658 case ISD::CTPOP: {
2659 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2660 // If we know some of the bits are zero, they can't be one.
2661 unsigned PossibleOnes = Known2.countMaxPopulation();
2662 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
2663 break;
2665 case ISD::LOAD: {
2666 LoadSDNode *LD = cast<LoadSDNode>(Op);
2667 // If this is a ZEXTLoad and we are looking at the loaded value.
2668 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
2669 EVT VT = LD->getMemoryVT();
2670 unsigned MemBits = VT.getScalarSizeInBits();
2671 Known.Zero.setBitsFrom(MemBits);
2672 } else if (const MDNode *Ranges = LD->getRanges()) {
2673 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
2674 computeKnownBitsFromRangeMetadata(*Ranges, Known);
2676 break;
2678 case ISD::ZERO_EXTEND_VECTOR_INREG: {
2679 EVT InVT = Op.getOperand(0).getValueType();
2680 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
2681 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
2682 Known = Known.zext(BitWidth);
2683 Known.Zero.setBitsFrom(InVT.getScalarSizeInBits());
2684 break;
2686 case ISD::ZERO_EXTEND: {
2687 EVT InVT = Op.getOperand(0).getValueType();
2688 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2689 Known = Known.zext(BitWidth);
2690 Known.Zero.setBitsFrom(InVT.getScalarSizeInBits());
2691 break;
2693 // TODO ISD::SIGN_EXTEND_VECTOR_INREG
2694 case ISD::SIGN_EXTEND: {
2695 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2696 // If the sign bit is known to be zero or one, then sext will extend
2697 // it to the top bits, else it will just zext.
2698 Known = Known.sext(BitWidth);
2699 break;
2701 case ISD::ANY_EXTEND: {
2702 Known = computeKnownBits(Op.getOperand(0), Depth+1);
2703 Known = Known.zext(BitWidth);
2704 break;
2706 case ISD::TRUNCATE: {
2707 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2708 Known = Known.trunc(BitWidth);
2709 break;
2711 case ISD::AssertZext: {
2712 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2713 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
2714 Known = computeKnownBits(Op.getOperand(0), Depth+1);
2715 Known.Zero |= (~InMask);
2716 Known.One &= (~Known.Zero);
2717 break;
2719 case ISD::FGETSIGN:
2720 // All bits are zero except the low bit.
2721 Known.Zero.setBitsFrom(1);
2722 break;
2723 case ISD::USUBO:
2724 case ISD::SSUBO:
2725 if (Op.getResNo() == 1) {
2726 // If we know the result of a setcc has the top bits zero, use this info.
2727 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2728 TargetLowering::ZeroOrOneBooleanContent &&
2729 BitWidth > 1)
2730 Known.Zero.setBitsFrom(1);
2731 break;
2733 LLVM_FALLTHROUGH;
2734 case ISD::SUB:
2735 case ISD::SUBC: {
2736 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) {
2737 // We know that the top bits of C-X are clear if X contains fewer bits
2738 // than C (i.e. no wrap-around can happen). For example, 20-X is
2739 // positive if we can prove that X is >= 0 and < 16.
2740 if (CLHS->getAPIntValue().isNonNegative()) {
2741 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2742 // NLZ can't be BitWidth with no sign bit
2743 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2744 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts,
2745 Depth + 1);
2747 // If all of the MaskV bits are known to be zero, then we know the
2748 // output top bits are zero, because we now know that the output is
2749 // from [0-C].
2750 if ((Known2.Zero & MaskV) == MaskV) {
2751 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2752 // Top bits known zero.
2753 Known.Zero.setHighBits(NLZ2);
2758 // If low bits are known to be zero in both operands, then we know they are
2759 // going to be 0 in the result. Both addition and complement operations
2760 // preserve the low zero bits.
2761 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2762 unsigned KnownZeroLow = Known2.countMinTrailingZeros();
2763 if (KnownZeroLow == 0)
2764 break;
2766 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2767 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
2768 Known.Zero.setLowBits(KnownZeroLow);
2769 break;
2771 case ISD::UADDO:
2772 case ISD::SADDO:
2773 case ISD::ADDCARRY:
2774 if (Op.getResNo() == 1) {
2775 // If we know the result of a setcc has the top bits zero, use this info.
2776 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2777 TargetLowering::ZeroOrOneBooleanContent &&
2778 BitWidth > 1)
2779 Known.Zero.setBitsFrom(1);
2780 break;
2782 LLVM_FALLTHROUGH;
2783 case ISD::ADD:
2784 case ISD::ADDC:
2785 case ISD::ADDE: {
2786 // Output known-0 bits are known if clear or set in both the low clear bits
2787 // common to both LHS & RHS. For example, 8+(X<<3) is known to have the
2788 // low 3 bits clear.
2789 // Output known-0 bits are also known if the top bits of each input are
2790 // known to be clear. For example, if one input has the top 10 bits clear
2791 // and the other has the top 8 bits clear, we know the top 7 bits of the
2792 // output must be clear.
2793 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2794 unsigned KnownZeroHigh = Known2.countMinLeadingZeros();
2795 unsigned KnownZeroLow = Known2.countMinTrailingZeros();
2797 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2798 KnownZeroHigh = std::min(KnownZeroHigh, Known2.countMinLeadingZeros());
2799 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
2801 if (Opcode == ISD::ADDE || Opcode == ISD::ADDCARRY) {
2802 // With ADDE and ADDCARRY, a carry bit may be added in, so we can only
2803 // use this information if we know (at least) that the low two bits are
2804 // clear. We then return to the caller that the low bit is unknown but
2805 // that other bits are known zero.
2806 if (KnownZeroLow >= 2)
2807 Known.Zero.setBits(1, KnownZeroLow);
2808 break;
2811 Known.Zero.setLowBits(KnownZeroLow);
2812 if (KnownZeroHigh > 1)
2813 Known.Zero.setHighBits(KnownZeroHigh - 1);
2814 break;
2816 case ISD::SREM:
2817 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2818 const APInt &RA = Rem->getAPIntValue().abs();
2819 if (RA.isPowerOf2()) {
2820 APInt LowBits = RA - 1;
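// e.g. for X srem 8, LowBits = 7: the low three bits of X pass through
// unchanged, and the sign of X determines the remaining high bits.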
2821 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2823 // The low bits of the first operand are unchanged by the srem.
2824 Known.Zero = Known2.Zero & LowBits;
2825 Known.One = Known2.One & LowBits;
2827 // If the first operand is non-negative or has all low bits zero, then
2828 // the upper bits are all zero.
2829 if (Known2.Zero[BitWidth-1] || ((Known2.Zero & LowBits) == LowBits))
2830 Known.Zero |= ~LowBits;
2832 // If the first operand is negative and not all low bits are zero, then
2833 // the upper bits are all one.
2834 if (Known2.One[BitWidth-1] && ((Known2.One & LowBits) != 0))
2835 Known.One |= ~LowBits;
2836 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
2839 break;
2840 case ISD::UREM: {
2841 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2842 const APInt &RA = Rem->getAPIntValue();
2843 if (RA.isPowerOf2()) {
2844 APInt LowBits = (RA - 1);
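// e.g. X urem 8 is X & 7, so the upper BitWidth - 3 bits are known zero
// and the low three bits are exactly those of X.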
2845 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2847 // The upper bits are all zero, the lower ones are unchanged.
2848 Known.Zero = Known2.Zero | ~LowBits;
2849 Known.One = Known2.One & LowBits;
2850 break;
2854 // Since the result is less than or equal to either operand, any leading
2855 // zero bits in either operand must also exist in the result.
2856 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2857 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2859 uint32_t Leaders =
2860 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
2861 Known.resetAll();
2862 Known.Zero.setHighBits(Leaders);
2863 break;
2865 case ISD::EXTRACT_ELEMENT: {
2866 Known = computeKnownBits(Op.getOperand(0), Depth+1);
2867 const unsigned Index = Op.getConstantOperandVal(1);
2868 const unsigned BitWidth = Op.getValueSizeInBits();
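// e.g. extracting element 1 of an i64 as an i32 returns bits [32, 64),
// so drop the known bits below the extracted part and truncate the rest.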
2870 // Remove low part of known bits mask
2871 Known.Zero = Known.Zero.getHiBits(Known.Zero.getBitWidth() - Index * BitWidth);
2872 Known.One = Known.One.getHiBits(Known.One.getBitWidth() - Index * BitWidth);
2874 // Remove high part of known bit mask
2875 Known = Known.trunc(BitWidth);
2876 break;
2878 case ISD::EXTRACT_VECTOR_ELT: {
2879 SDValue InVec = Op.getOperand(0);
2880 SDValue EltNo = Op.getOperand(1);
2881 EVT VecVT = InVec.getValueType();
2882 const unsigned BitWidth = Op.getValueSizeInBits();
2883 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
2884 const unsigned NumSrcElts = VecVT.getVectorNumElements();
2885 // If BitWidth > EltBitWidth the value is anyext'd, so we do not know
2886 // anything about the extended bits.
2887 if (BitWidth > EltBitWidth)
2888 Known = Known.trunc(EltBitWidth);
2889 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
2890 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
2891 // If we know the element index, just demand that vector element.
2892 unsigned Idx = ConstEltNo->getZExtValue();
2893 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
2894 Known = computeKnownBits(InVec, DemandedElt, Depth + 1);
2895 } else {
2896 // Unknown element index, so ignore DemandedElts and demand them all.
2897 Known = computeKnownBits(InVec, Depth + 1);
2899 if (BitWidth > EltBitWidth)
2900 Known = Known.zext(BitWidth);
2901 break;
2903 case ISD::INSERT_VECTOR_ELT: {
2904 SDValue InVec = Op.getOperand(0);
2905 SDValue InVal = Op.getOperand(1);
2906 SDValue EltNo = Op.getOperand(2);
2908 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
2909 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
2910 // If we know the element index, split the demand between the
2911 // source vector and the inserted element.
2912 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth);
2913 unsigned EltIdx = CEltNo->getZExtValue();
2915 // If we demand the inserted element then add its common known bits.
2916 if (DemandedElts[EltIdx]) {
2917 Known2 = computeKnownBits(InVal, Depth + 1);
2918 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
2919 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
2922 // If we demand the source vector then add its common known bits, ensuring
2923 // that we don't demand the inserted element.
2924 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
2925 if (!!VectorElts) {
2926 Known2 = computeKnownBits(InVec, VectorElts, Depth + 1);
2927 Known.One &= Known2.One;
2928 Known.Zero &= Known2.Zero;
2930 } else {
2931 // Unknown element index, so ignore DemandedElts and demand them all.
2932 Known = computeKnownBits(InVec, Depth + 1);
2933 Known2 = computeKnownBits(InVal, Depth + 1);
2934 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
2935 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
2937 break;
2939 case ISD::BITREVERSE: {
2940 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2941 Known.Zero = Known2.Zero.reverseBits();
2942 Known.One = Known2.One.reverseBits();
2943 break;
2945 case ISD::BSWAP: {
2946 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2947 Known.Zero = Known2.Zero.byteSwap();
2948 Known.One = Known2.One.byteSwap();
2949 break;
2951 case ISD::ABS: {
2952 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2954 // If the source's MSB is zero then we know the rest of the bits already.
2955 if (Known2.isNonNegative()) {
2956 Known.Zero = Known2.Zero;
2957 Known.One = Known2.One;
2958 break;
2961 // We know that the absolute value's MSB will be zero iff there is
2962 // a set bit that isn't the sign bit (otherwise it could be INT_MIN).
2963 Known2.One.clearSignBit();
2964 if (Known2.One.getBoolValue()) {
2965 Known.Zero = APInt::getSignMask(BitWidth);
2966 break;
2968 break;
2970 case ISD::UMIN: {
2971 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2972 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2974 // UMIN - we know that the result will have the maximum of the
2975 // known zero leading bits of the inputs.
2976 unsigned LeadZero = Known.countMinLeadingZeros();
2977 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros());
2979 Known.Zero &= Known2.Zero;
2980 Known.One &= Known2.One;
2981 Known.Zero.setHighBits(LeadZero);
2982 break;
2984 case ISD::UMAX: {
2985 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2986 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2988 // UMAX - we know that the result will have the maximum of the
2989 // known one leading bits of the inputs.
2990 unsigned LeadOne = Known.countMinLeadingOnes();
2991 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes());
2993 Known.Zero &= Known2.Zero;
2994 Known.One &= Known2.One;
2995 Known.One.setHighBits(LeadOne);
2996 break;
2998 case ISD::SMIN:
2999 case ISD::SMAX: {
3000 // If we have a clamp pattern, we know that the number of sign bits will be
3001 // the minimum of the clamp min/max range.
3002 bool IsMax = (Opcode == ISD::SMAX);
3003 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3004 if ((CstLow = isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)))
3005 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3006 CstHigh = isConstOrDemandedConstSplat(Op.getOperand(0).getOperand(1),
3007 DemandedElts);
3008 if (CstLow && CstHigh) {
3009 if (!IsMax)
3010 std::swap(CstLow, CstHigh);
3012 const APInt &ValueLow = CstLow->getAPIntValue();
3013 const APInt &ValueHigh = CstHigh->getAPIntValue();
3014 if (ValueLow.sle(ValueHigh)) {
3015 unsigned LowSignBits = ValueLow.getNumSignBits();
3016 unsigned HighSignBits = ValueHigh.getNumSignBits();
3017 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3018 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3019 Known.One.setHighBits(MinSignBits);
3020 break;
3022 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3023 Known.Zero.setHighBits(MinSignBits);
3024 break;
3029 // Fallback - just get the shared known bits of the operands.
3030 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3031 if (Known.isUnknown()) break; // Early-out
3032 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3033 Known.Zero &= Known2.Zero;
3034 Known.One &= Known2.One;
3035 break;
3037 case ISD::FrameIndex:
3038 case ISD::TargetFrameIndex:
3039 TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth);
3040 break;
3042 default:
3043 if (Opcode < ISD::BUILTIN_OP_END)
3044 break;
3045 LLVM_FALLTHROUGH;
3046 case ISD::INTRINSIC_WO_CHAIN:
3047 case ISD::INTRINSIC_W_CHAIN:
3048 case ISD::INTRINSIC_VOID:
3049 // Allow the target to implement this method for its nodes.
3050 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3051 break;
3054 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3055 return Known;
3058 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3059 SDValue N1) const {
3060 // X + 0 never overflows
3061 if (isNullConstant(N1))
3062 return OFK_Never;
3064 KnownBits N1Known;
3065 computeKnownBits(N1, N1Known);
3066 if (N1Known.Zero.getBoolValue()) {
3067 KnownBits N0Known;
3068 computeKnownBits(N0, N0Known);
3070 bool overflow;
3071 (void)(~N0Known.Zero).uadd_ov(~N1Known.Zero, overflow);
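// ~Known.Zero is the largest value each operand can possibly take, so if
// even the two maxima add without unsigned wrap, the sum can never overflow.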
3072 if (!overflow)
3073 return OFK_Never;
3076 // mulhi + 1 never overflows
3077 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3078 (~N1Known.Zero & 0x01) == ~N1Known.Zero)
3079 return OFK_Never;
3081 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3082 KnownBits N0Known;
3083 computeKnownBits(N0, N0Known);
3085 if ((~N0Known.Zero & 0x01) == ~N0Known.Zero)
3086 return OFK_Never;
3089 return OFK_Sometime;
3092 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3093 EVT OpVT = Val.getValueType();
3094 unsigned BitWidth = OpVT.getScalarSizeInBits();
3096 // Is the constant a known power of 2?
3097 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3098 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3100 // A left-shift of a constant one will have exactly one bit set because
3101 // shifting the bit off the end is undefined.
3102 if (Val.getOpcode() == ISD::SHL) {
3103 auto *C = isConstOrConstSplat(Val.getOperand(0));
3104 if (C && C->getAPIntValue() == 1)
3105 return true;
3108 // Similarly, a logical right-shift of a constant sign-bit will have exactly
3109 // one bit set.
3110 if (Val.getOpcode() == ISD::SRL) {
3111 auto *C = isConstOrConstSplat(Val.getOperand(0));
3112 if (C && C->getAPIntValue().isSignMask())
3113 return true;
3116 // Are all operands of a build vector constant powers of two?
3117 if (Val.getOpcode() == ISD::BUILD_VECTOR)
3118 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
3119 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
3120 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3121 return false;
3123 return true;
3125 // More could be done here, though the above checks are enough
3126 // to handle some common cases.
3128 // Fall back to computeKnownBits to catch other known cases.
3129 KnownBits Known = computeKnownBits(Val);
3130 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
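/// ComputeNumSignBits - Return the number of times the sign bit of the
/// value is replicated into the other bits. For example, the i16 value
/// 0xFFFE has 15 sign bits (fifteen ones followed by a zero).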
3133 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
3134 EVT VT = Op.getValueType();
3135 APInt DemandedElts = VT.isVector()
3136 ? APInt::getAllOnesValue(VT.getVectorNumElements())
3137 : APInt(1, 1);
3138 return ComputeNumSignBits(Op, DemandedElts, Depth);
3141 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
3142 unsigned Depth) const {
3143 EVT VT = Op.getValueType();
3144 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
3145 unsigned VTBits = VT.getScalarSizeInBits();
3146 unsigned NumElts = DemandedElts.getBitWidth();
3147 unsigned Tmp, Tmp2;
3148 unsigned FirstAnswer = 1;
3150 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3151 const APInt &Val = C->getAPIntValue();
3152 return Val.getNumSignBits();
3155 if (Depth == 6)
3156 return 1; // Limit search depth.
3158 if (!DemandedElts)
3159 return 1; // No demanded elts, better to assume we don't know anything.
3161 unsigned Opcode = Op.getOpcode();
3162 switch (Opcode) {
3163 default: break;
3164 case ISD::AssertSext:
3165 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3166 return VTBits-Tmp+1;
3167 case ISD::AssertZext:
3168 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3169 return VTBits-Tmp;
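       // Worked example for the asserts above: on an i32, AssertSext i8
       // guarantees 32 - 8 + 1 = 25 sign bits, while AssertZext i8 zeros
       // bits 8..31 and so guarantees 32 - 8 = 24 sign bits.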
3171 case ISD::BUILD_VECTOR:
3172 Tmp = VTBits;
3173 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
3174 if (!DemandedElts[i])
3175 continue;
3177 SDValue SrcOp = Op.getOperand(i);
3178 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1);
3180       // BUILD_VECTOR can implicitly truncate sources; we must handle this.
3181 if (SrcOp.getValueSizeInBits() != VTBits) {
3182 assert(SrcOp.getValueSizeInBits() > VTBits &&
3183 "Expected BUILD_VECTOR implicit truncation");
3184 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
3185 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
3187 Tmp = std::min(Tmp, Tmp2);
3189 return Tmp;
3191 case ISD::VECTOR_SHUFFLE: {
3192 // Collect the minimum number of sign bits that are shared by every vector
3193 // element referenced by the shuffle.
3194 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3195 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3196 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3197 for (unsigned i = 0; i != NumElts; ++i) {
3198 int M = SVN->getMaskElt(i);
3199 if (!DemandedElts[i])
3200 continue;
3201 // For UNDEF elements, we don't know anything about the common state of
3202 // the shuffle result.
3203 if (M < 0)
3204 return 1;
3205 if ((unsigned)M < NumElts)
3206 DemandedLHS.setBit((unsigned)M % NumElts);
3207 else
3208 DemandedRHS.setBit((unsigned)M % NumElts);
3210 Tmp = std::numeric_limits<unsigned>::max();
3211 if (!!DemandedLHS)
3212 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3213 if (!!DemandedRHS) {
3214 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3215 Tmp = std::min(Tmp, Tmp2);
3217 // If we don't know anything, early out and try computeKnownBits fall-back.
3218 if (Tmp == 1)
3219 break;
3220 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3221 return Tmp;
3224 case ISD::BITCAST: {
3225 SDValue N0 = Op.getOperand(0);
3226 EVT SrcVT = N0.getValueType();
3227 unsigned SrcBits = SrcVT.getScalarSizeInBits();
3229     // Ignore bitcasts from unsupported types.
3230 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3231 break;
3233 // Fast handling of 'identity' bitcasts.
3234 if (VTBits == SrcBits)
3235 return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3237 bool IsLE = getDataLayout().isLittleEndian();
3239 // Bitcast 'large element' scalar/vector to 'small element' vector.
3240 if ((SrcBits % VTBits) == 0) {
3241 assert(VT.isVector() && "Expected bitcast to vector");
3243 unsigned Scale = SrcBits / VTBits;
3244 APInt SrcDemandedElts(NumElts / Scale, 0);
3245 for (unsigned i = 0; i != NumElts; ++i)
3246 if (DemandedElts[i])
3247 SrcDemandedElts.setBit(i / Scale);
3249 // Fast case - sign splat can be simply split across the small elements.
3250 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
3251 if (Tmp == SrcBits)
3252 return VTBits;
3254 // Slow case - determine how far the sign extends into each sub-element.
3255 Tmp2 = VTBits;
3256 for (unsigned i = 0; i != NumElts; ++i)
3257 if (DemandedElts[i]) {
3258 unsigned SubOffset = i % Scale;
3259 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
3260 SubOffset = SubOffset * VTBits;
3261 if (Tmp <= SubOffset)
3262 return 1;
3263 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
3265 return Tmp2;
3267 break;
3270 case ISD::SIGN_EXTEND:
3271 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3272 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3273 case ISD::SIGN_EXTEND_INREG:
3274 // Max of the input and what this extends.
3275 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3276 Tmp = VTBits-Tmp+1;
3277 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3278 return std::max(Tmp, Tmp2);
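       // Worked example: sext_inreg(x, i8) on an i32 guarantees at least
       // 32 - 8 + 1 = 25 sign bits, but if x is already known to have more
       // (say 30), the larger answer wins.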
3279 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3280 SDValue Src = Op.getOperand(0);
3281 EVT SrcVT = Src.getValueType();
3282 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
3283 Tmp = VTBits - SrcVT.getScalarSizeInBits();
3284 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3287 case ISD::SRA:
3288 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3289 // SRA X, C -> adds C sign bits.
3290 if (ConstantSDNode *C =
3291 isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) {
3292 APInt ShiftVal = C->getAPIntValue();
3293 ShiftVal += Tmp;
3294 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
3296 return Tmp;
3297 case ISD::SHL:
3298 if (ConstantSDNode *C =
3299 isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) {
3300 // shl destroys sign bits.
3301 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3302 if (C->getAPIntValue().uge(VTBits) || // Bad shift.
3303 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out.
3304 return Tmp - C->getZExtValue();
3306 break;
3307 case ISD::AND:
3308 case ISD::OR:
3309 case ISD::XOR: // NOT is handled here.
3310     // Logical binary ops preserve the number of sign bits at worst.
3311 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3312 if (Tmp != 1) {
3313 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3314 FirstAnswer = std::min(Tmp, Tmp2);
3315 // We computed what we know about the sign bits as our first
3316 // answer. Now proceed to the generic code that uses
3317 // computeKnownBits, and pick whichever answer is better.
3319 break;
3321 case ISD::SELECT:
3322 case ISD::VSELECT:
3323 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3324 if (Tmp == 1) return 1; // Early out.
3325 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3326 return std::min(Tmp, Tmp2);
3327 case ISD::SELECT_CC:
3328 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3329 if (Tmp == 1) return 1; // Early out.
3330 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
3331 return std::min(Tmp, Tmp2);
3333 case ISD::SMIN:
3334 case ISD::SMAX: {
3335 // If we have a clamp pattern, we know that the number of sign bits will be
3336 // the minimum of the clamp min/max range.
3337 bool IsMax = (Opcode == ISD::SMAX);
3338 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3339 if ((CstLow = isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)))
3340 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3341 CstHigh = isConstOrDemandedConstSplat(Op.getOperand(0).getOperand(1),
3342 DemandedElts);
3343 if (CstLow && CstHigh) {
3344 if (!IsMax)
3345 std::swap(CstLow, CstHigh);
3346 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
3347 Tmp = CstLow->getAPIntValue().getNumSignBits();
3348 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
3349 return std::min(Tmp, Tmp2);
3353 // Fallback - just get the minimum number of sign bits of the operands.
3354 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3355 if (Tmp == 1)
3356 return 1; // Early out.
3357 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3358 return std::min(Tmp, Tmp2);
3360 case ISD::UMIN:
3361 case ISD::UMAX:
3362 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3363 if (Tmp == 1)
3364 return 1; // Early out.
3365 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3366 return std::min(Tmp, Tmp2);
3367 case ISD::SADDO:
3368 case ISD::UADDO:
3369 case ISD::SSUBO:
3370 case ISD::USUBO:
3371 case ISD::SMULO:
3372 case ISD::UMULO:
3373 if (Op.getResNo() != 1)
3374 break;
3375 // The boolean result conforms to getBooleanContents. Fall through.
3376 // If setcc returns 0/-1, all bits are sign bits.
3377     // We know that we have an integer-based boolean since these operations
3378     // are only available for integers.
3379 if (TLI->getBooleanContents(VT.isVector(), false) ==
3380 TargetLowering::ZeroOrNegativeOneBooleanContent)
3381 return VTBits;
3382 break;
3383 case ISD::SETCC:
3384 // If setcc returns 0/-1, all bits are sign bits.
3385 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3386 TargetLowering::ZeroOrNegativeOneBooleanContent)
3387 return VTBits;
3388 break;
3389 case ISD::ROTL:
3390 case ISD::ROTR:
3391 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3392 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3394       // Handle rotate right by N like a rotate left by (VTBits - N).
3395 if (Opcode == ISD::ROTR)
3396 RotAmt = (VTBits - RotAmt) % VTBits;
3398 // If we aren't rotating out all of the known-in sign bits, return the
3399 // number that are left. This handles rotl(sext(x), 1) for example.
3400 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3401 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3403 break;
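     // Illustrative example for the rotate case above: rotl(sext(i8 x to
     // i32), 1) starts with at least 25 sign bits and still has
     // 25 - 1 = 24 identical top bits after the rotate.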
3404 case ISD::ADD:
3405 case ISD::ADDC:
3406 // Add can have at most one carry bit. Thus we know that the output
3407 // is, at worst, one more bit than the inputs.
3408 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3409 if (Tmp == 1) return 1; // Early out.
3411 // Special case decrementing a value (ADD X, -1):
3412 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3413 if (CRHS->isAllOnesValue()) {
3414 KnownBits Known = computeKnownBits(Op.getOperand(0), Depth+1);
3416 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3417 // sign bits set.
3418 if ((Known.Zero | 1).isAllOnesValue())
3419 return VTBits;
3421 // If we are subtracting one from a positive number, there is no carry
3422 // out of the result.
3423 if (Known.isNonNegative())
3424 return Tmp;
3427 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3428 if (Tmp2 == 1) return 1;
3429 return std::min(Tmp, Tmp2)-1;
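     // Illustrative example for the ADD case above: adding two i32 values
     // with 20 sign bits each may carry into the sign portion, so only
     // min(20, 20) - 1 = 19 sign bits are guaranteed.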
3431 case ISD::SUB:
3432 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3433 if (Tmp2 == 1) return 1;
3435 // Handle NEG.
3436 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
3437 if (CLHS->isNullValue()) {
3438 KnownBits Known = computeKnownBits(Op.getOperand(1), Depth+1);
3439 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3440 // sign bits set.
3441 if ((Known.Zero | 1).isAllOnesValue())
3442 return VTBits;
3444 // If the input is known to be positive (the sign bit is known clear),
3445 // the output of the NEG has the same number of sign bits as the input.
3446 if (Known.isNonNegative())
3447 return Tmp2;
3449 // Otherwise, we treat this like a SUB.
3452 // Sub can have at most one carry bit. Thus we know that the output
3453 // is, at worst, one more bit than the inputs.
3454 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3455 if (Tmp == 1) return 1; // Early out.
3456 return std::min(Tmp, Tmp2)-1;
3457 case ISD::TRUNCATE: {
3458     // Check whether the sign bits of the source reach down into the
         // truncated value.
3459 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3460 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3461 if (NumSrcSignBits > (NumSrcBits - VTBits))
3462 return NumSrcSignBits - (NumSrcBits - VTBits);
3463 break;
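     // Worked example: truncating an i64 with 40 sign bits to i32 drops
     // the top 32 bits, leaving 40 - (64 - 32) = 8 guaranteed sign bits.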
3465 case ISD::EXTRACT_ELEMENT: {
3466 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3467 const int BitWidth = Op.getValueSizeInBits();
3468 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3470     // Get the reverse index (counting from the big end): operand 1 indexes
3471     // elements from the little end, while the sign bits start at the big end.
3472 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3474     // If the sign portion ends in our element, the subtraction gives the
3475     // correct result; otherwise the clamp below fixes the out-of-range value.
3476 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
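     // Worked example: for an i64 with 40 sign bits split into two i32
     // halves, the high half (rIndex = 0) yields min(40, 32) = 32 sign
     // bits and the low half (rIndex = 1) yields 40 - 32 = 8.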
3478 case ISD::INSERT_VECTOR_ELT: {
3479 SDValue InVec = Op.getOperand(0);
3480 SDValue InVal = Op.getOperand(1);
3481 SDValue EltNo = Op.getOperand(2);
3482 unsigned NumElts = InVec.getValueType().getVectorNumElements();
3484 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3485 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3486 // If we know the element index, split the demand between the
3487 // source vector and the inserted element.
3488 unsigned EltIdx = CEltNo->getZExtValue();
3490 // If we demand the inserted element then get its sign bits.
3491 Tmp = std::numeric_limits<unsigned>::max();
3492 if (DemandedElts[EltIdx]) {
3493 // TODO - handle implicit truncation of inserted elements.
3494 if (InVal.getScalarValueSizeInBits() != VTBits)
3495 break;
3496 Tmp = ComputeNumSignBits(InVal, Depth + 1);
3499 // If we demand the source vector then get its sign bits, and determine
3500 // the minimum.
3501 APInt VectorElts = DemandedElts;
3502 VectorElts.clearBit(EltIdx);
3503 if (!!VectorElts) {
3504 Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1);
3505 Tmp = std::min(Tmp, Tmp2);
3507 } else {
3508 // Unknown element index, so ignore DemandedElts and demand them all.
3509 Tmp = ComputeNumSignBits(InVec, Depth + 1);
3510 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3511 Tmp = std::min(Tmp, Tmp2);
3513 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3514 return Tmp;
3516 case ISD::EXTRACT_VECTOR_ELT: {
3517 SDValue InVec = Op.getOperand(0);
3518 SDValue EltNo = Op.getOperand(1);
3519 EVT VecVT = InVec.getValueType();
3520 const unsigned BitWidth = Op.getValueSizeInBits();
3521 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3522 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3524     // If BitWidth > EltBitWidth the value is any-extended, and we do not know
3525     // anything about the sign bits. But if the sizes match we can derive
3526     // knowledge about the sign bits from the vector operand.
3527 if (BitWidth != EltBitWidth)
3528 break;
3530 // If we know the element index, just demand that vector element, else for
3531 // an unknown element index, ignore DemandedElts and demand them all.
3532 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3533 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3534 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3535 DemandedSrcElts =
3536 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3538 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3540 case ISD::EXTRACT_SUBVECTOR: {
3541     // If we know the element index, just demand those subvector elements,
3542 // otherwise demand them all.
3543 SDValue Src = Op.getOperand(0);
3544 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
3545 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3546 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
3547 // Offset the demanded elts by the subvector index.
3548 uint64_t Idx = SubIdx->getZExtValue();
3549 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
3550 return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
3552 return ComputeNumSignBits(Src, Depth + 1);
3554 case ISD::CONCAT_VECTORS:
3555 // Determine the minimum number of sign bits across all demanded
3556 // elts of the input vectors. Early out if the result is already 1.
3557 Tmp = std::numeric_limits<unsigned>::max();
3558 EVT SubVectorVT = Op.getOperand(0).getValueType();
3559 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3560 unsigned NumSubVectors = Op.getNumOperands();
3561 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3562 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3563 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3564 if (!DemandedSub)
3565 continue;
3566 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3567 Tmp = std::min(Tmp, Tmp2);
3569 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3570 return Tmp;
3573 // If we are looking at the loaded value of the SDNode.
3574 if (Op.getResNo() == 0) {
3575     // Handle LOADX separately here. The EXTLOAD case will fall through.
3576 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
3577 unsigned ExtType = LD->getExtensionType();
3578 switch (ExtType) {
3579 default: break;
3580 case ISD::SEXTLOAD: // '17' bits known
3581 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3582 return VTBits-Tmp+1;
3583 case ISD::ZEXTLOAD: // '16' bits known
3584 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3585 return VTBits-Tmp;
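       // Worked example for the '17'/'16' comments above: an i32 SEXTLOAD
       // of an i16 guarantees 32 - 16 + 1 = 17 sign bits; an i32 ZEXTLOAD
       // of an i16 guarantees 32 - 16 = 16.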
3590 // Allow the target to implement this method for its nodes.
3591 if (Opcode >= ISD::BUILTIN_OP_END ||
3592 Opcode == ISD::INTRINSIC_WO_CHAIN ||
3593 Opcode == ISD::INTRINSIC_W_CHAIN ||
3594 Opcode == ISD::INTRINSIC_VOID) {
3595 unsigned NumBits =
3596 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
3597 if (NumBits > 1)
3598 FirstAnswer = std::max(FirstAnswer, NumBits);
3601 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3602 // use this information.
3603 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
3605 APInt Mask;
3606 if (Known.isNonNegative()) { // sign bit is 0
3607 Mask = Known.Zero;
3608   } else if (Known.isNegative()) {  // sign bit is 1
3609 Mask = Known.One;
3610 } else {
3611 // Nothing known.
3612 return FirstAnswer;
3615 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
3616 // the number of identical bits in the top of the input value.
3617 Mask = ~Mask;
3618 Mask <<= Mask.getBitWidth()-VTBits;
3619   // Return the number of leading zeros. We use 'min' here in case Val was
3620   // zero before shifting; we don't want to return e.g. 64 for an i32 0.
3621 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
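   // Illustrative example: if the sign bit is known zero and the top 12
   // bits of an i32 are known zero, ~Mask has at least 12 leading zeros
   // after re-alignment, so at least 12 sign bits are reported.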
3624 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
3625 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3626 !isa<ConstantSDNode>(Op.getOperand(1)))
3627 return false;
3629 if (Op.getOpcode() == ISD::OR &&
3630 !MaskedValueIsZero(Op.getOperand(0),
3631 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
3632 return false;
3634 return true;
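   // Illustrative example for the OR-as-ADD test above: (or (shl x, 4), 7)
   // acts like (add (shl x, 4), 7) because the low four bits of the shifted
   // base are known zero, so the OR cannot carry into the base bits.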
3637 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
3638 // If we're told that NaNs won't happen, assume they won't.
3639 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
3640 return true;
3642 if (Depth == 6)
3643 return false; // Limit search depth.
3645 // TODO: Handle vectors.
3646 // If the value is a constant, we can obviously see if it is a NaN or not.
3647 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
3648 return !C->getValueAPF().isNaN() ||
3649 (SNaN && !C->getValueAPF().isSignaling());
3652 unsigned Opcode = Op.getOpcode();
3653 switch (Opcode) {
3654 case ISD::FADD:
3655 case ISD::FSUB:
3656 case ISD::FMUL:
3657 case ISD::FDIV:
3658 case ISD::FREM:
3659 case ISD::FSIN:
3660 case ISD::FCOS: {
3661 if (SNaN)
3662 return true;
3663 // TODO: Need isKnownNeverInfinity
3664 return false;
3666 case ISD::FCANONICALIZE:
3667 case ISD::FEXP:
3668 case ISD::FEXP2:
3669 case ISD::FTRUNC:
3670 case ISD::FFLOOR:
3671 case ISD::FCEIL:
3672 case ISD::FROUND:
3673 case ISD::FRINT:
3674 case ISD::FNEARBYINT: {
3675 if (SNaN)
3676 return true;
3677 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
3679 case ISD::FABS:
3680 case ISD::FNEG:
3681 case ISD::FCOPYSIGN: {
3682 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
3684 case ISD::SELECT:
3685 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
3686 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
3687 case ISD::FP_EXTEND:
3688 case ISD::FP_ROUND: {
3689 if (SNaN)
3690 return true;
3691 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
3693 case ISD::SINT_TO_FP:
3694 case ISD::UINT_TO_FP:
3695 return true;
3696 case ISD::FMA:
3697 case ISD::FMAD: {
3698 if (SNaN)
3699 return true;
3700 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
3701 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
3702 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
3704   case ISD::FSQRT:   // Needs the operand known non-negative
3705 case ISD::FLOG:
3706 case ISD::FLOG2:
3707 case ISD::FLOG10:
3708 case ISD::FPOWI:
3709 case ISD::FPOW: {
3710 if (SNaN)
3711 return true;
3712 // TODO: Refine on operand
3713 return false;
3716 // TODO: Handle FMINNUM/FMAXNUM/FMINNAN/FMAXNAN when there is an agreement on
3717 // what they should do.
3718 case ISD::EXTRACT_VECTOR_ELT: {
3719 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
3721 default:
3722 if (Opcode >= ISD::BUILTIN_OP_END ||
3723 Opcode == ISD::INTRINSIC_WO_CHAIN ||
3724 Opcode == ISD::INTRINSIC_W_CHAIN ||
3725 Opcode == ISD::INTRINSIC_VOID) {
3726 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
3729 return false;
3733 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
3734 assert(Op.getValueType().isFloatingPoint() &&
3735 "Floating point type expected");
3737 // If the value is a constant, we can obviously see if it is a zero or not.
3738 // TODO: Add BuildVector support.
3739 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3740 return !C->isZero();
3741 return false;
3744 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
3745 assert(!Op.getValueType().isFloatingPoint() &&
3746 "Floating point types unsupported - use isKnownNeverZeroFloat");
3748 // If the value is a constant, we can obviously see if it is a zero or not.
3749 if (ISD::matchUnaryPredicate(
3750 Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
3751 return true;
3753 // TODO: Recognize more cases here.
3754 switch (Op.getOpcode()) {
3755 default: break;
3756 case ISD::OR:
3757 if (isKnownNeverZero(Op.getOperand(1)) ||
3758 isKnownNeverZero(Op.getOperand(0)))
3759 return true;
3760 break;
3763 return false;
3766 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
3767 // Check the obvious case.
3768 if (A == B) return true;
3770   // Check for negative and positive zero.
3771 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
3772 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
3773 if (CA->isZero() && CB->isZero()) return true;
3775 // Otherwise they may not be equal.
3776 return false;
3779 // FIXME: unify with llvm::haveNoCommonBitsSet.
3780 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
3781 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
3782 assert(A.getValueType() == B.getValueType() &&
3783 "Values must have the same type");
3784 return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
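   // Illustrative example: for A = (and x, 0xFF00) and B = (and y, 0xFF),
   // every bit is known zero in at least one of the two values, so the
   // union of the Known.Zero masks is all ones and the values share no
   // set bits.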
3787 static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
3788 ArrayRef<SDValue> Ops,
3789 SelectionDAG &DAG) {
3790 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
3791 assert(llvm::all_of(Ops,
3792 [Ops](SDValue Op) {
3793 return Ops[0].getValueType() == Op.getValueType();
3794 }) &&
3795 "Concatenation of vectors with inconsistent value types!");
3796 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
3797 VT.getVectorNumElements() &&
3798 "Incorrect element count in vector concatenation!");
3800 if (Ops.size() == 1)
3801 return Ops[0];
3803 // Concat of UNDEFs is UNDEF.
3804 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
3805 return DAG.getUNDEF(VT);
3807   // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
3808   // simplified to one big BUILD_VECTOR.
3809 // FIXME: Add support for SCALAR_TO_VECTOR as well.
3810 EVT SVT = VT.getScalarType();
3811 SmallVector<SDValue, 16> Elts;
3812 for (SDValue Op : Ops) {
3813 EVT OpVT = Op.getValueType();
3814 if (Op.isUndef())
3815 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
3816 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
3817 Elts.append(Op->op_begin(), Op->op_end());
3818 else
3819 return SDValue();
3822   // BUILD_VECTOR requires all inputs to be of the same type; find the
3823   // maximum type and extend them all.
3824 for (SDValue Op : Elts)
3825 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
3827 if (SVT.bitsGT(VT.getScalarType()))
3828 for (SDValue &Op : Elts)
3829 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
3830 ? DAG.getZExtOrTrunc(Op, DL, SVT)
3831 : DAG.getSExtOrTrunc(Op, DL, SVT);
3833 SDValue V = DAG.getBuildVector(VT, DL, Elts);
3834 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
3835 return V;
3838 /// Gets or creates the specified node.
3839 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
3840 FoldingSetNodeID ID;
3841 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
3842 void *IP = nullptr;
3843 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
3844 return SDValue(E, 0);
3846 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
3847 getVTList(VT));
3848 CSEMap.InsertNode(N, IP);
3850 InsertNode(N);
3851 SDValue V = SDValue(N, 0);
3852 NewSDValueDbgMsg(V, "Creating new node: ", this);
3853 return V;
3856 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
3857 SDValue Operand, const SDNodeFlags Flags) {
3858   // Constant fold unary operations with an integer constant operand. Even
3859   // opaque constants will be folded, because the folding of unary operations
3860 // doesn't create new constants with different values. Nevertheless, the
3861 // opaque flag is preserved during folding to prevent future folding with
3862 // other constants.
3863 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
3864 const APInt &Val = C->getAPIntValue();
3865 switch (Opcode) {
3866 default: break;
3867 case ISD::SIGN_EXTEND:
3868 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
3869 C->isTargetOpcode(), C->isOpaque());
3870 case ISD::ANY_EXTEND:
3871 case ISD::ZERO_EXTEND:
3872 case ISD::TRUNCATE:
3873 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
3874 C->isTargetOpcode(), C->isOpaque());
3875 case ISD::UINT_TO_FP:
3876 case ISD::SINT_TO_FP: {
3877 APFloat apf(EVTToAPFloatSemantics(VT),
3878 APInt::getNullValue(VT.getSizeInBits()));
3879 (void)apf.convertFromAPInt(Val,
3880 Opcode==ISD::SINT_TO_FP,
3881 APFloat::rmNearestTiesToEven);
3882 return getConstantFP(apf, DL, VT);
3884 case ISD::BITCAST:
3885 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
3886 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
3887 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
3888 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
3889 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
3890 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
3891 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
3892 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
3893 break;
3894 case ISD::ABS:
3895 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
3896 C->isOpaque());
3897 case ISD::BITREVERSE:
3898 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
3899 C->isOpaque());
3900 case ISD::BSWAP:
3901 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
3902 C->isOpaque());
3903 case ISD::CTPOP:
3904 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
3905 C->isOpaque());
3906 case ISD::CTLZ:
3907 case ISD::CTLZ_ZERO_UNDEF:
3908 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
3909 C->isOpaque());
3910 case ISD::CTTZ:
3911 case ISD::CTTZ_ZERO_UNDEF:
3912 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
3913 C->isOpaque());
3914 case ISD::FP16_TO_FP: {
3915 bool Ignored;
3916 APFloat FPV(APFloat::IEEEhalf(),
3917 (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
3919 // This can return overflow, underflow, or inexact; we don't care.
3920 // FIXME need to be more flexible about rounding mode.
3921 (void)FPV.convert(EVTToAPFloatSemantics(VT),
3922 APFloat::rmNearestTiesToEven, &Ignored);
3923 return getConstantFP(FPV, DL, VT);
3928 // Constant fold unary operations with a floating point constant operand.
3929 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
3930 APFloat V = C->getValueAPF(); // make copy
3931 switch (Opcode) {
3932 case ISD::FNEG:
3933 V.changeSign();
3934 return getConstantFP(V, DL, VT);
3935 case ISD::FABS:
3936 V.clearSign();
3937 return getConstantFP(V, DL, VT);
3938 case ISD::FCEIL: {
3939 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
3940 if (fs == APFloat::opOK || fs == APFloat::opInexact)
3941 return getConstantFP(V, DL, VT);
3942 break;
3944 case ISD::FTRUNC: {
3945 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
3946 if (fs == APFloat::opOK || fs == APFloat::opInexact)
3947 return getConstantFP(V, DL, VT);
3948 break;
3950 case ISD::FFLOOR: {
3951 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
3952 if (fs == APFloat::opOK || fs == APFloat::opInexact)
3953 return getConstantFP(V, DL, VT);
3954 break;
3956 case ISD::FP_EXTEND: {
3957 bool ignored;
3958 // This can return overflow, underflow, or inexact; we don't care.
3959 // FIXME need to be more flexible about rounding mode.
3960 (void)V.convert(EVTToAPFloatSemantics(VT),
3961 APFloat::rmNearestTiesToEven, &ignored);
3962 return getConstantFP(V, DL, VT);
3964 case ISD::FP_TO_SINT:
3965 case ISD::FP_TO_UINT: {
3966 bool ignored;
3967 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
3968 // FIXME need to be more flexible about rounding mode.
3969 APFloat::opStatus s =
3970 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
3971 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
3972 break;
3973 return getConstant(IntVal, DL, VT);
3975 case ISD::BITCAST:
3976 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
3977 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
3978 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
3979 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
3980 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
3981 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
3982 break;
3983 case ISD::FP_TO_FP16: {
3984 bool Ignored;
3985 // This can return overflow, underflow, or inexact; we don't care.
3986 // FIXME need to be more flexible about rounding mode.
3987 (void)V.convert(APFloat::IEEEhalf(),
3988 APFloat::rmNearestTiesToEven, &Ignored);
3989 return getConstant(V.bitcastToAPInt(), DL, VT);
3994 // Constant fold unary operations with a vector integer or float operand.
3995 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
3996 if (BV->isConstant()) {
3997 switch (Opcode) {
3998 default:
3999 // FIXME: Entirely reasonable to perform folding of other unary
4000 // operations here as the need arises.
4001 break;
4002 case ISD::FNEG:
4003 case ISD::FABS:
4004 case ISD::FCEIL:
4005 case ISD::FTRUNC:
4006 case ISD::FFLOOR:
4007 case ISD::FP_EXTEND:
4008 case ISD::FP_TO_SINT:
4009 case ISD::FP_TO_UINT:
4010 case ISD::TRUNCATE:
4011 case ISD::ANY_EXTEND:
4012 case ISD::ZERO_EXTEND:
4013 case ISD::SIGN_EXTEND:
4014 case ISD::UINT_TO_FP:
4015 case ISD::SINT_TO_FP:
4016 case ISD::ABS:
4017 case ISD::BITREVERSE:
4018 case ISD::BSWAP:
4019 case ISD::CTLZ:
4020 case ISD::CTLZ_ZERO_UNDEF:
4021 case ISD::CTTZ:
4022 case ISD::CTTZ_ZERO_UNDEF:
4023 case ISD::CTPOP: {
4024 SDValue Ops = { Operand };
4025 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4026 return Fold;
4032 unsigned OpOpcode = Operand.getNode()->getOpcode();
4033 switch (Opcode) {
4034 case ISD::TokenFactor:
4035 case ISD::MERGE_VALUES:
4036 case ISD::CONCAT_VECTORS:
4037 return Operand; // Factor, merge or concat of one node? No need.
4038 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4039 case ISD::FP_EXTEND:
4040 assert(VT.isFloatingPoint() &&
4041 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4042 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4043 assert((!VT.isVector() ||
4044 VT.getVectorNumElements() ==
4045 Operand.getValueType().getVectorNumElements()) &&
4046 "Vector element count mismatch!");
4047 assert(Operand.getValueType().bitsLT(VT) &&
4048 "Invalid fpext node, dst < src!");
4049 if (Operand.isUndef())
4050 return getUNDEF(VT);
4051 break;
4052 case ISD::SIGN_EXTEND:
4053 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4054 "Invalid SIGN_EXTEND!");
4055 if (Operand.getValueType() == VT) return Operand; // noop extension
4056 assert((!VT.isVector() ||
4057 VT.getVectorNumElements() ==
4058 Operand.getValueType().getVectorNumElements()) &&
4059 "Vector element count mismatch!");
4060 assert(Operand.getValueType().bitsLT(VT) &&
4061 "Invalid sext node, dst < src!");
4062 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4063 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4064 else if (OpOpcode == ISD::UNDEF)
4065 // sext(undef) = 0, because the top bits will all be the same.
4066 return getConstant(0, DL, VT);
4067 break;
4068 case ISD::ZERO_EXTEND:
4069 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4070 "Invalid ZERO_EXTEND!");
4071 if (Operand.getValueType() == VT) return Operand; // noop extension
4072 assert((!VT.isVector() ||
4073 VT.getVectorNumElements() ==
4074 Operand.getValueType().getVectorNumElements()) &&
4075 "Vector element count mismatch!");
4076 assert(Operand.getValueType().bitsLT(VT) &&
4077 "Invalid zext node, dst < src!");
4078 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4079 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4080 else if (OpOpcode == ISD::UNDEF)
4081 // zext(undef) = 0, because the top bits will be zero.
4082 return getConstant(0, DL, VT);
4083 break;
4084 case ISD::ANY_EXTEND:
4085 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4086 "Invalid ANY_EXTEND!");
4087 if (Operand.getValueType() == VT) return Operand; // noop extension
4088 assert((!VT.isVector() ||
4089 VT.getVectorNumElements() ==
4090 Operand.getValueType().getVectorNumElements()) &&
4091 "Vector element count mismatch!");
4092 assert(Operand.getValueType().bitsLT(VT) &&
4093 "Invalid anyext node, dst < src!");
4095 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4096 OpOpcode == ISD::ANY_EXTEND)
4097 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4098 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4099 else if (OpOpcode == ISD::UNDEF)
4100 return getUNDEF(VT);
4102 // (ext (trunc x)) -> x
4103 if (OpOpcode == ISD::TRUNCATE) {
4104 SDValue OpOp = Operand.getOperand(0);
4105 if (OpOp.getValueType() == VT) {
4106 transferDbgValues(Operand, OpOp);
4107 return OpOp;
4110 break;
4111 case ISD::TRUNCATE:
4112 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4113 "Invalid TRUNCATE!");
4114 if (Operand.getValueType() == VT) return Operand; // noop truncate
4115 assert((!VT.isVector() ||
4116 VT.getVectorNumElements() ==
4117 Operand.getValueType().getVectorNumElements()) &&
4118 "Vector element count mismatch!");
4119 assert(Operand.getValueType().bitsGT(VT) &&
4120 "Invalid truncate node, src < dst!");
4121 if (OpOpcode == ISD::TRUNCATE)
4122 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4123 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4124 OpOpcode == ISD::ANY_EXTEND) {
4125 // If the source is smaller than the dest, we still need an extend.
4126 if (Operand.getOperand(0).getValueType().getScalarType()
4127 .bitsLT(VT.getScalarType()))
4128 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4129 if (Operand.getOperand(0).getValueType().bitsGT(VT))
4130 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4131 return Operand.getOperand(0);
4133 if (OpOpcode == ISD::UNDEF)
4134 return getUNDEF(VT);
4135 break;
4136 case ISD::ABS:
4137 assert(VT.isInteger() && VT == Operand.getValueType() &&
4138 "Invalid ABS!");
4139 if (OpOpcode == ISD::UNDEF)
4140 return getUNDEF(VT);
4141 break;
4142 case ISD::BSWAP:
4143 assert(VT.isInteger() && VT == Operand.getValueType() &&
4144 "Invalid BSWAP!");
4145 assert((VT.getScalarSizeInBits() % 16 == 0) &&
4146 "BSWAP types must be a multiple of 16 bits!");
4147 if (OpOpcode == ISD::UNDEF)
4148 return getUNDEF(VT);
4149 break;
4150 case ISD::BITREVERSE:
4151 assert(VT.isInteger() && VT == Operand.getValueType() &&
4152 "Invalid BITREVERSE!");
4153 if (OpOpcode == ISD::UNDEF)
4154 return getUNDEF(VT);
4155 break;
4156 case ISD::BITCAST:
4157 // Basic sanity checking.
4158 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
4159 "Cannot BITCAST between types of different sizes!");
4160 if (VT == Operand.getValueType()) return Operand; // noop conversion.
4161 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
4162 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
4163 if (OpOpcode == ISD::UNDEF)
4164 return getUNDEF(VT);
4165 break;
4166 case ISD::SCALAR_TO_VECTOR:
4167 assert(VT.isVector() && !Operand.getValueType().isVector() &&
4168 (VT.getVectorElementType() == Operand.getValueType() ||
4169 (VT.getVectorElementType().isInteger() &&
4170 Operand.getValueType().isInteger() &&
4171 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
4172 "Illegal SCALAR_TO_VECTOR node!");
4173 if (OpOpcode == ISD::UNDEF)
4174 return getUNDEF(VT);
4175 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
4176 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
4177 isa<ConstantSDNode>(Operand.getOperand(1)) &&
4178 Operand.getConstantOperandVal(1) == 0 &&
4179 Operand.getOperand(0).getValueType() == VT)
4180 return Operand.getOperand(0);
4181 break;
4182 case ISD::FNEG:
4183 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
4184 if ((getTarget().Options.UnsafeFPMath || Flags.hasNoSignedZeros()) &&
4185 OpOpcode == ISD::FSUB)
4186 return getNode(ISD::FSUB, DL, VT, Operand.getOperand(1),
4187 Operand.getOperand(0), Flags);
4188 if (OpOpcode == ISD::FNEG) // --X -> X
4189 return Operand.getOperand(0);
4190 break;
4191 case ISD::FABS:
4192 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
4193 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
4194 break;
4197 SDNode *N;
4198 SDVTList VTs = getVTList(VT);
4199 SDValue Ops[] = {Operand};
4200 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
4201 FoldingSetNodeID ID;
4202 AddNodeIDNode(ID, Opcode, VTs, Ops);
4203 void *IP = nullptr;
4204 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
4205 E->intersectFlagsWith(Flags);
4206 return SDValue(E, 0);
4209 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4210 N->setFlags(Flags);
4211 createOperands(N, Ops);
4212 CSEMap.InsertNode(N, IP);
4213 } else {
4214 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4215 createOperands(N, Ops);
4218 InsertNode(N);
4219 SDValue V = SDValue(N, 0);
4220 NewSDValueDbgMsg(V, "Creating new node: ", this);
4221 return V;
4224 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1,
4225 const APInt &C2) {
4226 switch (Opcode) {
4227 case ISD::ADD: return std::make_pair(C1 + C2, true);
4228 case ISD::SUB: return std::make_pair(C1 - C2, true);
4229 case ISD::MUL: return std::make_pair(C1 * C2, true);
4230 case ISD::AND: return std::make_pair(C1 & C2, true);
4231 case ISD::OR: return std::make_pair(C1 | C2, true);
4232 case ISD::XOR: return std::make_pair(C1 ^ C2, true);
4233 case ISD::SHL: return std::make_pair(C1 << C2, true);
4234 case ISD::SRL: return std::make_pair(C1.lshr(C2), true);
4235 case ISD::SRA: return std::make_pair(C1.ashr(C2), true);
4236 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true);
4237 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true);
4238 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true);
4239 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true);
4240 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true);
4241 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? C1 : C2, true);
4242 case ISD::UDIV:
4243 if (!C2.getBoolValue())
4244 break;
4245 return std::make_pair(C1.udiv(C2), true);
4246 case ISD::UREM:
4247 if (!C2.getBoolValue())
4248 break;
4249 return std::make_pair(C1.urem(C2), true);
4250 case ISD::SDIV:
4251 if (!C2.getBoolValue())
4252 break;
4253 return std::make_pair(C1.sdiv(C2), true);
4254 case ISD::SREM:
4255 if (!C2.getBoolValue())
4256 break;
4257 return std::make_pair(C1.srem(C2), true);
4259 return std::make_pair(APInt(1, 0), false);
4262 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
4263 EVT VT, const ConstantSDNode *Cst1,
4264 const ConstantSDNode *Cst2) {
4265 if (Cst1->isOpaque() || Cst2->isOpaque())
4266 return SDValue();
4268 std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(),
4269 Cst2->getAPIntValue());
4270 if (!Folded.second)
4271 return SDValue();
4272 return getConstant(Folded.first, DL, VT);
4275 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
4276 const GlobalAddressSDNode *GA,
4277 const SDNode *N2) {
4278 if (GA->getOpcode() != ISD::GlobalAddress)
4279 return SDValue();
4280 if (!TLI->isOffsetFoldingLegal(GA))
4281 return SDValue();
4282 const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2);
4283 if (!Cst2)
4284 return SDValue();
4285 int64_t Offset = Cst2->getSExtValue();
4286 switch (Opcode) {
4287 case ISD::ADD: break;
4288 case ISD::SUB: Offset = -uint64_t(Offset); break;
4289 default: return SDValue();
4291 return getGlobalAddress(GA->getGlobal(), SDLoc(Cst2), VT,
4292 GA->getOffset() + uint64_t(Offset));
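   // Illustrative example (with a stand-in symbol @g): if offset folding
   // is legal for the target, (add (GlobalAddress @g + 8), 16) folds to
   // @g + 24 and (sub (GlobalAddress @g + 8), 16) folds to @g - 8.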
4295 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
4296 switch (Opcode) {
4297 case ISD::SDIV:
4298 case ISD::UDIV:
4299 case ISD::SREM:
4300 case ISD::UREM: {
4301 // If a divisor is zero/undef or any element of a divisor vector is
4302 // zero/undef, the whole op is undef.
4303 assert(Ops.size() == 2 && "Div/rem should have 2 operands");
4304 SDValue Divisor = Ops[1];
4305 if (Divisor.isUndef() || isNullConstant(Divisor))
4306 return true;
4308 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
4309 llvm::any_of(Divisor->op_values(),
4310 [](SDValue V) { return V.isUndef() ||
4311 isNullConstant(V); });
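     // Illustrative example: (udiv x, <i32 1, i32 0, i32 3>) folds to
     // UNDEF because one divisor lane is zero.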
4312 // TODO: Handle signed overflow.
4314 // TODO: Handle oversized shifts.
4315 default:
4316 return false;
4320 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
4321 EVT VT, SDNode *Cst1,
4322 SDNode *Cst2) {
4323 // If the opcode is a target-specific ISD node, there's nothing we can
4324 // do here and the operand rules may not line up with the below, so
4325 // bail early.
4326 if (Opcode >= ISD::BUILTIN_OP_END)
4327 return SDValue();
4329 if (isUndef(Opcode, {SDValue(Cst1, 0), SDValue(Cst2, 0)}))
4330 return getUNDEF(VT);
4332 // Handle the case of two scalars.
4333 if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
4334 if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
4335 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2);
4336 assert((!Folded || !VT.isVector()) &&
4337 "Can't fold vectors ops with scalar operands");
4338 return Folded;
4342 // fold (add Sym, c) -> Sym+c
4343 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1))
4344 return FoldSymbolOffset(Opcode, VT, GA, Cst2);
4345 if (TLI->isCommutativeBinOp(Opcode))
4346 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2))
4347 return FoldSymbolOffset(Opcode, VT, GA, Cst1);
4349   // For vectors, extract each constant element into Inputs so we can
4350   // constant fold them individually.
4351 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
4352 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
4353 if (!BV1 || !BV2)
4354 return SDValue();
4356 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
4358 EVT SVT = VT.getScalarType();
4359 EVT LegalSVT = SVT;
4360 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4361 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4362 if (LegalSVT.bitsLT(SVT))
4363 return SDValue();
4365 SmallVector<SDValue, 4> Outputs;
4366 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
4367 SDValue V1 = BV1->getOperand(I);
4368 SDValue V2 = BV2->getOperand(I);
4370 if (SVT.isInteger()) {
4371 if (V1->getValueType(0).bitsGT(SVT))
4372 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
4373 if (V2->getValueType(0).bitsGT(SVT))
4374 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
4377 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
4378 return SDValue();
4380 // Fold one vector element.
4381 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
4382 if (LegalSVT != SVT)
4383 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4385 // Scalar folding only succeeded if the result is a constant or UNDEF.
4386 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4387 ScalarResult.getOpcode() != ISD::ConstantFP)
4388 return SDValue();
4389 Outputs.push_back(ScalarResult);
4392 assert(VT.getVectorNumElements() == Outputs.size() &&
4393 "Vector size mismatch!");
4395 // We may have a vector type but a scalar result. Create a splat.
4396 Outputs.resize(VT.getVectorNumElements(), Outputs.back());
4398 // Build a big vector out of the scalar elements we generated.
4399 return getBuildVector(VT, SDLoc(), Outputs);
4402 // TODO: Merge with FoldConstantArithmetic
4403 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
4404 const SDLoc &DL, EVT VT,
4405 ArrayRef<SDValue> Ops,
4406 const SDNodeFlags Flags) {
4407 // If the opcode is a target-specific ISD node, there's nothing we can
4408 // do here and the operand rules may not line up with the below, so
4409 // bail early.
4410 if (Opcode >= ISD::BUILTIN_OP_END)
4411 return SDValue();
4413 if (isUndef(Opcode, Ops))
4414 return getUNDEF(VT);
4416 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
4417 if (!VT.isVector())
4418 return SDValue();
4420 unsigned NumElts = VT.getVectorNumElements();
4422 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
4423 return !Op.getValueType().isVector() ||
4424 Op.getValueType().getVectorNumElements() == NumElts;
4427 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
4428 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
4429 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
4430 (BV && BV->isConstant());
4433 // All operands must be vector types with the same number of elements as
4434 // the result type and must be either UNDEF or a build vector of constant
4435 // or UNDEF scalars.
4436 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
4437 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
4438 return SDValue();
4440   // If we are comparing vectors, then the result needs to be an i1 boolean
4441 // that is then sign-extended back to the legal result type.
4442 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
4444   // Find a legal integer scalar type for constant promotion and
4445   // ensure that its scalar size is at least as large as the source's.
4446 EVT LegalSVT = VT.getScalarType();
4447 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4448 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4449 if (LegalSVT.bitsLT(VT.getScalarType()))
4450 return SDValue();
4453 // Constant fold each scalar lane separately.
4454 SmallVector<SDValue, 4> ScalarResults;
4455 for (unsigned i = 0; i != NumElts; i++) {
4456 SmallVector<SDValue, 4> ScalarOps;
4457 for (SDValue Op : Ops) {
4458 EVT InSVT = Op.getValueType().getScalarType();
4459 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
4460 if (!InBV) {
4461 // We've checked that this is UNDEF or a constant of some kind.
4462 if (Op.isUndef())
4463 ScalarOps.push_back(getUNDEF(InSVT));
4464 else
4465 ScalarOps.push_back(Op);
4466 continue;
4469 SDValue ScalarOp = InBV->getOperand(i);
4470 EVT ScalarVT = ScalarOp.getValueType();
4472 // Build vector (integer) scalar operands may need implicit
4473 // truncation - do this before constant folding.
4474 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
4475 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
4477 ScalarOps.push_back(ScalarOp);
4480 // Constant fold the scalar operands.
4481 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
4483 // Legalize the (integer) scalar constant if necessary.
4484 if (LegalSVT != SVT)
4485 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4487 // Scalar folding only succeeded if the result is a constant or UNDEF.
4488 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4489 ScalarResult.getOpcode() != ISD::ConstantFP)
4490 return SDValue();
4491 ScalarResults.push_back(ScalarResult);
4494 SDValue V = getBuildVector(VT, DL, ScalarResults);
4495 NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
4496 return V;
4499 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4500 SDValue N1, SDValue N2, const SDNodeFlags Flags) {
4501 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
4502 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
4503 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4504 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
4506 // Canonicalize constant to RHS if commutative.
4507 if (TLI->isCommutativeBinOp(Opcode)) {
4508 if (N1C && !N2C) {
4509 std::swap(N1C, N2C);
4510 std::swap(N1, N2);
4511 } else if (N1CFP && !N2CFP) {
4512 std::swap(N1CFP, N2CFP);
4513 std::swap(N1, N2);
4517 switch (Opcode) {
4518 default: break;
4519 case ISD::TokenFactor:
4520 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
4521 N2.getValueType() == MVT::Other && "Invalid token factor!");
4522 // Fold trivial token factors.
4523 if (N1.getOpcode() == ISD::EntryToken) return N2;
4524 if (N2.getOpcode() == ISD::EntryToken) return N1;
4525 if (N1 == N2) return N1;
4526 break;
4527 case ISD::CONCAT_VECTORS: {
4528 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
4529 SDValue Ops[] = {N1, N2};
4530 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
4531 return V;
4532 break;
4534 case ISD::AND:
4535 assert(VT.isInteger() && "This operator does not apply to FP types!");
4536 assert(N1.getValueType() == N2.getValueType() &&
4537 N1.getValueType() == VT && "Binary operator types must match!");
4538 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
4539 // worth handling here.
4540 if (N2C && N2C->isNullValue())
4541 return N2;
4542 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
4543 return N1;
4544 break;
4545 case ISD::OR:
4546 case ISD::XOR:
4547 case ISD::ADD:
4548 case ISD::SUB:
4549 assert(VT.isInteger() && "This operator does not apply to FP types!");
4550 assert(N1.getValueType() == N2.getValueType() &&
4551 N1.getValueType() == VT && "Binary operator types must match!");
4552 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
4553 // it's worth handling here.
4554 if (N2C && N2C->isNullValue())
4555 return N1;
4556 break;
4557 case ISD::UDIV:
4558 case ISD::UREM:
4559 case ISD::MULHU:
4560 case ISD::MULHS:
4561 case ISD::MUL:
4562 case ISD::SDIV:
4563 case ISD::SREM:
4564 case ISD::SMIN:
4565 case ISD::SMAX:
4566 case ISD::UMIN:
4567 case ISD::UMAX:
4568 assert(VT.isInteger() && "This operator does not apply to FP types!");
4569 assert(N1.getValueType() == N2.getValueType() &&
4570 N1.getValueType() == VT && "Binary operator types must match!");
4571 break;
4572 case ISD::FADD:
4573 case ISD::FSUB:
4574 case ISD::FMUL:
4575 case ISD::FDIV:
4576 case ISD::FREM:
4577 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
4578 assert(N1.getValueType() == N2.getValueType() &&
4579 N1.getValueType() == VT && "Binary operator types must match!");
4580 break;
4581 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
4582 assert(N1.getValueType() == VT &&
4583 N1.getValueType().isFloatingPoint() &&
4584 N2.getValueType().isFloatingPoint() &&
4585 "Invalid FCOPYSIGN!");
4586 break;
4587 case ISD::SHL:
4588 case ISD::SRA:
4589 case ISD::SRL:
4590 case ISD::ROTL:
4591 case ISD::ROTR:
4592 assert(VT == N1.getValueType() &&
4593          "Shift operators' return type must be the same as their first arg");
4594 assert(VT.isInteger() && N2.getValueType().isInteger() &&
4595 "Shifts only work on integers");
4596 assert((!VT.isVector() || VT == N2.getValueType()) &&
4597          "Vector shift amounts must have the same type as their first arg");
4598     // Verify that the shift amount VT is big enough to hold valid shift
4599     // amounts. This catches things like trying to shift an i1024 value by an
4600     // i8, which is easy to fall into in generic code that uses
4601     // TLI.getShiftAmountTy().
4602 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
4603 "Invalid use of small shift amount with oversized value!");
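     // E.g. an i1024 value needs a shift-amount type of at least
     // Log2_32_Ceil(1024) = 10 bits; an i8 shift amount cannot represent
     // all valid shifts and trips this assert.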
4605 // Always fold shifts of i1 values so the code generator doesn't need to
4606 // handle them. Since we know the size of the shift has to be less than the
4607 // size of the value, the shift/rotate count is guaranteed to be zero.
4608 if (VT == MVT::i1)
4609 return N1;
4610 if (N2C && N2C->isNullValue())
4611 return N1;
4612 break;
4613 case ISD::FP_ROUND_INREG: {
4614 EVT EVT = cast<VTSDNode>(N2)->getVT();
4615 assert(VT == N1.getValueType() && "Not an inreg round!");
4616 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
4617 "Cannot FP_ROUND_INREG integer types");
4618 assert(EVT.isVector() == VT.isVector() &&
4619 "FP_ROUND_INREG type should be vector iff the operand "
4620 "type is vector!");
4621 assert((!EVT.isVector() ||
4622 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
4623 "Vector element counts must match in FP_ROUND_INREG");
4624 assert(EVT.bitsLE(VT) && "Not rounding down!");
4625 (void)EVT;
4626 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
4627 break;
4629 case ISD::FP_ROUND:
4630 assert(VT.isFloatingPoint() &&
4631 N1.getValueType().isFloatingPoint() &&
4632 VT.bitsLE(N1.getValueType()) &&
4633 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
4634 "Invalid FP_ROUND!");
4635 if (N1.getValueType() == VT) return N1; // noop conversion.
4636 break;
4637 case ISD::AssertSext:
4638 case ISD::AssertZext: {
4639 EVT EVT = cast<VTSDNode>(N2)->getVT();
4640 assert(VT == N1.getValueType() && "Not an inreg extend!");
4641 assert(VT.isInteger() && EVT.isInteger() &&
4642 "Cannot *_EXTEND_INREG FP types");
4643 assert(!EVT.isVector() &&
4644 "AssertSExt/AssertZExt type should be the vector element type "
4645 "rather than the vector type!");
4646 assert(EVT.bitsLE(VT) && "Not extending!");
4647 if (VT == EVT) return N1; // noop assertion.
4648 break;
4650 case ISD::SIGN_EXTEND_INREG: {
4651 EVT EVT = cast<VTSDNode>(N2)->getVT();
4652 assert(VT == N1.getValueType() && "Not an inreg extend!");
4653 assert(VT.isInteger() && EVT.isInteger() &&
4654 "Cannot *_EXTEND_INREG FP types");
4655 assert(EVT.isVector() == VT.isVector() &&
4656 "SIGN_EXTEND_INREG type should be vector iff the operand "
4657 "type is vector!");
4658 assert((!EVT.isVector() ||
4659 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
4660 "Vector element counts must match in SIGN_EXTEND_INREG");
4661 assert(EVT.bitsLE(VT) && "Not extending!");
4662 if (EVT == VT) return N1; // Not actually extending
4664 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
4665 unsigned FromBits = EVT.getScalarSizeInBits();
4666 Val <<= Val.getBitWidth() - FromBits;
4667 Val.ashrInPlace(Val.getBitWidth() - FromBits);
4668 return getConstant(Val, DL, ConstantVT);
4671 if (N1C) {
4672 const APInt &Val = N1C->getAPIntValue();
4673 return SignExtendInReg(Val, VT);
4675 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
4676 SmallVector<SDValue, 8> Ops;
4677 llvm::EVT OpVT = N1.getOperand(0).getValueType();
4678 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
4679 SDValue Op = N1.getOperand(i);
4680 if (Op.isUndef()) {
4681 Ops.push_back(getUNDEF(OpVT));
4682 continue;
4684 ConstantSDNode *C = cast<ConstantSDNode>(Op);
4685 APInt Val = C->getAPIntValue();
4686 Ops.push_back(SignExtendInReg(Val, OpVT));
4688 return getBuildVector(VT, DL, Ops);
4690 break;
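     // Worked example of the constant fold above: sext_inreg(0xAB, i8) on
     // an i32 shifts the constant up by 24 bits and arithmetic-shifts it
     // back, producing 0xFFFFFFAB.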
4692 case ISD::EXTRACT_VECTOR_ELT:
4693 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
4694 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
4695 element type of the vector.");
4697 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
4698 if (N1.isUndef())
4699 return getUNDEF(VT);
4701 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF
4702 if (N2C && N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
4703 return getUNDEF(VT);
4705 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
4706 // expanding copies of large vectors from registers.
4707 if (N2C &&
4708 N1.getOpcode() == ISD::CONCAT_VECTORS &&
4709 N1.getNumOperands() > 0) {
4710 unsigned Factor =
4711 N1.getOperand(0).getValueType().getVectorNumElements();
4712 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
4713 N1.getOperand(N2C->getZExtValue() / Factor),
4714 getConstant(N2C->getZExtValue() % Factor, DL,
4715 N2.getValueType()));
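// Illustrative instance of the fold above: with
//   N1 = (concat_vectors v4i32:A, v4i32:B) and N2C = 5,
// Factor is 4, so this produces (extract_vector_elt B, 1): element 5 of the
// 8-element concatenation is element 5 % 4 == 1 of operand 5 / 4 == 1.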
4718 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
4719 // expanding large vector constants.
4720 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
4721 SDValue Elt = N1.getOperand(N2C->getZExtValue());
4723 if (VT != Elt.getValueType())
4724 // If the vector element type is not legal, the BUILD_VECTOR operands
4725 // are promoted and implicitly truncated, and the result implicitly
4726 // extended. Make that explicit here.
4727 Elt = getAnyExtOrTrunc(Elt, DL, VT);
4729 return Elt;
4732 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
4733 // operations are lowered to scalars.
4734 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
4735 // If the indices are the same, return the inserted element; if the
4736 // indices are known to be different, extract the element from the
4737 // original vector.
4738 SDValue N1Op2 = N1.getOperand(2);
4739 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
4741 if (N1Op2C && N2C) {
4742 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
4743 if (VT == N1.getOperand(1).getValueType())
4744 return N1.getOperand(1);
4745 else
4746 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
4749 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
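// Illustrative instances of the two cases above:
//   (extract_elt (insert_elt V, X, 2), 2) --> X (same constant index)
//   (extract_elt (insert_elt V, X, 2), 0) --> (extract_elt V, 0)
// If either index is not a constant, the fold is skipped, since the indices
// cannot be proven equal or unequal.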
4753 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
4754 // when vector types are scalarized and v1iX is legal.
4755 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx)
4756 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
4757 N1.getValueType().getVectorNumElements() == 1) {
4758 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
4759 N1.getOperand(1));
4761 break;
4762 case ISD::EXTRACT_ELEMENT:
4763 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
4764 assert(!N1.getValueType().isVector() && !VT.isVector() &&
4765 (N1.getValueType().isInteger() == VT.isInteger()) &&
4766 N1.getValueType() != VT &&
4767 "Wrong types for EXTRACT_ELEMENT!");
4769 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
4770 // 64-bit integers into 32-bit parts. Instead of building the extract of
4771 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
4772 if (N1.getOpcode() == ISD::BUILD_PAIR)
4773 return N1.getOperand(N2C->getZExtValue());
4775 // EXTRACT_ELEMENT of a constant int is also very common.
4776 if (N1C) {
4777 unsigned ElementSize = VT.getSizeInBits();
4778 unsigned Shift = ElementSize * N2C->getZExtValue();
4779 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
4780 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
4782 break;
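// Worked example for the constant case above: extracting element 1 (the
// high half) of the i64 constant 0x1122334455667788 as an i32 gives
//   Shift = 32 * 1, ShiftedVal = 0x0000000011223344,
// and truncation to i32 yields 0x11223344; element 0 would yield 0x55667788.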
4783 case ISD::EXTRACT_SUBVECTOR:
4784 if (VT.isSimple() && N1.getValueType().isSimple()) {
4785 assert(VT.isVector() && N1.getValueType().isVector() &&
4786 "Extract subvector VTs must be a vectors!");
4787 assert(VT.getVectorElementType() ==
4788 N1.getValueType().getVectorElementType() &&
4789 "Extract subvector VTs must have the same element type!");
4790 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
4791 "Extract subvector must be from larger vector to smaller vector!");
4793 if (N2C) {
4794 assert((VT.getVectorNumElements() + N2C->getZExtValue()
4795 <= N1.getValueType().getVectorNumElements())
4796 && "Extract subvector overflow!");
4799 // Trivial extraction.
4800 if (VT.getSimpleVT() == N1.getSimpleValueType())
4801 return N1;
4803 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
4804 if (N1.isUndef())
4805 return getUNDEF(VT);
4807 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
4808 // the concat have the same type as the extract.
4809 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
4810 N1.getNumOperands() > 0 &&
4811 VT == N1.getOperand(0).getValueType()) {
4812 unsigned Factor = VT.getVectorNumElements();
4813 return N1.getOperand(N2C->getZExtValue() / Factor);
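// Illustrative instance of the fold above: extracting a v4i32 subvector at
// index 4 from (concat_vectors v4i32:A, v4i32:B) has Factor = 4 and simply
// returns operand 4 / 4 == 1, i.e. B, with no extract node left behind.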
4816 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
4817 // during shuffle legalization.
4818 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
4819 VT == N1.getOperand(1).getValueType())
4820 return N1.getOperand(1);
4822 break;
4825 // Perform trivial constant folding.
4826 if (SDValue SV =
4827 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
4828 return SV;
4830 // Constant fold FP operations.
4831 bool HasFPExceptions = TLI->hasFloatingPointExceptions();
4832 if (N1CFP) {
4833 if (N2CFP) {
4834 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
4835 APFloat::opStatus s;
4836 switch (Opcode) {
4837 case ISD::FADD:
4838 s = V1.add(V2, APFloat::rmNearestTiesToEven);
4839 if (!HasFPExceptions || s != APFloat::opInvalidOp)
4840 return getConstantFP(V1, DL, VT);
4841 break;
4842 case ISD::FSUB:
4843 s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
4844 if (!HasFPExceptions || s!=APFloat::opInvalidOp)
4845 return getConstantFP(V1, DL, VT);
4846 break;
4847 case ISD::FMUL:
4848 s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
4849 if (!HasFPExceptions || s!=APFloat::opInvalidOp)
4850 return getConstantFP(V1, DL, VT);
4851 break;
4852 case ISD::FDIV:
4853 s = V1.divide(V2, APFloat::rmNearestTiesToEven);
4854 if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
4855 s!=APFloat::opDivByZero)) {
4856 return getConstantFP(V1, DL, VT);
4858 break;
4859 case ISD::FREM :
4860 s = V1.mod(V2);
4861 if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
4862 s!=APFloat::opDivByZero)) {
4863 return getConstantFP(V1, DL, VT);
4865 break;
4866 case ISD::FCOPYSIGN:
4867 V1.copySign(V2);
4868 return getConstantFP(V1, DL, VT);
4869 default: break;
4873 if (Opcode == ISD::FP_ROUND) {
4874 APFloat V = N1CFP->getValueAPF(); // make copy
4875 bool ignored;
4876 // This can return overflow, underflow, or inexact; we don't care.
4877 // FIXME: we need to be more flexible about the rounding mode.
4878 (void)V.convert(EVTToAPFloatSemantics(VT),
4879 APFloat::rmNearestTiesToEven, &ignored);
4880 return getConstantFP(V, DL, VT);
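// Illustrative behavior of the folds above (a sketch, not exhaustive):
//   (fadd 2.0, 3.0) --> 5.0    always foldable
//   (fdiv 1.0, 0.0) --> +inf   folded only when the target reports no FP
//                              exceptions, since the division sets
//                              opDivByZero and must otherwise be preserved.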
4884 // Any FP binop with an undef operand is folded to NaN. This matches the
4885 // behavior of the IR optimizer.
4886 switch (Opcode) {
4887 case ISD::FADD:
4888 case ISD::FSUB:
4889 case ISD::FMUL:
4890 case ISD::FDIV:
4891 case ISD::FREM:
4892 if (N1.isUndef() || N2.isUndef())
4893 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
4896 // Canonicalize an UNDEF to the RHS, even over a constant.
4897 if (N1.isUndef()) {
4898 if (TLI->isCommutativeBinOp(Opcode)) {
4899 std::swap(N1, N2);
4900 } else {
4901 switch (Opcode) {
4902 case ISD::FP_ROUND_INREG:
4903 case ISD::SIGN_EXTEND_INREG:
4904 case ISD::SUB:
4905 return getUNDEF(VT); // fold op(undef, arg2) -> undef
4906 case ISD::UDIV:
4907 case ISD::SDIV:
4908 case ISD::UREM:
4909 case ISD::SREM:
4910 case ISD::SRA:
4911 case ISD::SRL:
4912 case ISD::SHL:
4913 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
4918 // Fold a bunch of operators when the RHS is undef.
4919 if (N2.isUndef()) {
4920 switch (Opcode) {
4921 case ISD::XOR:
4922 if (N1.isUndef())
4923 // Handle undef ^ undef -> 0 special case. This is a common
4924 // idiom (misuse).
4925 return getConstant(0, DL, VT);
4926 LLVM_FALLTHROUGH;
4927 case ISD::ADD:
4928 case ISD::ADDC:
4929 case ISD::ADDE:
4930 case ISD::SUB:
4931 case ISD::UDIV:
4932 case ISD::SDIV:
4933 case ISD::UREM:
4934 case ISD::SREM:
4935 case ISD::SRA:
4936 case ISD::SRL:
4937 case ISD::SHL:
4938 return getUNDEF(VT); // fold op(arg1, undef) -> undef
4939 case ISD::MUL:
4940 case ISD::AND:
4941 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
4942 case ISD::OR:
4943 return getAllOnesConstant(DL, VT);
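// A few concrete instances of the undef folds above (illustrative only):
//   (xor undef, undef) --> 0        common "x ^ x" idiom on garbage
//   (and X, undef)     --> 0        undef may be chosen as all zeros
//   (or  X, undef)     --> all-ones undef may be chosen as all ones
//   (add X, undef)     --> undef    every result bit may be anything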
4947 // Memoize this node if possible.
4948 SDNode *N;
4949 SDVTList VTs = getVTList(VT);
4950 SDValue Ops[] = {N1, N2};
4951 if (VT != MVT::Glue) {
4952 FoldingSetNodeID ID;
4953 AddNodeIDNode(ID, Opcode, VTs, Ops);
4954 void *IP = nullptr;
4955 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
4956 E->intersectFlagsWith(Flags);
4957 return SDValue(E, 0);
4960 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4961 N->setFlags(Flags);
4962 createOperands(N, Ops);
4963 CSEMap.InsertNode(N, IP);
4964 } else {
4965 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4966 createOperands(N, Ops);
4969 InsertNode(N);
4970 SDValue V = SDValue(N, 0);
4971 NewSDValueDbgMsg(V, "Creating new node: ", this);
4972 return V;
4975 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4976 SDValue N1, SDValue N2, SDValue N3,
4977 const SDNodeFlags Flags) {
4978 // Perform various simplifications.
4979 switch (Opcode) {
4980 case ISD::FMA: {
4981 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
4982 assert(N1.getValueType() == VT && N2.getValueType() == VT &&
4983 N3.getValueType() == VT && "FMA types must match!");
4984 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4985 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
4986 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
4987 if (N1CFP && N2CFP && N3CFP) {
4988 APFloat V1 = N1CFP->getValueAPF();
4989 const APFloat &V2 = N2CFP->getValueAPF();
4990 const APFloat &V3 = N3CFP->getValueAPF();
4991 APFloat::opStatus s =
4992 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
4993 if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp)
4994 return getConstantFP(V1, DL, VT);
4996 break;
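// Worked example for the constant FMA fold above: with N1 = 2.0, N2 = 3.0
// and N3 = 0.5, fusedMultiplyAdd computes 2.0 * 3.0 + 0.5 = 6.5 with a
// single rounding, so the node folds to the constant 6.5 (unless the target
// models FP exceptions and the operation is invalid, e.g. 0.0 * inf + x).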
4998 case ISD::CONCAT_VECTORS: {
4999 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
5000 SDValue Ops[] = {N1, N2, N3};
5001 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
5002 return V;
5003 break;
5005 case ISD::SETCC: {
5006 // Use FoldSetCC to simplify SETCC's.
5007 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
5008 return V;
5009 // Vector constant folding.
5010 SDValue Ops[] = {N1, N2, N3};
5011 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) {
5012 NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
5013 return V;
5015 break;
5017 case ISD::SELECT:
5018 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
5019 if (N1C->getZExtValue())
5020 return N2; // select true, X, Y -> X
5021 return N3; // select false, X, Y -> Y
5024 if (N2 == N3) return N2; // select C, X, X -> X
5025 break;
5026 case ISD::VECTOR_SHUFFLE:
5027 llvm_unreachable("should use getVectorShuffle constructor!");
5028 case ISD::INSERT_VECTOR_ELT: {
5029 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
5030 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF
5031 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
5032 return getUNDEF(VT);
5033 break;
5035 case ISD::INSERT_SUBVECTOR: {
5036 SDValue Index = N3;
5037 if (VT.isSimple() && N1.getValueType().isSimple()
5038 && N2.getValueType().isSimple()) {
5039 assert(VT.isVector() && N1.getValueType().isVector() &&
5040 N2.getValueType().isVector() &&
5041 "Insert subvector VTs must be a vectors");
5042 assert(VT == N1.getValueType() &&
5043 "Dest and insert subvector source types must match!");
5044 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
5045 "Insert subvector must be from smaller vector to larger vector!");
5046 if (isa<ConstantSDNode>(Index)) {
5047 assert((N2.getValueType().getVectorNumElements() +
5048 cast<ConstantSDNode>(Index)->getZExtValue()
5049 <= VT.getVectorNumElements())
5050 && "Insert subvector overflow!");
5053 // Trivial insertion.
5054 if (VT.getSimpleVT() == N2.getSimpleValueType())
5055 return N2;
5057 break;
5059 case ISD::BITCAST:
5060 // Fold bit_convert nodes from a type to themselves.
5061 if (N1.getValueType() == VT)
5062 return N1;
5063 break;
5066 // Memoize node if it doesn't produce a flag.
5067 SDNode *N;
5068 SDVTList VTs = getVTList(VT);
5069 SDValue Ops[] = {N1, N2, N3};
5070 if (VT != MVT::Glue) {
5071 FoldingSetNodeID ID;
5072 AddNodeIDNode(ID, Opcode, VTs, Ops);
5073 void *IP = nullptr;
5074 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5075 E->intersectFlagsWith(Flags);
5076 return SDValue(E, 0);
5079 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5080 N->setFlags(Flags);
5081 createOperands(N, Ops);
5082 CSEMap.InsertNode(N, IP);
5083 } else {
5084 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5085 createOperands(N, Ops);
5088 InsertNode(N);
5089 SDValue V = SDValue(N, 0);
5090 NewSDValueDbgMsg(V, "Creating new node: ", this);
5091 return V;
5094 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5095 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
5096 SDValue Ops[] = { N1, N2, N3, N4 };
5097 return getNode(Opcode, DL, VT, Ops);
5100 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5101 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
5102 SDValue N5) {
5103 SDValue Ops[] = { N1, N2, N3, N4, N5 };
5104 return getNode(Opcode, DL, VT, Ops);
5107 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
5108 /// the incoming stack arguments to be loaded from the stack.
5109 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
5110 SmallVector<SDValue, 8> ArgChains;
5112 // Include the original chain at the beginning of the list. When this is
5113 // used by target LowerCall hooks, this helps legalize find the
5114 // CALLSEQ_BEGIN node.
5115 ArgChains.push_back(Chain);
5117 // Add a chain value for each stack argument.
5118 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
5119 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
5120 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
5121 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
5122 if (FI->getIndex() < 0)
5123 ArgChains.push_back(SDValue(L, 1));
5125 // Build a tokenfactor for all the chains.
5126 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
5129 /// getMemsetValue - Vectorized representation of the memset value
5130 /// operand.
5131 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
5132 const SDLoc &dl) {
5133 assert(!Value.isUndef());
5135 unsigned NumBits = VT.getScalarSizeInBits();
5136 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
5137 assert(C->getAPIntValue().getBitWidth() == 8);
5138 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
5139 if (VT.isInteger())
5140 return DAG.getConstant(Val, dl, VT);
5141 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
5142 VT);
5145 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
5146 EVT IntVT = VT.getScalarType();
5147 if (!IntVT.isInteger())
5148 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
5150 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
5151 if (NumBits > 8) {
5152 // Use a multiplication with 0x010101... to extend the input to the
5153 // required length.
5154 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
5155 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
5156 DAG.getConstant(Magic, dl, IntVT));
5159 if (VT != Value.getValueType() && !VT.isInteger())
5160 Value = DAG.getBitcast(VT.getScalarType(), Value);
5161 if (VT != Value.getValueType())
5162 Value = DAG.getSplatBuildVector(VT, dl, Value);
5164 return Value;
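// Worked example of the multiplication trick above (illustrative): for a
// 32-bit memset value with fill byte 0xAB,
//   Magic = APInt::getSplat(32, APInt(8, 0x01)) == 0x01010101
//   0xAB * 0x01010101 == 0xABABABAB
// which replicates the byte across the whole integer without any shifts.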
5167 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
5168 /// used when a memcpy is turned into a memset because the source is a
5169 /// constant string pointer.
5170 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
5171 const TargetLowering &TLI,
5172 const ConstantDataArraySlice &Slice) {
5173 // Handle vector with all elements zero.
5174 if (Slice.Array == nullptr) {
5175 if (VT.isInteger())
5176 return DAG.getConstant(0, dl, VT);
5177 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
5178 return DAG.getConstantFP(0.0, dl, VT);
5179 else if (VT.isVector()) {
5180 unsigned NumElts = VT.getVectorNumElements();
5181 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
5182 return DAG.getNode(ISD::BITCAST, dl, VT,
5183 DAG.getConstant(0, dl,
5184 EVT::getVectorVT(*DAG.getContext(),
5185 EltVT, NumElts)));
5186 } else
5187 llvm_unreachable("Expected type!");
5190 assert(!VT.isVector() && "Can't handle vector type here!");
5191 unsigned NumVTBits = VT.getSizeInBits();
5192 unsigned NumVTBytes = NumVTBits / 8;
5193 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
5195 APInt Val(NumVTBits, 0);
5196 if (DAG.getDataLayout().isLittleEndian()) {
5197 for (unsigned i = 0; i != NumBytes; ++i)
5198 Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
5199 } else {
5200 for (unsigned i = 0; i != NumBytes; ++i)
5201 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
5204 // If the "cost" of materializing the integer immediate is less than the cost
5205 // of a load, then it is cost effective to turn the load into the immediate.
5206 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
5207 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
5208 return DAG.getConstant(Val, dl, VT);
5209 return SDValue(nullptr, 0);
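// Worked example of the byte packing above (illustrative): copying the
// constant string "abcd" into an i32 on a little-endian target builds
//   Val = 0x61 | 0x62 << 8 | 0x63 << 16 | 0x64 << 24 == 0x64636261
// while a big-endian target builds 0x61626364; either way the load from the
// string becomes a plain immediate if the target considers it cheap enough.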
5212 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset,
5213 const SDLoc &DL) {
5214 EVT VT = Base.getValueType();
5215 return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT));
5218 /// Returns true if memcpy source is constant data.
5219 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
5220 uint64_t SrcDelta = 0;
5221 GlobalAddressSDNode *G = nullptr;
5222 if (Src.getOpcode() == ISD::GlobalAddress)
5223 G = cast<GlobalAddressSDNode>(Src);
5224 else if (Src.getOpcode() == ISD::ADD &&
5225 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
5226 Src.getOperand(1).getOpcode() == ISD::Constant) {
5227 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
5228 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
5230 if (!G)
5231 return false;
5233 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
5234 SrcDelta + G->getOffset());
5237 /// Determines the optimal series of memory ops to replace the memset / memcpy.
5238 /// Returns true if the number of memory ops does not exceed the threshold
5239 /// (Limit). The sequence of memory op types to perform the memset / memcpy
5240 /// is returned by reference in MemOps.
5241 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
5242 unsigned Limit, uint64_t Size,
5243 unsigned DstAlign, unsigned SrcAlign,
5244 bool IsMemset,
5245 bool ZeroMemset,
5246 bool MemcpyStrSrc,
5247 bool AllowOverlap,
5248 unsigned DstAS, unsigned SrcAS,
5249 SelectionDAG &DAG,
5250 const TargetLowering &TLI) {
5251 assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
5252 "Expecting memcpy / memset source to meet alignment requirement!");
5253 // If 'SrcAlign' is zero, that means the memory operation does not need to
5254 // load the value, i.e. memset or memcpy from constant string. Otherwise,
5255 // it's the inferred alignment of the source. 'DstAlign', on the other hand,
5256 // is the specified alignment of the memory operation. If it is zero, that
5257 // means it's possible to change the alignment of the destination.
5258 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
5259 // not need to be loaded.
5260 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
5261 IsMemset, ZeroMemset, MemcpyStrSrc,
5262 DAG.getMachineFunction());
5264 if (VT == MVT::Other) {
5265 // Use the largest integer type whose alignment constraints are satisfied.
5266 // We only need to check DstAlign here as SrcAlign is always greater than
5267 // or equal to DstAlign (or zero).
5268 VT = MVT::i64;
5269 while (DstAlign && DstAlign < VT.getSizeInBits() / 8 &&
5270 !TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign))
5271 VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
5272 assert(VT.isInteger());
5274 // Find the largest legal integer type.
5275 MVT LVT = MVT::i64;
5276 while (!TLI.isTypeLegal(LVT))
5277 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
5278 assert(LVT.isInteger());
5280 // If the type we've chosen is larger than the largest legal integer type
5281 // then use that instead.
5282 if (VT.bitsGT(LVT))
5283 VT = LVT;
5286 unsigned NumMemOps = 0;
5287 while (Size != 0) {
5288 unsigned VTSize = VT.getSizeInBits() / 8;
5289 while (VTSize > Size) {
5290 // For now, only use non-vector loads / stores for the left-over pieces.
5291 EVT NewVT = VT;
5292 unsigned NewVTSize;
5294 bool Found = false;
5295 if (VT.isVector() || VT.isFloatingPoint()) {
5296 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
5297 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
5298 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
5299 Found = true;
5300 else if (NewVT == MVT::i64 &&
5301 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
5302 TLI.isSafeMemOpType(MVT::f64)) {
5303 // i64 is usually not legal on 32-bit targets, but f64 may be.
5304 NewVT = MVT::f64;
5305 Found = true;
5309 if (!Found) {
5310 do {
5311 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
5312 if (NewVT == MVT::i8)
5313 break;
5314 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
5316 NewVTSize = NewVT.getSizeInBits() / 8;
5318 // If the new VT cannot cover all of the remaining bits, then consider
5319 // issuing one (or a pair of) unaligned, overlapping loads / stores.
5320 // FIXME: Only does this for 64-bit or more since we don't have proper
5321 // cost model for unaligned load / store.
5322 bool Fast;
5323 if (NumMemOps && AllowOverlap &&
5324 VTSize >= 8 && NewVTSize < Size &&
5325 TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) && Fast)
5326 VTSize = Size;
5327 else {
5328 VT = NewVT;
5329 VTSize = NewVTSize;
5333 if (++NumMemOps > Limit)
5334 return false;
5336 MemOps.push_back(VT);
5337 Size -= VTSize;
5340 return true;
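// Illustrative example (assuming i64 is legal and the target allows fast
// misaligned accesses): a 15-byte memcpy with AllowOverlap could be lowered
// as
//   MemOps = { i64, i64 }   // the second op overlaps bytes 7..14
// whereas without overlap it would decompose into
//   MemOps = { i64, i32, i16, i8 }.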
5343 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
5344 // On Darwin, -Os means optimize for size without hurting performance, so
5345 // only really optimize for size when -Oz (MinSize) is used.
5346 if (MF.getTarget().getTargetTriple().isOSDarwin())
5347 return MF.getFunction().optForMinSize();
5348 return MF.getFunction().optForSize();
5351 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
5352 SmallVector<SDValue, 32> &OutChains, unsigned From,
5353 unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
5354 SmallVector<SDValue, 16> &OutStoreChains) {
5355 assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
5356 assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
5357 SmallVector<SDValue, 16> GluedLoadChains;
5358 for (unsigned i = From; i < To; ++i) {
5359 OutChains.push_back(OutLoadChains[i]);
5360 GluedLoadChains.push_back(OutLoadChains[i]);
5363 // Chain for all loads.
5364 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
5365 GluedLoadChains);
5367 for (unsigned i = From; i < To; ++i) {
5368 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
5369 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
5370 ST->getBasePtr(), ST->getMemoryVT(),
5371 ST->getMemOperand());
5372 OutChains.push_back(NewStore);
5376 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
5377 SDValue Chain, SDValue Dst, SDValue Src,
5378 uint64_t Size, unsigned Align,
5379 bool isVol, bool AlwaysInline,
5380 MachinePointerInfo DstPtrInfo,
5381 MachinePointerInfo SrcPtrInfo) {
5382 // Turn a memcpy of undef into a nop.
5383 if (Src.isUndef())
5384 return Chain;
5386 // Expand memcpy to a series of load and store ops if the size operand falls
5387 // below a certain threshold.
5388 // TODO: In the AlwaysInline case, if the size is big then generate a loop
5389 // rather than a potentially huge number of loads and stores.
5390 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5391 const DataLayout &DL = DAG.getDataLayout();
5392 LLVMContext &C = *DAG.getContext();
5393 std::vector<EVT> MemOps;
5394 bool DstAlignCanChange = false;
5395 MachineFunction &MF = DAG.getMachineFunction();
5396 MachineFrameInfo &MFI = MF.getFrameInfo();
5397 bool OptSize = shouldLowerMemFuncForSize(MF);
5398 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5399 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5400 DstAlignCanChange = true;
5401 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
5402 if (Align > SrcAlign)
5403 SrcAlign = Align;
5404 ConstantDataArraySlice Slice;
5405 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
5406 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
5407 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
5409 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
5410 (DstAlignCanChange ? 0 : Align),
5411 (isZeroConstant ? 0 : SrcAlign),
5412 false, false, CopyFromConstant, true,
5413 DstPtrInfo.getAddrSpace(),
5414 SrcPtrInfo.getAddrSpace(),
5415 DAG, TLI))
5416 return SDValue();
5418 if (DstAlignCanChange) {
5419 Type *Ty = MemOps[0].getTypeForEVT(C);
5420 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
5422 // Don't promote to an alignment that would require dynamic stack
5423 // realignment.
5424 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
5425 if (!TRI->needsStackRealignment(MF))
5426 while (NewAlign > Align &&
5427 DL.exceedsNaturalStackAlignment(NewAlign))
5428 NewAlign /= 2;
5430 if (NewAlign > Align) {
5431 // Give the stack frame object a larger alignment if needed.
5432 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
5433 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
5434 Align = NewAlign;
5438 MachineMemOperand::Flags MMOFlags =
5439 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
5440 SmallVector<SDValue, 16> OutLoadChains;
5441 SmallVector<SDValue, 16> OutStoreChains;
5442 SmallVector<SDValue, 32> OutChains;
5443 unsigned NumMemOps = MemOps.size();
5444 uint64_t SrcOff = 0, DstOff = 0;
5445 for (unsigned i = 0; i != NumMemOps; ++i) {
5446 EVT VT = MemOps[i];
5447 unsigned VTSize = VT.getSizeInBits() / 8;
5448 SDValue Value, Store;
5450 if (VTSize > Size) {
5451 // We are issuing an unaligned load / store pair that overlaps with the
5452 // previous pair; adjust the offsets accordingly.
5453 assert(i == NumMemOps-1 && i != 0);
5454 SrcOff -= VTSize - Size;
5455 DstOff -= VTSize - Size;
5458 if (CopyFromConstant &&
5459 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
5460 // It's unlikely that a store of a vector immediate can be done in a single
5461 // instruction. It would require a load from a constant pool first.
5462 // We only handle zero vectors here.
5463 // FIXME: Handle other cases where store of vector immediate is done in
5464 // a single instruction.
5465 ConstantDataArraySlice SubSlice;
5466 if (SrcOff < Slice.Length) {
5467 SubSlice = Slice;
5468 SubSlice.move(SrcOff);
5469 } else {
5470 // This is an out-of-bounds access and hence UB. Pretend we read zero.
5471 SubSlice.Array = nullptr;
5472 SubSlice.Offset = 0;
5473 SubSlice.Length = VTSize;
5475 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
5476 if (Value.getNode()) {
5477 Store = DAG.getStore(Chain, dl, Value,
5478 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5479 DstPtrInfo.getWithOffset(DstOff), Align,
5480 MMOFlags);
5481 OutChains.push_back(Store);
5485 if (!Store.getNode()) {
5486 // The type might not be legal for the target. This should only happen
5487 // if the type is smaller than a legal type, as on PPC, so the right
5488 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
5489 // to Load/Store if NVT==VT.
5490 // FIXME does the case above also need this?
5491 EVT NVT = TLI.getTypeToTransformTo(C, VT);
5492 assert(NVT.bitsGE(VT));
5494 bool isDereferenceable =
5495 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
5496 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
5497 if (isDereferenceable)
5498 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
5500 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
5501 DAG.getMemBasePlusOffset(Src, SrcOff, dl),
5502 SrcPtrInfo.getWithOffset(SrcOff), VT,
5503 MinAlign(SrcAlign, SrcOff), SrcMMOFlags);
5504 OutLoadChains.push_back(Value.getValue(1));
5506 Store = DAG.getTruncStore(
5507 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5508 DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags);
5509 OutStoreChains.push_back(Store);
5511 SrcOff += VTSize;
5512 DstOff += VTSize;
5513 Size -= VTSize;
5516 unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
5517 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
5518 unsigned NumLdStInMemcpy = OutStoreChains.size();
5520 if (NumLdStInMemcpy) {
5521 // The memcpy may have been converted to a memset if it copies constants.
5522 // In that case there are only stores and no loads, and in the absence of
5523 // loads there is nothing to gang up.
5524 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
5525 // If the target does not care, just leave the ops as they are.
5526 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
5527 OutChains.push_back(OutLoadChains[i]);
5528 OutChains.push_back(OutStoreChains[i]);
5530 } else {
5531 // The number of ld/st pairs is within the limit set by the target.
5532 if (NumLdStInMemcpy <= GluedLdStLimit) {
5533 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
5534 NumLdStInMemcpy, OutLoadChains,
5535 OutStoreChains);
5536 } else {
5537 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
5538 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
5539 unsigned GlueIter = 0;
5541 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
5542 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
5543 unsigned IndexTo = NumLdStInMemcpy - GlueIter;
5545 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
5546 OutLoadChains, OutStoreChains);
5547 GlueIter += GluedLdStLimit;
5550 // Residual ld/st.
5551 if (RemainingLdStInMemcpy) {
5552 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
5553 RemainingLdStInMemcpy, OutLoadChains,
5554 OutStoreChains);
5559 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
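// Illustrative partitioning for the ganging above: with 7 load/store pairs
// and GluedLdStLimit == 3, the loop glues pairs [4,7) and then [1,4) working
// back from the tail, and the residual call glues the remaining pair [0,1).
// Each group's stores are rewritten to chain on a single TokenFactor over
// that group's load chains, so the loads are ganged ahead of the stores.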
5562 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
5563 SDValue Chain, SDValue Dst, SDValue Src,
5564 uint64_t Size, unsigned Align,
5565 bool isVol, bool AlwaysInline,
5566 MachinePointerInfo DstPtrInfo,
5567 MachinePointerInfo SrcPtrInfo) {
5568 // Turn a memmove of undef into a nop.
5569 if (Src.isUndef())
5570 return Chain;
5572 // Expand memmove to a series of load and store ops if the size operand falls
5573 // below a certain threshold.
5574 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5575 const DataLayout &DL = DAG.getDataLayout();
5576 LLVMContext &C = *DAG.getContext();
5577 std::vector<EVT> MemOps;
5578 bool DstAlignCanChange = false;
5579 MachineFunction &MF = DAG.getMachineFunction();
5580 MachineFrameInfo &MFI = MF.getFrameInfo();
5581 bool OptSize = shouldLowerMemFuncForSize(MF);
5582 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5583 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5584 DstAlignCanChange = true;
5585 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
5586 if (Align > SrcAlign)
5587 SrcAlign = Align;
5588 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
5590 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
5591 (DstAlignCanChange ? 0 : Align), SrcAlign,
5592 false, false, false, false,
5593 DstPtrInfo.getAddrSpace(),
5594 SrcPtrInfo.getAddrSpace(),
5595 DAG, TLI))
5596 return SDValue();
5598 if (DstAlignCanChange) {
5599 Type *Ty = MemOps[0].getTypeForEVT(C);
5600 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
5601 if (NewAlign > Align) {
5602 // Give the stack frame object a larger alignment if needed.
5603 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
5604 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
5605 Align = NewAlign;
5609 MachineMemOperand::Flags MMOFlags =
5610 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
5611 uint64_t SrcOff = 0, DstOff = 0;
5612 SmallVector<SDValue, 8> LoadValues;
5613 SmallVector<SDValue, 8> LoadChains;
5614 SmallVector<SDValue, 8> OutChains;
5615 unsigned NumMemOps = MemOps.size();
5616 for (unsigned i = 0; i < NumMemOps; i++) {
5617 EVT VT = MemOps[i];
5618 unsigned VTSize = VT.getSizeInBits() / 8;
5619 SDValue Value;
5621 bool isDereferenceable =
5622 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
5623 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
5624 if (isDereferenceable)
5625 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
5627 Value =
5628 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
5629 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags);
5630 LoadValues.push_back(Value);
5631 LoadChains.push_back(Value.getValue(1));
5632 SrcOff += VTSize;
5634 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
5635 OutChains.clear();
5636 for (unsigned i = 0; i < NumMemOps; i++) {
5637 EVT VT = MemOps[i];
5638 unsigned VTSize = VT.getSizeInBits() / 8;
5639 SDValue Store;
5641 Store = DAG.getStore(Chain, dl, LoadValues[i],
5642 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5643 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
5644 OutChains.push_back(Store);
5645 DstOff += VTSize;
5648 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
5651 /// Lower the call to 'memset' intrinsic function into a series of store
5652 /// operations.
5654 /// \param DAG Selection DAG where lowered code is placed.
5655 /// \param dl Link to corresponding IR location.
5656 /// \param Chain Control flow dependency.
5657 /// \param Dst Pointer to destination memory location.
5658 /// \param Src Value of byte to write into the memory.
5659 /// \param Size Number of bytes to write.
5660 /// \param Align Alignment of the destination in bytes.
5661 /// \param isVol True if destination is volatile.
5662 /// \param DstPtrInfo IR information on the memory pointer.
5663 /// \returns The new head of the control flow if lowering was successful;
5664 /// an empty SDValue otherwise.
5666 /// The function tries to replace 'llvm.memset' intrinsic with several store
5667 /// operations and value calculation code. This is usually profitable for small
5668 /// memory size.
5669 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
5670 SDValue Chain, SDValue Dst, SDValue Src,
5671 uint64_t Size, unsigned Align, bool isVol,
5672 MachinePointerInfo DstPtrInfo) {
5673 // Turn a memset of undef into a nop.
5674 if (Src.isUndef())
5675 return Chain;
5677 // Expand memset to a series of store ops if the size operand
5678 // falls below a certain threshold.
5679 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5680 std::vector<EVT> MemOps;
5681 bool DstAlignCanChange = false;
5682 MachineFunction &MF = DAG.getMachineFunction();
5683 MachineFrameInfo &MFI = MF.getFrameInfo();
5684 bool OptSize = shouldLowerMemFuncForSize(MF);
5685 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5686 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5687 DstAlignCanChange = true;
5688 bool IsZeroVal =
5689 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
5690 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
5691 Size, (DstAlignCanChange ? 0 : Align), 0,
5692 true, IsZeroVal, false, true,
5693 DstPtrInfo.getAddrSpace(), ~0u,
5694 DAG, TLI))
5695 return SDValue();
5697 if (DstAlignCanChange) {
5698 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
5699 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
5700 if (NewAlign > Align) {
5701 // Give the stack frame object a larger alignment if needed.
5702 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
5703 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
5704 Align = NewAlign;
5708 SmallVector<SDValue, 8> OutChains;
5709 uint64_t DstOff = 0;
5710 unsigned NumMemOps = MemOps.size();
5712 // Find the largest store and generate the bit pattern for it.
5713 EVT LargestVT = MemOps[0];
5714 for (unsigned i = 1; i < NumMemOps; i++)
5715 if (MemOps[i].bitsGT(LargestVT))
5716 LargestVT = MemOps[i];
5717 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
5719 for (unsigned i = 0; i < NumMemOps; i++) {
5720 EVT VT = MemOps[i];
5721 unsigned VTSize = VT.getSizeInBits() / 8;
5722 if (VTSize > Size) {
5723 // We are issuing an unaligned store that overlaps with the previous
5724 // store; adjust the offset accordingly.
5725 assert(i == NumMemOps-1 && i != 0);
5726 DstOff -= VTSize - Size;
5729 // If this store is smaller than the largest store, see whether we can get
5730 // the smaller value for free with a truncate.
5731 SDValue Value = MemSetValue;
5732 if (VT.bitsLT(LargestVT)) {
5733 if (!LargestVT.isVector() && !VT.isVector() &&
5734 TLI.isTruncateFree(LargestVT, VT))
5735 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
5736 else
5737 Value = getMemsetValue(Src, VT, DAG, dl);
5739 assert(Value.getValueType() == VT && "Value with wrong type.");
5740 SDValue Store = DAG.getStore(
5741 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5742 DstPtrInfo.getWithOffset(DstOff), Align,
5743 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
5744 OutChains.push_back(Store);
5745 DstOff += VT.getSizeInBits() / 8;
5746 Size -= VTSize;
5749 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
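// Illustrative tail handling for the loop above: a 12-byte memset of 0xAB
// with MemOps = { i64, i32 } materializes MemSetValue = 0xABABABABABABABAB
// once for the largest type; if the target reports i64 -> i32 truncation as
// free, the 4-byte tail store reuses a TRUNCATE of that value instead of
// recomputing the splat.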
5752 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
5753 unsigned AS) {
5754 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
5755 // pointer operands can be losslessly bitcasted to pointers of address space 0
5756 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) {
5757 report_fatal_error("cannot lower memory intrinsic in address space " +
5758 Twine(AS));
5762 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
5763 SDValue Src, SDValue Size, unsigned Align,
5764 bool isVol, bool AlwaysInline, bool isTailCall,
5765 MachinePointerInfo DstPtrInfo,
5766 MachinePointerInfo SrcPtrInfo) {
5767 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
5769 // Check to see if we should lower the memcpy to loads and stores first.
5770 // For cases within the target-specified limits, this is the best choice.
5771 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
5772 if (ConstantSize) {
5773 // Memcpy with size zero? Just return the original chain.
5774 if (ConstantSize->isNullValue())
5775 return Chain;
5777 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
5778 ConstantSize->getZExtValue(),Align,
5779 isVol, false, DstPtrInfo, SrcPtrInfo);
5780 if (Result.getNode())
5781 return Result;
5784 // Then check to see if we should lower the memcpy with target-specific
5785 // code. If the target chooses to do this, this is the next best.
5786 if (TSI) {
5787 SDValue Result = TSI->EmitTargetCodeForMemcpy(
5788 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
5789 DstPtrInfo, SrcPtrInfo);
5790 if (Result.getNode())
5791 return Result;
5794 // If we really need inline code and the target declined to provide it,
5795 // use a (potentially long) sequence of loads and stores.
5796 if (AlwaysInline) {
5797 assert(ConstantSize && "AlwaysInline requires a constant size!");
5798 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
5799 ConstantSize->getZExtValue(), Align, isVol,
5800 true, DstPtrInfo, SrcPtrInfo);
5803 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
5804 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
5806 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
5807 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
5808 // respect volatile, so they may do things like read or write memory
5809 // beyond the given memory regions. But fixing this isn't easy, and most
5810 // people don't care.
5812 // Emit a library call.
5813 TargetLowering::ArgListTy Args;
5814 TargetLowering::ArgListEntry Entry;
5815 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
5816 Entry.Node = Dst; Args.push_back(Entry);
5817 Entry.Node = Src; Args.push_back(Entry);
5818 Entry.Node = Size; Args.push_back(Entry);
5819 // FIXME: pass in SDLoc
5820 TargetLowering::CallLoweringInfo CLI(*this);
5821 CLI.setDebugLoc(dl)
5822 .setChain(Chain)
5823 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
5824 Dst.getValueType().getTypeForEVT(*getContext()),
5825 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
5826 TLI->getPointerTy(getDataLayout())),
5827 std::move(Args))
5828 .setDiscardResult()
5829 .setTailCall(isTailCall);
5831 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
5832 return CallResult.second;
5835 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl,
5836 SDValue Dst, unsigned DstAlign,
5837 SDValue Src, unsigned SrcAlign,
5838 SDValue Size, Type *SizeTy,
5839 unsigned ElemSz, bool isTailCall,
5840 MachinePointerInfo DstPtrInfo,
5841 MachinePointerInfo SrcPtrInfo) {
5842 // Emit a library call.
5843 TargetLowering::ArgListTy Args;
5844 TargetLowering::ArgListEntry Entry;
5845 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
5846 Entry.Node = Dst;
5847 Args.push_back(Entry);
5849 Entry.Node = Src;
5850 Args.push_back(Entry);
5852 Entry.Ty = SizeTy;
5853 Entry.Node = Size;
5854 Args.push_back(Entry);
5856 RTLIB::Libcall LibraryCall =
5857 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz);
5858 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
5859 report_fatal_error("Unsupported element size");
5861 TargetLowering::CallLoweringInfo CLI(*this);
5862 CLI.setDebugLoc(dl)
5863 .setChain(Chain)
5864 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
5865 Type::getVoidTy(*getContext()),
5866 getExternalSymbol(TLI->getLibcallName(LibraryCall),
5867 TLI->getPointerTy(getDataLayout())),
5868 std::move(Args))
5869 .setDiscardResult()
5870 .setTailCall(isTailCall);
5872 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
5873 return CallResult.second;
5876 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
5877 SDValue Src, SDValue Size, unsigned Align,
5878 bool isVol, bool isTailCall,
5879 MachinePointerInfo DstPtrInfo,
5880 MachinePointerInfo SrcPtrInfo) {
5881 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
5883 // Check to see if we should lower the memmove to loads and stores first.
5884 // For cases within the target-specified limits, this is the best choice.
5885 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
5886 if (ConstantSize) {
5887 // Memmove with size zero? Just return the original chain.
5888 if (ConstantSize->isNullValue())
5889 return Chain;
5891 SDValue Result =
5892 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
5893 ConstantSize->getZExtValue(), Align, isVol,
5894 false, DstPtrInfo, SrcPtrInfo);
5895 if (Result.getNode())
5896 return Result;
5899 // Then check to see if we should lower the memmove with target-specific
5900 // code. If the target chooses to do this, this is the next best.
5901 if (TSI) {
5902 SDValue Result = TSI->EmitTargetCodeForMemmove(
5903 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
5904 if (Result.getNode())
5905 return Result;
5908 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
5909 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
5911 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
5912 // not be safe. See memcpy above for more details.
5914 // Emit a library call.
5915 TargetLowering::ArgListTy Args;
5916 TargetLowering::ArgListEntry Entry;
5917 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
5918 Entry.Node = Dst; Args.push_back(Entry);
5919 Entry.Node = Src; Args.push_back(Entry);
5920 Entry.Node = Size; Args.push_back(Entry);
5921 // FIXME: pass in SDLoc
5922 TargetLowering::CallLoweringInfo CLI(*this);
5923 CLI.setDebugLoc(dl)
5924 .setChain(Chain)
5925 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
5926 Dst.getValueType().getTypeForEVT(*getContext()),
5927 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
5928 TLI->getPointerTy(getDataLayout())),
5929 std::move(Args))
5930 .setDiscardResult()
5931 .setTailCall(isTailCall);
5933 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
5934 return CallResult.second;
5937 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
5938 SDValue Dst, unsigned DstAlign,
5939 SDValue Src, unsigned SrcAlign,
5940 SDValue Size, Type *SizeTy,
5941 unsigned ElemSz, bool isTailCall,
5942 MachinePointerInfo DstPtrInfo,
5943 MachinePointerInfo SrcPtrInfo) {
5944 // Emit a library call.
5945 TargetLowering::ArgListTy Args;
5946 TargetLowering::ArgListEntry Entry;
5947 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
5948 Entry.Node = Dst;
5949 Args.push_back(Entry);
5951 Entry.Node = Src;
5952 Args.push_back(Entry);
5954 Entry.Ty = SizeTy;
5955 Entry.Node = Size;
5956 Args.push_back(Entry);
5958 RTLIB::Libcall LibraryCall =
5959 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz);
5960 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
5961 report_fatal_error("Unsupported element size");
5963 TargetLowering::CallLoweringInfo CLI(*this);
5964 CLI.setDebugLoc(dl)
5965 .setChain(Chain)
5966 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
5967 Type::getVoidTy(*getContext()),
5968 getExternalSymbol(TLI->getLibcallName(LibraryCall),
5969 TLI->getPointerTy(getDataLayout())),
5970 std::move(Args))
5971 .setDiscardResult()
5972 .setTailCall(isTailCall);
5974 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
5975 return CallResult.second;
5978 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
5979 SDValue Src, SDValue Size, unsigned Align,
5980 bool isVol, bool isTailCall,
5981 MachinePointerInfo DstPtrInfo) {
5982 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
5984 // Check to see if we should lower the memset to stores first.
5985 // For cases within the target-specified limits, this is the best choice.
5986 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
5987 if (ConstantSize) {
5988 // Memset with size zero? Just return the original chain.
5989 if (ConstantSize->isNullValue())
5990 return Chain;
5992 SDValue Result =
5993 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
5994 Align, isVol, DstPtrInfo);
5996 if (Result.getNode())
5997 return Result;
6000 // Then check to see if we should lower the memset with target-specific
6001 // code. If the target chooses to do this, this is the next best.
6002 if (TSI) {
6003 SDValue Result = TSI->EmitTargetCodeForMemset(
6004 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
6005 if (Result.getNode())
6006 return Result;
6009 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6011 // Emit a library call.
6012 Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
6013 TargetLowering::ArgListTy Args;
6014 TargetLowering::ArgListEntry Entry;
6015 Entry.Node = Dst; Entry.Ty = IntPtrTy;
6016 Args.push_back(Entry);
6017 Entry.Node = Src;
6018 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
6019 Args.push_back(Entry);
6020 Entry.Node = Size;
6021 Entry.Ty = IntPtrTy;
6022 Args.push_back(Entry);
6024 // FIXME: pass in SDLoc
6025 TargetLowering::CallLoweringInfo CLI(*this);
6026 CLI.setDebugLoc(dl)
6027 .setChain(Chain)
6028 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
6029 Dst.getValueType().getTypeForEVT(*getContext()),
6030 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
6031 TLI->getPointerTy(getDataLayout())),
6032 std::move(Args))
6033 .setDiscardResult()
6034 .setTailCall(isTailCall);
6036 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6037 return CallResult.second;
6040 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl,
6041 SDValue Dst, unsigned DstAlign,
6042 SDValue Value, SDValue Size, Type *SizeTy,
6043 unsigned ElemSz, bool isTailCall,
6044 MachinePointerInfo DstPtrInfo) {
6045 // Emit a library call.
6046 TargetLowering::ArgListTy Args;
6047 TargetLowering::ArgListEntry Entry;
6048 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6049 Entry.Node = Dst;
6050 Args.push_back(Entry);
6052 Entry.Ty = Type::getInt8Ty(*getContext());
6053 Entry.Node = Value;
6054 Args.push_back(Entry);
6056 Entry.Ty = SizeTy;
6057 Entry.Node = Size;
6058 Args.push_back(Entry);
6060 RTLIB::Libcall LibraryCall =
6061 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6062 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6063 report_fatal_error("Unsupported element size");
6065 TargetLowering::CallLoweringInfo CLI(*this);
6066 CLI.setDebugLoc(dl)
6067 .setChain(Chain)
6068 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6069 Type::getVoidTy(*getContext()),
6070 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6071 TLI->getPointerTy(getDataLayout())),
6072 std::move(Args))
6073 .setDiscardResult()
6074 .setTailCall(isTailCall);
6076 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6077 return CallResult.second;
6080 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6081 SDVTList VTList, ArrayRef<SDValue> Ops,
6082 MachineMemOperand *MMO) {
6083 FoldingSetNodeID ID;
6084 ID.AddInteger(MemVT.getRawBits());
6085 AddNodeIDNode(ID, Opcode, VTList, Ops);
6086 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6087 void* IP = nullptr;
6088 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6089 cast<AtomicSDNode>(E)->refineAlignment(MMO);
6090 return SDValue(E, 0);
6093 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6094 VTList, MemVT, MMO);
6095 createOperands(N, Ops);
6097 CSEMap.InsertNode(N, IP);
6098 InsertNode(N);
6099 return SDValue(N, 0);
6102 SDValue SelectionDAG::getAtomicCmpSwap(
6103 unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain,
6104 SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
6105 unsigned Alignment, AtomicOrdering SuccessOrdering,
6106 AtomicOrdering FailureOrdering, SyncScope::ID SSID) {
6107 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
6108 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
6109 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
6111 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6112 Alignment = getEVTAlignment(MemVT);
6114 MachineFunction &MF = getMachineFunction();
6116 // FIXME: Volatile isn't really correct; we should keep track of atomic
6117 // orderings in the memoperand.
6118 auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad |
6119 MachineMemOperand::MOStore;
6120 MachineMemOperand *MMO =
6121 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
6122 AAMDNodes(), nullptr, SSID, SuccessOrdering,
6123 FailureOrdering);
6125 return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO);
6128 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
6129 EVT MemVT, SDVTList VTs, SDValue Chain,
6130 SDValue Ptr, SDValue Cmp, SDValue Swp,
6131 MachineMemOperand *MMO) {
6132 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
6133 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
6134 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
6136 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
6137 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6140 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6141 SDValue Chain, SDValue Ptr, SDValue Val,
6142 const Value *PtrVal, unsigned Alignment,
6143 AtomicOrdering Ordering,
6144 SyncScope::ID SSID) {
6145 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6146 Alignment = getEVTAlignment(MemVT);
6148 MachineFunction &MF = getMachineFunction();
6149 // An atomic store does not load. An atomic load does not store.
6150 // (An atomicrmw obviously both loads and stores.)
6151 // For now, atomics are always considered volatile, and they are
6152 // chained as such.
6153 // FIXME: Volatile isn't really correct; we should keep track of atomic
6154 // orderings in the memoperand.
6155 auto Flags = MachineMemOperand::MOVolatile;
6156 if (Opcode != ISD::ATOMIC_STORE)
6157 Flags |= MachineMemOperand::MOLoad;
6158 if (Opcode != ISD::ATOMIC_LOAD)
6159 Flags |= MachineMemOperand::MOStore;
6161 MachineMemOperand *MMO =
6162 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
6163 MemVT.getStoreSize(), Alignment, AAMDNodes(),
6164 nullptr, SSID, Ordering);
6166 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
6169 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6170 SDValue Chain, SDValue Ptr, SDValue Val,
6171 MachineMemOperand *MMO) {
6172 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
6173 Opcode == ISD::ATOMIC_LOAD_SUB ||
6174 Opcode == ISD::ATOMIC_LOAD_AND ||
6175 Opcode == ISD::ATOMIC_LOAD_CLR ||
6176 Opcode == ISD::ATOMIC_LOAD_OR ||
6177 Opcode == ISD::ATOMIC_LOAD_XOR ||
6178 Opcode == ISD::ATOMIC_LOAD_NAND ||
6179 Opcode == ISD::ATOMIC_LOAD_MIN ||
6180 Opcode == ISD::ATOMIC_LOAD_MAX ||
6181 Opcode == ISD::ATOMIC_LOAD_UMIN ||
6182 Opcode == ISD::ATOMIC_LOAD_UMAX ||
6183 Opcode == ISD::ATOMIC_SWAP ||
6184 Opcode == ISD::ATOMIC_STORE) &&
6185 "Invalid Atomic Op");
6187 EVT VT = Val.getValueType();
6189 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
6190 getVTList(VT, MVT::Other);
6191 SDValue Ops[] = {Chain, Ptr, Val};
6192 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6195 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6196 EVT VT, SDValue Chain, SDValue Ptr,
6197 MachineMemOperand *MMO) {
6198 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
6200 SDVTList VTs = getVTList(VT, MVT::Other);
6201 SDValue Ops[] = {Chain, Ptr};
6202 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6205 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
6206 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
6207 if (Ops.size() == 1)
6208 return Ops[0];
6210 SmallVector<EVT, 4> VTs;
6211 VTs.reserve(Ops.size());
6212 for (unsigned i = 0; i < Ops.size(); ++i)
6213 VTs.push_back(Ops[i].getValueType());
6214 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
6217 SDValue SelectionDAG::getMemIntrinsicNode(
6218 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
6219 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align,
6220 MachineMemOperand::Flags Flags, unsigned Size) {
6221 if (Align == 0) // Ensure that codegen never sees alignment 0
6222 Align = getEVTAlignment(MemVT);
6224 if (!Size)
6225 Size = MemVT.getStoreSize();
6227 MachineFunction &MF = getMachineFunction();
6228 MachineMemOperand *MMO =
6229 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);
6231 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
6234 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
6235 SDVTList VTList,
6236 ArrayRef<SDValue> Ops, EVT MemVT,
6237 MachineMemOperand *MMO) {
6238 assert((Opcode == ISD::INTRINSIC_VOID ||
6239 Opcode == ISD::INTRINSIC_W_CHAIN ||
6240 Opcode == ISD::PREFETCH ||
6241 Opcode == ISD::LIFETIME_START ||
6242 Opcode == ISD::LIFETIME_END ||
6243 ((int)Opcode <= std::numeric_limits<int>::max() &&
6244 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
6245 "Opcode is not a memory-accessing opcode!");
6247 // Memoize the node unless it returns a flag.
6248 MemIntrinsicSDNode *N;
6249 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
6250 FoldingSetNodeID ID;
6251 AddNodeIDNode(ID, Opcode, VTList, Ops);
6252 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
6253 Opcode, dl.getIROrder(), VTList, MemVT, MMO));
6254 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6255 void *IP = nullptr;
6256 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6257 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
6258 return SDValue(E, 0);
6261 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6262 VTList, MemVT, MMO);
6263 createOperands(N, Ops);
6265 CSEMap.InsertNode(N, IP);
6266 } else {
6267 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6268 VTList, MemVT, MMO);
6269 createOperands(N, Ops);
6270 }
6271 InsertNode(N);
6272 return SDValue(N, 0);
6273 }
6275 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
6276 /// MachinePointerInfo record from it. This is particularly useful because the
6277 /// code generator has many cases where it doesn't bother passing in a
6278 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
6279 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
6280 SelectionDAG &DAG, SDValue Ptr,
6281 int64_t Offset = 0) {
6282 // If this is FI+Offset, we can model it.
6283 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
6284 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
6285 FI->getIndex(), Offset);
6287 // If this is (FI+Offset1)+Offset2, we can model it.
6288 if (Ptr.getOpcode() != ISD::ADD ||
6289 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
6290 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
6291 return Info;
6293 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6294 return MachinePointerInfo::getFixedStack(
6295 DAG.getMachineFunction(), FI,
6296 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
6297 }
6299 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
6300 /// MachinePointerInfo record from it. This is particularly useful because the
6301 /// code generator has many cases where it doesn't bother passing in a
6302 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
6303 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
6304 SelectionDAG &DAG, SDValue Ptr,
6305 SDValue OffsetOp) {
6306 // If the 'Offset' value isn't a constant (and isn't undef), we can't handle this.
6307 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
6308 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
6309 if (OffsetOp.isUndef())
6310 return InferPointerInfo(Info, DAG, Ptr);
6311 return Info;
6312 }
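// Worked example (illustrative comment only): given a pointer of the form
//   Ptr = (add (FrameIndex 3), (Constant 8))
// the two overloads above combine to produce
//   MachinePointerInfo::getFixedStack(MF, /*FI=*/3, /*Offset=*/8);
// any pointer shape they cannot decompose returns the incoming Info as-is.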
6314 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
6315 EVT VT, const SDLoc &dl, SDValue Chain,
6316 SDValue Ptr, SDValue Offset,
6317 MachinePointerInfo PtrInfo, EVT MemVT,
6318 unsigned Alignment,
6319 MachineMemOperand::Flags MMOFlags,
6320 const AAMDNodes &AAInfo, const MDNode *Ranges) {
6321 assert(Chain.getValueType() == MVT::Other &&
6322 "Invalid chain type");
6323 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6324 Alignment = getEVTAlignment(MemVT);
6326 MMOFlags |= MachineMemOperand::MOLoad;
6327 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
6328 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
6329 // clients.
6330 if (PtrInfo.V.isNull())
6331 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
6333 MachineFunction &MF = getMachineFunction();
6334 MachineMemOperand *MMO = MF.getMachineMemOperand(
6335 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges);
6336 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
6337 }
6339 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
6340 EVT VT, const SDLoc &dl, SDValue Chain,
6341 SDValue Ptr, SDValue Offset, EVT MemVT,
6342 MachineMemOperand *MMO) {
6343 if (VT == MemVT) {
6344 ExtType = ISD::NON_EXTLOAD;
6345 } else if (ExtType == ISD::NON_EXTLOAD) {
6346 assert(VT == MemVT && "Non-extending load from different memory type!");
6347 } else {
6348 // Extending load.
6349 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
6350 "Should only be an extending load, not truncating!");
6351 assert(VT.isInteger() == MemVT.isInteger() &&
6352 "Cannot convert from FP to Int or Int -> FP!");
6353 assert(VT.isVector() == MemVT.isVector() &&
6354 "Cannot use an ext load to convert to or from a vector!");
6355 assert((!VT.isVector() ||
6356 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
6357 "Cannot use an ext load to change the number of vector elements!");
6358 }
6360 bool Indexed = AM != ISD::UNINDEXED;
6361 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
6363 SDVTList VTs = Indexed ?
6364 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
6365 SDValue Ops[] = { Chain, Ptr, Offset };
6366 FoldingSetNodeID ID;
6367 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
6368 ID.AddInteger(MemVT.getRawBits());
6369 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
6370 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
6371 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6372 void *IP = nullptr;
6373 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6374 cast<LoadSDNode>(E)->refineAlignment(MMO);
6375 return SDValue(E, 0);
6376 }
6377 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
6378 ExtType, MemVT, MMO);
6379 createOperands(N, Ops);
6381 CSEMap.InsertNode(N, IP);
6382 InsertNode(N);
6383 SDValue V(N, 0);
6384 NewSDValueDbgMsg(V, "Creating new node: ", this);
6385 return V;
6386 }
6388 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
6389 SDValue Ptr, MachinePointerInfo PtrInfo,
6390 unsigned Alignment,
6391 MachineMemOperand::Flags MMOFlags,
6392 const AAMDNodes &AAInfo, const MDNode *Ranges) {
6393 SDValue Undef = getUNDEF(Ptr.getValueType());
6394 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
6395 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
6396 }
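// Usage sketch (illustrative comment only, relying on the header's default
// alignment/flag arguments): a plain i32 load whose MachinePointerInfo is
// inferred from the pointer, with DAG/dl/Chain/Ptr as hypothetical names:
//   SDValue L = DAG.getLoad(MVT::i32, dl, Chain, Ptr, MachinePointerInfo());
// L.getValue(0) is the loaded value, L.getValue(1) the output chain.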
6398 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
6399 SDValue Ptr, MachineMemOperand *MMO) {
6400 SDValue Undef = getUNDEF(Ptr.getValueType());
6401 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
6402 VT, MMO);
6403 }
6405 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
6406 EVT VT, SDValue Chain, SDValue Ptr,
6407 MachinePointerInfo PtrInfo, EVT MemVT,
6408 unsigned Alignment,
6409 MachineMemOperand::Flags MMOFlags,
6410 const AAMDNodes &AAInfo) {
6411 SDValue Undef = getUNDEF(Ptr.getValueType());
6412 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
6413 MemVT, Alignment, MMOFlags, AAInfo);
6414 }
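// Usage sketch (illustrative comment only, defaults from the header): an i8
// memory value zero-extended into an i32 result:
//   SDValue L = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain, Ptr,
//                              MachinePointerInfo(), /*MemVT=*/MVT::i8);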
6416 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
6417 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
6418 MachineMemOperand *MMO) {
6419 SDValue Undef = getUNDEF(Ptr.getValueType());
6420 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
6421 MemVT, MMO);
6422 }
6424 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
6425 SDValue Base, SDValue Offset,
6426 ISD::MemIndexedMode AM) {
6427 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
6428 assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
6429 // Don't propagate the invariant or dereferenceable flags.
6430 auto MMOFlags =
6431 LD->getMemOperand()->getFlags() &
6432 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
6433 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
6434 LD->getChain(), Base, Offset, LD->getPointerInfo(),
6435 LD->getMemoryVT(), LD->getAlignment(), MMOFlags,
6436 LD->getAAInfo());
6437 }
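// Usage sketch (illustrative comment only): converting an existing unindexed
// LoadSDNode *LD into a pre-incremented form, with Inc a hypothetical
// constant increment:
//   SDValue PreInc = DAG.getIndexedLoad(SDValue(LD, 0), dl, LD->getBasePtr(),
//                                       Inc, ISD::PRE_INC);
// The indexed node produces the updated base pointer as an extra result.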
6439 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6440 SDValue Ptr, MachinePointerInfo PtrInfo,
6441 unsigned Alignment,
6442 MachineMemOperand::Flags MMOFlags,
6443 const AAMDNodes &AAInfo) {
6444 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
6445 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6446 Alignment = getEVTAlignment(Val.getValueType());
6448 MMOFlags |= MachineMemOperand::MOStore;
6449 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
6451 if (PtrInfo.V.isNull())
6452 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
6454 MachineFunction &MF = getMachineFunction();
6455 MachineMemOperand *MMO = MF.getMachineMemOperand(
6456 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo);
6457 return getStore(Chain, dl, Val, Ptr, MMO);
6458 }
6460 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6461 SDValue Ptr, MachineMemOperand *MMO) {
6462 assert(Chain.getValueType() == MVT::Other &&
6463 "Invalid chain type");
6464 EVT VT = Val.getValueType();
6465 SDVTList VTs = getVTList(MVT::Other);
6466 SDValue Undef = getUNDEF(Ptr.getValueType());
6467 SDValue Ops[] = { Chain, Val, Ptr, Undef };
6468 FoldingSetNodeID ID;
6469 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6470 ID.AddInteger(VT.getRawBits());
6471 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
6472 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
6473 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6474 void *IP = nullptr;
6475 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6476 cast<StoreSDNode>(E)->refineAlignment(MMO);
6477 return SDValue(E, 0);
6478 }
6479 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6480 ISD::UNINDEXED, false, VT, MMO);
6481 createOperands(N, Ops);
6483 CSEMap.InsertNode(N, IP);
6484 InsertNode(N);
6485 SDValue V(N, 0);
6486 NewSDValueDbgMsg(V, "Creating new node: ", this);
6487 return V;
6488 }
6490 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6491 SDValue Ptr, MachinePointerInfo PtrInfo,
6492 EVT SVT, unsigned Alignment,
6493 MachineMemOperand::Flags MMOFlags,
6494 const AAMDNodes &AAInfo) {
6495 assert(Chain.getValueType() == MVT::Other &&
6496 "Invalid chain type");
6497 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6498 Alignment = getEVTAlignment(SVT);
6500 MMOFlags |= MachineMemOperand::MOStore;
6501 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
6503 if (PtrInfo.V.isNull())
6504 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
6506 MachineFunction &MF = getMachineFunction();
6507 MachineMemOperand *MMO = MF.getMachineMemOperand(
6508 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
6509 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
6510 }
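// Usage sketch (illustrative comment only): storing only the low 8 bits of a
// hypothetical i32 value Val:
//   SDValue St = DAG.getTruncStore(Chain, dl, Val, Ptr,
//                                  MachinePointerInfo(), /*SVT=*/MVT::i8);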
6512 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6513 SDValue Ptr, EVT SVT,
6514 MachineMemOperand *MMO) {
6515 EVT VT = Val.getValueType();
6517 assert(Chain.getValueType() == MVT::Other &&
6518 "Invalid chain type");
6519 if (VT == SVT)
6520 return getStore(Chain, dl, Val, Ptr, MMO);
6522 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
6523 "Should only be a truncating store, not extending!");
6524 assert(VT.isInteger() == SVT.isInteger() &&
6525 "Can't do FP-INT conversion!");
6526 assert(VT.isVector() == SVT.isVector() &&
6527 "Cannot use trunc store to convert to or from a vector!");
6528 assert((!VT.isVector() ||
6529 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
6530 "Cannot use trunc store to change the number of vector elements!");
6532 SDVTList VTs = getVTList(MVT::Other);
6533 SDValue Undef = getUNDEF(Ptr.getValueType());
6534 SDValue Ops[] = { Chain, Val, Ptr, Undef };
6535 FoldingSetNodeID ID;
6536 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6537 ID.AddInteger(SVT.getRawBits());
6538 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
6539 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
6540 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6541 void *IP = nullptr;
6542 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6543 cast<StoreSDNode>(E)->refineAlignment(MMO);
6544 return SDValue(E, 0);
6545 }
6546 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6547 ISD::UNINDEXED, true, SVT, MMO);
6548 createOperands(N, Ops);
6550 CSEMap.InsertNode(N, IP);
6551 InsertNode(N);
6552 SDValue V(N, 0);
6553 NewSDValueDbgMsg(V, "Creating new node: ", this);
6554 return V;
6555 }
6557 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
6558 SDValue Base, SDValue Offset,
6559 ISD::MemIndexedMode AM) {
6560 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
6561 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
6562 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
6563 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
6564 FoldingSetNodeID ID;
6565 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6566 ID.AddInteger(ST->getMemoryVT().getRawBits());
6567 ID.AddInteger(ST->getRawSubclassData());
6568 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
6569 void *IP = nullptr;
6570 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
6571 return SDValue(E, 0);
6573 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
6574 ST->isTruncatingStore(), ST->getMemoryVT(),
6575 ST->getMemOperand());
6576 createOperands(N, Ops);
6578 CSEMap.InsertNode(N, IP);
6579 InsertNode(N);
6580 SDValue V(N, 0);
6581 NewSDValueDbgMsg(V, "Creating new node: ", this);
6582 return V;
6583 }
6585 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
6586 SDValue Ptr, SDValue Mask, SDValue PassThru,
6587 EVT MemVT, MachineMemOperand *MMO,
6588 ISD::LoadExtType ExtTy, bool isExpanding) {
6589 SDVTList VTs = getVTList(VT, MVT::Other);
6590 SDValue Ops[] = { Chain, Ptr, Mask, PassThru };
6591 FoldingSetNodeID ID;
6592 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
6593 ID.AddInteger(VT.getRawBits());
6594 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
6595 dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO));
6596 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6597 void *IP = nullptr;
6598 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6599 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
6600 return SDValue(E, 0);
6601 }
6602 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6603 ExtTy, isExpanding, MemVT, MMO);
6604 createOperands(N, Ops);
6606 CSEMap.InsertNode(N, IP);
6607 InsertNode(N);
6608 SDValue V(N, 0);
6609 NewSDValueDbgMsg(V, "Creating new node: ", this);
6610 return V;
6611 }
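// Usage sketch (illustrative comment only): a non-extending, non-expanding
// masked load, with Mask and PassThru hypothetical vectors of matching width:
//   SDValue ML = DAG.getMaskedLoad(VT, dl, Chain, Ptr, Mask, PassThru,
//                                  /*MemVT=*/VT, MMO, ISD::NON_EXTLOAD,
//                                  /*isExpanding=*/false);
// Lanes whose mask bit is false take their value from PassThru.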
6613 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
6614 SDValue Val, SDValue Ptr, SDValue Mask,
6615 EVT MemVT, MachineMemOperand *MMO,
6616 bool IsTruncating, bool IsCompressing) {
6617 assert(Chain.getValueType() == MVT::Other &&
6618 "Invalid chain type");
6619 EVT VT = Val.getValueType();
6620 SDVTList VTs = getVTList(MVT::Other);
6621 SDValue Ops[] = { Chain, Val, Ptr, Mask };
6622 FoldingSetNodeID ID;
6623 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
6624 ID.AddInteger(VT.getRawBits());
6625 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
6626 dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO));
6627 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6628 void *IP = nullptr;
6629 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6630 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
6631 return SDValue(E, 0);
6632 }
6633 auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6634 IsTruncating, IsCompressing, MemVT, MMO);
6635 createOperands(N, Ops);
6637 CSEMap.InsertNode(N, IP);
6638 InsertNode(N);
6639 SDValue V(N, 0);
6640 NewSDValueDbgMsg(V, "Creating new node: ", this);
6641 return V;
6642 }
6644 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
6645 ArrayRef<SDValue> Ops,
6646 MachineMemOperand *MMO) {
6647 assert(Ops.size() == 6 && "Incompatible number of operands");
6649 FoldingSetNodeID ID;
6650 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
6651 ID.AddInteger(VT.getRawBits());
6652 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
6653 dl.getIROrder(), VTs, VT, MMO));
6654 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6655 void *IP = nullptr;
6656 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6657 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
6658 return SDValue(E, 0);
6659 }
6661 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
6662 VTs, VT, MMO);
6663 createOperands(N, Ops);
6665 assert(N->getPassThru().getValueType() == N->getValueType(0) &&
6666 "Incompatible type of the PassThru value in MaskedGatherSDNode");
6667 assert(N->getMask().getValueType().getVectorNumElements() ==
6668 N->getValueType(0).getVectorNumElements() &&
6669 "Vector width mismatch between mask and data");
6670 assert(N->getIndex().getValueType().getVectorNumElements() >=
6671 N->getValueType(0).getVectorNumElements() &&
6672 "Vector width mismatch between index and data");
6673 assert(isa<ConstantSDNode>(N->getScale()) &&
6674 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
6675 "Scale should be a constant power of 2");
6677 CSEMap.InsertNode(N, IP);
6678 InsertNode(N);
6679 SDValue V(N, 0);
6680 NewSDValueDbgMsg(V, "Creating new node: ", this);
6681 return V;
6682 }
6684 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
6685 ArrayRef<SDValue> Ops,
6686 MachineMemOperand *MMO) {
6687 assert(Ops.size() == 6 && "Incompatible number of operands");
6689 FoldingSetNodeID ID;
6690 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
6691 ID.AddInteger(VT.getRawBits());
6692 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
6693 dl.getIROrder(), VTs, VT, MMO));
6694 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6695 void *IP = nullptr;
6696 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6697 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
6698 return SDValue(E, 0);
6699 }
6700 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
6701 VTs, VT, MMO);
6702 createOperands(N, Ops);
6704 assert(N->getMask().getValueType().getVectorNumElements() ==
6705 N->getValue().getValueType().getVectorNumElements() &&
6706 "Vector width mismatch between mask and data");
6707 assert(N->getIndex().getValueType().getVectorNumElements() >=
6708 N->getValue().getValueType().getVectorNumElements() &&
6709 "Vector width mismatch between index and data");
6710 assert(isa<ConstantSDNode>(N->getScale()) &&
6711 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
6712 "Scale should be a constant power of 2");
6714 CSEMap.InsertNode(N, IP);
6715 InsertNode(N);
6716 SDValue V(N, 0);
6717 NewSDValueDbgMsg(V, "Creating new node: ", this);
6718 return V;
6719 }
6721 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
6722 SDValue Ptr, SDValue SV, unsigned Align) {
6723 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
6724 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
6725 }
6727 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6728 ArrayRef<SDUse> Ops) {
6729 switch (Ops.size()) {
6730 case 0: return getNode(Opcode, DL, VT);
6731 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
6732 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
6733 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
6734 default: break;
6735 }
6737 // Copy from an SDUse array into an SDValue array for use with
6738 // the regular getNode logic.
6739 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
6740 return getNode(Opcode, DL, VT, NewOps);
6741 }
6743 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6744 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
6745 unsigned NumOps = Ops.size();
6746 switch (NumOps) {
6747 case 0: return getNode(Opcode, DL, VT);
6748 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
6749 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
6750 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
6751 default: break;
6752 }
6754 switch (Opcode) {
6755 default: break;
6756 case ISD::CONCAT_VECTORS:
6757 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
6758 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
6759 return V;
6760 break;
6761 case ISD::SELECT_CC:
6762 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
6763 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
6764 "LHS and RHS of condition must have same type!");
6765 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
6766 "True and False arms of SelectCC must have same type!");
6767 assert(Ops[2].getValueType() == VT &&
6768 "select_cc node must be of same type as true and false value!");
6769 break;
6770 case ISD::BR_CC:
6771 assert(NumOps == 5 && "BR_CC takes 5 operands!");
6772 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
6773 "LHS/RHS of comparison should match types!");
6774 break;
6775 }
6777 // Memoize nodes.
6778 SDNode *N;
6779 SDVTList VTs = getVTList(VT);
6781 if (VT != MVT::Glue) {
6782 FoldingSetNodeID ID;
6783 AddNodeIDNode(ID, Opcode, VTs, Ops);
6784 void *IP = nullptr;
6786 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
6787 return SDValue(E, 0);
6789 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6790 createOperands(N, Ops);
6792 CSEMap.InsertNode(N, IP);
6793 } else {
6794 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6795 createOperands(N, Ops);
6796 }
6798 InsertNode(N);
6799 SDValue V(N, 0);
6800 NewSDValueDbgMsg(V, "Creating new node: ", this);
6801 return V;
6802 }
6804 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
6805 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
6806 return getNode(Opcode, DL, getVTList(ResultTys), Ops);
6807 }
6809 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
6810 ArrayRef<SDValue> Ops) {
6811 if (VTList.NumVTs == 1)
6812 return getNode(Opcode, DL, VTList.VTs[0], Ops);
6814 #if 0
6815 switch (Opcode) {
6816 // FIXME: figure out how to safely handle things like
6817 // int foo(int x) { return 1 << (x & 255); }
6818 // int bar() { return foo(256); }
6819 case ISD::SRA_PARTS:
6820 case ISD::SRL_PARTS:
6821 case ISD::SHL_PARTS:
6822 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
6823 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
6824 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
6825 else if (N3.getOpcode() == ISD::AND)
6826 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
6827 // If the and is only masking out bits that cannot affect the shift,
6828 // eliminate the and.
6829 unsigned NumBits = VT.getScalarSizeInBits()*2;
6830 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
6831 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
6832 }
6833 break;
6834 }
6835 #endif
6837 // Memoize the node unless it returns a flag.
6838 SDNode *N;
6839 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
6840 FoldingSetNodeID ID;
6841 AddNodeIDNode(ID, Opcode, VTList, Ops);
6842 void *IP = nullptr;
6843 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
6844 return SDValue(E, 0);
6846 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
6847 createOperands(N, Ops);
6848 CSEMap.InsertNode(N, IP);
6849 } else {
6850 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
6851 createOperands(N, Ops);
6852 }
6853 InsertNode(N);
6854 SDValue V(N, 0);
6855 NewSDValueDbgMsg(V, "Creating new node: ", this);
6856 return V;
6857 }
6859 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
6860 SDVTList VTList) {
6861 return getNode(Opcode, DL, VTList, None);
6862 }
6864 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
6865 SDValue N1) {
6866 SDValue Ops[] = { N1 };
6867 return getNode(Opcode, DL, VTList, Ops);
6868 }
6870 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
6871 SDValue N1, SDValue N2) {
6872 SDValue Ops[] = { N1, N2 };
6873 return getNode(Opcode, DL, VTList, Ops);
6874 }
6876 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
6877 SDValue N1, SDValue N2, SDValue N3) {
6878 SDValue Ops[] = { N1, N2, N3 };
6879 return getNode(Opcode, DL, VTList, Ops);
6880 }
6882 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
6883 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
6884 SDValue Ops[] = { N1, N2, N3, N4 };
6885 return getNode(Opcode, DL, VTList, Ops);
6886 }
6888 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
6889 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
6890 SDValue N5) {
6891 SDValue Ops[] = { N1, N2, N3, N4, N5 };
6892 return getNode(Opcode, DL, VTList, Ops);
6893 }
6895 SDVTList SelectionDAG::getVTList(EVT VT) {
6896 return makeVTList(SDNode::getValueTypeList(VT), 1);
6897 }
6899 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
6900 FoldingSetNodeID ID;
6901 ID.AddInteger(2U);
6902 ID.AddInteger(VT1.getRawBits());
6903 ID.AddInteger(VT2.getRawBits());
6905 void *IP = nullptr;
6906 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
6907 if (!Result) {
6908 EVT *Array = Allocator.Allocate<EVT>(2);
6909 Array[0] = VT1;
6910 Array[1] = VT2;
6911 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
6912 VTListMap.InsertNode(Result, IP);
6913 }
6914 return Result->getSDVTList();
6915 }
6917 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
6918 FoldingSetNodeID ID;
6919 ID.AddInteger(3U);
6920 ID.AddInteger(VT1.getRawBits());
6921 ID.AddInteger(VT2.getRawBits());
6922 ID.AddInteger(VT3.getRawBits());
6924 void *IP = nullptr;
6925 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
6926 if (!Result) {
6927 EVT *Array = Allocator.Allocate<EVT>(3);
6928 Array[0] = VT1;
6929 Array[1] = VT2;
6930 Array[2] = VT3;
6931 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
6932 VTListMap.InsertNode(Result, IP);
6933 }
6934 return Result->getSDVTList();
6935 }
6937 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
6938 FoldingSetNodeID ID;
6939 ID.AddInteger(4U);
6940 ID.AddInteger(VT1.getRawBits());
6941 ID.AddInteger(VT2.getRawBits());
6942 ID.AddInteger(VT3.getRawBits());
6943 ID.AddInteger(VT4.getRawBits());
6945 void *IP = nullptr;
6946 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
6947 if (!Result) {
6948 EVT *Array = Allocator.Allocate<EVT>(4);
6949 Array[0] = VT1;
6950 Array[1] = VT2;
6951 Array[2] = VT3;
6952 Array[3] = VT4;
6953 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
6954 VTListMap.InsertNode(Result, IP);
6955 }
6956 return Result->getSDVTList();
6957 }
6959 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
6960 unsigned NumVTs = VTs.size();
6961 FoldingSetNodeID ID;
6962 ID.AddInteger(NumVTs);
6963 for (unsigned index = 0; index < NumVTs; index++) {
6964 ID.AddInteger(VTs[index].getRawBits());
6965 }
6967 void *IP = nullptr;
6968 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
6969 if (!Result) {
6970 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
6971 std::copy(VTs.begin(), VTs.end(), Array);
6972 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
6973 VTListMap.InsertNode(Result, IP);
6974 }
6975 return Result->getSDVTList();
6976 }
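// Note (illustrative sketch): because VT lists are interned in VTListMap,
// repeated queries hand back pointer-identical arrays:
//   SDVTList A = DAG.getVTList(MVT::i32, MVT::Other);
//   SDVTList B = DAG.getVTList(MVT::i32, MVT::Other);
//   assert(A.VTs == B.VTs && "interned: same allocation on every query");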
6979 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
6980 /// specified operands. If the resultant node already exists in the DAG,
6981 /// this does not modify the specified node, instead it returns the node that
6982 /// already exists. If the resultant node does not exist in the DAG, the
6983 /// input node is returned. As a degenerate case, if you specify the same
6984 /// input operands as the node already has, the input node is returned.
6985 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
6986 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
6988 // Check to see if there is no change.
6989 if (Op == N->getOperand(0)) return N;
6991 // See if the modified node already exists.
6992 void *InsertPos = nullptr;
6993 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
6994 return Existing;
6996 // Nope it doesn't. Remove the node from its current place in the maps.
6997 if (InsertPos)
6998 if (!RemoveNodeFromCSEMaps(N))
6999 InsertPos = nullptr;
7001 // Now we update the operands.
7002 N->OperandList[0].set(Op);
7004 updateDivergence(N);
7005 // If this gets put into a CSE map, add it.
7006 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7007 return N;
7008 }
7010 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
7011 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
7013 // Check to see if there is no change.
7014 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
7015 return N; // No operands changed, just return the input node.
7017 // See if the modified node already exists.
7018 void *InsertPos = nullptr;
7019 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
7020 return Existing;
7022 // Nope it doesn't. Remove the node from its current place in the maps.
7023 if (InsertPos)
7024 if (!RemoveNodeFromCSEMaps(N))
7025 InsertPos = nullptr;
7027 // Now we update the operands.
7028 if (N->OperandList[0] != Op1)
7029 N->OperandList[0].set(Op1);
7030 if (N->OperandList[1] != Op2)
7031 N->OperandList[1].set(Op2);
7033 updateDivergence(N);
7034 // If this gets put into a CSE map, add it.
7035 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7036 return N;
7037 }
7039 SDNode *SelectionDAG::
7040 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
7041 SDValue Ops[] = { Op1, Op2, Op3 };
7042 return UpdateNodeOperands(N, Ops);
7043 }
7045 SDNode *SelectionDAG::
7046 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
7047 SDValue Op3, SDValue Op4) {
7048 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
7049 return UpdateNodeOperands(N, Ops);
7050 }
7052 SDNode *SelectionDAG::
7053 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
7054 SDValue Op3, SDValue Op4, SDValue Op5) {
7055 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
7056 return UpdateNodeOperands(N, Ops);
7057 }
7059 SDNode *SelectionDAG::
7060 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
7061 unsigned NumOps = Ops.size();
7062 assert(N->getNumOperands() == NumOps &&
7063 "Update with wrong number of operands");
7065 // If no operands changed just return the input node.
7066 if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
7067 return N;
7069 // See if the modified node already exists.
7070 void *InsertPos = nullptr;
7071 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
7072 return Existing;
7074 // Nope it doesn't. Remove the node from its current place in the maps.
7075 if (InsertPos)
7076 if (!RemoveNodeFromCSEMaps(N))
7077 InsertPos = nullptr;
7079 // Now we update the operands.
7080 for (unsigned i = 0; i != NumOps; ++i)
7081 if (N->OperandList[i] != Ops[i])
7082 N->OperandList[i].set(Ops[i]);
7084 updateDivergence(N);
7085 // If this gets put into a CSE map, add it.
7086 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7087 return N;
7088 }
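// Usage sketch (illustrative comment only): if the DAG already holds
// (ADD A, B) and a distinct node X = (ADD A, C) is updated to the operands
// {A, B}, the existing node is returned instead of mutating X:
//   SDNode *Res = DAG.UpdateNodeOperands(X, A, B);
//   // Res may differ from X, so callers must continue with Res.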
7090 /// DropOperands - Release the operands and set this node to have
7091 /// zero operands.
7092 void SDNode::DropOperands() {
7093 // Unlike the code in MorphNodeTo that does this, we don't need to
7094 // watch for dead nodes here.
7095 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
7096 SDUse &Use = *I++;
7097 Use.set(SDValue());
7098 }
7099 }
7101 void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
7102 ArrayRef<MachineMemOperand *> NewMemRefs) {
7103 if (NewMemRefs.empty()) {
7104 N->clearMemRefs();
7105 return;
7106 }
7108 // Check if we can avoid allocating by storing a single reference directly.
7109 if (NewMemRefs.size() == 1) {
7110 N->MemRefs = NewMemRefs[0];
7111 N->NumMemRefs = 1;
7112 return;
7113 }
7115 MachineMemOperand **MemRefsBuffer =
7116 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
7117 std::copy(NewMemRefs.begin(), NewMemRefs.end(), MemRefsBuffer);
7118 N->MemRefs = MemRefsBuffer;
7119 N->NumMemRefs = static_cast<int>(NewMemRefs.size());
7122 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
7123 /// machine opcode.
7125 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7126 EVT VT) {
7127 SDVTList VTs = getVTList(VT);
7128 return SelectNodeTo(N, MachineOpc, VTs, None);
7129 }
7131 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7132 EVT VT, SDValue Op1) {
7133 SDVTList VTs = getVTList(VT);
7134 SDValue Ops[] = { Op1 };
7135 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7136 }
7138 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7139 EVT VT, SDValue Op1,
7140 SDValue Op2) {
7141 SDVTList VTs = getVTList(VT);
7142 SDValue Ops[] = { Op1, Op2 };
7143 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7144 }
7146 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7147 EVT VT, SDValue Op1,
7148 SDValue Op2, SDValue Op3) {
7149 SDVTList VTs = getVTList(VT);
7150 SDValue Ops[] = { Op1, Op2, Op3 };
7151 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7152 }
7154 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7155 EVT VT, ArrayRef<SDValue> Ops) {
7156 SDVTList VTs = getVTList(VT);
7157 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7158 }
7160 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7161 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
7162 SDVTList VTs = getVTList(VT1, VT2);
7163 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7164 }
7166 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7167 EVT VT1, EVT VT2) {
7168 SDVTList VTs = getVTList(VT1, VT2);
7169 return SelectNodeTo(N, MachineOpc, VTs, None);
7170 }
7172 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7173 EVT VT1, EVT VT2, EVT VT3,
7174 ArrayRef<SDValue> Ops) {
7175 SDVTList VTs = getVTList(VT1, VT2, VT3);
7176 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7177 }
7179 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7180 EVT VT1, EVT VT2,
7181 SDValue Op1, SDValue Op2) {
7182 SDVTList VTs = getVTList(VT1, VT2);
7183 SDValue Ops[] = { Op1, Op2 };
7184 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7185 }
7187 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7188 SDVTList VTs, ArrayRef<SDValue> Ops) {
7189 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
7190 // Reset the NodeID to -1.
7191 New->setNodeId(-1);
7192 if (New != N) {
7193 ReplaceAllUsesWith(N, New);
7194 RemoveDeadNode(N);
7195 }
7196 return New;
7197 }
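// Usage sketch (illustrative comment only, hypothetical opcode and context):
// a target's instruction selector typically rewrites an ISD node in place:
//   SDNode *New = CurDAG->SelectNodeTo(N, PPC::ADD4, MVT::i32, LHS, RHS);
// If an equivalent machine node already existed, N's uses are redirected to
// it and N is removed, as the code above shows.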
7199 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
7200 /// the line number information on the merged node since it is not possible to
7201 /// preserve the information that the operation is associated with multiple lines.
7202 /// This will make the debugger work better at -O0, where there is a higher
7203 /// probability of having other instructions associated with that line.
7205 /// For IROrder, we keep the smaller of the two
7206 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
7207 DebugLoc NLoc = N->getDebugLoc();
7208 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
7209 N->setDebugLoc(DebugLoc());
7211 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
7212 N->setIROrder(Order);
7213 return N;
7214 }
7216 /// MorphNodeTo - This *mutates* the specified node to have the specified
7217 /// return type, opcode, and operands.
7219 /// Note that MorphNodeTo returns the resultant node. If there is already a
7220 /// node of the specified opcode and operands, it returns that node instead of
7221 /// the current one. Note that the SDLoc need not be the same.
7223 /// Using MorphNodeTo is faster than creating a new node and swapping it in
7224 /// with ReplaceAllUsesWith both because it often avoids allocating a new
7225 /// node, and because it doesn't require CSE recalculation for any of
7226 /// the node's users.
7228 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
7229 /// As a consequence it isn't appropriate to use from within the DAG combiner or
7230 /// the legalizer which maintain worklists that would need to be updated when
7231 /// deleting things.
7232 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
7233 SDVTList VTs, ArrayRef<SDValue> Ops) {
7234 // If an identical node already exists, use it.
7235 void *IP = nullptr;
7236 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
7237 FoldingSetNodeID ID;
7238 AddNodeIDNode(ID, Opc, VTs, Ops);
7239 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
7240 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
7241 }
7243 if (!RemoveNodeFromCSEMaps(N))
7244 IP = nullptr;
7246 // Start the morphing.
7247 N->NodeType = Opc;
7248 N->ValueList = VTs.VTs;
7249 N->NumValues = VTs.NumVTs;
7251 // Clear the operands list, updating used nodes to remove this from their
7252 // use list. Keep track of any operands that become dead as a result.
7253 SmallPtrSet<SDNode*, 16> DeadNodeSet;
7254 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
7255 SDUse &Use = *I++;
7256 SDNode *Used = Use.getNode();
7257 Use.set(SDValue());
7258 if (Used->use_empty())
7259 DeadNodeSet.insert(Used);
7260 }
7262 // For MachineNode, initialize the memory references information.
7263 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
7264 MN->clearMemRefs();
7266 // Swap for an appropriately sized array from the recycler.
7267 removeOperands(N);
7268 createOperands(N, Ops);
7270 // Delete any nodes that are still dead after adding the uses for the
7271 // new operands.
7272 if (!DeadNodeSet.empty()) {
7273 SmallVector<SDNode *, 16> DeadNodes;
7274 for (SDNode *N : DeadNodeSet)
7275 if (N->use_empty())
7276 DeadNodes.push_back(N);
7277 RemoveDeadNodes(DeadNodes);
7278 }
7280 if (IP)
7281 CSEMap.InsertNode(N, IP); // Memoize the new node.
7282 return N;
7283 }
7285 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
7286 unsigned OrigOpc = Node->getOpcode();
7287 unsigned NewOpc;
7288 bool IsUnary = false;
7289 bool IsTernary = false;
7290 switch (OrigOpc) {
7291 default:
7292 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
7293 case ISD::STRICT_FADD: NewOpc = ISD::FADD; break;
7294 case ISD::STRICT_FSUB: NewOpc = ISD::FSUB; break;
7295 case ISD::STRICT_FMUL: NewOpc = ISD::FMUL; break;
7296 case ISD::STRICT_FDIV: NewOpc = ISD::FDIV; break;
7297 case ISD::STRICT_FREM: NewOpc = ISD::FREM; break;
7298 case ISD::STRICT_FMA: NewOpc = ISD::FMA; IsTernary = true; break;
7299 case ISD::STRICT_FSQRT: NewOpc = ISD::FSQRT; IsUnary = true; break;
7300 case ISD::STRICT_FPOW: NewOpc = ISD::FPOW; break;
7301 case ISD::STRICT_FPOWI: NewOpc = ISD::FPOWI; break;
7302 case ISD::STRICT_FSIN: NewOpc = ISD::FSIN; IsUnary = true; break;
7303 case ISD::STRICT_FCOS: NewOpc = ISD::FCOS; IsUnary = true; break;
7304 case ISD::STRICT_FEXP: NewOpc = ISD::FEXP; IsUnary = true; break;
7305 case ISD::STRICT_FEXP2: NewOpc = ISD::FEXP2; IsUnary = true; break;
7306 case ISD::STRICT_FLOG: NewOpc = ISD::FLOG; IsUnary = true; break;
7307 case ISD::STRICT_FLOG10: NewOpc = ISD::FLOG10; IsUnary = true; break;
7308 case ISD::STRICT_FLOG2: NewOpc = ISD::FLOG2; IsUnary = true; break;
7309 case ISD::STRICT_FRINT: NewOpc = ISD::FRINT; IsUnary = true; break;
7310 case ISD::STRICT_FNEARBYINT:
7311 NewOpc = ISD::FNEARBYINT;
7312 IsUnary = true;
7313 break;
7314 }
7316 // We're taking this node out of the chain, so we need to re-link things.
7317 SDValue InputChain = Node->getOperand(0);
7318 SDValue OutputChain = SDValue(Node, 1);
7319 ReplaceAllUsesOfValueWith(OutputChain, InputChain);
7321 SDVTList VTs = getVTList(Node->getOperand(1).getValueType());
7322 SDNode *Res = nullptr;
7323 if (IsUnary)
7324 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1) });
7325 else if (IsTernary)
7326 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1),
7327 Node->getOperand(2),
7328 Node->getOperand(3)});
7329 else
7330 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1),
7331 Node->getOperand(2) });
7333 // MorphNodeTo can operate in two ways: if an existing node with the
7334 // specified operands exists, it can just return it. Otherwise, it
7335 // updates the node in place to have the requested operands.
7336 if (Res == Node) {
7337 // If we updated the node in place, reset the node ID. To the isel,
7338 // this should be just like a newly allocated machine node.
7339 Res->setNodeId(-1);
7340 } else {
7341 ReplaceAllUsesWith(Node, Res);
7342 RemoveDeadNode(Node);
7343 }
7345 return Res;
7346 }
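// Illustration (comment only): for Node = (STRICT_FADD ch, a, b) producing
// (res, outch), the code above first rewires all users of outch to ch and
// then morphs Node into the unchained (FADD a, b), so the operation no
// longer orders against other exception-sensitive FP work.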
7348 /// getMachineNode - These are used for target selectors to create a new node
7349 /// with specified return type(s), MachineInstr opcode, and operands.
7351 /// Note that getMachineNode returns the resultant node. If there is already a
7352 /// node of the specified opcode and operands, it returns that node instead of
7353 /// the current one.
7354 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7355 EVT VT) {
7356 SDVTList VTs = getVTList(VT);
7357 return getMachineNode(Opcode, dl, VTs, None);
7358 }
7360 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7361 EVT VT, SDValue Op1) {
7362 SDVTList VTs = getVTList(VT);
7363 SDValue Ops[] = { Op1 };
7364 return getMachineNode(Opcode, dl, VTs, Ops);
7365 }
7367 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7368 EVT VT, SDValue Op1, SDValue Op2) {
7369 SDVTList VTs = getVTList(VT);
7370 SDValue Ops[] = { Op1, Op2 };
7371 return getMachineNode(Opcode, dl, VTs, Ops);
7372 }
7374 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7375 EVT VT, SDValue Op1, SDValue Op2,
7376 SDValue Op3) {
7377 SDVTList VTs = getVTList(VT);
7378 SDValue Ops[] = { Op1, Op2, Op3 };
7379 return getMachineNode(Opcode, dl, VTs, Ops);
7380 }
7382 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7383 EVT VT, ArrayRef<SDValue> Ops) {
7384 SDVTList VTs = getVTList(VT);
7385 return getMachineNode(Opcode, dl, VTs, Ops);
7386 }
7388 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7389 EVT VT1, EVT VT2, SDValue Op1,
7390 SDValue Op2) {
7391 SDVTList VTs = getVTList(VT1, VT2);
7392 SDValue Ops[] = { Op1, Op2 };
7393 return getMachineNode(Opcode, dl, VTs, Ops);
7394 }
7396 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7397 EVT VT1, EVT VT2, SDValue Op1,
7398 SDValue Op2, SDValue Op3) {
7399 SDVTList VTs = getVTList(VT1, VT2);
7400 SDValue Ops[] = { Op1, Op2, Op3 };
7401 return getMachineNode(Opcode, dl, VTs, Ops);
7402 }
7404 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7405 EVT VT1, EVT VT2,
7406 ArrayRef<SDValue> Ops) {
7407 SDVTList VTs = getVTList(VT1, VT2);
7408 return getMachineNode(Opcode, dl, VTs, Ops);
7409 }
7411 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7412 EVT VT1, EVT VT2, EVT VT3,
7413 SDValue Op1, SDValue Op2) {
7414 SDVTList VTs = getVTList(VT1, VT2, VT3);
7415 SDValue Ops[] = { Op1, Op2 };
7416 return getMachineNode(Opcode, dl, VTs, Ops);
7417 }
7419 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7420 EVT VT1, EVT VT2, EVT VT3,
7421 SDValue Op1, SDValue Op2,
7422 SDValue Op3) {
7423 SDVTList VTs = getVTList(VT1, VT2, VT3);
7424 SDValue Ops[] = { Op1, Op2, Op3 };
7425 return getMachineNode(Opcode, dl, VTs, Ops);
7426 }
7428 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7429 EVT VT1, EVT VT2, EVT VT3,
7430 ArrayRef<SDValue> Ops) {
7431 SDVTList VTs = getVTList(VT1, VT2, VT3);
7432 return getMachineNode(Opcode, dl, VTs, Ops);
7433 }
7435 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7436 ArrayRef<EVT> ResultTys,
7437 ArrayRef<SDValue> Ops) {
7438 SDVTList VTs = getVTList(ResultTys);
7439 return getMachineNode(Opcode, dl, VTs, Ops);
7440 }
7442 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
7443 SDVTList VTs,
7444 ArrayRef<SDValue> Ops) {
7445 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
7446 MachineSDNode *N;
7447 void *IP = nullptr;
7449 if (DoCSE) {
7450 FoldingSetNodeID ID;
7451 AddNodeIDNode(ID, ~Opcode, VTs, Ops);
7452 IP = nullptr;
7453 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
7454 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
7455 }
7456 }
7458 // Allocate a new MachineSDNode.
7459 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7460 createOperands(N, Ops);
7462 if (DoCSE)
7463 CSEMap.InsertNode(N, IP);
7465 InsertNode(N);
7466 return N;
7467 }
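// Usage sketch (illustrative comment only): materializing an undefined
// register with the generic IMPLICIT_DEF machine opcode:
//   MachineSDNode *Def =
//       DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i32);
// Glue-producing machine nodes bypass the CSE map, per DoCSE above.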
7469 /// getTargetExtractSubreg - A convenience function for creating
7470 /// TargetOpcode::EXTRACT_SUBREG nodes.
7471 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
7472 SDValue Operand) {
7473 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
7474 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
7475 VT, Operand, SRIdxVal);
7476 return SDValue(Subreg, 0);
7477 }
7479 /// getTargetInsertSubreg - A convenience function for creating
7480 /// TargetOpcode::INSERT_SUBREG nodes.
7481 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
7482 SDValue Operand, SDValue Subreg) {
7483 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
7484 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
7485 VT, Operand, Subreg, SRIdxVal);
7486 return SDValue(Result, 0);
7487 }
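// Usage sketch (illustrative comment only, SubIdx being a hypothetical
// target-specific subregister index): extracting the low i32 of an i64:
//   SDValue Lo = DAG.getTargetExtractSubreg(SubIdx, DL, MVT::i32, Val64);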
7489 /// getNodeIfExists - Get the specified node if it's already available, or
7490 /// else return NULL.
7491 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
7492 ArrayRef<SDValue> Ops,
7493 const SDNodeFlags Flags) {
7494 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
7495 FoldingSetNodeID ID;
7496 AddNodeIDNode(ID, Opcode, VTList, Ops);
7497 void *IP = nullptr;
7498 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
7499 E->intersectFlagsWith(Flags);
7500 return E;
7501 }
7502 }
7503 return nullptr;
7504 }
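// Usage sketch (illustrative comment only): probing the CSE map without
// creating a node on a miss:
//   if (SDNode *E = DAG.getNodeIfExists(ISD::ADD, DAG.getVTList(VT), {A, B}))
//     /* reuse E; its flags have been intersected with the ones passed in */;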
7506 /// getDbgValue - Creates an SDDbgValue node.
7508 /// SDNode
7509 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
7510 SDNode *N, unsigned R, bool IsIndirect,
7511 const DebugLoc &DL, unsigned O) {
7512 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
7513 "Expected inlined-at fields to agree");
7514 return new (DbgInfo->getAlloc())
7515 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O);
7516 }
7518 /// Constant
7519 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
7520 DIExpression *Expr,
7521 const Value *C,
7522 const DebugLoc &DL, unsigned O) {
7523 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
7524 "Expected inlined-at fields to agree");
7525 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O);
7526 }
7528 /// FrameIndex
7529 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
7530 DIExpression *Expr, unsigned FI,
7531 bool IsIndirect,
7532 const DebugLoc &DL,
7533 unsigned O) {
7534 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
7535 "Expected inlined-at fields to agree");
7536 return new (DbgInfo->getAlloc())
7537 SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX);
7538 }
7540 /// VReg
7541 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var,
7542 DIExpression *Expr,
7543 unsigned VReg, bool IsIndirect,
7544 const DebugLoc &DL, unsigned O) {
7545 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
7546 "Expected inlined-at fields to agree");
7547 return new (DbgInfo->getAlloc())
7548 SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG);
7549 }
7551 void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
7552 unsigned OffsetInBits, unsigned SizeInBits,
7553 bool InvalidateDbg) {
7554 SDNode *FromNode = From.getNode();
7555 SDNode *ToNode = To.getNode();
7556 assert(FromNode && ToNode && "Can't modify dbg values");
7558 // PR35338
7559 // TODO: assert(From != To && "Redundant dbg value transfer");
7560 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
7561 if (From == To || FromNode == ToNode)
7562 return;
7564 if (!FromNode->getHasDebugValue())
7565 return;
7567 SmallVector<SDDbgValue *, 2> ClonedDVs;
7568 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
7569 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated())
7570 continue;
7572 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");
7574 // Just transfer the dbg value attached to From.
7575 if (Dbg->getResNo() != From.getResNo())
7576 continue;
7578 DIVariable *Var = Dbg->getVariable();
7579 auto *Expr = Dbg->getExpression();
7580 // If a fragment is requested, update the expression.
7581 if (SizeInBits) {
7582 // When splitting a larger (e.g., sign-extended) value whose
7583 // lower bits are described with an SDDbgValue, do not attempt
7584 // to transfer the SDDbgValue to the upper bits.
7585 if (auto FI = Expr->getFragmentInfo())
7586 if (OffsetInBits + SizeInBits > FI->SizeInBits)
7587 continue;
7588 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
7589 SizeInBits);
7590 if (!Fragment)
7591 continue;
7592 Expr = *Fragment;
7593 }
7594 // Clone the SDDbgValue and move it to To.
7595 SDDbgValue *Clone =
7596 getDbgValue(Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(),
7597 Dbg->getDebugLoc(), Dbg->getOrder());
7598 ClonedDVs.push_back(Clone);
7600 if (InvalidateDbg)
7601 Dbg->setIsInvalidated();
7602 }
7604 for (SDDbgValue *Dbg : ClonedDVs)
7605 AddDbgValue(Dbg, ToNode, false);
7606 }
7608 void SelectionDAG::salvageDebugInfo(SDNode &N) {
7609 if (!N.getHasDebugValue())
7610 return;
7612 SmallVector<SDDbgValue *, 2> ClonedDVs;
7613 for (auto DV : GetDbgValues(&N)) {
7614 if (DV->isInvalidated())
7615 continue;
7616 switch (N.getOpcode()) {
7617 default:
7618 break;
7619 case ISD::ADD:
7620 SDValue N0 = N.getOperand(0);
7621 SDValue N1 = N.getOperand(1);
7622 if (!isConstantIntBuildVectorOrConstantInt(N0) &&
7623 isConstantIntBuildVectorOrConstantInt(N1)) {
7624 uint64_t Offset = N.getConstantOperandVal(1);
7625 // Rewrite an ADD constant node into a DIExpression. Since we are
7626 // performing arithmetic to compute the variable's *value* in the
7627 // DIExpression, we need to mark the expression with a
7628 // DW_OP_stack_value.
7629 auto *DIExpr = DV->getExpression();
7630 DIExpr = DIExpression::prepend(DIExpr, DIExpression::NoDeref, Offset,
7631 DIExpression::NoDeref,
7632 DIExpression::WithStackValue);
7633 SDDbgValue *Clone =
7634 getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(),
7635 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
7636 ClonedDVs.push_back(Clone);
7637 DV->setIsInvalidated();
7638 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
7639 N0.getNode()->dumprFull(this);
7640 dbgs() << " into " << *DIExpr << '\n');
7641 }
7642 }
7643 }
7645 for (SDDbgValue *Dbg : ClonedDVs)
7646 AddDbgValue(Dbg, Dbg->getSDNode(), false);
7647 }
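// Illustration (comment only): a debug value attached to (ADD %x, 12) is
// re-pointed above at %x with its DIExpression extended roughly as
//   DW_OP_plus_uconst 12, DW_OP_stack_value
// so the variable's location survives even if the ADD node later dies.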
7649 /// Creates an SDDbgLabel node.
7650 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label,
7651 const DebugLoc &DL, unsigned O) {
7652 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
7653 "Expected inlined-at fields to agree");
7654 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
7655 }
7657 namespace {
7659 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
7660 /// pointed to by a use iterator is deleted, increment the use iterator
7661 /// so that it doesn't dangle.
7663 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
7664 SDNode::use_iterator &UI;
7665 SDNode::use_iterator &UE;
7667 void NodeDeleted(SDNode *N, SDNode *E) override {
7668 // Increment the iterator as needed.
7669 while (UI != UE && N == *UI)
7670 ++UI;
7671 }
7673 public:
7674 RAUWUpdateListener(SelectionDAG &d,
7675 SDNode::use_iterator &ui,
7676 SDNode::use_iterator &ue)
7677 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
7678 };
7680 } // end anonymous namespace
7682 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
7683 /// This can cause recursive merging of nodes in the DAG.
7685 /// This version assumes From has a single result value.
7687 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
7688 SDNode *From = FromN.getNode();
7689 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
7690 "Cannot replace with this method!");
7691 assert(From != To.getNode() && "Cannot replace uses of with self");
7693 // Preserve Debug Values
7694 transferDbgValues(FromN, To);
7696 // Iterate over all the existing uses of From. New uses will be added
7697 // to the beginning of the use list, which we avoid visiting.
7698 // This specifically avoids visiting uses of From that arise while the
7699 // replacement is happening, because any such uses would be the result
7700 // of CSE: If an existing node looks like From after one of its operands
7701 // is replaced by To, we don't want to replace all of its users with To
7702 // too. See PR3018 for more info.
7703 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
7704 RAUWUpdateListener Listener(*this, UI, UE);
7705 while (UI != UE) {
7706 SDNode *User = *UI;
7708 // This node is about to morph, remove its old self from the CSE maps.
7709 RemoveNodeFromCSEMaps(User);
7711 // A user can appear in a use list multiple times, and when this
7712 // happens the uses are usually next to each other in the list.
7713 // To help reduce the number of CSE recomputations, process all
7714 // the uses of this user that we can find this way.
7715 do {
7716 SDUse &Use = UI.getUse();
7717 ++UI;
7718 Use.set(To);
7719 if (To->isDivergent() != From->isDivergent())
7720 updateDivergence(User);
7721 } while (UI != UE && *UI == User);
7722 // Now that we have modified User, add it back to the CSE maps. If it
7723 // already exists there, recursively merge the results together.
7724 AddModifiedNodeToCSEMaps(User);
7725 }
7727 // If we just RAUW'd the root, take note.
7728 if (FromN == getRoot())
7729 setRoot(To);
7730 }
7732 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
7733 /// This can cause recursive merging of nodes in the DAG.
7735 /// This version assumes that for each value of From, there is a
7736 /// corresponding value in To in the same position with the same type.
7738 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
7739 #ifndef NDEBUG
7740 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
7741 assert((!From->hasAnyUseOfValue(i) ||
7742 From->getValueType(i) == To->getValueType(i)) &&
7743 "Cannot use this version of ReplaceAllUsesWith!");
7744 #endif
7746 // Handle the trivial case.
7747 if (From == To)
7748 return;
7750 // Preserve Debug Info. Only do this if there's a use.
7751 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
7752 if (From->hasAnyUseOfValue(i)) {
7753 assert((i < To->getNumValues()) && "Invalid To location");
7754 transferDbgValues(SDValue(From, i), SDValue(To, i));
7755 }
7757 // Iterate over just the existing users of From. See the comments in
7758 // the ReplaceAllUsesWith above.
7759 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
7760 RAUWUpdateListener Listener(*this, UI, UE);
7761 while (UI != UE) {
7762 SDNode *User = *UI;
7764 // This node is about to morph, remove its old self from the CSE maps.
7765 RemoveNodeFromCSEMaps(User);
7767 // A user can appear in a use list multiple times, and when this
7768 // happens the uses are usually next to each other in the list.
7769 // To help reduce the number of CSE recomputations, process all
7770 // the uses of this user that we can find this way.
7771 do {
7772 SDUse &Use = UI.getUse();
7773 ++UI;
7774 Use.setNode(To);
7775 if (To->isDivergent() != From->isDivergent())
7776 updateDivergence(User);
7777 } while (UI != UE && *UI == User);
7779 // Now that we have modified User, add it back to the CSE maps. If it
7780 // already exists there, recursively merge the results together.
7781 AddModifiedNodeToCSEMaps(User);
7782 }
7784 // If we just RAUW'd the root, take note.
7785 if (From == getRoot().getNode())
7786 setRoot(SDValue(To, getRoot().getResNo()));
7787 }
7789 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
7790 /// This can cause recursive merging of nodes in the DAG.
7792 /// This version can replace From with any result values. To must match the
7793 /// number and types of values returned by From.
7794 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
7795 if (From->getNumValues() == 1) // Handle the simple case efficiently.
7796 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
7798 // Preserve Debug Info.
7799 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
7800 transferDbgValues(SDValue(From, i), To[i]);
7802 // Iterate over just the existing users of From. See the comments in
7803 // the ReplaceAllUsesWith above.
7804 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
7805 RAUWUpdateListener Listener(*this, UI, UE);
7806 while (UI != UE) {
7807 SDNode *User = *UI;
7809 // This node is about to morph, remove its old self from the CSE maps.
7810 RemoveNodeFromCSEMaps(User);
7812 // A user can appear in a use list multiple times, and when this happens the
7813 // uses are usually next to each other in the list. To help reduce the
7814 // number of CSE and divergence recomputations, process all the uses of this
7815 // user that we can find this way.
7816 bool To_IsDivergent = false;
7817 do {
7818 SDUse &Use = UI.getUse();
7819 const SDValue &ToOp = To[Use.getResNo()];
7820 ++UI;
7821 Use.set(ToOp);
7822 To_IsDivergent |= ToOp->isDivergent();
7823 } while (UI != UE && *UI == User);
7825 if (To_IsDivergent != From->isDivergent())
7826 updateDivergence(User);
7828 // Now that we have modified User, add it back to the CSE maps. If it
7829 // already exists there, recursively merge the results together.
7830 AddModifiedNodeToCSEMaps(User);
7831 }
7833 // If we just RAUW'd the root, take note.
7834 if (From == getRoot().getNode())
7835 setRoot(SDValue(To[getRoot().getResNo()]));
7836 }
7838 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
7839 /// uses of other values produced by From.getNode() alone. This can cause
7840 /// recursive merging of nodes in the DAG.
7841 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
7842 // Handle the really simple, really trivial case efficiently.
7843 if (From == To) return;
7845 // Handle the simple, trivial case efficiently.
7846 if (From.getNode()->getNumValues() == 1) {
7847 ReplaceAllUsesWith(From, To);
7848 return;
7849 }
7851 // Preserve Debug Info.
7852 transferDbgValues(From, To);
7854 // Iterate over just the existing users of From. See the comments in
7855 // the ReplaceAllUsesWith above.
7856 SDNode::use_iterator UI = From.getNode()->use_begin(),
7857 UE = From.getNode()->use_end();
7858 RAUWUpdateListener Listener(*this, UI, UE);
7859 while (UI != UE) {
7860 SDNode *User = *UI;
7861 bool UserRemovedFromCSEMaps = false;
7863 // A user can appear in a use list multiple times, and when this
7864 // happens the uses are usually next to each other in the list.
7865 // To help reduce the number of CSE recomputations, process all
7866 // the uses of this user that we can find this way.
7867 do {
7868 SDUse &Use = UI.getUse();
7870 // Skip uses of different values from the same node.
7871 if (Use.getResNo() != From.getResNo()) {
7872 ++UI;
7873 continue;
7874 }
7876 // If this node hasn't been modified yet, it's still in the CSE maps,
7877 // so remove its old self from the CSE maps.
7878 if (!UserRemovedFromCSEMaps) {
7879 RemoveNodeFromCSEMaps(User);
7880 UserRemovedFromCSEMaps = true;
7881 }
7883 ++UI;
7884 Use.set(To);
7885 if (To->isDivergent() != From->isDivergent())
7886 updateDivergence(User);
7887 } while (UI != UE && *UI == User);
7888 // We are iterating over all uses of the From node, so if a use
7889 // doesn't use the specific value, no changes are made.
7890 if (!UserRemovedFromCSEMaps)
7891 continue;
7893 // Now that we have modified User, add it back to the CSE maps. If it
7894 // already exists there, recursively merge the results together.
7895 AddModifiedNodeToCSEMaps(User);
7896 }
7898 // If we just RAUW'd the root, take note.
7899 if (From == getRoot())
7900 setRoot(To);
7901 }
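// Usage sketch (illustrative only; 'DAG', 'Load' and 'NewValue' are
// hypothetical names): replace just the loaded value of a LoadSDNode while
// leaving its chain result (value number 1) untouched, which is the typical
// reason to prefer this entry point over the whole-node ReplaceAllUsesWith:
//
//   DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), NewValue);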
7903 namespace {
7905 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
7906 /// to record information about a use.
7907 struct UseMemo {
7908 SDNode *User;
7909 unsigned Index;
7910 SDUse *Use;
7911 };
7913 /// operator< - Sort Memos by User.
7914 bool operator<(const UseMemo &L, const UseMemo &R) {
7915 return (intptr_t)L.User < (intptr_t)R.User;
7916 }
7918 } // end anonymous namespace
7920 void SelectionDAG::updateDivergence(SDNode *N)
7921 {
7922 if (TLI->isSDNodeAlwaysUniform(N))
7923 return;
7924 bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
7925 for (auto &Op : N->ops()) {
7926 if (Op.Val.getValueType() != MVT::Other)
7927 IsDivergent |= Op.getNode()->isDivergent();
7928 }
7929 if (N->SDNodeBits.IsDivergent != IsDivergent) {
7930 N->SDNodeBits.IsDivergent = IsDivergent;
7931 for (auto U : N->uses()) {
7932 updateDivergence(U);
7933 }
7934 }
7935 }
7938 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode*>& Order) {
7939 DenseMap<SDNode *, unsigned> Degree;
7940 Order.reserve(AllNodes.size());
7941 for (auto &N : allnodes()) {
7942 unsigned NOps = N.getNumOperands();
7943 Degree[&N] = NOps;
7944 if (0 == NOps)
7945 Order.push_back(&N);
7946 }
7947 for (std::vector<SDNode *>::iterator I = Order.begin();
7948 I != Order.end(); ++I) {
7949 SDNode *N = *I;
7950 for (auto U : N->uses()) {
7951 unsigned &UnsortedOps = Degree[U];
7952 if (0 == --UnsortedOps)
7953 Order.push_back(U);
7954 }
7955 }
7956 }
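// CreateTopologicalOrder is a standard Kahn's algorithm: seed the worklist
// with zero-operand nodes, then retire each user once all of its operands
// have been placed. A minimal standalone sketch of the same idea, using
// hypothetical Node/Graph types rather than the SelectionDAG classes:
//
//   std::vector<Node *> Order;
//   for (Node *N : Graph)
//     if (Remaining[N] == 0)   // Remaining[N] starts at N's operand count.
//       Order.push_back(N);
//   for (unsigned I = 0; I != Order.size(); ++I)
//     for (Node *U : Order[I]->Users)
//       if (--Remaining[U] == 0)
//         Order.push_back(U);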
7958 void SelectionDAG::VerifyDAGDiverence()
7959 {
7960 std::vector<SDNode*> TopoOrder;
7961 CreateTopologicalOrder(TopoOrder);
7962 const TargetLowering &TLI = getTargetLoweringInfo();
7963 DenseMap<const SDNode *, bool> DivergenceMap;
7964 for (auto &N : allnodes()) {
7965 DivergenceMap[&N] = false;
7966 }
7967 for (auto N : TopoOrder) {
7968 bool IsDivergent = DivergenceMap[N];
7969 bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA);
7970 for (auto &Op : N->ops()) {
7971 if (Op.Val.getValueType() != MVT::Other)
7972 IsSDNodeDivergent |= DivergenceMap[Op.getNode()];
7973 }
7974 if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N)) {
7975 DivergenceMap[N] = true;
7976 }
7977 }
7978 for (auto &N : allnodes()) {
7979 (void)N;
7980 assert(DivergenceMap[&N] == N.isDivergent() &&
7981 "Divergence bit inconsistency detected\n");
7982 }
7983 }
7986 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
7987 /// uses of other values produced by From.getNode() alone. The same value
7988 /// may appear in both the From and To list. This can cause recursive
7989 /// merging of nodes in the DAG.
7990 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
7991 const SDValue *To,
7992 unsigned Num) {
7993 // Handle the simple, trivial case efficiently.
7994 if (Num == 1)
7995 return ReplaceAllUsesOfValueWith(*From, *To);
7997 transferDbgValues(*From, *To);
7999 // Collect and record all the existing uses up front. This guards
8000 // against visiting new uses that may be introduced during the
8001 // replacement process.
8002 SmallVector<UseMemo, 4> Uses;
8003 for (unsigned i = 0; i != Num; ++i) {
8004 unsigned FromResNo = From[i].getResNo();
8005 SDNode *FromNode = From[i].getNode();
8006 for (SDNode::use_iterator UI = FromNode->use_begin(),
8007 E = FromNode->use_end(); UI != E; ++UI) {
8008 SDUse &Use = UI.getUse();
8009 if (Use.getResNo() == FromResNo) {
8010 UseMemo Memo = { *UI, i, &Use };
8011 Uses.push_back(Memo);
8012 }
8013 }
8014 }
8016 // Sort the uses, so that all the uses from a given User are together.
8017 llvm::sort(Uses.begin(), Uses.end());
8019 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
8020 UseIndex != UseIndexEnd; ) {
8021 // We know that this user uses some value of From. If it is the right
8022 // value, update it.
8023 SDNode *User = Uses[UseIndex].User;
8025 // This node is about to morph, remove its old self from the CSE maps.
8026 RemoveNodeFromCSEMaps(User);
8028 // The Uses array is sorted, so all the uses for a given User
8029 // are next to each other in the list.
8030 // To help reduce the number of CSE recomputations, process all
8031 // the uses of this user that we can find this way.
8032 do {
8033 unsigned i = Uses[UseIndex].Index;
8034 SDUse &Use = *Uses[UseIndex].Use;
8035 ++UseIndex;
8037 Use.set(To[i]);
8038 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
8040 // Now that we have modified User, add it back to the CSE maps. If it
8041 // already exists there, recursively merge the results together.
8042 AddModifiedNodeToCSEMaps(User);
8043 }
8044 }
8046 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
8047 /// based on their topological order. It returns the number of nodes, which
8048 /// is one greater than the largest id assigned.
8049 unsigned SelectionDAG::AssignTopologicalOrder() {
8050 unsigned DAGSize = 0;
8052 // SortedPos tracks the progress of the algorithm. Nodes before it are
8053 // sorted, nodes after it are unsorted. When the algorithm completes
8054 // it is at the end of the list.
8055 allnodes_iterator SortedPos = allnodes_begin();
8057 // Visit all the nodes. Move nodes with no operands to the front of
8058 // the list immediately. Annotate nodes that do have operands with their
8059 // operand count. Before we do this, the Node Id fields of the nodes
8060 // may contain arbitrary values. After, the Node Id fields for nodes
8061 // before SortedPos will contain the topological sort index, and the
8062 // Node Id fields for nodes at SortedPos and after will contain the
8063 // count of outstanding operands.
8064 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
8065 SDNode *N = &*I++;
8066 checkForCycles(N, this);
8067 unsigned Degree = N->getNumOperands();
8068 if (Degree == 0) {
8069 // A node with no operands: move it into the sorted position immediately.
8070 N->setNodeId(DAGSize++);
8071 allnodes_iterator Q(N);
8072 if (Q != SortedPos)
8073 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
8074 assert(SortedPos != AllNodes.end() && "Overran node list");
8075 ++SortedPos;
8076 } else {
8077 // Temporarily use the Node Id as scratch space for the degree count.
8078 N->setNodeId(Degree);
8079 }
8080 }
8082 // Visit all the nodes. As we iterate, move nodes into sorted order,
8083 // such that by the time the end is reached all nodes will be sorted.
8084 for (SDNode &Node : allnodes()) {
8085 SDNode *N = &Node;
8086 checkForCycles(N, this);
8087 // N is in sorted position, so all its uses have one fewer operand
8088 // that needs to be sorted.
8089 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
8090 UI != UE; ++UI) {
8091 SDNode *P = *UI;
8092 unsigned Degree = P->getNodeId();
8093 assert(Degree != 0 && "Invalid node degree");
8094 --Degree;
8095 if (Degree == 0) {
8096 // All of P's operands are sorted, so P may be sorted now.
8097 P->setNodeId(DAGSize++);
8098 if (P->getIterator() != SortedPos)
8099 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
8100 assert(SortedPos != AllNodes.end() && "Overran node list");
8101 ++SortedPos;
8102 } else {
8103 // Update P's outstanding operand count.
8104 P->setNodeId(Degree);
8105 }
8106 }
8107 if (Node.getIterator() == SortedPos) {
8108 #ifndef NDEBUG
8109 allnodes_iterator I(N);
8110 SDNode *S = &*++I;
8111 dbgs() << "Overran sorted position:\n";
8112 S->dumprFull(this); dbgs() << "\n";
8113 dbgs() << "Checking if this is due to cycles\n";
8114 checkForCycles(this, true);
8115 #endif
8116 llvm_unreachable(nullptr);
8117 }
8118 }
8120 assert(SortedPos == AllNodes.end() &&
8121 "Topological sort incomplete!");
8122 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
8123 "First node in topological sort is not the entry token!");
8124 assert(AllNodes.front().getNodeId() == 0 &&
8125 "First node in topological sort has non-zero id!");
8126 assert(AllNodes.front().getNumOperands() == 0 &&
8127 "First node in topological sort has operands!");
8128 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
8129 "Last node in topological sort has unexpected id!");
8130 assert(AllNodes.back().use_empty() &&
8131 "Last node in topological sort has users!");
8132 assert(DAGSize == allnodes_size() && "Node count mismatch!");
8133 return DAGSize;
8134 }
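// Usage sketch (illustrative only; 'DAG' is assumed): after this call,
// N->getNodeId() is N's position in a topological order, so a forward walk
// over allnodes() visits every operand before any of its users:
//
//   DAG.AssignTopologicalOrder();
//   for (SDNode &N : DAG.allnodes()) {
//     // All operands of N have already been visited at this point.
//   }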
8136 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
8137 /// value is produced by SD.
8138 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
8139 if (SD) {
8140 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
8141 SD->setHasDebugValue(true);
8142 }
8143 DbgInfo->add(DB, SD, isParameter);
8144 }
8146 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
8147 DbgInfo->add(DB);
8148 }
8150 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
8151 SDValue NewMemOp) {
8152 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
8153 // The new memory operation must have the same position as the old load in
8154 // terms of memory dependency. Create a TokenFactor for the old load and new
8155 // memory operation and update uses of the old load's output chain to use that
8156 // TokenFactor.
8157 SDValue OldChain = SDValue(OldLoad, 1);
8158 SDValue NewChain = SDValue(NewMemOp.getNode(), 1);
8159 if (!OldLoad->hasAnyUseOfValue(1))
8160 return NewChain;
8162 SDValue TokenFactor =
8163 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain);
8164 ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
8165 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain);
8166 return TokenFactor;
8167 }
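// Usage sketch (illustrative only; 'DAG', 'LD' and 'NewLoad' are hypothetical
// names): after rewriting a load into a different memory operation, splice
// the new node into the old load's chain position so chained users still
// observe the same ordering:
//
//   SDValue NewChain = DAG.makeEquivalentMemoryOrdering(LD, NewLoad);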
8169 //===----------------------------------------------------------------------===//
8170 // SDNode Class
8171 //===----------------------------------------------------------------------===//
8173 bool llvm::isNullConstant(SDValue V) {
8174 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
8175 return Const != nullptr && Const->isNullValue();
8176 }
8178 bool llvm::isNullFPConstant(SDValue V) {
8179 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
8180 return Const != nullptr && Const->isZero() && !Const->isNegative();
8181 }
8183 bool llvm::isAllOnesConstant(SDValue V) {
8184 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
8185 return Const != nullptr && Const->isAllOnesValue();
8186 }
8188 bool llvm::isOneConstant(SDValue V) {
8189 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
8190 return Const != nullptr && Const->isOne();
8191 }
8193 bool llvm::isBitwiseNot(SDValue V) {
8194 return V.getOpcode() == ISD::XOR && isAllOnesConstant(V.getOperand(1));
8195 }
8197 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N) {
8198 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
8199 return CN;
8201 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
8202 BitVector UndefElements;
8203 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
8205 // BuildVectors can truncate their operands. Ignore that case here.
8206 // FIXME: We blindly ignore splats which include undef which is overly
8207 // pessimistic.
8208 if (CN && UndefElements.none() &&
8209 CN->getValueType(0) == N.getValueType().getScalarType())
8210 return CN;
8211 }
8213 return nullptr;
8214 }
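// Usage sketch (illustrative only; 'Op' is a hypothetical SDValue): this
// helper lets a combine treat a scalar constant and a splat build_vector of
// that constant uniformly:
//
//   if (ConstantSDNode *C = isConstOrConstSplat(Op))
//     if (C->isNullValue())
//       ; // Op is zero, or a vector whose elements are all zero.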
8216 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N) {
8217 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
8218 return CN;
8220 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
8221 BitVector UndefElements;
8222 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
8224 if (CN && UndefElements.none())
8225 return CN;
8226 }
8228 return nullptr;
8229 }
8231 HandleSDNode::~HandleSDNode() {
8232 DropOperands();
8233 }
8235 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
8236 const DebugLoc &DL,
8237 const GlobalValue *GA, EVT VT,
8238 int64_t o, unsigned char TF)
8239 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
8240 TheGlobal = GA;
8241 }
8243 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
8244 EVT VT, unsigned SrcAS,
8245 unsigned DestAS)
8246 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
8247 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
8249 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
8250 SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
8251 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
8252 MemSDNodeBits.IsVolatile = MMO->isVolatile();
8253 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
8254 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
8255 MemSDNodeBits.IsInvariant = MMO->isInvariant();
8257 // We check here that the size of the memory operand fits within the size of
8258 // the MMO. This is because the MMO might indicate only a possible address
8259 // range instead of specifying the affected memory addresses precisely.
8260 assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
8261 }
8263 /// Profile - Gather unique data for the node.
8265 void SDNode::Profile(FoldingSetNodeID &ID) const {
8266 AddNodeIDNode(ID, this);
8267 }
8269 namespace {
8271 struct EVTArray {
8272 std::vector<EVT> VTs;
8274 EVTArray() {
8275 VTs.reserve(MVT::LAST_VALUETYPE);
8276 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
8277 VTs.push_back(MVT((MVT::SimpleValueType)i));
8278 }
8279 };
8281 } // end anonymous namespace
8283 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
8284 static ManagedStatic<EVTArray> SimpleVTArray;
8285 static ManagedStatic<sys::SmartMutex<true>> VTMutex;
8287 /// getValueTypeList - Return a pointer to the specified value type.
8289 const EVT *SDNode::getValueTypeList(EVT VT) {
8290 if (VT.isExtended()) {
8291 sys::SmartScopedLock<true> Lock(*VTMutex);
8292 return &(*EVTs->insert(VT).first);
8293 } else {
8294 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
8295 "Value type out of range!");
8296 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
8297 }
8298 }
8300 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
8301 /// indicated value. This method ignores uses of other values defined by this
8302 /// operation.
8303 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
8304 assert(Value < getNumValues() && "Bad value!");
8306 // TODO: Only iterate over uses of a given value of the node
8307 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
8308 if (UI.getUse().getResNo() == Value) {
8309 if (NUses == 0)
8310 return false;
8311 --NUses;
8312 }
8313 }
8315 // Found exactly the right number of uses?
8316 return NUses == 0;
8317 }
8319 /// hasAnyUseOfValue - Return true if there is any use of the indicated
8320 /// value. This method ignores uses of other values defined by this operation.
8321 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
8322 assert(Value < getNumValues() && "Bad value!");
8324 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
8325 if (UI.getUse().getResNo() == Value)
8326 return true;
8328 return false;
8329 }
8331 /// isOnlyUserOf - Return true if this node is the only use of N.
8332 bool SDNode::isOnlyUserOf(const SDNode *N) const {
8333 bool Seen = false;
8334 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
8335 SDNode *User = *I;
8336 if (User == this)
8337 Seen = true;
8338 else
8339 return false;
8340 }
8342 return Seen;
8343 }
8345 /// Return true if the only users of N are contained in Nodes.
8346 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
8347 bool Seen = false;
8348 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
8349 SDNode *User = *I;
8350 if (llvm::any_of(Nodes,
8351 [&User](const SDNode *Node) { return User == Node; }))
8352 Seen = true;
8353 else
8354 return false;
8355 }
8357 return Seen;
8358 }
8360 /// isOperandOf - Return true if this value is an operand of N.
8361 bool SDValue::isOperandOf(const SDNode *N) const {
8362 for (const SDValue &Op : N->op_values())
8363 if (*this == Op)
8364 return true;
8365 return false;
8366 }
8368 bool SDNode::isOperandOf(const SDNode *N) const {
8369 for (const SDValue &Op : N->op_values())
8370 if (this == Op.getNode())
8371 return true;
8372 return false;
8373 }
8375 /// reachesChainWithoutSideEffects - Return true if this operand (which must
8376 /// be a chain) reaches the specified operand without crossing any
8377 /// side-effecting instructions on any chain path. In practice, this looks
8378 /// through token factors and non-volatile loads. In order to remain efficient,
8379 /// this only looks a couple of nodes in; it does not do an exhaustive search.
8381 /// Note that we only need to examine chains when we're searching for
8382 /// side-effects; SelectionDAG requires that all side-effects are represented
8383 /// by chains, even if another operand would force a specific ordering. This
8384 /// constraint is necessary to allow transformations like splitting loads.
8385 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
8386 unsigned Depth) const {
8387 if (*this == Dest) return true;
8389 // Don't search too deeply, we just want to be able to see through
8390 // TokenFactor's etc.
8391 if (Depth == 0) return false;
8393 // If this is a token factor, all inputs to the TF happen in parallel.
8394 if (getOpcode() == ISD::TokenFactor) {
8395 // First, try a shallow search.
8396 if (is_contained((*this)->ops(), Dest)) {
8397 // We found the chain we want as an operand of this TokenFactor.
8398 // Essentially, we reach the chain without side-effects if we could
8399 // serialize the TokenFactor into a simple chain of operations with
8400 // Dest as the last operation. This is automatically true if the
8401 // chain has one use: there are no other ordering constraints.
8402 // If the chain has more than one use, we give up: some other
8403 // use of Dest might force a side-effect between Dest and the current
8404 // node.
8405 if (Dest.hasOneUse())
8406 return true;
8407 }
8408 // Next, try a deep search: check whether every operand of the TokenFactor
8409 // reaches Dest.
8410 return llvm::all_of((*this)->ops(), [=](SDValue Op) {
8411 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
8412 });
8413 }
8415 // Loads don't have side effects, look through them.
8416 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
8417 if (!Ld->isVolatile())
8418 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
8419 }
8420 return false;
8421 }
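// Usage sketch (illustrative only; 'Chain' and 'LoadChain' are hypothetical
// SDValues): a combiner can use this to argue that folding one memory
// operation into another does not move it across a side effect:
//
//   if (Chain.reachesChainWithoutSideEffects(LoadChain))
//     ; // No side-effecting node sits between the two chains.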
8423 bool SDNode::hasPredecessor(const SDNode *N) const {
8424 SmallPtrSet<const SDNode *, 32> Visited;
8425 SmallVector<const SDNode *, 16> Worklist;
8426 Worklist.push_back(this);
8427 return hasPredecessorHelper(N, Visited, Worklist);
8428 }
8430 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
8431 this->Flags.intersectWith(Flags);
8432 }
8434 SDValue
8435 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
8436 ArrayRef<ISD::NodeType> CandidateBinOps) {
8437 // The pattern must end in an extract from index 0.
8438 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8439 !isNullConstant(Extract->getOperand(1)))
8440 return SDValue();
8442 SDValue Op = Extract->getOperand(0);
8443 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
8445 // Match against one of the candidate binary ops.
8446 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
8447 return Op.getOpcode() == unsigned(BinOp);
8448 }))
8449 return SDValue();
8451 // At each stage, we're looking for something that looks like:
8452 // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
8453 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
8454 // i32 undef, i32 undef, i32 undef, i32 undef>
8455 // %a = binop <8 x i32> %op, %s
8456 // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
8457 // we expect something like:
8458 // <4,5,6,7,u,u,u,u>
8459 // <2,3,u,u,u,u,u,u>
8460 // <1,u,u,u,u,u,u,u>
8461 unsigned CandidateBinOp = Op.getOpcode();
8462 for (unsigned i = 0; i < Stages; ++i) {
8463 if (Op.getOpcode() != CandidateBinOp)
8464 return SDValue();
8466 SDValue Op0 = Op.getOperand(0);
8467 SDValue Op1 = Op.getOperand(1);
8469 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0);
8470 if (Shuffle) {
8471 Op = Op1;
8472 } else {
8473 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
8474 Op = Op0;
8475 }
8477 // The first operand of the shuffle should be the same as the other operand
8478 // of the binop.
8479 if (!Shuffle || Shuffle->getOperand(0) != Op)
8480 return SDValue();
8482 // Verify the shuffle has the expected (at this stage of the pyramid) mask.
8483 for (int Index = 0, MaskEnd = 1 << i; Index < MaskEnd; ++Index)
8484 if (Shuffle->getMaskElt(Index) != MaskEnd + Index)
8485 return SDValue();
8486 }
8488 BinOp = (ISD::NodeType)CandidateBinOp;
8489 return Op;
8490 }
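// Usage sketch (illustrative only; 'DAG' and 'Extract' are hypothetical
// names): a target that lowers horizontal reductions might call this when it
// sees an extract of lane 0:
//
//   ISD::NodeType BinOp;
//   if (SDValue Rdx = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD}))
//     ; // Rdx is the unreduced input vector and BinOp == ISD::ADD.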
8492 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
8493 assert(N->getNumValues() == 1 &&
8494 "Can't unroll a vector with multiple results!");
8496 EVT VT = N->getValueType(0);
8497 unsigned NE = VT.getVectorNumElements();
8498 EVT EltVT = VT.getVectorElementType();
8499 SDLoc dl(N);
8501 SmallVector<SDValue, 8> Scalars;
8502 SmallVector<SDValue, 4> Operands(N->getNumOperands());
8504 // If ResNE is 0, fully unroll the vector op.
8505 if (ResNE == 0)
8506 ResNE = NE;
8507 else if (NE > ResNE)
8508 NE = ResNE;
8510 unsigned i;
8511 for (i = 0; i != NE; ++i) {
8512 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
8513 SDValue Operand = N->getOperand(j);
8514 EVT OperandVT = Operand.getValueType();
8515 if (OperandVT.isVector()) {
8516 // A vector operand; extract a single element.
8517 EVT OperandEltVT = OperandVT.getVectorElementType();
8518 Operands[j] =
8519 getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
8520 getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
8521 } else {
8522 // A scalar operand; just use it as is.
8523 Operands[j] = Operand;
8524 }
8525 }
8527 switch (N->getOpcode()) {
8528 default: {
8529 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
8530 N->getFlags()));
8531 break;
8532 }
8533 case ISD::VSELECT:
8534 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
8535 break;
8536 case ISD::SHL:
8537 case ISD::SRA:
8538 case ISD::SRL:
8539 case ISD::ROTL:
8540 case ISD::ROTR:
8541 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
8542 getShiftAmountOperand(Operands[0].getValueType(),
8543 Operands[1])));
8544 break;
8545 case ISD::SIGN_EXTEND_INREG:
8546 case ISD::FP_ROUND_INREG: {
8547 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
8548 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
8549 Operands[0],
8550 getValueType(ExtVT)));
8551 }
8552 }
8553 }
8555 for (; i < ResNE; ++i)
8556 Scalars.push_back(getUNDEF(EltVT));
8558 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
8559 return getBuildVector(VecVT, dl, Scalars);
8560 }
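// Usage sketch (illustrative only; 'DAG' and 'N' are hypothetical names):
// fully scalarize a vector operation the target cannot perform natively;
// the result is a BUILD_VECTOR of the per-element scalar operations:
//
//   SDValue Unrolled = DAG.UnrollVectorOp(N);  // ResNE == 0: unroll fully.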
8562 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
8563 LoadSDNode *Base,
8564 unsigned Bytes,
8565 int Dist) const {
8566 if (LD->isVolatile() || Base->isVolatile())
8567 return false;
8568 if (LD->isIndexed() || Base->isIndexed())
8569 return false;
8570 if (LD->getChain() != Base->getChain())
8571 return false;
8572 EVT VT = LD->getValueType(0);
8573 if (VT.getSizeInBits() / 8 != Bytes)
8574 return false;
8576 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
8577 auto LocDecomp = BaseIndexOffset::match(LD, *this);
8579 int64_t Offset = 0;
8580 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
8581 return (Dist * Bytes == Offset);
8582 return false;
8583 }
8585 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
8586 /// it cannot be inferred.
8587 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
8588 // If this is a GlobalAddress + cst, return the alignment.
8589 const GlobalValue *GV;
8590 int64_t GVOffset = 0;
8591 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
8592 unsigned IdxWidth = getDataLayout().getIndexTypeSizeInBits(GV->getType());
8593 KnownBits Known(IdxWidth);
8594 llvm::computeKnownBits(GV, Known, getDataLayout());
8595 unsigned AlignBits = Known.countMinTrailingZeros();
8596 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
8597 if (Align)
8598 return MinAlign(Align, GVOffset);
8599 }
8601 // If this is a direct reference to a stack slot, use information about the
8602 // stack slot's alignment.
8603 int FrameIdx = 1 << 31;
8604 int64_t FrameOffset = 0;
8605 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
8606 FrameIdx = FI->getIndex();
8607 } else if (isBaseWithConstantOffset(Ptr) &&
8608 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
8609 // Handle FI+Cst
8610 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
8611 FrameOffset = Ptr.getConstantOperandVal(1);
8612 }
8614 if (FrameIdx != (1 << 31)) {
8615 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
8616 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
8617 FrameOffset);
8618 return FIInfoAlign;
8619 }
8621 return 0;
8622 }
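// Usage sketch (illustrative only; 'DAG' and 'Ptr' are hypothetical names):
// callers typically fall back to a conservative default when nothing can be
// inferred:
//
//   unsigned Align = DAG.InferPtrAlignment(Ptr);
//   if (!Align)
//     Align = 1;  // Nothing inferred; assume byte alignment.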
8624 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
8625 /// which is split (or expanded) into two not necessarily identical pieces.
8626 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
8627 // Currently all types are split in half.
8628 EVT LoVT, HiVT;
8629 if (!VT.isVector())
8630 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
8631 else
8632 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());
8634 return std::make_pair(LoVT, HiVT);
8635 }
8637 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
8638 /// low/high part.
8639 std::pair<SDValue, SDValue>
8640 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
8641 const EVT &HiVT) {
8642 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
8643 N.getValueType().getVectorNumElements() &&
8644 "More vector elements requested than available!");
8645 SDValue Lo, Hi;
8646 Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
8647 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
8648 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
8649 getConstant(LoVT.getVectorNumElements(), DL,
8650 TLI->getVectorIdxTy(getDataLayout())));
8651 return std::make_pair(Lo, Hi);
8652 }
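// Usage sketch (illustrative only; 'DAG', 'Vec' and 'DL' are hypothetical
// names): splitting a v8i32 value into two v4i32 halves during type
// legalization:
//
//   EVT LoVT, HiVT;
//   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(Vec.getValueType());
//   SDValue Lo, Hi;
//   std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL, LoVT, HiVT);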
8654 void SelectionDAG::ExtractVectorElements(SDValue Op,
8655 SmallVectorImpl<SDValue> &Args,
8656 unsigned Start, unsigned Count) {
8657 EVT VT = Op.getValueType();
8658 if (Count == 0)
8659 Count = VT.getVectorNumElements();
8661 EVT EltVT = VT.getVectorElementType();
8662 EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
8663 SDLoc SL(Op);
8664 for (unsigned i = Start, e = Start + Count; i != e; ++i) {
8665 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
8666 Op, getConstant(i, SL, IdxTy)));
8667 }
8668 }
8670 // getAddressSpace - Return the address space this GlobalAddress belongs to.
8671 unsigned GlobalAddressSDNode::getAddressSpace() const {
8672 return getGlobal()->getType()->getAddressSpace();
8673 }
8675 Type *ConstantPoolSDNode::getType() const {
8676 if (isMachineConstantPoolEntry())
8677 return Val.MachineCPVal->getType();
8678 return Val.ConstVal->getType();
8679 }
8681 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
8682 unsigned &SplatBitSize,
8683 bool &HasAnyUndefs,
8684 unsigned MinSplatBits,
8685 bool IsBigEndian) const {
8686 EVT VT = getValueType(0);
8687 assert(VT.isVector() && "Expected a vector type");
8688 unsigned VecWidth = VT.getSizeInBits();
8689 if (MinSplatBits > VecWidth)
8690 return false;
8692 // FIXME: The widths are based on this node's type, but build vectors can
8693 // truncate their operands.
8694 SplatValue = APInt(VecWidth, 0);
8695 SplatUndef = APInt(VecWidth, 0);
8697 // Get the bits. Bits with undefined values (when the corresponding element
8698 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
8699 // in SplatValue. If any of the values are not constant, give up and return
8700 // false.
8701 unsigned NumOps = getNumOperands();
8702 assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
8703 unsigned EltWidth = VT.getScalarSizeInBits();
8705 for (unsigned j = 0; j < NumOps; ++j) {
8706 unsigned i = IsBigEndian ? NumOps - 1 - j : j;
8707 SDValue OpVal = getOperand(i);
8708 unsigned BitPos = j * EltWidth;
8710 if (OpVal.isUndef())
8711 SplatUndef.setBits(BitPos, BitPos + EltWidth);
8712 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
8713 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
8714 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
8715 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
8716 else
8717 return false;
8718 }
8720 // The build_vector is all constants or undefs. Find the smallest element
8721 // size that splats the vector.
8722 HasAnyUndefs = (SplatUndef != 0);
8724 // FIXME: This does not work for vectors with elements less than 8 bits.
8725 while (VecWidth > 8) {
8726 unsigned HalfSize = VecWidth / 2;
8727 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
8728 APInt LowValue = SplatValue.trunc(HalfSize);
8729 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
8730 APInt LowUndef = SplatUndef.trunc(HalfSize);
8732 // If the two halves do not match (ignoring undef bits), stop here.
8733 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
8734 MinSplatBits > HalfSize)
8735 break;
8737 SplatValue = HighValue | LowValue;
8738 SplatUndef = HighUndef & LowUndef;
8740 VecWidth = HalfSize;
8741 }
8743 SplatBitSize = VecWidth;
8744 return true;
8745 }
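// Usage sketch (illustrative only; 'BV' is a hypothetical
// BuildVectorSDNode*): querying for a splat of at least 8 bits. For a
// <16 x i8> vector of 0x01 this reports SplatBitSize == 8 with
// SplatValue == 0x01:
//
//   APInt SplatValue, SplatUndef;
//   unsigned SplatBitSize;
//   bool HasAnyUndefs;
//   if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
//                           HasAnyUndefs, /*MinSplatBits=*/8))
//     ; // Use SplatValue at granularity SplatBitSize.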
8747 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
8748 if (UndefElements) {
8749 UndefElements->clear();
8750 UndefElements->resize(getNumOperands());
8751 }
8752 SDValue Splatted;
8753 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
8754 SDValue Op = getOperand(i);
8755 if (Op.isUndef()) {
8756 if (UndefElements)
8757 (*UndefElements)[i] = true;
8758 } else if (!Splatted) {
8759 Splatted = Op;
8760 } else if (Splatted != Op) {
8761 return SDValue();
8762 }
8763 }
8765 if (!Splatted) {
8766 assert(getOperand(0).isUndef() &&
8767 "Can only have a splat without a constant for all undefs.");
8768 return getOperand(0);
8769 }
8771 return Splatted;
8772 }
8774 ConstantSDNode *
8775 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
8776 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
8777 }
8779 ConstantFPSDNode *
8780 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
8781 return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
8782 }
8784 int32_t
8785 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
8786 uint32_t BitWidth) const {
8787 if (ConstantFPSDNode *CN =
8788 dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
8789 bool IsExact;
8790 APSInt IntVal(BitWidth);
8791 const APFloat &APF = CN->getValueAPF();
8792 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
8793 APFloat::opOK ||
8794 !IsExact)
8795 return -1;
8797 return IntVal.exactLogBase2();
8798 }
8799 return -1;
8800 }
8802 bool BuildVectorSDNode::isConstant() const {
8803 for (const SDValue &Op : op_values()) {
8804 unsigned Opc = Op.getOpcode();
8805 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
8806 return false;
8807 }
8808 return true;
8809 }
8811 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
8812 // Find the first non-undef value in the shuffle mask.
8813 unsigned i, e;
8814 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
8815 /* search */;
8817 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
8819 // Make sure all remaining elements are either undef or the same as the first
8820 // non-undef value.
8821 for (int Idx = Mask[i]; i != e; ++i)
8822 if (Mask[i] >= 0 && Mask[i] != Idx)
8823 return false;
8824 return true;
8825 }
8827 // Returns the SDNode if it is a constant integer BuildVector
8828 // or constant integer.
8829 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
8830 if (isa<ConstantSDNode>(N))
8831 return N.getNode();
8832 if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
8833 return N.getNode();
8834 // Treat a GlobalAddress supporting constant offset folding as a
8835 // constant integer.
8836 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
8837 if (GA->getOpcode() == ISD::GlobalAddress &&
8838 TLI->isOffsetFoldingLegal(GA))
8839 return GA;
8840 return nullptr;
8841 }
8843 SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
8844 if (isa<ConstantFPSDNode>(N))
8845 return N.getNode();
8847 if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
8848 return N.getNode();
8850 return nullptr;
8851 }
8853 void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
8854 assert(!Node->OperandList && "Node already has operands");
8855 SDUse *Ops = OperandRecycler.allocate(
8856 ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);
8858 bool IsDivergent = false;
8859 for (unsigned I = 0; I != Vals.size(); ++I) {
8860 Ops[I].setUser(Node);
8861 Ops[I].setInitial(Vals[I]);
8862 if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence.
8863 IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent();
8864 }
8865 Node->NumOperands = Vals.size();
8866 Node->OperandList = Ops;
8867 IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
8868 if (!TLI->isSDNodeAlwaysUniform(Node))
8869 Node->SDNodeBits.IsDivergent = IsDivergent;
8870 checkForCycles(Node);
8871 }
8873 #ifndef NDEBUG
8874 static void checkForCyclesHelper(const SDNode *N,
8875 SmallPtrSetImpl<const SDNode*> &Visited,
8876 SmallPtrSetImpl<const SDNode*> &Checked,
8877 const llvm::SelectionDAG *DAG) {
8878 // If this node has already been checked, don't check it again.
8879 if (Checked.count(N))
8880 return;
8882 // If a node has already been visited on this depth-first walk, reject it as
8883 // a cycle.
8884 if (!Visited.insert(N).second) {
8885 errs() << "Detected cycle in SelectionDAG\n";
8886 dbgs() << "Offending node:\n";
8887 N->dumprFull(DAG); dbgs() << "\n";
8888 abort();
8889 }
8891 for (const SDValue &Op : N->op_values())
8892 checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);
8894 Checked.insert(N);
8895 Visited.erase(N);
8896 }
8897 #endif
8899 void llvm::checkForCycles(const llvm::SDNode *N,
8900 const llvm::SelectionDAG *DAG,
8901 bool force) {
8902 #ifndef NDEBUG
8903 bool check = force;
8904 #ifdef EXPENSIVE_CHECKS
8905 check = true;
8906 #endif // EXPENSIVE_CHECKS
8907 if (check) {
8908 assert(N && "Checking nonexistent SDNode");
8909 SmallPtrSet<const SDNode*, 32> visited;
8910 SmallPtrSet<const SDNode*, 32> checked;
8911 checkForCyclesHelper(N, visited, checked, DAG);
8912 }
8913 #endif // !NDEBUG
8914 }
8916 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
8917 checkForCycles(DAG->getRoot().getNode(), DAG, force);
8918 }