//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>
using namespace llvm;
/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}

void SelectionDAG::DAGNodeDeletedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}
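// For example, +0.0 == -0.0 evaluates to true, but
// APFloat(+0.0).bitwiseIsEqual(APFloat(-0.0)) is false because the sign bits
// differ; that bit-level distinction is exactly what the predicate above
// preserves.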
bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}
// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}
bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match,
                              bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}

bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}
ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
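// Worked example (using the N|U|L|G|E condition-code bit layout): SETLT is
// 0b10100; exchanging the L and G bits gives 0b10010, which is SETGT.
// Likewise SETULE (0b01101) becomes SETUGE (0b01011).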
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
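// Worked example: for integers, SETEQ (0b10001) ^ 7 == 0b10110 == SETNE; for
// floating point, SETOLT (0b00100) ^ 15 == 0b01011 == SETUGE, inverting both
// the relation and the treatment of unordered (NaN) operands.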
/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if the result is an unsigned comparison. Return zero if the operation
/// does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
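// Worked example: SETGT | SETEQ == 0b10010 | 0b10001 == 0b10011 == SETGE,
// while SETUGT | SETULT == SETUNE, which the integer canonicalization above
// rewrites to SETNE.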
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
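// Worked example: SETUGE & SETULE == 0b01011 & 0b01101 == 0b01001 == SETUEQ,
// which the integer canonicalization above rewrites to SETEQ.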
//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}
/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
      ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
    }
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}
/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}
//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}
/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This could
    // happen if replacing a node causes a node previously added to the worklist
    // to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}
void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}
#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}
/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
        ESN->getSymbol(), ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}
/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        LegacyDivergenceAnalysis * Divergence) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}
SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  SDCallSiteDbgInfo.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}
SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType().getScalarType() == VT) return Op;
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}
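// For example, zero-extending the low 16 bits of an i32 value in-register
// yields (and x, 0xFFFF), since getLowBitsSet(32, 16) produces that mask.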
SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future this
  // might delegate to TLI to check pointer signedness.
  return getZExtOrTrunc(Op, DL, VT);
}

SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future this
  // might delegate to TLI to check pointer signedness.
  return getZeroExtendInReg(Op, DL, VT);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}
SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  return Result;
}
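// Illustrative sketch of the expansion path above, assuming a MIPS32-like
// target where i64 must be expanded: a v2i64 splat of 0x0000000100000002 is
// emitted as a v4i32 BUILD_VECTOR of the two 32-bit halves (repeated once per
// requested element), then BITCAST back to v2i64.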
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
                                             const SDLoc &DL, bool LegalTypes) {
  EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
  return getConstant(Val, DL, ShiftVT);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}
SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = MF->getFunction().hasOptSize()
                    ? getDataLayout().getABITypeAlignment(C->getType())
                    : getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned TargetFlags) {
  SDNode *&N =
      TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}
1560 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1561 /// point at N1 to point at N2 and indices that point at N2 to point at N1.
1562 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
1563 std::swap(N1, N2);
1564 ShuffleVectorSDNode::commuteMask(M);
1567 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
1568 SDValue N2, ArrayRef<int> Mask) {
1569 assert(VT.getVectorNumElements() == Mask.size() &&
1570 "Must have the same number of vector elements as mask elements!");
1571 assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1572 "Invalid VECTOR_SHUFFLE");
1574 // Canonicalize shuffle undef, undef -> undef
1575 if (N1.isUndef() && N2.isUndef())
1576 return getUNDEF(VT);
1578 // Validate that all indices in Mask are within the range of the elements
1579 // input to the shuffle.
1580 int NElts = Mask.size();
1581 assert(llvm::all_of(Mask,
1582 [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
1583 "Index out of range");
1585 // Copy the mask so we can do any needed cleanup.
1586 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
1588 // Canonicalize shuffle v, v -> v, undef
1589 if (N1 == N2) {
1590 N2 = getUNDEF(VT);
1591 for (int i = 0; i != NElts; ++i)
1592 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
1595 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
1596 if (N1.isUndef())
1597 commuteShuffle(N1, N2, MaskVec);
1599 if (TLI->hasVectorBlend()) {
1600 // If shuffling a splat, try to blend the splat instead. We do this here so
1601 // that even when this arises during lowering we don't have to re-handle it.
1602 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1603 BitVector UndefElements;
1604 SDValue Splat = BV->getSplatValue(&UndefElements);
1605 if (!Splat)
1606 return;
1608 for (int i = 0; i < NElts; ++i) {
1609 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1610 continue;
1612 // If this input comes from undef, mark it as such.
1613 if (UndefElements[MaskVec[i] - Offset]) {
1614 MaskVec[i] = -1;
1615 continue;
1618 // If we can blend a non-undef lane, use that instead.
1619 if (!UndefElements[i])
1620 MaskVec[i] = i + Offset;
1623 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1624 BlendSplat(N1BV, 0);
1625 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1626 BlendSplat(N2BV, NElts);
1629 // Canonicalize all indices into lhs -> shuffle lhs, undef
1630 // Canonicalize all indices into rhs -> shuffle rhs, undef
1631 bool AllLHS = true, AllRHS = true;
1632 bool N2Undef = N2.isUndef();
1633 for (int i = 0; i != NElts; ++i) {
1634 if (MaskVec[i] >= NElts) {
1635 if (N2Undef)
1636 MaskVec[i] = -1;
1637 else
1638 AllLHS = false;
1639 } else if (MaskVec[i] >= 0) {
1640 AllRHS = false;
1643 if (AllLHS && AllRHS)
1644 return getUNDEF(VT);
1645 if (AllLHS && !N2Undef)
1646 N2 = getUNDEF(VT);
1647 if (AllRHS) {
1648 N1 = getUNDEF(VT);
1649 commuteShuffle(N1, N2, MaskVec);
1651 // Reset our undef status after accounting for the mask.
1652 N2Undef = N2.isUndef();
1653 // Re-check whether both sides ended up undef.
1654 if (N1.isUndef() && N2Undef)
1655 return getUNDEF(VT);
1657 // If this is an identity shuffle, return that node.
1658 bool Identity = true, AllSame = true;
1659 for (int i = 0; i != NElts; ++i) {
1660 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1661 if (MaskVec[i] != MaskVec[0]) AllSame = false;
1663 if (Identity && NElts)
1664 return N1;
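// E.g. (illustrative): mask <0, 1, -1, 3> on 4 elements is an identity;
// every non-undef index i selects lane i of N1, so N1 itself is returned.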
1666 // Shuffling a constant splat doesn't change the result.
1667 if (N2Undef) {
1668 SDValue V = N1;
1670 // Look through any bitcasts. We check that these don't change the number
1671 // (and size) of elements and just change their types.
1672 while (V.getOpcode() == ISD::BITCAST)
1673 V = V->getOperand(0);
1675 // A splat should always show up as a build vector node.
1676 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1677 BitVector UndefElements;
1678 SDValue Splat = BV->getSplatValue(&UndefElements);
1679 // If this is a splat of an undef, shuffling it is also undef.
1680 if (Splat && Splat.isUndef())
1681 return getUNDEF(VT);
1683 bool SameNumElts =
1684 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1686 // We can only skip the shuffle for a splat if there is a splatted
1687 // value and no undef lanes rearranged by the shuffle.
1688 if (Splat && UndefElements.none()) {
1689 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1690 // number of elements matches or the splatted value is a zero constant.
1691 if (SameNumElts)
1692 return N1;
1693 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1694 if (C->isNullValue())
1695 return N1;
1698 // If the shuffle itself creates a splat, build the vector directly.
1699 if (AllSame && SameNumElts) {
1700 EVT BuildVT = BV->getValueType(0);
1701 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1702 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1704 // We may have jumped through bitcasts, so the type of the
1705 // BUILD_VECTOR may not match the type of the shuffle.
1706 if (BuildVT != VT)
1707 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1708 return NewBV;
1713 FoldingSetNodeID ID;
1714 SDValue Ops[2] = { N1, N2 };
1715 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1716 for (int i = 0; i != NElts; ++i)
1717 ID.AddInteger(MaskVec[i]);
1719 void* IP = nullptr;
1720 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1721 return SDValue(E, 0);
1723 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1724 // SDNode doesn't have access to it. This memory will be "leaked" when
1725 // the node is deallocated, but recovered when the NodeAllocator is released.
1726 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1727 llvm::copy(MaskVec, MaskAlloc);
1729 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1730 dl.getDebugLoc(), MaskAlloc);
1731 createOperands(N, Ops);
1733 CSEMap.InsertNode(N, IP);
1734 InsertNode(N);
1735 SDValue V = SDValue(N, 0);
1736 NewSDValueDbgMsg(V, "Creating new node: ", this);
1737 return V;
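// Minimal usage sketch (hypothetical names; assumes a SelectionDAG &DAG,
// an SDLoc DL and two v4i32 values A and B are in scope):
//   int Mask[] = {0, 4, 1, 5}; // interleave the low halves of A and B
//   SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, DL, A, B, Mask);
// Note that the canonicalizations above may return an existing node (e.g.
// A itself for an identity mask) instead of a new VECTOR_SHUFFLE.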
1740 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1741 EVT VT = SV.getValueType(0);
1742 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1743 ShuffleVectorSDNode::commuteMask(MaskVec);
1745 SDValue Op0 = SV.getOperand(0);
1746 SDValue Op1 = SV.getOperand(1);
1747 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
1750 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1751 FoldingSetNodeID ID;
1752 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1753 ID.AddInteger(RegNo);
1754 void *IP = nullptr;
1755 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1756 return SDValue(E, 0);
1758 auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
1759 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
1760 CSEMap.InsertNode(N, IP);
1761 InsertNode(N);
1762 return SDValue(N, 0);
1765 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
1766 FoldingSetNodeID ID;
1767 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
1768 ID.AddPointer(RegMask);
1769 void *IP = nullptr;
1770 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1771 return SDValue(E, 0);
1773 auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
1774 CSEMap.InsertNode(N, IP);
1775 InsertNode(N);
1776 return SDValue(N, 0);
1779 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
1780 MCSymbol *Label) {
1781 return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
1784 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
1785 SDValue Root, MCSymbol *Label) {
1786 FoldingSetNodeID ID;
1787 SDValue Ops[] = { Root };
1788 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
1789 ID.AddPointer(Label);
1790 void *IP = nullptr;
1791 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1792 return SDValue(E, 0);
1794 auto *N =
1795 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
1796 createOperands(N, Ops);
1798 CSEMap.InsertNode(N, IP);
1799 InsertNode(N);
1800 return SDValue(N, 0);
1803 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
1804 int64_t Offset, bool isTarget,
1805 unsigned TargetFlags) {
1806 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1808 FoldingSetNodeID ID;
1809 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1810 ID.AddPointer(BA);
1811 ID.AddInteger(Offset);
1812 ID.AddInteger(TargetFlags);
1813 void *IP = nullptr;
1814 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1815 return SDValue(E, 0);
1817 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
1818 CSEMap.InsertNode(N, IP);
1819 InsertNode(N);
1820 return SDValue(N, 0);
1823 SDValue SelectionDAG::getSrcValue(const Value *V) {
1824 assert((!V || V->getType()->isPointerTy()) &&
1825 "SrcValue is not a pointer?");
1827 FoldingSetNodeID ID;
1828 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
1829 ID.AddPointer(V);
1831 void *IP = nullptr;
1832 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1833 return SDValue(E, 0);
1835 auto *N = newSDNode<SrcValueSDNode>(V);
1836 CSEMap.InsertNode(N, IP);
1837 InsertNode(N);
1838 return SDValue(N, 0);
1841 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
1842 FoldingSetNodeID ID;
1843 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
1844 ID.AddPointer(MD);
1846 void *IP = nullptr;
1847 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1848 return SDValue(E, 0);
1850 auto *N = newSDNode<MDNodeSDNode>(MD);
1851 CSEMap.InsertNode(N, IP);
1852 InsertNode(N);
1853 return SDValue(N, 0);
1856 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
1857 if (VT == V.getValueType())
1858 return V;
1860 return getNode(ISD::BITCAST, SDLoc(V), VT, V);
1863 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
1864 unsigned SrcAS, unsigned DestAS) {
1865 SDValue Ops[] = {Ptr};
1866 FoldingSetNodeID ID;
1867 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
1868 ID.AddInteger(SrcAS);
1869 ID.AddInteger(DestAS);
1871 void *IP = nullptr;
1872 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1873 return SDValue(E, 0);
1875 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
1876 VT, SrcAS, DestAS);
1877 createOperands(N, Ops);
1879 CSEMap.InsertNode(N, IP);
1880 InsertNode(N);
1881 return SDValue(N, 0);
1884 /// getShiftAmountOperand - Return the specified value cast to
1885 /// the target's desired shift amount type.
1886 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
1887 EVT OpTy = Op.getValueType();
1888 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
1889 if (OpTy == ShTy || OpTy.isVector()) return Op;
1891 return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
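// Sketch of the behavior (illustrative): on a target whose shift amount
// type for this LHS is i32, an i8 shift amount is zero-extended to i32,
// an i64 amount is truncated, and an i32 (or any vector) amount is
// returned unchanged.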
1894 SDValue SelectionDAG::expandVAArg(SDNode *Node) {
1895 SDLoc dl(Node);
1896 const TargetLowering &TLI = getTargetLoweringInfo();
1897 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
1898 EVT VT = Node->getValueType(0);
1899 SDValue Tmp1 = Node->getOperand(0);
1900 SDValue Tmp2 = Node->getOperand(1);
1901 const MaybeAlign MA(Node->getConstantOperandVal(3));
1903 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
1904 Tmp2, MachinePointerInfo(V));
1905 SDValue VAList = VAListLoad;
1907 if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
1908 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1909 getConstant(MA->value() - 1, dl, VAList.getValueType()));
1911 VAList =
1912 getNode(ISD::AND, dl, VAList.getValueType(), VAList,
1913 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
1916 // Increment the pointer, VAList, to the next vaarg
1917 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1918 getConstant(getDataLayout().getTypeAllocSize(
1919 VT.getTypeForEVT(*getContext())),
1920 dl, VAList.getValueType()));
1921 // Store the incremented VAList to the legalized pointer
1922 Tmp1 =
1923 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
1924 // Load the actual argument out of the pointer VAList
1925 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
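// Conceptually the expansion above is (sketch, pointer casts elided):
//   list = load va_list_ptr               // current argument pointer
//   list = (list + align - 1) & -align    // only for over-aligned args
//   store (list + sizeof(arg)), va_list_ptr
//   result = load list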
1928 SDValue SelectionDAG::expandVACopy(SDNode *Node) {
1929 SDLoc dl(Node);
1930 const TargetLowering &TLI = getTargetLoweringInfo();
1931 // This defaults to loading a pointer from the input and storing it to the
1932 // output, returning the chain.
1933 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
1934 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
1935 SDValue Tmp1 =
1936 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
1937 Node->getOperand(2), MachinePointerInfo(VS));
1938 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
1939 MachinePointerInfo(VD));
1942 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
1943 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
1944 unsigned ByteSize = VT.getStoreSize();
1945 Type *Ty = VT.getTypeForEVT(*getContext());
1946 unsigned StackAlign =
1947 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);
1949 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
1950 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
1953 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
1954 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize());
1955 Type *Ty1 = VT1.getTypeForEVT(*getContext());
1956 Type *Ty2 = VT2.getTypeForEVT(*getContext());
1957 const DataLayout &DL = getDataLayout();
1958 unsigned Align =
1959 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));
1961 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
1962 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false);
1963 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
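// E.g. (illustrative, assuming a typical 64-bit DataLayout): for i64 and
// f32 this creates an 8-byte slot aligned to 8, the maximum of the two
// store sizes and of the two preferred alignments.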
1966 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
1967 ISD::CondCode Cond, const SDLoc &dl) {
1968 EVT OpVT = N1.getValueType();
1970 // These setcc operations always fold.
1971 switch (Cond) {
1972 default: break;
1973 case ISD::SETFALSE:
1974 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
1975 case ISD::SETTRUE:
1976 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);
1978 case ISD::SETOEQ:
1979 case ISD::SETOGT:
1980 case ISD::SETOGE:
1981 case ISD::SETOLT:
1982 case ISD::SETOLE:
1983 case ISD::SETONE:
1984 case ISD::SETO:
1985 case ISD::SETUO:
1986 case ISD::SETUEQ:
1987 case ISD::SETUNE:
1988 assert(!OpVT.isInteger() && "Illegal setcc for integer!");
1989 break;
1992 if (OpVT.isInteger()) {
1993 // For EQ and NE, we can always pick a value for the undef to make the
1994 // predicate pass or fail, so we can return undef.
1995 // Matches behavior in llvm::ConstantFoldCompareInstruction.
1996 // icmp eq/ne X, undef -> undef.
1997 if ((N1.isUndef() || N2.isUndef()) &&
1998 (Cond == ISD::SETEQ || Cond == ISD::SETNE))
1999 return getUNDEF(VT);
2001 // If both operands are undef, we can return undef for int comparison.
2002 // icmp undef, undef -> undef.
2003 if (N1.isUndef() && N2.isUndef())
2004 return getUNDEF(VT);
2006 // icmp X, X -> true/false
2007 // icmp X, undef -> true/false because undef could be X.
2008 if (N1 == N2)
2009 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
2012 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
2013 const APInt &C2 = N2C->getAPIntValue();
2014 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
2015 const APInt &C1 = N1C->getAPIntValue();
2017 switch (Cond) {
2018 default: llvm_unreachable("Unknown integer setcc!");
2019 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT);
2020 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT);
2021 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT);
2022 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT);
2023 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT);
2024 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT);
2025 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT);
2026 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT);
2027 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT);
2028 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT);
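// For instance (illustrative): with i8 constants C1 = -1 and C2 = 1,
// SETLT folds to true (signed: -1 < 1) while SETULT folds to false
// (unsigned: 255 < 1), which is why both families are folded explicitly.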
2033 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2034 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2036 if (N1CFP && N2CFP) {
2037 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
2038 switch (Cond) {
2039 default: break;
2040 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
2041 return getUNDEF(VT);
2042 LLVM_FALLTHROUGH;
2043 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
2044 OpVT);
2045 case ISD::SETNE: if (R==APFloat::cmpUnordered)
2046 return getUNDEF(VT);
2047 LLVM_FALLTHROUGH;
2048 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2049 R==APFloat::cmpLessThan, dl, VT,
2050 OpVT);
2051 case ISD::SETLT: if (R==APFloat::cmpUnordered)
2052 return getUNDEF(VT);
2053 LLVM_FALLTHROUGH;
2054 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
2055 OpVT);
2056 case ISD::SETGT: if (R==APFloat::cmpUnordered)
2057 return getUNDEF(VT);
2058 LLVM_FALLTHROUGH;
2059 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
2060 VT, OpVT);
2061 case ISD::SETLE: if (R==APFloat::cmpUnordered)
2062 return getUNDEF(VT);
2063 LLVM_FALLTHROUGH;
2064 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
2065 R==APFloat::cmpEqual, dl, VT,
2066 OpVT);
2067 case ISD::SETGE: if (R==APFloat::cmpUnordered)
2068 return getUNDEF(VT);
2069 LLVM_FALLTHROUGH;
2070 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2071 R==APFloat::cmpEqual, dl, VT, OpVT);
2072 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
2073 OpVT);
2074 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
2075 OpVT);
2076 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
2077 R==APFloat::cmpEqual, dl, VT,
2078 OpVT);
2079 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
2080 OpVT);
2081 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
2082 R==APFloat::cmpLessThan, dl, VT,
2083 OpVT);
2084 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2085 R==APFloat::cmpUnordered, dl, VT,
2086 OpVT);
2087 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
2088 VT, OpVT);
2089 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
2090 OpVT);
2092 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
2093 // Ensure that the constant occurs on the RHS.
2094 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
2095 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
2096 return SDValue();
2097 return getSetCC(dl, VT, N2, N1, SwappedCond);
2098 } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2099 (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
2100 // If an operand is known to be a nan (or undef that could be a nan), we can
2101 // fold it.
2102 // Choosing NaN for the undef will always make unordered comparisons succeed
2103 // and ordered comparisons fail.
2104 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2105 switch (ISD::getUnorderedFlavor(Cond)) {
2106 default:
2107 llvm_unreachable("Unknown flavor!");
2108 case 0: // Known false.
2109 return getBoolConstant(false, dl, VT, OpVT);
2110 case 1: // Known true.
2111 return getBoolConstant(true, dl, VT, OpVT);
2112 case 2: // Undefined.
2113 return getUNDEF(VT);
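// E.g. (illustrative) with a known-NaN operand: SETOLT has flavor 0 and
// folds to false, SETULT has flavor 1 and folds to true, and the
// "don't care" SETLT has flavor 2 and folds to undef.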
2117 // Could not fold it.
2118 return SDValue();
2121 /// See if the specified operand can be simplified with the knowledge that only
2122 /// the bits specified by DemandedBits are used.
2123 /// TODO: really we should be making this into the DAG equivalent of
2124 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2125 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
2126 EVT VT = V.getValueType();
2127 APInt DemandedElts = VT.isVector()
2128 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2129 : APInt(1, 1);
2130 return GetDemandedBits(V, DemandedBits, DemandedElts);
2133 /// See if the specified operand can be simplified with the knowledge that only
2134 /// the bits specified by DemandedBits are used in the elements specified by
2135 /// DemandedElts.
2136 /// TODO: really we should be making this into the DAG equivalent of
2137 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2138 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
2139 const APInt &DemandedElts) {
2140 switch (V.getOpcode()) {
2141 default:
2142 break;
2143 case ISD::Constant: {
2144 auto *CV = cast<ConstantSDNode>(V.getNode());
2145 assert(CV && "Const value should be ConstSDNode.");
2146 const APInt &CVal = CV->getAPIntValue();
2147 APInt NewVal = CVal & DemandedBits;
2148 if (NewVal != CVal)
2149 return getConstant(NewVal, SDLoc(V), V.getValueType());
2150 break;
2152 case ISD::OR:
2153 case ISD::XOR:
2154 case ISD::SIGN_EXTEND_INREG:
2155 return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
2156 *this, 0);
2157 case ISD::SRL:
2158 // Only look at single-use SRLs.
2159 if (!V.getNode()->hasOneUse())
2160 break;
2161 if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
2162 // See if we can recursively simplify the LHS.
2163 unsigned Amt = RHSC->getZExtValue();
2165 // Watch out for shift count overflow though.
2166 if (Amt >= DemandedBits.getBitWidth())
2167 break;
2168 APInt SrcDemandedBits = DemandedBits << Amt;
2169 if (SDValue SimplifyLHS =
2170 GetDemandedBits(V.getOperand(0), SrcDemandedBits))
2171 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
2172 V.getOperand(1));
2174 break;
2175 case ISD::AND: {
2176 // X & -1 -> X (ignoring bits which aren't demanded).
2177 // Also handle the case where masked out bits in X are known to be zero.
2178 if (ConstantSDNode *RHSC = isConstOrConstSplat(V.getOperand(1))) {
2179 const APInt &AndVal = RHSC->getAPIntValue();
2180 if (DemandedBits.isSubsetOf(AndVal) ||
2181 DemandedBits.isSubsetOf(computeKnownBits(V.getOperand(0)).Zero |
2182 AndVal))
2183 return V.getOperand(0);
2185 break;
2187 case ISD::ANY_EXTEND: {
2188 SDValue Src = V.getOperand(0);
2189 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
2190 // Being conservative here - only peek through if we only demand bits in the
2191 // non-extended source (even though the extended bits are technically
2192 // undef).
2193 if (DemandedBits.getActiveBits() > SrcBitWidth)
2194 break;
2195 APInt SrcDemandedBits = DemandedBits.trunc(SrcBitWidth);
2196 if (SDValue DemandedSrc = GetDemandedBits(Src, SrcDemandedBits))
2197 return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc);
2198 break;
2201 return SDValue();
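// Example for the AND case above (illustrative): if V = (X & 0xF0) and
// only bits 0x30 are demanded, the mask clears no demanded bit
// (0x30 is a subset of 0xF0), so X is returned directly.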
2204 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
2205 /// use this predicate to simplify operations downstream.
2206 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
2207 unsigned BitWidth = Op.getScalarValueSizeInBits();
2208 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
2211 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
2212 /// this predicate to simplify operations downstream. Mask is known to be zero
2213 /// for bits that V cannot have.
2214 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2215 unsigned Depth) const {
2216 EVT VT = V.getValueType();
2217 APInt DemandedElts = VT.isVector()
2218 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2219 : APInt(1, 1);
2220 return MaskedValueIsZero(V, Mask, DemandedElts, Depth);
2223 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
2224 /// DemandedElts. We use this predicate to simplify operations downstream.
2225 /// Mask is known to be zero for bits that V cannot have.
2226 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2227 const APInt &DemandedElts,
2228 unsigned Depth) const {
2229 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
2232 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
2233 bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
2234 unsigned Depth) const {
2235 return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
2238 /// isSplatValue - Return true if the vector V has the same value
2239 /// across all DemandedElts.
2240 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
2241 APInt &UndefElts) {
2242 if (!DemandedElts)
2243 return false; // No demanded elts, better to assume we don't know anything.
2245 EVT VT = V.getValueType();
2246 assert(VT.isVector() && "Vector type expected");
2248 unsigned NumElts = VT.getVectorNumElements();
2249 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
2250 UndefElts = APInt::getNullValue(NumElts);
2252 switch (V.getOpcode()) {
2253 case ISD::BUILD_VECTOR: {
2254 SDValue Scl;
2255 for (unsigned i = 0; i != NumElts; ++i) {
2256 SDValue Op = V.getOperand(i);
2257 if (Op.isUndef()) {
2258 UndefElts.setBit(i);
2259 continue;
2261 if (!DemandedElts[i])
2262 continue;
2263 if (Scl && Scl != Op)
2264 return false;
2265 Scl = Op;
2267 return true;
2269 case ISD::VECTOR_SHUFFLE: {
2270 // Check if this is a shuffle node doing a splat.
2271 // TODO: Do we need to handle shuffle(splat, undef, mask)?
2272 int SplatIndex = -1;
2273 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
2274 for (int i = 0; i != (int)NumElts; ++i) {
2275 int M = Mask[i];
2276 if (M < 0) {
2277 UndefElts.setBit(i);
2278 continue;
2280 if (!DemandedElts[i])
2281 continue;
2282 if (0 <= SplatIndex && SplatIndex != M)
2283 return false;
2284 SplatIndex = M;
2286 return true;
2288 case ISD::EXTRACT_SUBVECTOR: {
2289 SDValue Src = V.getOperand(0);
2290 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(V.getOperand(1));
2291 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2292 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
2293 // Offset the demanded elts by the subvector index.
2294 uint64_t Idx = SubIdx->getZExtValue();
2295 APInt UndefSrcElts;
2296 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2297 if (isSplatValue(Src, DemandedSrc, UndefSrcElts)) {
2298 UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
2299 return true;
2302 break;
2304 case ISD::ADD:
2305 case ISD::SUB:
2306 case ISD::AND: {
2307 APInt UndefLHS, UndefRHS;
2308 SDValue LHS = V.getOperand(0);
2309 SDValue RHS = V.getOperand(1);
2310 if (isSplatValue(LHS, DemandedElts, UndefLHS) &&
2311 isSplatValue(RHS, DemandedElts, UndefRHS)) {
2312 UndefElts = UndefLHS | UndefRHS;
2313 return true;
2315 break;
2319 return false;
2322 /// Helper wrapper to main isSplatValue function.
2323 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) {
2324 EVT VT = V.getValueType();
2325 assert(VT.isVector() && "Vector type expected");
2326 unsigned NumElts = VT.getVectorNumElements();
2328 APInt UndefElts;
2329 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
2330 return isSplatValue(V, DemandedElts, UndefElts) &&
2331 (AllowUndefs || !UndefElts);
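// E.g. (illustrative): BUILD_VECTOR <x, undef, x, x> is reported as a
// splat with UndefElts = 0b0010, so this wrapper accepts it only when
// AllowUndefs is true.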
2334 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
2335 V = peekThroughExtractSubvectors(V);
2337 EVT VT = V.getValueType();
2338 unsigned Opcode = V.getOpcode();
2339 switch (Opcode) {
2340 default: {
2341 APInt UndefElts;
2342 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
2343 if (isSplatValue(V, DemandedElts, UndefElts)) {
2344 // Handle case where all demanded elements are UNDEF.
2345 if (DemandedElts.isSubsetOf(UndefElts)) {
2346 SplatIdx = 0;
2347 return getUNDEF(VT);
2349 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
2350 return V;
2352 break;
2354 case ISD::VECTOR_SHUFFLE: {
2355 // Check if this is a shuffle node doing a splat.
2356 // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
2357 // getTargetVShiftNode currently struggles without the splat source.
2358 auto *SVN = cast<ShuffleVectorSDNode>(V);
2359 if (!SVN->isSplat())
2360 break;
2361 int Idx = SVN->getSplatIndex();
2362 int NumElts = V.getValueType().getVectorNumElements();
2363 SplatIdx = Idx % NumElts;
2364 return V.getOperand(Idx / NumElts);
2368 return SDValue();
2371 SDValue SelectionDAG::getSplatValue(SDValue V) {
2372 int SplatIdx;
2373 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx))
2374 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V),
2375 SrcVector.getValueType().getScalarType(), SrcVector,
2376 getIntPtrConstant(SplatIdx, SDLoc(V)));
2377 return SDValue();
2380 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that
2381 /// is less than the element bit-width of the shift node, return it.
2382 static const APInt *getValidShiftAmountConstant(SDValue V) {
2383 unsigned BitWidth = V.getScalarValueSizeInBits();
2384 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) {
2385 // Shifting more than the bitwidth is not valid.
2386 const APInt &ShAmt = SA->getAPIntValue();
2387 if (ShAmt.ult(BitWidth))
2388 return &ShAmt;
2390 return nullptr;
2393 /// If a SHL/SRA/SRL node has constant vector shift amounts that are all less
2394 /// than the element bit-width of the shift node, return the minimum value.
2395 static const APInt *getValidMinimumShiftAmountConstant(SDValue V) {
2396 unsigned BitWidth = V.getScalarValueSizeInBits();
2397 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2398 if (!BV)
2399 return nullptr;
2400 const APInt *MinShAmt = nullptr;
2401 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2402 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2403 if (!SA)
2404 return nullptr;
2405 // Shifting more than the bitwidth is not valid.
2406 const APInt &ShAmt = SA->getAPIntValue();
2407 if (ShAmt.uge(BitWidth))
2408 return nullptr;
2409 if (MinShAmt && MinShAmt->ule(ShAmt))
2410 continue;
2411 MinShAmt = &ShAmt;
2413 return MinShAmt;
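// E.g. (illustrative): for a v4i32 shift with amounts <1, 2, 3, 4> this
// returns 1; with any non-constant element, or any amount >= 32, it
// returns nullptr.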
2416 /// Determine which bits of Op are known to be either zero or one and return
2417 /// them in Known. For vectors, the known bits are those that are shared by
2418 /// every vector element.
2419 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
2420 EVT VT = Op.getValueType();
2421 APInt DemandedElts = VT.isVector()
2422 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2423 : APInt(1, 1);
2424 return computeKnownBits(Op, DemandedElts, Depth);
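// Usage sketch (hypothetical SDValue N and SelectionDAG &DAG in scope):
//   KnownBits Known = DAG.computeKnownBits(N);
//   if (Known.isNonNegative())
//     ... // sign bit known clear, so e.g. sext and zext of N agree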
2427 /// Determine which bits of Op are known to be either zero or one and return
2428 /// them in Known. The DemandedElts argument allows us to only collect the known
2429 /// bits that are shared by the requested vector elements.
2430 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
2431 unsigned Depth) const {
2432 unsigned BitWidth = Op.getScalarValueSizeInBits();
2434 KnownBits Known(BitWidth); // Don't know anything.
2436 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2437 // We know all of the bits for a constant!
2438 Known.One = C->getAPIntValue();
2439 Known.Zero = ~Known.One;
2440 return Known;
2442 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
2443 // We know all of the bits for a constant fp!
2444 Known.One = C->getValueAPF().bitcastToAPInt();
2445 Known.Zero = ~Known.One;
2446 return Known;
2449 if (Depth >= MaxRecursionDepth)
2450 return Known; // Limit search depth.
2452 KnownBits Known2;
2453 unsigned NumElts = DemandedElts.getBitWidth();
2454 assert((!Op.getValueType().isVector() ||
2455 NumElts == Op.getValueType().getVectorNumElements()) &&
2456 "Unexpected vector size");
2458 if (!DemandedElts)
2459 return Known; // No demanded elts, better to assume we don't know anything.
2461 unsigned Opcode = Op.getOpcode();
2462 switch (Opcode) {
2463 case ISD::BUILD_VECTOR:
2464 // Collect the known bits that are shared by every demanded vector element.
2465 Known.Zero.setAllBits(); Known.One.setAllBits();
2466 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2467 if (!DemandedElts[i])
2468 continue;
2470 SDValue SrcOp = Op.getOperand(i);
2471 Known2 = computeKnownBits(SrcOp, Depth + 1);
2473 // BUILD_VECTOR can implicitly truncate sources; we must handle this.
2474 if (SrcOp.getValueSizeInBits() != BitWidth) {
2475 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2476 "Expected BUILD_VECTOR implicit truncation");
2477 Known2 = Known2.trunc(BitWidth);
2480 // Known bits are the values that are shared by every demanded element.
2481 Known.One &= Known2.One;
2482 Known.Zero &= Known2.Zero;
2484 // If we don't know any bits, early out.
2485 if (Known.isUnknown())
2486 break;
2488 break;
2489 case ISD::VECTOR_SHUFFLE: {
2490 // Collect the known bits that are shared by every vector element referenced
2491 // by the shuffle.
2492 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2493 Known.Zero.setAllBits(); Known.One.setAllBits();
2494 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2495 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2496 for (unsigned i = 0; i != NumElts; ++i) {
2497 if (!DemandedElts[i])
2498 continue;
2500 int M = SVN->getMaskElt(i);
2501 if (M < 0) {
2502 // For UNDEF elements, we don't know anything about the common state of
2503 // the shuffle result.
2504 Known.resetAll();
2505 DemandedLHS.clearAllBits();
2506 DemandedRHS.clearAllBits();
2507 break;
2510 if ((unsigned)M < NumElts)
2511 DemandedLHS.setBit((unsigned)M % NumElts);
2512 else
2513 DemandedRHS.setBit((unsigned)M % NumElts);
2515 // Known bits are the values that are shared by every demanded element.
2516 if (!!DemandedLHS) {
2517 SDValue LHS = Op.getOperand(0);
2518 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
2519 Known.One &= Known2.One;
2520 Known.Zero &= Known2.Zero;
2522 // If we don't know any bits, early out.
2523 if (Known.isUnknown())
2524 break;
2525 if (!!DemandedRHS) {
2526 SDValue RHS = Op.getOperand(1);
2527 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
2528 Known.One &= Known2.One;
2529 Known.Zero &= Known2.Zero;
2531 break;
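// E.g. (illustrative): mask <0, 5, 2, 7> on 4-element inputs with all
// result elements demanded gives DemandedLHS = {0, 2} and
// DemandedRHS = {1, 3}, so each input is queried only for the lanes the
// shuffle actually reads.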
2533 case ISD::CONCAT_VECTORS: {
2534 // Split DemandedElts and test each of the demanded subvectors.
2535 Known.Zero.setAllBits(); Known.One.setAllBits();
2536 EVT SubVectorVT = Op.getOperand(0).getValueType();
2537 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2538 unsigned NumSubVectors = Op.getNumOperands();
2539 for (unsigned i = 0; i != NumSubVectors; ++i) {
2540 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2541 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
2542 if (!!DemandedSub) {
2543 SDValue Sub = Op.getOperand(i);
2544 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
2545 Known.One &= Known2.One;
2546 Known.Zero &= Known2.Zero;
2548 // If we don't know any bits, early out.
2549 if (Known.isUnknown())
2550 break;
2552 break;
2554 case ISD::INSERT_SUBVECTOR: {
2555 // If we know the element index, demand any elements from the subvector and
2556 // the remainder from the src it's inserted into; otherwise demand them all.
2557 SDValue Src = Op.getOperand(0);
2558 SDValue Sub = Op.getOperand(1);
2559 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2560 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2561 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
2562 Known.One.setAllBits();
2563 Known.Zero.setAllBits();
2564 uint64_t Idx = SubIdx->getZExtValue();
2565 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2566 if (!!DemandedSubElts) {
2567 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
2568 if (Known.isUnknown())
2569 break; // early-out.
2571 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
2572 APInt DemandedSrcElts = DemandedElts & ~SubMask;
2573 if (!!DemandedSrcElts) {
2574 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2575 Known.One &= Known2.One;
2576 Known.Zero &= Known2.Zero;
2578 } else {
2579 Known = computeKnownBits(Sub, Depth + 1);
2580 if (Known.isUnknown())
2581 break; // early-out.
2582 Known2 = computeKnownBits(Src, Depth + 1);
2583 Known.One &= Known2.One;
2584 Known.Zero &= Known2.Zero;
2586 break;
2588 case ISD::EXTRACT_SUBVECTOR: {
2589 // If we know the element index, just demand that subvector's elements;
2590 // otherwise demand them all.
2591 SDValue Src = Op.getOperand(0);
2592 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2593 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2594 APInt DemandedSrc = APInt::getAllOnesValue(NumSrcElts);
2595 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
2596 // Offset the demanded elts by the subvector index.
2597 uint64_t Idx = SubIdx->getZExtValue();
2598 DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2600 Known = computeKnownBits(Src, DemandedSrc, Depth + 1);
2601 break;
2603 case ISD::SCALAR_TO_VECTOR: {
2604 // We know as much about scalar_to_vector as we know about its source,
2605 // which becomes the first element of an otherwise unknown vector.
2606 if (DemandedElts != 1)
2607 break;
2609 SDValue N0 = Op.getOperand(0);
2610 Known = computeKnownBits(N0, Depth + 1);
2611 if (N0.getValueSizeInBits() != BitWidth)
2612 Known = Known.trunc(BitWidth);
2614 break;
2616 case ISD::BITCAST: {
2617 SDValue N0 = Op.getOperand(0);
2618 EVT SubVT = N0.getValueType();
2619 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
2621 // Ignore bitcasts from unsupported types.
2622 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
2623 break;
2625 // Fast handling of 'identity' bitcasts.
2626 if (BitWidth == SubBitWidth) {
2627 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
2628 break;
2631 bool IsLE = getDataLayout().isLittleEndian();
2633 // Bitcast 'small element' vector to 'large element' scalar/vector.
2634 if ((BitWidth % SubBitWidth) == 0) {
2635 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2637 // Collect known bits for the (larger) output by collecting the known
2638 // bits from each set of sub elements and shift these into place.
2639 // We need to separately call computeKnownBits for each set of
2640 // sub elements as the knownbits for each is likely to be different.
2641 unsigned SubScale = BitWidth / SubBitWidth;
2642 APInt SubDemandedElts(NumElts * SubScale, 0);
2643 for (unsigned i = 0; i != NumElts; ++i)
2644 if (DemandedElts[i])
2645 SubDemandedElts.setBit(i * SubScale);
2647 for (unsigned i = 0; i != SubScale; ++i) {
2648 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
2649 Depth + 1);
2650 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
2651 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
2652 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
2656 // Bitcast 'large element' scalar/vector to 'small element' vector.
2657 if ((SubBitWidth % BitWidth) == 0) {
2658 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2660 // Collect known bits for the (smaller) output by collecting the known
2661 // bits from the overlapping larger input elements and extracting the
2662 // sub sections we actually care about.
2663 unsigned SubScale = SubBitWidth / BitWidth;
2664 APInt SubDemandedElts(NumElts / SubScale, 0);
2665 for (unsigned i = 0; i != NumElts; ++i)
2666 if (DemandedElts[i])
2667 SubDemandedElts.setBit(i / SubScale);
2669 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
2671 Known.Zero.setAllBits(); Known.One.setAllBits();
2672 for (unsigned i = 0; i != NumElts; ++i)
2673 if (DemandedElts[i]) {
2674 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
2675 unsigned Offset = (Shifts % SubScale) * BitWidth;
2676 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
2677 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
2678 // If we don't know any bits, early out.
2679 if (Known.isUnknown())
2680 break;
2683 break;
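// E.g. (illustrative, little-endian): bitcasting v2i32 to i64 takes the
// 'small element' path with SubScale = 2; element 0's known bits land in
// result bits [31:0] and element 1's in bits [63:32].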
2685 case ISD::AND:
2686 // If either the LHS or the RHS are Zero, the result is zero.
2687 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2688 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2690 // Output known-1 bits are only known if set in both the LHS & RHS.
2691 Known.One &= Known2.One;
2692 // Output known-0 are known to be clear if zero in either the LHS | RHS.
2693 Known.Zero |= Known2.Zero;
2694 break;
2695 case ISD::OR:
2696 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2697 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2699 // Output known-0 bits are only known if clear in both the LHS & RHS.
2700 Known.Zero &= Known2.Zero;
2701 // Output known-1 are known to be set if set in either the LHS | RHS.
2702 Known.One |= Known2.One;
2703 break;
2704 case ISD::XOR: {
2705 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2706 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2708 // Output known-0 bits are known if clear or set in both the LHS & RHS.
2709 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
2710 // Output known-1 are known to be set if set in only one of the LHS, RHS.
2711 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
2712 Known.Zero = KnownZeroOut;
2713 break;
2715 case ISD::MUL: {
2716 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2717 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2719 // If low bits are zero in either operand, output low known-0 bits.
2720 // Also compute a conservative estimate for high known-0 bits.
2721 // More trickiness is possible, but this is sufficient for the
2722 // interesting case of alignment computation.
2723 unsigned TrailZ = Known.countMinTrailingZeros() +
2724 Known2.countMinTrailingZeros();
2725 unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
2726 Known2.countMinLeadingZeros(),
2727 BitWidth) - BitWidth;
2729 Known.resetAll();
2730 Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
2731 Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
2732 break;
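// Worked example (illustrative): if each operand has at least 2 known
// trailing zero bits (both are multiples of 4), TrailZ = 4, so the
// product is known to be a multiple of 16.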
2734 case ISD::UDIV: {
2735 // For the purposes of computing leading zeros we can conservatively
2736 // treat a udiv as a logical right shift by the power of 2 known to
2737 // be less than the denominator.
2738 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2739 unsigned LeadZ = Known2.countMinLeadingZeros();
2741 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2742 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
2743 if (RHSMaxLeadingZeros != BitWidth)
2744 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
2746 Known.Zero.setHighBits(LeadZ);
2747 break;
2749 case ISD::SELECT:
2750 case ISD::VSELECT:
2751 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2752 // If we don't know any bits, early out.
2753 if (Known.isUnknown())
2754 break;
2755 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
2757 // Only known if known in both the LHS and RHS.
2758 Known.One &= Known2.One;
2759 Known.Zero &= Known2.Zero;
2760 break;
2761 case ISD::SELECT_CC:
2762 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
2763 // If we don't know any bits, early out.
2764 if (Known.isUnknown())
2765 break;
2766 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2768 // Only known if known in both the LHS and RHS.
2769 Known.One &= Known2.One;
2770 Known.Zero &= Known2.Zero;
2771 break;
2772 case ISD::SMULO:
2773 case ISD::UMULO:
2774 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
2775 if (Op.getResNo() != 1)
2776 break;
2777 // The boolean result conforms to getBooleanContents.
2778 // If we know the result of a setcc has the top bits zero, use this info.
2779 // We know that we have an integer-based boolean since these operations
2780 // are only available for integers.
2781 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2782 TargetLowering::ZeroOrOneBooleanContent &&
2783 BitWidth > 1)
2784 Known.Zero.setBitsFrom(1);
2785 break;
2786 case ISD::SETCC:
2787 // If we know the result of a setcc has the top bits zero, use this info.
2788 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2789 TargetLowering::ZeroOrOneBooleanContent &&
2790 BitWidth > 1)
2791 Known.Zero.setBitsFrom(1);
2792 break;
2793 case ISD::SHL:
2794 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2795 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2796 unsigned Shift = ShAmt->getZExtValue();
2797 Known.Zero <<= Shift;
2798 Known.One <<= Shift;
2799 // Low bits are known zero.
2800 Known.Zero.setLowBits(Shift);
2802 break;
2803 case ISD::SRL:
2804 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2805 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2806 unsigned Shift = ShAmt->getZExtValue();
2807 Known.Zero.lshrInPlace(Shift);
2808 Known.One.lshrInPlace(Shift);
2809 // High bits are known zero.
2810 Known.Zero.setHighBits(Shift);
2811 } else if (const APInt *ShMinAmt = getValidMinimumShiftAmountConstant(Op)) {
2812 // Minimum shift high bits are known zero.
2813 Known.Zero.setHighBits(ShMinAmt->getZExtValue());
2815 break;
2816 case ISD::SRA:
2817 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2818 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2819 unsigned Shift = ShAmt->getZExtValue();
2820 // Sign extend known zero/one bit (else is unknown).
2821 Known.Zero.ashrInPlace(Shift);
2822 Known.One.ashrInPlace(Shift);
2824 break;
2825 case ISD::FSHL:
2826 case ISD::FSHR:
2827 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
2828 unsigned Amt = C->getAPIntValue().urem(BitWidth);
2830 // For fshl, 0-shift returns the 1st arg.
2831 // For fshr, 0-shift returns the 2nd arg.
2832 if (Amt == 0) {
2833 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
2834 DemandedElts, Depth + 1);
2835 break;
2838 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2839 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
2840 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2841 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2842 if (Opcode == ISD::FSHL) {
2843 Known.One <<= Amt;
2844 Known.Zero <<= Amt;
2845 Known2.One.lshrInPlace(BitWidth - Amt);
2846 Known2.Zero.lshrInPlace(BitWidth - Amt);
2847 } else {
2848 Known.One <<= BitWidth - Amt;
2849 Known.Zero <<= BitWidth - Amt;
2850 Known2.One.lshrInPlace(Amt);
2851 Known2.Zero.lshrInPlace(Amt);
2853 Known.One |= Known2.One;
2854 Known.Zero |= Known2.Zero;
2856 break;
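// E.g. (illustrative): fshl(X, Y, 8) on i32 concatenates per the formula
// above, so the result's known bits are those of (X << 8) | (Y >> 24).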
2857 case ISD::SIGN_EXTEND_INREG: {
2858 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2859 unsigned EBits = EVT.getScalarSizeInBits();
2861 // Sign extension. Compute the demanded bits in the result that are not
2862 // present in the input.
2863 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
2865 APInt InSignMask = APInt::getSignMask(EBits);
2866 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
2868 // If the sign extended bits are demanded, we know that the sign
2869 // bit is demanded.
2870 InSignMask = InSignMask.zext(BitWidth);
2871 if (NewBits.getBoolValue())
2872 InputDemandedBits |= InSignMask;
2874 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2875 Known.One &= InputDemandedBits;
2876 Known.Zero &= InputDemandedBits;
2878 // If the sign bit of the input is known set or clear, then we know the
2879 // top bits of the result.
2880 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear
2881 Known.Zero |= NewBits;
2882 Known.One &= ~NewBits;
2883 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set
2884 Known.One |= NewBits;
2885 Known.Zero &= ~NewBits;
2886 } else { // Input sign bit unknown
2887 Known.Zero &= ~NewBits;
2888 Known.One &= ~NewBits;
2890 break;
2892 case ISD::CTTZ:
2893 case ISD::CTTZ_ZERO_UNDEF: {
2894 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2895 // If we have a known 1, its position is our upper bound.
2896 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
2897 unsigned LowBits = Log2_32(PossibleTZ) + 1;
2898 Known.Zero.setBitsFrom(LowBits);
2899 break;
2901 case ISD::CTLZ:
2902 case ISD::CTLZ_ZERO_UNDEF: {
2903 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2904 // If we have a known 1, its position is our upper bound.
2905 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
2906 unsigned LowBits = Log2_32(PossibleLZ) + 1;
2907 Known.Zero.setBitsFrom(LowBits);
2908 break;
2910 case ISD::CTPOP: {
2911 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2912 // If we know some of the bits are zero, they can't be one.
2913 unsigned PossibleOnes = Known2.countMaxPopulation();
2914 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
2915 break;
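// Worked example for CTPOP (illustrative): if at most 5 bits of the input
// can be one, the result is at most 5, so Log2_32(5) + 1 = 3 and all bits
// from position 3 upward are known zero.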
2917 case ISD::LOAD: {
2918 LoadSDNode *LD = cast<LoadSDNode>(Op);
2919 const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
2920 if (ISD::isNON_EXTLoad(LD) && Cst) {
2921 // Determine any common known bits from the loaded constant pool value.
2922 Type *CstTy = Cst->getType();
2923 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
2924 // If it's a vector splat, then we can (quickly) reuse the scalar path.
2925 // NOTE: We assume all elements match and none are UNDEF.
2926 if (CstTy->isVectorTy()) {
2927 if (const Constant *Splat = Cst->getSplatValue()) {
2928 Cst = Splat;
2929 CstTy = Cst->getType();
2932 // TODO - do we need to handle different bitwidths?
2933 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
2934 // Iterate across all vector elements finding common known bits.
2935 Known.One.setAllBits();
2936 Known.Zero.setAllBits();
2937 for (unsigned i = 0; i != NumElts; ++i) {
2938 if (!DemandedElts[i])
2939 continue;
2940 if (Constant *Elt = Cst->getAggregateElement(i)) {
2941 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
2942 const APInt &Value = CInt->getValue();
2943 Known.One &= Value;
2944 Known.Zero &= ~Value;
2945 continue;
2947 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
2948 APInt Value = CFP->getValueAPF().bitcastToAPInt();
2949 Known.One &= Value;
2950 Known.Zero &= ~Value;
2951 continue;
2954 Known.One.clearAllBits();
2955 Known.Zero.clearAllBits();
2956 break;
2958 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
2959 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
2960 const APInt &Value = CInt->getValue();
2961 Known.One = Value;
2962 Known.Zero = ~Value;
2963 } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
2964 APInt Value = CFP->getValueAPF().bitcastToAPInt();
2965 Known.One = Value;
2966 Known.Zero = ~Value;
2970 } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
2971 // If this is a ZEXTLoad and we are looking at the loaded value.
2972 EVT VT = LD->getMemoryVT();
2973 unsigned MemBits = VT.getScalarSizeInBits();
2974 Known.Zero.setBitsFrom(MemBits);
2975 } else if (const MDNode *Ranges = LD->getRanges()) {
2976 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
2977 computeKnownBitsFromRangeMetadata(*Ranges, Known);
2979 break;
2981 case ISD::ZERO_EXTEND_VECTOR_INREG: {
2982 EVT InVT = Op.getOperand(0).getValueType();
2983 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
2984 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
2985 Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */);
2986 break;
2988 case ISD::ZERO_EXTEND: {
2989 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2990 Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */);
2991 break;
2993 case ISD::SIGN_EXTEND_VECTOR_INREG: {
2994 EVT InVT = Op.getOperand(0).getValueType();
2995 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
2996 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
2997 // If the sign bit is known to be zero or one, then sext will extend
2998 // it to the top bits, else it will just zext.
2999 Known = Known.sext(BitWidth);
3000 break;
3002 case ISD::SIGN_EXTEND: {
3003 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3004 // If the sign bit is known to be zero or one, then sext will extend
3005 // it to the top bits, else it will just zext.
3006 Known = Known.sext(BitWidth);
3007 break;
3009 case ISD::ANY_EXTEND: {
3010 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3011 Known = Known.zext(BitWidth, false /* ExtendedBitsAreKnownZero */);
3012 break;
3014 case ISD::TRUNCATE: {
3015 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3016 Known = Known.trunc(BitWidth);
3017 break;
3019 case ISD::AssertZext: {
3020 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3021 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
3022 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3023 Known.Zero |= (~InMask);
3024 Known.One &= (~Known.Zero);
3025 break;
3027 case ISD::FGETSIGN:
3028 // All bits are zero except the low bit.
3029 Known.Zero.setBitsFrom(1);
3030 break;
3031 case ISD::USUBO:
3032 case ISD::SSUBO:
3033 if (Op.getResNo() == 1) {
3034 // If we know the result of a setcc has the top bits zero, use this info.
3035 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3036 TargetLowering::ZeroOrOneBooleanContent &&
3037 BitWidth > 1)
3038 Known.Zero.setBitsFrom(1);
3039 break;
3041 LLVM_FALLTHROUGH;
3042 case ISD::SUB:
3043 case ISD::SUBC: {
3044 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3045 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3046 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false,
3047 Known, Known2);
3048 break;
3050 case ISD::UADDO:
3051 case ISD::SADDO:
3052 case ISD::ADDCARRY:
3053 if (Op.getResNo() == 1) {
3054 // If we know the result of a setcc has the top bits zero, use this info.
3055 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3056 TargetLowering::ZeroOrOneBooleanContent &&
3057 BitWidth > 1)
3058 Known.Zero.setBitsFrom(1);
3059 break;
3061 LLVM_FALLTHROUGH;
3062 case ISD::ADD:
3063 case ISD::ADDC:
3064 case ISD::ADDE: {
3065 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
3067 // With ADDE and ADDCARRY, a carry bit may be added in.
3068 KnownBits Carry(1);
3069 if (Opcode == ISD::ADDE)
3070 // Can't track carry from glue, set carry to unknown.
3071 Carry.resetAll();
3072 else if (Opcode == ISD::ADDCARRY)
3073 // TODO: Compute known bits for the carry operand. Not sure if it is worth
3074 // the trouble (how often will we find a known carry bit). And I haven't
3075 // tested this very much yet, but something like this might work:
3076 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3077 // Carry = Carry.zextOrTrunc(1, false);
3078 Carry.resetAll();
3079 else
3080 Carry.setAllZero();
3082 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3083 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3084 Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
3085 break;
3087 case ISD::SREM:
3088 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
3089 const APInt &RA = Rem->getAPIntValue().abs();
3090 if (RA.isPowerOf2()) {
3091 APInt LowBits = RA - 1;
3092 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3094 // The low bits of the first operand are unchanged by the srem.
3095 Known.Zero = Known2.Zero & LowBits;
3096 Known.One = Known2.One & LowBits;
3098 // If the first operand is non-negative or has all low bits zero, then
3099 // the upper bits are all zero.
3100 if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
3101 Known.Zero |= ~LowBits;
3103 // If the first operand is negative and not all low bits are zero, then
3104 // the upper bits are all one.
3105 if (Known2.isNegative() && LowBits.intersects(Known2.One))
3106 Known.One |= ~LowBits;
3107 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
3110 break;
3111 case ISD::UREM: {
3112 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
3113 const APInt &RA = Rem->getAPIntValue();
3114 if (RA.isPowerOf2()) {
3115 APInt LowBits = (RA - 1);
3116 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3118 // The upper bits are all zero, the lower ones are unchanged.
3119 Known.Zero = Known2.Zero | ~LowBits;
3120 Known.One = Known2.One & LowBits;
3121 break;
3125 // Since the result is less than or equal to either operand, any leading
3126 // zero bits in either operand must also exist in the result.
3127 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3128 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3130 uint32_t Leaders =
3131 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
3132 Known.resetAll();
3133 Known.Zero.setHighBits(Leaders);
3134 break;
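// E.g. (illustrative): for X urem 8, LowBits = 7, so the result's low 3
// bits equal X's low 3 bits and all higher bits are known zero.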
3136 case ISD::EXTRACT_ELEMENT: {
3137 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3138 const unsigned Index = Op.getConstantOperandVal(1);
3139 const unsigned EltBitWidth = Op.getValueSizeInBits();
3141 // Remove low part of known bits mask
3142 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3143 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3145 // Remove high part of known bit mask
3146 Known = Known.trunc(EltBitWidth);
3147 break;
3149 case ISD::EXTRACT_VECTOR_ELT: {
3150 SDValue InVec = Op.getOperand(0);
3151 SDValue EltNo = Op.getOperand(1);
3152 EVT VecVT = InVec.getValueType();
3153 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
3154 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3155 // If BitWidth > EltBitWidth the value is any-extended, so we do not know
3156 // anything about the extended bits.
3157 if (BitWidth > EltBitWidth)
3158 Known = Known.trunc(EltBitWidth);
3159 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3160 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
3161 // If we know the element index, just demand that vector element.
3162 unsigned Idx = ConstEltNo->getZExtValue();
3163 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
3164 Known = computeKnownBits(InVec, DemandedElt, Depth + 1);
3165 } else {
3166 // Unknown element index, so ignore DemandedElts and demand them all.
3167 Known = computeKnownBits(InVec, Depth + 1);
3169 if (BitWidth > EltBitWidth)
3170 Known = Known.zext(BitWidth, false /* => any extend */);
3171 break;
3173 case ISD::INSERT_VECTOR_ELT: {
3174 SDValue InVec = Op.getOperand(0);
3175 SDValue InVal = Op.getOperand(1);
3176 SDValue EltNo = Op.getOperand(2);
3178 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3179 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3180 // If we know the element index, split the demand between the
3181 // source vector and the inserted element.
3182 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth);
3183 unsigned EltIdx = CEltNo->getZExtValue();
3185 // If we demand the inserted element then add its common known bits.
3186 if (DemandedElts[EltIdx]) {
3187 Known2 = computeKnownBits(InVal, Depth + 1);
3188 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
3189 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
3192 // If we demand the source vector then add its common known bits, ensuring
3193 // that we don't demand the inserted element.
3194 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
3195 if (!!VectorElts) {
3196 Known2 = computeKnownBits(InVec, VectorElts, Depth + 1);
3197 Known.One &= Known2.One;
3198 Known.Zero &= Known2.Zero;
3200 } else {
3201 // Unknown element index, so ignore DemandedElts and demand them all.
3202 Known = computeKnownBits(InVec, Depth + 1);
3203 Known2 = computeKnownBits(InVal, Depth + 1);
3204 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
3205 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
3207 break;
3209 case ISD::BITREVERSE: {
3210 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3211 Known.Zero = Known2.Zero.reverseBits();
3212 Known.One = Known2.One.reverseBits();
3213 break;
3215 case ISD::BSWAP: {
3216 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3217 Known.Zero = Known2.Zero.byteSwap();
3218 Known.One = Known2.One.byteSwap();
3219 break;
3221 case ISD::ABS: {
3222 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3224 // If the source's MSB is zero then we know the rest of the bits already.
3225 if (Known2.isNonNegative()) {
3226 Known.Zero = Known2.Zero;
3227 Known.One = Known2.One;
3228 break;
3231     // We only know that the absolute value's MSB will be zero iff there is
3232 // a set bit that isn't the sign bit (otherwise it could be INT_MIN).
3233 Known2.One.clearSignBit();
3234 if (Known2.One.getBoolValue()) {
3235 Known.Zero = APInt::getSignMask(BitWidth);
3236 break;
3238 break;
3240 case ISD::UMIN: {
3241 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3242 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3244     // UMIN - the result is no larger than either input, so it has at least
3245     // the maximum number of known leading zero bits of the inputs.
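// Worked example (illustrative): on i32, umin of a value with 24 known
// leading zeros and one with 16 has at least max(24, 16) = 24 leading zeros.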
3246 unsigned LeadZero = Known.countMinLeadingZeros();
3247 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros());
3249 Known.Zero &= Known2.Zero;
3250 Known.One &= Known2.One;
3251 Known.Zero.setHighBits(LeadZero);
3252 break;
3254 case ISD::UMAX: {
3255 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3256 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3258     // UMAX - the result is no smaller than either input, so it has at least
3259     // the maximum number of known leading one bits of the inputs.
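// Worked example (illustrative): if one i8 operand starts with 3 known one
// bits and the other with 2, the umax starts with at least max(3, 2) = 3.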
3260 unsigned LeadOne = Known.countMinLeadingOnes();
3261 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes());
3263 Known.Zero &= Known2.Zero;
3264 Known.One &= Known2.One;
3265 Known.One.setHighBits(LeadOne);
3266 break;
3268 case ISD::SMIN:
3269 case ISD::SMAX: {
3270 // If we have a clamp pattern, we know that the number of sign bits will be
3271 // the minimum of the clamp min/max range.
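// Worked example (illustrative): smax(smin(X, 255), 0) clamps X to [0, 255];
// both bounds are non-negative with at least 24 sign bits on i32, so the
// top 24 bits of the result become known zero below.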
3272 bool IsMax = (Opcode == ISD::SMAX);
3273 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3274 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3275 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3276 CstHigh =
3277 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3278 if (CstLow && CstHigh) {
3279 if (!IsMax)
3280 std::swap(CstLow, CstHigh);
3282 const APInt &ValueLow = CstLow->getAPIntValue();
3283 const APInt &ValueHigh = CstHigh->getAPIntValue();
3284 if (ValueLow.sle(ValueHigh)) {
3285 unsigned LowSignBits = ValueLow.getNumSignBits();
3286 unsigned HighSignBits = ValueHigh.getNumSignBits();
3287 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3288 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3289 Known.One.setHighBits(MinSignBits);
3290 break;
3292 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3293 Known.Zero.setHighBits(MinSignBits);
3294 break;
3299 // Fallback - just get the shared known bits of the operands.
3300 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3301 if (Known.isUnknown()) break; // Early-out
3302 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3303 Known.Zero &= Known2.Zero;
3304 Known.One &= Known2.One;
3305 break;
3307 case ISD::FrameIndex:
3308 case ISD::TargetFrameIndex:
3309 TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth);
3310 break;
3312 default:
3313 if (Opcode < ISD::BUILTIN_OP_END)
3314 break;
3315 LLVM_FALLTHROUGH;
3316 case ISD::INTRINSIC_WO_CHAIN:
3317 case ISD::INTRINSIC_W_CHAIN:
3318 case ISD::INTRINSIC_VOID:
3319 // Allow the target to implement this method for its nodes.
3320 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3321 break;
3324 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3325 return Known;
3328 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3329 SDValue N1) const {
3330   // X + 0 never overflows
3331 if (isNullConstant(N1))
3332 return OFK_Never;
3334 KnownBits N1Known = computeKnownBits(N1);
3335 if (N1Known.Zero.getBoolValue()) {
3336 KnownBits N0Known = computeKnownBits(N0);
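// Note (illustrative): ~Zero is the largest value each operand can take.
// E.g. if N0 is known to fit in 8 bits and N1 in 4 bits of an i32, the
// maxima 0xFF + 0xF cannot wrap, so the addition can never overflow.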
3338 bool overflow;
3339 (void)(~N0Known.Zero).uadd_ov(~N1Known.Zero, overflow);
3340 if (!overflow)
3341 return OFK_Never;
3344   // mulhi + 1 never overflows
3345 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3346 (~N1Known.Zero & 0x01) == ~N1Known.Zero)
3347 return OFK_Never;
3349 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3350 KnownBits N0Known = computeKnownBits(N0);
3352 if ((~N0Known.Zero & 0x01) == ~N0Known.Zero)
3353 return OFK_Never;
3356 return OFK_Sometime;
3359 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3360 EVT OpVT = Val.getValueType();
3361 unsigned BitWidth = OpVT.getScalarSizeInBits();
3363 // Is the constant a known power of 2?
3364 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3365 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3367 // A left-shift of a constant one will have exactly one bit set because
3368 // shifting the bit off the end is undefined.
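// Worked example (illustrative): (shl 1, X) yields 0b1, 0b10, 0b100, ...
// for any in-range X, each of which has exactly one bit set.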
3369 if (Val.getOpcode() == ISD::SHL) {
3370 auto *C = isConstOrConstSplat(Val.getOperand(0));
3371 if (C && C->getAPIntValue() == 1)
3372 return true;
3375 // Similarly, a logical right-shift of a constant sign-bit will have exactly
3376 // one bit set.
3377 if (Val.getOpcode() == ISD::SRL) {
3378 auto *C = isConstOrConstSplat(Val.getOperand(0));
3379 if (C && C->getAPIntValue().isSignMask())
3380 return true;
3383 // Are all operands of a build vector constant powers of two?
3384 if (Val.getOpcode() == ISD::BUILD_VECTOR)
3385 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
3386 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
3387 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3388 return false;
3390 return true;
3392 // More could be done here, though the above checks are enough
3393 // to handle some common cases.
3395 // Fall back to computeKnownBits to catch other known cases.
3396 KnownBits Known = computeKnownBits(Val);
3397 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
3400 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
3401 EVT VT = Op.getValueType();
3402 APInt DemandedElts = VT.isVector()
3403 ? APInt::getAllOnesValue(VT.getVectorNumElements())
3404 : APInt(1, 1);
3405 return ComputeNumSignBits(Op, DemandedElts, Depth);
3408 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
3409 unsigned Depth) const {
3410 EVT VT = Op.getValueType();
3411 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
3412 unsigned VTBits = VT.getScalarSizeInBits();
3413 unsigned NumElts = DemandedElts.getBitWidth();
3414 unsigned Tmp, Tmp2;
3415 unsigned FirstAnswer = 1;
3417 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3418 const APInt &Val = C->getAPIntValue();
3419 return Val.getNumSignBits();
3422 if (Depth >= MaxRecursionDepth)
3423 return 1; // Limit search depth.
3425 if (!DemandedElts)
3426 return 1; // No demanded elts, better to assume we don't know anything.
3428 unsigned Opcode = Op.getOpcode();
3429 switch (Opcode) {
3430 default: break;
3431 case ISD::AssertSext:
3432 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3433 return VTBits-Tmp+1;
3434 case ISD::AssertZext:
3435 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3436 return VTBits-Tmp;
3438 case ISD::BUILD_VECTOR:
3439 Tmp = VTBits;
3440 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
3441 if (!DemandedElts[i])
3442 continue;
3444 SDValue SrcOp = Op.getOperand(i);
3445 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1);
3447 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
3448 if (SrcOp.getValueSizeInBits() != VTBits) {
3449 assert(SrcOp.getValueSizeInBits() > VTBits &&
3450 "Expected BUILD_VECTOR implicit truncation");
3451 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
3452 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
3454 Tmp = std::min(Tmp, Tmp2);
3456 return Tmp;
3458 case ISD::VECTOR_SHUFFLE: {
3459 // Collect the minimum number of sign bits that are shared by every vector
3460 // element referenced by the shuffle.
3461 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3462 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3463 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3464 for (unsigned i = 0; i != NumElts; ++i) {
3465 int M = SVN->getMaskElt(i);
3466 if (!DemandedElts[i])
3467 continue;
3468 // For UNDEF elements, we don't know anything about the common state of
3469 // the shuffle result.
3470 if (M < 0)
3471 return 1;
3472 if ((unsigned)M < NumElts)
3473 DemandedLHS.setBit((unsigned)M % NumElts);
3474 else
3475 DemandedRHS.setBit((unsigned)M % NumElts);
3477 Tmp = std::numeric_limits<unsigned>::max();
3478 if (!!DemandedLHS)
3479 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3480 if (!!DemandedRHS) {
3481 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3482 Tmp = std::min(Tmp, Tmp2);
3484 // If we don't know anything, early out and try computeKnownBits fall-back.
3485 if (Tmp == 1)
3486 break;
3487 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3488 return Tmp;
3491 case ISD::BITCAST: {
3492 SDValue N0 = Op.getOperand(0);
3493 EVT SrcVT = N0.getValueType();
3494 unsigned SrcBits = SrcVT.getScalarSizeInBits();
3496     // Ignore bitcasts from unsupported types.
3497 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3498 break;
3500 // Fast handling of 'identity' bitcasts.
3501 if (VTBits == SrcBits)
3502 return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3504 bool IsLE = getDataLayout().isLittleEndian();
3506 // Bitcast 'large element' scalar/vector to 'small element' vector.
3507 if ((SrcBits % VTBits) == 0) {
3508 assert(VT.isVector() && "Expected bitcast to vector");
3510 unsigned Scale = SrcBits / VTBits;
3511 APInt SrcDemandedElts(NumElts / Scale, 0);
3512 for (unsigned i = 0; i != NumElts; ++i)
3513 if (DemandedElts[i])
3514 SrcDemandedElts.setBit(i / Scale);
3516 // Fast case - sign splat can be simply split across the small elements.
3517 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
3518 if (Tmp == SrcBits)
3519 return VTBits;
3521 // Slow case - determine how far the sign extends into each sub-element.
3522 Tmp2 = VTBits;
3523 for (unsigned i = 0; i != NumElts; ++i)
3524 if (DemandedElts[i]) {
3525 unsigned SubOffset = i % Scale;
3526 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
3527 SubOffset = SubOffset * VTBits;
3528 if (Tmp <= SubOffset)
3529 return 1;
3530 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
3532 return Tmp2;
3534 break;
3537 case ISD::SIGN_EXTEND:
3538 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3539 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3540 case ISD::SIGN_EXTEND_INREG:
3541 // Max of the input and what this extends.
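// Worked example (illustrative): sign_extend_inreg from i8 inside an i32
// guarantees at least 32 - 8 + 1 = 25 sign bits, or more if the input
// already provides them.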
3542 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3543 Tmp = VTBits-Tmp+1;
3544 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3545 return std::max(Tmp, Tmp2);
3546 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3547 SDValue Src = Op.getOperand(0);
3548 EVT SrcVT = Src.getValueType();
3549 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
3550 Tmp = VTBits - SrcVT.getScalarSizeInBits();
3551 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3554 case ISD::SRA:
3555 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3556 // SRA X, C -> adds C sign bits.
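// Worked example (illustrative): an i32 with 5 known sign bits shifted
// right arithmetically by 4 has 5 + 4 = 9 sign bits, capped at VTBits.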
3557 if (ConstantSDNode *C =
3558 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
3559 APInt ShiftVal = C->getAPIntValue();
3560 ShiftVal += Tmp;
3561 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
3563 return Tmp;
3564 case ISD::SHL:
3565 if (ConstantSDNode *C =
3566 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
3567 // shl destroys sign bits.
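// Worked example (illustrative): an i32 with 10 sign bits shifted left by 3
// keeps 10 - 3 = 7 sign bits; a shift of 10 or more leaves nothing usable.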
3568 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3569 if (C->getAPIntValue().uge(VTBits) || // Bad shift.
3570 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out.
3571 return Tmp - C->getZExtValue();
3573 break;
3574 case ISD::AND:
3575 case ISD::OR:
3576 case ISD::XOR: // NOT is handled here.
3577 // Logical binary ops preserve the number of sign bits at the worst.
3578 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3579 if (Tmp != 1) {
3580 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3581 FirstAnswer = std::min(Tmp, Tmp2);
3582 // We computed what we know about the sign bits as our first
3583 // answer. Now proceed to the generic code that uses
3584 // computeKnownBits, and pick whichever answer is better.
3586 break;
3588 case ISD::SELECT:
3589 case ISD::VSELECT:
3590 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3591 if (Tmp == 1) return 1; // Early out.
3592 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3593 return std::min(Tmp, Tmp2);
3594 case ISD::SELECT_CC:
3595 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3596 if (Tmp == 1) return 1; // Early out.
3597 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
3598 return std::min(Tmp, Tmp2);
3600 case ISD::SMIN:
3601 case ISD::SMAX: {
3602 // If we have a clamp pattern, we know that the number of sign bits will be
3603 // the minimum of the clamp min/max range.
3604 bool IsMax = (Opcode == ISD::SMAX);
3605 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3606 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3607 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3608 CstHigh =
3609 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3610 if (CstLow && CstHigh) {
3611 if (!IsMax)
3612 std::swap(CstLow, CstHigh);
3613 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
3614 Tmp = CstLow->getAPIntValue().getNumSignBits();
3615 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
3616 return std::min(Tmp, Tmp2);
3620 // Fallback - just get the minimum number of sign bits of the operands.
3621 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3622 if (Tmp == 1)
3623 return 1; // Early out.
3624 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3625 return std::min(Tmp, Tmp2);
3627 case ISD::UMIN:
3628 case ISD::UMAX:
3629 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3630 if (Tmp == 1)
3631 return 1; // Early out.
3632 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3633 return std::min(Tmp, Tmp2);
3634 case ISD::SADDO:
3635 case ISD::UADDO:
3636 case ISD::SSUBO:
3637 case ISD::USUBO:
3638 case ISD::SMULO:
3639 case ISD::UMULO:
3640 if (Op.getResNo() != 1)
3641 break;
3642 // The boolean result conforms to getBooleanContents. Fall through.
3643 // If setcc returns 0/-1, all bits are sign bits.
3644 // We know that we have an integer-based boolean since these operations
3645     // are only available for integer types.
3646 if (TLI->getBooleanContents(VT.isVector(), false) ==
3647 TargetLowering::ZeroOrNegativeOneBooleanContent)
3648 return VTBits;
3649 break;
3650 case ISD::SETCC:
3651 // If setcc returns 0/-1, all bits are sign bits.
3652 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3653 TargetLowering::ZeroOrNegativeOneBooleanContent)
3654 return VTBits;
3655 break;
3656 case ISD::ROTL:
3657 case ISD::ROTR:
3658 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3659 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3662       // Handle a rotate-right by N as a rotate-left by VTBits - N.
3662 if (Opcode == ISD::ROTR)
3663 RotAmt = (VTBits - RotAmt) % VTBits;
3665 // If we aren't rotating out all of the known-in sign bits, return the
3666 // number that are left. This handles rotl(sext(x), 1) for example.
3667 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3668 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3670 break;
3671 case ISD::ADD:
3672 case ISD::ADDC:
3673 // Add can have at most one carry bit. Thus we know that the output
3674 // is, at worst, one more bit than the inputs.
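// Worked example (illustrative): adding two i32 values with 12 sign bits
// each can carry into the sign-extended region, so only
// min(12, 12) - 1 = 11 sign bits are guaranteed.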
3675 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3676 if (Tmp == 1) return 1; // Early out.
3678 // Special case decrementing a value (ADD X, -1):
3679 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3680 if (CRHS->isAllOnesValue()) {
3681 KnownBits Known = computeKnownBits(Op.getOperand(0), Depth+1);
3683 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3684 // sign bits set.
3685 if ((Known.Zero | 1).isAllOnesValue())
3686 return VTBits;
3688         // If we are subtracting one from a non-negative number, there is no
3689         // carry out of the result.
3690 if (Known.isNonNegative())
3691 return Tmp;
3694 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3695 if (Tmp2 == 1) return 1;
3696 return std::min(Tmp, Tmp2)-1;
3698 case ISD::SUB:
3699 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3700 if (Tmp2 == 1) return 1;
3702 // Handle NEG.
3703 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
3704 if (CLHS->isNullValue()) {
3705 KnownBits Known = computeKnownBits(Op.getOperand(1), Depth+1);
3706 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3707 // sign bits set.
3708 if ((Known.Zero | 1).isAllOnesValue())
3709 return VTBits;
3711 // If the input is known to be positive (the sign bit is known clear),
3712 // the output of the NEG has the same number of sign bits as the input.
3713 if (Known.isNonNegative())
3714 return Tmp2;
3716 // Otherwise, we treat this like a SUB.
3719 // Sub can have at most one carry bit. Thus we know that the output
3720 // is, at worst, one more bit than the inputs.
3721 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3722 if (Tmp == 1) return 1; // Early out.
3723 return std::min(Tmp, Tmp2)-1;
3724 case ISD::MUL: {
3725 // The output of the Mul can be at most twice the valid bits in the inputs.
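// Worked example (illustrative): two i32 operands with 25 sign bits each
// have 8 valid bits each, so the product needs at most 16 valid bits and
// keeps 32 - 16 + 1 = 17 sign bits.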
3726 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3727 if (SignBitsOp0 == 1)
3728 break;
3729 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3730 if (SignBitsOp1 == 1)
3731 break;
3732 unsigned OutValidBits =
3733 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
3734 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
3736 case ISD::TRUNCATE: {
3737     // Check whether the sign bits of the source reach down into the truncated value.
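// Worked example (illustrative): truncating an i64 with 40 sign bits to i32
// drops the 32 high bits, leaving 40 - (64 - 32) = 8 sign bits.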
3738 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3739 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3740 if (NumSrcSignBits > (NumSrcBits - VTBits))
3741 return NumSrcSignBits - (NumSrcBits - VTBits);
3742 break;
3744 case ISD::EXTRACT_ELEMENT: {
3745 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3746 const int BitWidth = Op.getValueSizeInBits();
3747 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3749     // Get the reverse index (starting from 1); the Op1 value indexes elements
3750     // from the little end, while the sign sits at the big end.
3751 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
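// Worked example (illustrative): extracting the low i32 (index 0) of an i64
// with 40 sign bits gives rIndex = 1 and 40 - 32 = 8 sign bits; extracting
// the high i32 (index 1) gives rIndex = 0, clamped to the full 32 bits.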
3753     // If the sign portion extends into our element, the subtraction gives the
3754     // correct result; otherwise the clamp below corrects the out-of-range value.
3755 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3757 case ISD::INSERT_VECTOR_ELT: {
3758 SDValue InVec = Op.getOperand(0);
3759 SDValue InVal = Op.getOperand(1);
3760 SDValue EltNo = Op.getOperand(2);
3762 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3763 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3764 // If we know the element index, split the demand between the
3765 // source vector and the inserted element.
3766 unsigned EltIdx = CEltNo->getZExtValue();
3768 // If we demand the inserted element then get its sign bits.
3769 Tmp = std::numeric_limits<unsigned>::max();
3770 if (DemandedElts[EltIdx]) {
3771 // TODO - handle implicit truncation of inserted elements.
3772 if (InVal.getScalarValueSizeInBits() != VTBits)
3773 break;
3774 Tmp = ComputeNumSignBits(InVal, Depth + 1);
3777 // If we demand the source vector then get its sign bits, and determine
3778 // the minimum.
3779 APInt VectorElts = DemandedElts;
3780 VectorElts.clearBit(EltIdx);
3781 if (!!VectorElts) {
3782 Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1);
3783 Tmp = std::min(Tmp, Tmp2);
3785 } else {
3786 // Unknown element index, so ignore DemandedElts and demand them all.
3787 Tmp = ComputeNumSignBits(InVec, Depth + 1);
3788 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3789 Tmp = std::min(Tmp, Tmp2);
3791 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3792 return Tmp;
3794 case ISD::EXTRACT_VECTOR_ELT: {
3795 SDValue InVec = Op.getOperand(0);
3796 SDValue EltNo = Op.getOperand(1);
3797 EVT VecVT = InVec.getValueType();
3798 const unsigned BitWidth = Op.getValueSizeInBits();
3799 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3800 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3802     // If BitWidth > EltBitWidth the value is any-extended, and we do not know
3803 // anything about sign bits. But if the sizes match we can derive knowledge
3804 // about sign bits from the vector operand.
3805 if (BitWidth != EltBitWidth)
3806 break;
3808 // If we know the element index, just demand that vector element, else for
3809 // an unknown element index, ignore DemandedElts and demand them all.
3810 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3811 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3812 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3813 DemandedSrcElts =
3814 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3816 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3818 case ISD::EXTRACT_SUBVECTOR: {
3819     // If we know the subvector index, just demand the corresponding source
3820     // elements, otherwise demand them all.
3821 SDValue Src = Op.getOperand(0);
3822 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
3823 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3824 APInt DemandedSrc = APInt::getAllOnesValue(NumSrcElts);
3825 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
3826 // Offset the demanded elts by the subvector index.
3827 uint64_t Idx = SubIdx->getZExtValue();
3828 DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
3830 return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
3832 case ISD::CONCAT_VECTORS: {
3833 // Determine the minimum number of sign bits across all demanded
3834 // elts of the input vectors. Early out if the result is already 1.
3835 Tmp = std::numeric_limits<unsigned>::max();
3836 EVT SubVectorVT = Op.getOperand(0).getValueType();
3837 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3838 unsigned NumSubVectors = Op.getNumOperands();
3839 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3840 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3841 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3842 if (!DemandedSub)
3843 continue;
3844 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3845 Tmp = std::min(Tmp, Tmp2);
3847 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3848 return Tmp;
3850 case ISD::INSERT_SUBVECTOR: {
3851 // If we know the element index, demand any elements from the subvector and
3852     // the remainder from the src it's inserted into, otherwise demand them all.
3853 SDValue Src = Op.getOperand(0);
3854 SDValue Sub = Op.getOperand(1);
3855 auto *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
3856 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3857 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
3858 Tmp = std::numeric_limits<unsigned>::max();
3859 uint64_t Idx = SubIdx->getZExtValue();
3860 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3861 if (!!DemandedSubElts) {
3862 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
3863 if (Tmp == 1) return 1; // early-out
3865 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
3866 APInt DemandedSrcElts = DemandedElts & ~SubMask;
3867 if (!!DemandedSrcElts) {
3868 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3869 Tmp = std::min(Tmp, Tmp2);
3871 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3872 return Tmp;
3875 // Not able to determine the index so just assume worst case.
3876 Tmp = ComputeNumSignBits(Sub, Depth + 1);
3877 if (Tmp == 1) return 1; // early-out
3878 Tmp2 = ComputeNumSignBits(Src, Depth + 1);
3879 Tmp = std::min(Tmp, Tmp2);
3880 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3881 return Tmp;
3885 // If we are looking at the loaded value of the SDNode.
3886 if (Op.getResNo() == 0) {
3887     // Handle LOADX separately here. The EXTLOAD case will fall through.
3888 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
3889 unsigned ExtType = LD->getExtensionType();
3890 switch (ExtType) {
3891 default: break;
3892 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
3893 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3894 return VTBits - Tmp + 1;
3895 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
3896 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3897 return VTBits - Tmp;
3898 case ISD::NON_EXTLOAD:
3899 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
3900 // We only need to handle vectors - computeKnownBits should handle
3901 // scalar cases.
3902 Type *CstTy = Cst->getType();
3903 if (CstTy->isVectorTy() &&
3904 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) {
3905 Tmp = VTBits;
3906 for (unsigned i = 0; i != NumElts; ++i) {
3907 if (!DemandedElts[i])
3908 continue;
3909 if (Constant *Elt = Cst->getAggregateElement(i)) {
3910 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3911 const APInt &Value = CInt->getValue();
3912 Tmp = std::min(Tmp, Value.getNumSignBits());
3913 continue;
3915 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
3916 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3917 Tmp = std::min(Tmp, Value.getNumSignBits());
3918 continue;
3921 // Unknown type. Conservatively assume no bits match sign bit.
3922 return 1;
3924 return Tmp;
3927 break;
3932 // Allow the target to implement this method for its nodes.
3933 if (Opcode >= ISD::BUILTIN_OP_END ||
3934 Opcode == ISD::INTRINSIC_WO_CHAIN ||
3935 Opcode == ISD::INTRINSIC_W_CHAIN ||
3936 Opcode == ISD::INTRINSIC_VOID) {
3937 unsigned NumBits =
3938 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
3939 if (NumBits > 1)
3940 FirstAnswer = std::max(FirstAnswer, NumBits);
3943 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3944 // use this information.
3945 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
3947 APInt Mask;
3948 if (Known.isNonNegative()) { // sign bit is 0
3949 Mask = Known.Zero;
3950 } else if (Known.isNegative()) { // sign bit is 1;
3951 Mask = Known.One;
3952 } else {
3953 // Nothing known.
3954 return FirstAnswer;
3957 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
3958 // the number of identical bits in the top of the input value.
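// Worked example (illustrative): for a negative i32 whose top three bits
// are known one, Mask = 111..., ~Mask begins with three zeros, and
// countLeadingZeros() reports at least 3 sign bits.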
3959 Mask = ~Mask;
3960 Mask <<= Mask.getBitWidth()-VTBits;
3961 // Return # leading zeros. We use 'min' here in case Val was zero before
3962   // shifting. We don't want to return e.g. '64' for an i32 value of 0.
3963 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
3966 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
3967 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3968 !isa<ConstantSDNode>(Op.getOperand(1)))
3969 return false;
3971 if (Op.getOpcode() == ISD::OR &&
3972 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1)))
3973 return false;
3975 return true;
3978 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
3979 // If we're told that NaNs won't happen, assume they won't.
3980 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
3981 return true;
3983 if (Depth >= MaxRecursionDepth)
3984 return false; // Limit search depth.
3986 // TODO: Handle vectors.
3987 // If the value is a constant, we can obviously see if it is a NaN or not.
3988 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
3989 return !C->getValueAPF().isNaN() ||
3990 (SNaN && !C->getValueAPF().isSignaling());
3993 unsigned Opcode = Op.getOpcode();
3994 switch (Opcode) {
3995 case ISD::FADD:
3996 case ISD::FSUB:
3997 case ISD::FMUL:
3998 case ISD::FDIV:
3999 case ISD::FREM:
4000 case ISD::FSIN:
4001 case ISD::FCOS: {
4002 if (SNaN)
4003 return true;
4004 // TODO: Need isKnownNeverInfinity
4005 return false;
4007 case ISD::FCANONICALIZE:
4008 case ISD::FEXP:
4009 case ISD::FEXP2:
4010 case ISD::FTRUNC:
4011 case ISD::FFLOOR:
4012 case ISD::FCEIL:
4013 case ISD::FROUND:
4014 case ISD::FRINT:
4015 case ISD::FNEARBYINT: {
4016 if (SNaN)
4017 return true;
4018 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4020 case ISD::FABS:
4021 case ISD::FNEG:
4022 case ISD::FCOPYSIGN: {
4023 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4025 case ISD::SELECT:
4026 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4027 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4028 case ISD::FP_EXTEND:
4029 case ISD::FP_ROUND: {
4030 if (SNaN)
4031 return true;
4032 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4034 case ISD::SINT_TO_FP:
4035 case ISD::UINT_TO_FP:
4036 return true;
4037 case ISD::FMA:
4038 case ISD::FMAD: {
4039 if (SNaN)
4040 return true;
4041 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4042 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4043 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4045   case ISD::FSQRT: // Need to know the operand is non-negative.
4046 case ISD::FLOG:
4047 case ISD::FLOG2:
4048 case ISD::FLOG10:
4049 case ISD::FPOWI:
4050 case ISD::FPOW: {
4051 if (SNaN)
4052 return true;
4053 // TODO: Refine on operand
4054 return false;
4056 case ISD::FMINNUM:
4057 case ISD::FMAXNUM: {
4058     // Only one operand needs to be known not-NaN, since it will be returned
4059     // if the other ends up being one.
4060 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4061 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4063 case ISD::FMINNUM_IEEE:
4064 case ISD::FMAXNUM_IEEE: {
4065 if (SNaN)
4066 return true;
4067 // This can return a NaN if either operand is an sNaN, or if both operands
4068 // are NaN.
4069 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4070 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4071 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4072 isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4074 case ISD::FMINIMUM:
4075 case ISD::FMAXIMUM: {
4076     // TODO: Does this quiet or return the original NaN as-is?
4077 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4078 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4080 case ISD::EXTRACT_VECTOR_ELT: {
4081 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4083 default:
4084 if (Opcode >= ISD::BUILTIN_OP_END ||
4085 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4086 Opcode == ISD::INTRINSIC_W_CHAIN ||
4087 Opcode == ISD::INTRINSIC_VOID) {
4088 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4091 return false;
4095 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4096 assert(Op.getValueType().isFloatingPoint() &&
4097 "Floating point type expected");
4099 // If the value is a constant, we can obviously see if it is a zero or not.
4100 // TODO: Add BuildVector support.
4101 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4102 return !C->isZero();
4103 return false;
4106 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4107 assert(!Op.getValueType().isFloatingPoint() &&
4108 "Floating point types unsupported - use isKnownNeverZeroFloat");
4110 // If the value is a constant, we can obviously see if it is a zero or not.
4111 if (ISD::matchUnaryPredicate(
4112 Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
4113 return true;
4115 // TODO: Recognize more cases here.
4116 switch (Op.getOpcode()) {
4117 default: break;
4118 case ISD::OR:
4119 if (isKnownNeverZero(Op.getOperand(1)) ||
4120 isKnownNeverZero(Op.getOperand(0)))
4121 return true;
4122 break;
4125 return false;
4128 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4129 // Check the obvious case.
4130 if (A == B) return true;
4132   // Check for negative and positive zero.
4133 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4134 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4135 if (CA->isZero() && CB->isZero()) return true;
4137 // Otherwise they may not be equal.
4138 return false;
4141 // FIXME: unify with llvm::haveNoCommonBitsSet.
4142 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
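// Worked example (illustrative): with A = (X & 0xF0) and B = (Y & 0x0F),
// every bit is known zero on at least one side, so A and B share no set
// bits and e.g. (A | B) computes A + B.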
4143 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4144 assert(A.getValueType() == B.getValueType() &&
4145 "Values must have the same type");
4146 return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
4149 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4150 ArrayRef<SDValue> Ops,
4151 SelectionDAG &DAG) {
4152 int NumOps = Ops.size();
4153 assert(NumOps != 0 && "Can't build an empty vector!");
4154 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4155 "Incorrect element count in BUILD_VECTOR!");
4157 // BUILD_VECTOR of UNDEFs is UNDEF.
4158 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4159 return DAG.getUNDEF(VT);
4161   // A BUILD_VECTOR of sequential extracts from the same vector and type is an identity.
4162 SDValue IdentitySrc;
4163 bool IsIdentity = true;
4164 for (int i = 0; i != NumOps; ++i) {
4165 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4166 Ops[i].getOperand(0).getValueType() != VT ||
4167 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4168 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4169 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4170 IsIdentity = false;
4171 break;
4173 IdentitySrc = Ops[i].getOperand(0);
4175 if (IsIdentity)
4176 return IdentitySrc;
4178 return SDValue();
4181 /// Try to simplify vector concatenation to an input value, undef, or build
4182 /// vector.
4183 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4184 ArrayRef<SDValue> Ops,
4185 SelectionDAG &DAG) {
4186 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4187 assert(llvm::all_of(Ops,
4188 [Ops](SDValue Op) {
4189 return Ops[0].getValueType() == Op.getValueType();
4190 }) &&
4191 "Concatenation of vectors with inconsistent value types!");
4192 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
4193 VT.getVectorNumElements() &&
4194 "Incorrect element count in vector concatenation!");
4196 if (Ops.size() == 1)
4197 return Ops[0];
4199 // Concat of UNDEFs is UNDEF.
4200 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4201 return DAG.getUNDEF(VT);
4203 // Scan the operands and look for extract operations from a single source
4204 // that correspond to insertion at the same location via this concatenation:
4205 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
4206 SDValue IdentitySrc;
4207 bool IsIdentity = true;
4208 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
4209 SDValue Op = Ops[i];
4210 unsigned IdentityIndex = i * Op.getValueType().getVectorNumElements();
4211 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
4212 Op.getOperand(0).getValueType() != VT ||
4213 (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
4214 !isa<ConstantSDNode>(Op.getOperand(1)) ||
4215 Op.getConstantOperandVal(1) != IdentityIndex) {
4216 IsIdentity = false;
4217 break;
4219 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
4220 "Unexpected identity source vector for concat of extracts");
4221 IdentitySrc = Op.getOperand(0);
4223 if (IsIdentity) {
4224 assert(IdentitySrc && "Failed to set source vector of extracts");
4225 return IdentitySrc;
4228   // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
4229 // simplified to one big BUILD_VECTOR.
4230 // FIXME: Add support for SCALAR_TO_VECTOR as well.
4231 EVT SVT = VT.getScalarType();
4232 SmallVector<SDValue, 16> Elts;
4233 for (SDValue Op : Ops) {
4234 EVT OpVT = Op.getValueType();
4235 if (Op.isUndef())
4236 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
4237 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
4238 Elts.append(Op->op_begin(), Op->op_end());
4239 else
4240 return SDValue();
4243   // BUILD_VECTOR requires all inputs to be of the same type; find the
4244 // maximum type and extend them all.
4245 for (SDValue Op : Elts)
4246 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
4248 if (SVT.bitsGT(VT.getScalarType()))
4249 for (SDValue &Op : Elts)
4250 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
4251 ? DAG.getZExtOrTrunc(Op, DL, SVT)
4252 : DAG.getSExtOrTrunc(Op, DL, SVT);
4254 SDValue V = DAG.getBuildVector(VT, DL, Elts);
4255 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
4256 return V;
4259 /// Gets or creates the specified node.
4260 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4261 FoldingSetNodeID ID;
4262 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4263 void *IP = nullptr;
4264 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4265 return SDValue(E, 0);
4267 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4268 getVTList(VT));
4269 CSEMap.InsertNode(N, IP);
4271 InsertNode(N);
4272 SDValue V = SDValue(N, 0);
4273 NewSDValueDbgMsg(V, "Creating new node: ", this);
4274 return V;
4277 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4278 SDValue Operand, const SDNodeFlags Flags) {
4279 // Constant fold unary operations with an integer constant operand. Even
4280   // an opaque constant will be folded, because the folding of unary operations
4281 // doesn't create new constants with different values. Nevertheless, the
4282 // opaque flag is preserved during folding to prevent future folding with
4283 // other constants.
4284 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
4285 const APInt &Val = C->getAPIntValue();
4286 switch (Opcode) {
4287 default: break;
4288 case ISD::SIGN_EXTEND:
4289 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4290 C->isTargetOpcode(), C->isOpaque());
4291 case ISD::TRUNCATE:
4292 if (C->isOpaque())
4293 break;
4294 LLVM_FALLTHROUGH;
4295 case ISD::ANY_EXTEND:
4296 case ISD::ZERO_EXTEND:
4297 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4298 C->isTargetOpcode(), C->isOpaque());
4299 case ISD::UINT_TO_FP:
4300 case ISD::SINT_TO_FP: {
4301 APFloat apf(EVTToAPFloatSemantics(VT),
4302 APInt::getNullValue(VT.getSizeInBits()));
4303 (void)apf.convertFromAPInt(Val,
4304 Opcode==ISD::SINT_TO_FP,
4305 APFloat::rmNearestTiesToEven);
4306 return getConstantFP(apf, DL, VT);
4308 case ISD::BITCAST:
4309 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
4310 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
4311 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
4312 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
4313 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
4314 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
4315 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
4316 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
4317 break;
4318 case ISD::ABS:
4319 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
4320 C->isOpaque());
4321 case ISD::BITREVERSE:
4322 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
4323 C->isOpaque());
4324 case ISD::BSWAP:
4325 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
4326 C->isOpaque());
4327 case ISD::CTPOP:
4328 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
4329 C->isOpaque());
4330 case ISD::CTLZ:
4331 case ISD::CTLZ_ZERO_UNDEF:
4332 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
4333 C->isOpaque());
4334 case ISD::CTTZ:
4335 case ISD::CTTZ_ZERO_UNDEF:
4336 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
4337 C->isOpaque());
4338 case ISD::FP16_TO_FP: {
4339 bool Ignored;
4340 APFloat FPV(APFloat::IEEEhalf(),
4341 (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
4343 // This can return overflow, underflow, or inexact; we don't care.
4344 // FIXME need to be more flexible about rounding mode.
4345 (void)FPV.convert(EVTToAPFloatSemantics(VT),
4346 APFloat::rmNearestTiesToEven, &Ignored);
4347 return getConstantFP(FPV, DL, VT);
4352 // Constant fold unary operations with a floating point constant operand.
4353 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
4354 APFloat V = C->getValueAPF(); // make copy
4355 switch (Opcode) {
4356 case ISD::FNEG:
4357 V.changeSign();
4358 return getConstantFP(V, DL, VT);
4359 case ISD::FABS:
4360 V.clearSign();
4361 return getConstantFP(V, DL, VT);
4362 case ISD::FCEIL: {
4363 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
4364 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4365 return getConstantFP(V, DL, VT);
4366 break;
4368 case ISD::FTRUNC: {
4369 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
4370 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4371 return getConstantFP(V, DL, VT);
4372 break;
4374 case ISD::FFLOOR: {
4375 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
4376 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4377 return getConstantFP(V, DL, VT);
4378 break;
4380 case ISD::FP_EXTEND: {
4381 bool ignored;
4382 // This can return overflow, underflow, or inexact; we don't care.
4383 // FIXME need to be more flexible about rounding mode.
4384 (void)V.convert(EVTToAPFloatSemantics(VT),
4385 APFloat::rmNearestTiesToEven, &ignored);
4386 return getConstantFP(V, DL, VT);
4388 case ISD::FP_TO_SINT:
4389 case ISD::FP_TO_UINT: {
4390 bool ignored;
4391 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
4392 // FIXME need to be more flexible about rounding mode.
4393 APFloat::opStatus s =
4394 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
4395 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
4396 break;
4397 return getConstant(IntVal, DL, VT);
4399 case ISD::BITCAST:
4400 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
4401 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4402 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
4403 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4404 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
4405 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4406 break;
4407 case ISD::FP_TO_FP16: {
4408 bool Ignored;
4409 // This can return overflow, underflow, or inexact; we don't care.
4410 // FIXME need to be more flexible about rounding mode.
4411 (void)V.convert(APFloat::IEEEhalf(),
4412 APFloat::rmNearestTiesToEven, &Ignored);
4413 return getConstant(V.bitcastToAPInt(), DL, VT);
4418 // Constant fold unary operations with a vector integer or float operand.
4419 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
4420 if (BV->isConstant()) {
4421 switch (Opcode) {
4422 default:
4423 // FIXME: Entirely reasonable to perform folding of other unary
4424 // operations here as the need arises.
4425 break;
4426 case ISD::FNEG:
4427 case ISD::FABS:
4428 case ISD::FCEIL:
4429 case ISD::FTRUNC:
4430 case ISD::FFLOOR:
4431 case ISD::FP_EXTEND:
4432 case ISD::FP_TO_SINT:
4433 case ISD::FP_TO_UINT:
4434 case ISD::TRUNCATE:
4435 case ISD::ANY_EXTEND:
4436 case ISD::ZERO_EXTEND:
4437 case ISD::SIGN_EXTEND:
4438 case ISD::UINT_TO_FP:
4439 case ISD::SINT_TO_FP:
4440 case ISD::ABS:
4441 case ISD::BITREVERSE:
4442 case ISD::BSWAP:
4443 case ISD::CTLZ:
4444 case ISD::CTLZ_ZERO_UNDEF:
4445 case ISD::CTTZ:
4446 case ISD::CTTZ_ZERO_UNDEF:
4447 case ISD::CTPOP: {
4448 SDValue Ops = { Operand };
4449 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4450 return Fold;
4456 unsigned OpOpcode = Operand.getNode()->getOpcode();
4457 switch (Opcode) {
4458 case ISD::TokenFactor:
4459 case ISD::MERGE_VALUES:
4460 case ISD::CONCAT_VECTORS:
4461 return Operand; // Factor, merge or concat of one node? No need.
4462 case ISD::BUILD_VECTOR: {
4463 // Attempt to simplify BUILD_VECTOR.
4464 SDValue Ops[] = {Operand};
4465 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4466 return V;
4467 break;
4469 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4470 case ISD::FP_EXTEND:
4471 assert(VT.isFloatingPoint() &&
4472 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4473 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4474 assert((!VT.isVector() ||
4475 VT.getVectorNumElements() ==
4476 Operand.getValueType().getVectorNumElements()) &&
4477 "Vector element count mismatch!");
4478 assert(Operand.getValueType().bitsLT(VT) &&
4479 "Invalid fpext node, dst < src!");
4480 if (Operand.isUndef())
4481 return getUNDEF(VT);
4482 break;
4483 case ISD::FP_TO_SINT:
4484 case ISD::FP_TO_UINT:
4485 if (Operand.isUndef())
4486 return getUNDEF(VT);
4487 break;
4488 case ISD::SINT_TO_FP:
4489 case ISD::UINT_TO_FP:
4490 // [us]itofp(undef) = 0, because the result value is bounded.
4491 if (Operand.isUndef())
4492 return getConstantFP(0.0, DL, VT);
4493 break;
4494 case ISD::SIGN_EXTEND:
4495 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4496 "Invalid SIGN_EXTEND!");
4497 assert(VT.isVector() == Operand.getValueType().isVector() &&
4498 "SIGN_EXTEND result type type should be vector iff the operand "
4499 "type is vector!");
4500 if (Operand.getValueType() == VT) return Operand; // noop extension
4501 assert((!VT.isVector() ||
4502 VT.getVectorNumElements() ==
4503 Operand.getValueType().getVectorNumElements()) &&
4504 "Vector element count mismatch!");
4505 assert(Operand.getValueType().bitsLT(VT) &&
4506 "Invalid sext node, dst < src!");
4507 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4508 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4509 else if (OpOpcode == ISD::UNDEF)
4510 // sext(undef) = 0, because the top bits will all be the same.
4511 return getConstant(0, DL, VT);
4512 break;
4513 case ISD::ZERO_EXTEND:
4514 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4515 "Invalid ZERO_EXTEND!");
4516 assert(VT.isVector() == Operand.getValueType().isVector() &&
4517 "ZERO_EXTEND result type type should be vector iff the operand "
4518 "type is vector!");
4519 if (Operand.getValueType() == VT) return Operand; // noop extension
4520 assert((!VT.isVector() ||
4521 VT.getVectorNumElements() ==
4522 Operand.getValueType().getVectorNumElements()) &&
4523 "Vector element count mismatch!");
4524 assert(Operand.getValueType().bitsLT(VT) &&
4525 "Invalid zext node, dst < src!");
4526 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4527 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4528 else if (OpOpcode == ISD::UNDEF)
4529 // zext(undef) = 0, because the top bits will be zero.
4530 return getConstant(0, DL, VT);
4531 break;
4532 case ISD::ANY_EXTEND:
4533 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4534 "Invalid ANY_EXTEND!");
4535 assert(VT.isVector() == Operand.getValueType().isVector() &&
4536 "ANY_EXTEND result type type should be vector iff the operand "
4537 "type is vector!");
4538 if (Operand.getValueType() == VT) return Operand; // noop extension
4539 assert((!VT.isVector() ||
4540 VT.getVectorNumElements() ==
4541 Operand.getValueType().getVectorNumElements()) &&
4542 "Vector element count mismatch!");
4543 assert(Operand.getValueType().bitsLT(VT) &&
4544 "Invalid anyext node, dst < src!");
4546 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4547 OpOpcode == ISD::ANY_EXTEND)
4548 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4549 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4550 else if (OpOpcode == ISD::UNDEF)
4551 return getUNDEF(VT);
4553 // (ext (trunc x)) -> x
4554 if (OpOpcode == ISD::TRUNCATE) {
4555 SDValue OpOp = Operand.getOperand(0);
4556 if (OpOp.getValueType() == VT) {
4557 transferDbgValues(Operand, OpOp);
4558 return OpOp;
4561 break;
4562 case ISD::TRUNCATE:
4563 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4564 "Invalid TRUNCATE!");
4565 assert(VT.isVector() == Operand.getValueType().isVector() &&
4566 "TRUNCATE result type type should be vector iff the operand "
4567 "type is vector!");
4568 if (Operand.getValueType() == VT) return Operand; // noop truncate
4569 assert((!VT.isVector() ||
4570 VT.getVectorNumElements() ==
4571 Operand.getValueType().getVectorNumElements()) &&
4572 "Vector element count mismatch!");
4573 assert(Operand.getValueType().bitsGT(VT) &&
4574 "Invalid truncate node, src < dst!");
4575 if (OpOpcode == ISD::TRUNCATE)
4576 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4577 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4578 OpOpcode == ISD::ANY_EXTEND) {
4579 // If the source is smaller than the dest, we still need an extend.
4580 if (Operand.getOperand(0).getValueType().getScalarType()
4581 .bitsLT(VT.getScalarType()))
4582 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4583 if (Operand.getOperand(0).getValueType().bitsGT(VT))
4584 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4585 return Operand.getOperand(0);
4587 if (OpOpcode == ISD::UNDEF)
4588 return getUNDEF(VT);
4589 break;
4590 case ISD::ANY_EXTEND_VECTOR_INREG:
4591 case ISD::ZERO_EXTEND_VECTOR_INREG:
4592 case ISD::SIGN_EXTEND_VECTOR_INREG:
4593 assert(VT.isVector() && "This DAG node is restricted to vector types.");
4594 assert(Operand.getValueType().bitsLE(VT) &&
4595 "The input must be the same size or smaller than the result.");
4596 assert(VT.getVectorNumElements() <
4597 Operand.getValueType().getVectorNumElements() &&
4598 "The destination vector type must have fewer lanes than the input.");
4599 break;
4600 case ISD::ABS:
4601 assert(VT.isInteger() && VT == Operand.getValueType() &&
4602 "Invalid ABS!");
4603 if (OpOpcode == ISD::UNDEF)
4604 return getUNDEF(VT);
4605 break;
4606 case ISD::BSWAP:
4607 assert(VT.isInteger() && VT == Operand.getValueType() &&
4608 "Invalid BSWAP!");
4609 assert((VT.getScalarSizeInBits() % 16 == 0) &&
4610 "BSWAP types must be a multiple of 16 bits!");
4611 if (OpOpcode == ISD::UNDEF)
4612 return getUNDEF(VT);
4613 break;
4614 case ISD::BITREVERSE:
4615 assert(VT.isInteger() && VT == Operand.getValueType() &&
4616 "Invalid BITREVERSE!");
4617 if (OpOpcode == ISD::UNDEF)
4618 return getUNDEF(VT);
4619 break;
4620 case ISD::BITCAST:
4621 // Basic sanity checking.
4622 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
4623 "Cannot BITCAST between types of different sizes!");
4624 if (VT == Operand.getValueType()) return Operand; // noop conversion.
4625 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
4626 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
4627 if (OpOpcode == ISD::UNDEF)
4628 return getUNDEF(VT);
4629 break;
4630 case ISD::SCALAR_TO_VECTOR:
4631 assert(VT.isVector() && !Operand.getValueType().isVector() &&
4632 (VT.getVectorElementType() == Operand.getValueType() ||
4633 (VT.getVectorElementType().isInteger() &&
4634 Operand.getValueType().isInteger() &&
4635 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
4636 "Illegal SCALAR_TO_VECTOR node!");
4637 if (OpOpcode == ISD::UNDEF)
4638 return getUNDEF(VT);
4639 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
4640 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
4641 isa<ConstantSDNode>(Operand.getOperand(1)) &&
4642 Operand.getConstantOperandVal(1) == 0 &&
4643 Operand.getOperand(0).getValueType() == VT)
4644 return Operand.getOperand(0);
4645 break;
4646 case ISD::FNEG:
4647 // Negation of an unknown bag of bits is still completely undefined.
4648 if (OpOpcode == ISD::UNDEF)
4649 return getUNDEF(VT);
4651 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
4652 if ((getTarget().Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros()) &&
4653 OpOpcode == ISD::FSUB)
4654 return getNode(ISD::FSUB, DL, VT, Operand.getOperand(1),
4655 Operand.getOperand(0), Flags);
4656 if (OpOpcode == ISD::FNEG) // --X -> X
4657 return Operand.getOperand(0);
4658 break;
4659 case ISD::FABS:
4660 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
4661 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
4662 break;
4665 SDNode *N;
4666 SDVTList VTs = getVTList(VT);
4667 SDValue Ops[] = {Operand};
4668 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
4669 FoldingSetNodeID ID;
4670 AddNodeIDNode(ID, Opcode, VTs, Ops);
4671 void *IP = nullptr;
4672 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
4673 E->intersectFlagsWith(Flags);
4674 return SDValue(E, 0);
4677 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4678 N->setFlags(Flags);
4679 createOperands(N, Ops);
4680 CSEMap.InsertNode(N, IP);
4681 } else {
4682 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4683 createOperands(N, Ops);
4686 InsertNode(N);
4687 SDValue V = SDValue(N, 0);
4688 NewSDValueDbgMsg(V, "Creating new node: ", this);
4689 return V;
4692 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1,
4693 const APInt &C2) {
4694 switch (Opcode) {
4695 case ISD::ADD: return std::make_pair(C1 + C2, true);
4696 case ISD::SUB: return std::make_pair(C1 - C2, true);
4697 case ISD::MUL: return std::make_pair(C1 * C2, true);
4698 case ISD::AND: return std::make_pair(C1 & C2, true);
4699 case ISD::OR: return std::make_pair(C1 | C2, true);
4700 case ISD::XOR: return std::make_pair(C1 ^ C2, true);
4701 case ISD::SHL: return std::make_pair(C1 << C2, true);
4702 case ISD::SRL: return std::make_pair(C1.lshr(C2), true);
4703 case ISD::SRA: return std::make_pair(C1.ashr(C2), true);
4704 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true);
4705 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true);
4706 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true);
4707 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true);
4708 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true);
4709 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? C1 : C2, true);
4710 case ISD::SADDSAT: return std::make_pair(C1.sadd_sat(C2), true);
4711 case ISD::UADDSAT: return std::make_pair(C1.uadd_sat(C2), true);
4712 case ISD::SSUBSAT: return std::make_pair(C1.ssub_sat(C2), true);
4713 case ISD::USUBSAT: return std::make_pair(C1.usub_sat(C2), true);
4714 case ISD::UDIV:
4715 if (!C2.getBoolValue())
4716 break;
4717 return std::make_pair(C1.udiv(C2), true);
4718 case ISD::UREM:
4719 if (!C2.getBoolValue())
4720 break;
4721 return std::make_pair(C1.urem(C2), true);
4722 case ISD::SDIV:
4723 if (!C2.getBoolValue())
4724 break;
4725 return std::make_pair(C1.sdiv(C2), true);
4726 case ISD::SREM:
4727 if (!C2.getBoolValue())
4728 break;
4729 return std::make_pair(C1.srem(C2), true);
4731 return std::make_pair(APInt(1, 0), false);
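// A minimal standalone sketch of the folding semantics above, assuming the
// usual llvm::APInt API (uadd_sat clamps; division by zero is rejected and
// falls through to the (APInt(1, 0), false) failure pair):
//
//   APInt A(8, 250), B(8, 10);
//   std::pair<APInt, bool> R = FoldValue(ISD::UADDSAT, A, B);
//   // R.second == true, R.first == 255 (clamped at the unsigned i8 max)
//   std::pair<APInt, bool> D = FoldValue(ISD::UDIV, A, APInt(8, 0));
//   // D.second == false: the caller must not use D.first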
4734 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
4735 EVT VT, const ConstantSDNode *C1,
4736 const ConstantSDNode *C2) {
4737 if (C1->isOpaque() || C2->isOpaque())
4738 return SDValue();
4740 std::pair<APInt, bool> Folded = FoldValue(Opcode, C1->getAPIntValue(),
4741 C2->getAPIntValue());
4742 if (!Folded.second)
4743 return SDValue();
4744 return getConstant(Folded.first, DL, VT);
4747 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
4748 const GlobalAddressSDNode *GA,
4749 const SDNode *N2) {
4750 if (GA->getOpcode() != ISD::GlobalAddress)
4751 return SDValue();
4752 if (!TLI->isOffsetFoldingLegal(GA))
4753 return SDValue();
4754 auto *C2 = dyn_cast<ConstantSDNode>(N2);
4755 if (!C2)
4756 return SDValue();
4757 int64_t Offset = C2->getSExtValue();
4758 switch (Opcode) {
4759 case ISD::ADD: break;
4760 case ISD::SUB: Offset = -uint64_t(Offset); break;
4761 default: return SDValue();
4763 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
4764 GA->getOffset() + uint64_t(Offset));
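// Sketch of the transformation FoldSymbolOffset performs, using a global @g
// purely for illustration: (add (GlobalAddress @g, off=4), 12) becomes a
// single GlobalAddress node @g with off=16, and (sub ..., 12) negates the
// constant first. Any other opcode returns the empty SDValue and folds
// nothing.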
4767 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
4768 switch (Opcode) {
4769 case ISD::SDIV:
4770 case ISD::UDIV:
4771 case ISD::SREM:
4772 case ISD::UREM: {
4773 // If a divisor is zero/undef or any element of a divisor vector is
4774 // zero/undef, the whole op is undef.
4775 assert(Ops.size() == 2 && "Div/rem should have 2 operands");
4776 SDValue Divisor = Ops[1];
4777 if (Divisor.isUndef() || isNullConstant(Divisor))
4778 return true;
4780 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
4781 llvm::any_of(Divisor->op_values(),
4782 [](SDValue V) { return V.isUndef() ||
4783 isNullConstant(V); });
4784 // TODO: Handle signed overflow.
4786 // TODO: Handle oversized shifts.
4787 default:
4788 return false;
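// For example, (udiv X, 0), (srem X, undef), and a constant vector divisor
// with a zero lane such as (udiv X, <1, 0, 2, 3>) are all reported as undef
// here, matching the IR rule that division by zero in any lane is immediate
// undefined behavior.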
4792 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
4793 EVT VT, SDNode *N1, SDNode *N2) {
4794 // If the opcode is a target-specific ISD node, there's nothing we can
4795 // do here and the operand rules may not line up with the below, so
4796 // bail early.
4797 if (Opcode >= ISD::BUILTIN_OP_END)
4798 return SDValue();
4800 if (isUndef(Opcode, {SDValue(N1, 0), SDValue(N2, 0)}))
4801 return getUNDEF(VT);
4803 // Handle the case of two scalars.
4804 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) {
4805 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) {
4806 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, C1, C2);
4807 assert((!Folded || !VT.isVector()) &&
4808 "Can't fold vector ops with scalar operands");
4809 return Folded;
4813 // fold (add Sym, c) -> Sym+c
4814 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1))
4815 return FoldSymbolOffset(Opcode, VT, GA, N2);
4816 if (TLI->isCommutativeBinOp(Opcode))
4817 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2))
4818 return FoldSymbolOffset(Opcode, VT, GA, N1);
4820 // For vectors, extract each constant element and fold them individually.
4821 // Either input may be an undef value.
4822 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
4823 if (!BV1 && !N1->isUndef())
4824 return SDValue();
4825 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
4826 if (!BV2 && !N2->isUndef())
4827 return SDValue();
4828 // If both operands are undef, that's handled the same way as scalars.
4829 if (!BV1 && !BV2)
4830 return SDValue();
4832 assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) &&
4833 "Vector binop with different number of elements in operands?");
4835 EVT SVT = VT.getScalarType();
4836 EVT LegalSVT = SVT;
4837 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4838 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4839 if (LegalSVT.bitsLT(SVT))
4840 return SDValue();
4842 SmallVector<SDValue, 4> Outputs;
4843 unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands();
4844 for (unsigned I = 0; I != NumOps; ++I) {
4845 SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT);
4846 SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT);
4847 if (SVT.isInteger()) {
4848 if (V1->getValueType(0).bitsGT(SVT))
4849 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
4850 if (V2->getValueType(0).bitsGT(SVT))
4851 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
4854 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
4855 return SDValue();
4857 // Fold one vector element.
4858 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
4859 if (LegalSVT != SVT)
4860 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4862 // Scalar folding only succeeded if the result is a constant or UNDEF.
4863 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4864 ScalarResult.getOpcode() != ISD::ConstantFP)
4865 return SDValue();
4866 Outputs.push_back(ScalarResult);
4869 assert(VT.getVectorNumElements() == Outputs.size() &&
4870 "Vector size mismatch!");
4872 // We may have a vector type but a scalar result. Create a splat.
4873 Outputs.resize(VT.getVectorNumElements(), Outputs.back());
4875 // Build a big vector out of the scalar elements we generated.
4876 return getBuildVector(VT, SDLoc(), Outputs);
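// Worked example of the element-wise loop above (a sketch, not a test from
// this file): folding (add <2 x i32> <1, undef>, <3, 4>) visits each lane,
// producing getNode(ISD::ADD, ..., 1, 3) -> 4 and
// getNode(ISD::ADD, ..., undef, 4) -> undef, and rebuilds the result as
// <4, undef> via getBuildVector.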
4879 // TODO: Merge with FoldConstantArithmetic
4880 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
4881 const SDLoc &DL, EVT VT,
4882 ArrayRef<SDValue> Ops,
4883 const SDNodeFlags Flags) {
4884 // If the opcode is a target-specific ISD node, there's nothing we can
4885 // do here and the operand rules may not line up with the below, so
4886 // bail early.
4887 if (Opcode >= ISD::BUILTIN_OP_END)
4888 return SDValue();
4890 if (isUndef(Opcode, Ops))
4891 return getUNDEF(VT);
4893 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
4894 if (!VT.isVector())
4895 return SDValue();
4897 unsigned NumElts = VT.getVectorNumElements();
4899 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
4900 return !Op.getValueType().isVector() ||
4901 Op.getValueType().getVectorNumElements() == NumElts;
4904 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
4905 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
4906 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
4907 (BV && BV->isConstant());
4910 // All operands must be vector types with the same number of elements as
4911 // the result type and must be either UNDEF or a build vector of constant
4912 // or UNDEF scalars.
4913 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
4914 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
4915 return SDValue();
4917 // If we are comparing vectors, then the result needs to be an i1 boolean
4918 // that is then sign-extended back to the legal result type.
4919 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
4921 // Find legal integer scalar type for constant promotion and
4922 // ensure that its scalar size is at least as large as source.
4923 EVT LegalSVT = VT.getScalarType();
4924 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4925 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4926 if (LegalSVT.bitsLT(VT.getScalarType()))
4927 return SDValue();
4930 // Constant fold each scalar lane separately.
4931 SmallVector<SDValue, 4> ScalarResults;
4932 for (unsigned i = 0; i != NumElts; i++) {
4933 SmallVector<SDValue, 4> ScalarOps;
4934 for (SDValue Op : Ops) {
4935 EVT InSVT = Op.getValueType().getScalarType();
4936 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
4937 if (!InBV) {
4938 // We've checked that this is UNDEF or a constant of some kind.
4939 if (Op.isUndef())
4940 ScalarOps.push_back(getUNDEF(InSVT));
4941 else
4942 ScalarOps.push_back(Op);
4943 continue;
4946 SDValue ScalarOp = InBV->getOperand(i);
4947 EVT ScalarVT = ScalarOp.getValueType();
4949 // Build vector (integer) scalar operands may need implicit
4950 // truncation - do this before constant folding.
4951 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
4952 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
4954 ScalarOps.push_back(ScalarOp);
4957 // Constant fold the scalar operands.
4958 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
4960 // Legalize the (integer) scalar constant if necessary.
4961 if (LegalSVT != SVT)
4962 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4964 // Scalar folding only succeeded if the result is a constant or UNDEF.
4965 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4966 ScalarResult.getOpcode() != ISD::ConstantFP)
4967 return SDValue();
4968 ScalarResults.push_back(ScalarResult);
4971 SDValue V = getBuildVector(VT, DL, ScalarResults);
4972 NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
4973 return V;
4976 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
4977 EVT VT, SDValue N1, SDValue N2) {
4978 // TODO: We don't do any constant folding for strict FP opcodes here, but we
4979 // should. That will require dealing with a potentially non-default
4980 // rounding mode, checking the "opStatus" return value from the APFloat
4981 // math calculations, and possibly other variations.
4982 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
4983 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
4984 if (N1CFP && N2CFP) {
4985 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF();
4986 switch (Opcode) {
4987 case ISD::FADD:
4988 C1.add(C2, APFloat::rmNearestTiesToEven);
4989 return getConstantFP(C1, DL, VT);
4990 case ISD::FSUB:
4991 C1.subtract(C2, APFloat::rmNearestTiesToEven);
4992 return getConstantFP(C1, DL, VT);
4993 case ISD::FMUL:
4994 C1.multiply(C2, APFloat::rmNearestTiesToEven);
4995 return getConstantFP(C1, DL, VT);
4996 case ISD::FDIV:
4997 C1.divide(C2, APFloat::rmNearestTiesToEven);
4998 return getConstantFP(C1, DL, VT);
4999 case ISD::FREM:
5000 C1.mod(C2);
5001 return getConstantFP(C1, DL, VT);
5002 case ISD::FCOPYSIGN:
5003 C1.copySign(C2);
5004 return getConstantFP(C1, DL, VT);
5005 default: break;
5008 if (N1CFP && Opcode == ISD::FP_ROUND) {
5009 APFloat C1 = N1CFP->getValueAPF(); // make copy
5010 bool Unused;
5011 // This can return overflow, underflow, or inexact; we don't care.
5012 // FIXME need to be more flexible about rounding mode.
5013 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
5014 &Unused);
5015 return getConstantFP(C1, DL, VT);
5018 switch (Opcode) {
5019 case ISD::FADD:
5020 case ISD::FSUB:
5021 case ISD::FMUL:
5022 case ISD::FDIV:
5023 case ISD::FREM:
5024 // If both operands are undef, the result is undef. If one operand is undef,
5025 // the result is NaN. This should match the behavior of the IR optimizer.
5026 if (N1.isUndef() && N2.isUndef())
5027 return getUNDEF(VT);
5028 if (N1.isUndef() || N2.isUndef())
5029 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
5031 return SDValue();
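// A minimal standalone sketch of the APFloat folding used above, assuming
// the usual llvm::APFloat API with round-to-nearest-even:
//
//   APFloat C1(1.5f), C2(2.25f);
//   APFloat::opStatus S = C1.add(C2, APFloat::rmNearestTiesToEven);
//   // C1 is now 3.75; S is opOK. The code above deliberately ignores the
//   // status, which is what the TODO on strict FP opcodes refers to.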
5034 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5035 SDValue N1, SDValue N2, const SDNodeFlags Flags) {
5036 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
5037 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
5038 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5039 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5041 // Canonicalize constant to RHS if commutative.
5042 if (TLI->isCommutativeBinOp(Opcode)) {
5043 if (N1C && !N2C) {
5044 std::swap(N1C, N2C);
5045 std::swap(N1, N2);
5046 } else if (N1CFP && !N2CFP) {
5047 std::swap(N1CFP, N2CFP);
5048 std::swap(N1, N2);
5052 switch (Opcode) {
5053 default: break;
5054 case ISD::TokenFactor:
5055 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
5056 N2.getValueType() == MVT::Other && "Invalid token factor!");
5057 // Fold trivial token factors.
5058 if (N1.getOpcode() == ISD::EntryToken) return N2;
5059 if (N2.getOpcode() == ISD::EntryToken) return N1;
5060 if (N1 == N2) return N1;
5061 break;
5062 case ISD::BUILD_VECTOR: {
5063 // Attempt to simplify BUILD_VECTOR.
5064 SDValue Ops[] = {N1, N2};
5065 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5066 return V;
5067 break;
5069 case ISD::CONCAT_VECTORS: {
5070 SDValue Ops[] = {N1, N2};
5071 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
5072 return V;
5073 break;
5075 case ISD::AND:
5076 assert(VT.isInteger() && "This operator does not apply to FP types!");
5077 assert(N1.getValueType() == N2.getValueType() &&
5078 N1.getValueType() == VT && "Binary operator types must match!");
5079 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
5080 // worth handling here.
5081 if (N2C && N2C->isNullValue())
5082 return N2;
5083 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
5084 return N1;
5085 break;
5086 case ISD::OR:
5087 case ISD::XOR:
5088 case ISD::ADD:
5089 case ISD::SUB:
5090 assert(VT.isInteger() && "This operator does not apply to FP types!");
5091 assert(N1.getValueType() == N2.getValueType() &&
5092 N1.getValueType() == VT && "Binary operator types must match!");
5093 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
5094 // it's worth handling here.
5095 if (N2C && N2C->isNullValue())
5096 return N1;
5097 break;
5098 case ISD::UDIV:
5099 case ISD::UREM:
5100 case ISD::MULHU:
5101 case ISD::MULHS:
5102 case ISD::MUL:
5103 case ISD::SDIV:
5104 case ISD::SREM:
5105 case ISD::SMIN:
5106 case ISD::SMAX:
5107 case ISD::UMIN:
5108 case ISD::UMAX:
5109 case ISD::SADDSAT:
5110 case ISD::SSUBSAT:
5111 case ISD::UADDSAT:
5112 case ISD::USUBSAT:
5113 assert(VT.isInteger() && "This operator does not apply to FP types!");
5114 assert(N1.getValueType() == N2.getValueType() &&
5115 N1.getValueType() == VT && "Binary operator types must match!");
5116 break;
5117 case ISD::FADD:
5118 case ISD::FSUB:
5119 case ISD::FMUL:
5120 case ISD::FDIV:
5121 case ISD::FREM:
5122 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5123 assert(N1.getValueType() == N2.getValueType() &&
5124 N1.getValueType() == VT && "Binary operator types must match!");
5125 if (SDValue V = simplifyFPBinop(Opcode, N1, N2))
5126 return V;
5127 break;
5128 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
5129 assert(N1.getValueType() == VT &&
5130 N1.getValueType().isFloatingPoint() &&
5131 N2.getValueType().isFloatingPoint() &&
5132 "Invalid FCOPYSIGN!");
5133 break;
5134 case ISD::SHL:
5135 case ISD::SRA:
5136 case ISD::SRL:
5137 if (SDValue V = simplifyShift(N1, N2))
5138 return V;
5139 LLVM_FALLTHROUGH;
5140 case ISD::ROTL:
5141 case ISD::ROTR:
5142 assert(VT == N1.getValueType() &&
5143 "Shift operators' return type must be the same as their first arg");
5144 assert(VT.isInteger() && N2.getValueType().isInteger() &&
5145 "Shifts only work on integers");
5146 assert((!VT.isVector() || VT == N2.getValueType()) &&
5147 "Vector shift amounts must be the same type as their first arg");
5148 // Verify that the shift amount VT is big enough to hold valid shift
5149 // amounts. This catches things like trying to shift an i1024 value by an
5150 // i8, which is easy to fall into in generic code that uses
5151 // TLI.getShiftAmount().
5152 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
5153 "Invalid use of small shift amount with oversized value!");
5155 // Always fold shifts of i1 values so the code generator doesn't need to
5156 // handle them. Since we know the size of the shift has to be less than the
5157 // size of the value, the shift/rotate count is guaranteed to be zero.
5158 if (VT == MVT::i1)
5159 return N1;
5160 if (N2C && N2C->isNullValue())
5161 return N1;
5162 break;
5163 case ISD::FP_ROUND:
5164 assert(VT.isFloatingPoint() &&
5165 N1.getValueType().isFloatingPoint() &&
5166 VT.bitsLE(N1.getValueType()) &&
5167 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
5168 "Invalid FP_ROUND!");
5169 if (N1.getValueType() == VT) return N1; // noop conversion.
5170 break;
5171 case ISD::AssertSext:
5172 case ISD::AssertZext: {
5173 EVT EVT = cast<VTSDNode>(N2)->getVT();
5174 assert(VT == N1.getValueType() && "Not an inreg extend!");
5175 assert(VT.isInteger() && EVT.isInteger() &&
5176 "Cannot *_EXTEND_INREG FP types");
5177 assert(!EVT.isVector() &&
5178 "AssertSExt/AssertZExt type should be the vector element type "
5179 "rather than the vector type!");
5180 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
5181 if (VT.getScalarType() == EVT) return N1; // noop assertion.
5182 break;
5184 case ISD::SIGN_EXTEND_INREG: {
5185 EVT EVT = cast<VTSDNode>(N2)->getVT();
5186 assert(VT == N1.getValueType() && "Not an inreg extend!");
5187 assert(VT.isInteger() && EVT.isInteger() &&
5188 "Cannot *_EXTEND_INREG FP types");
5189 assert(EVT.isVector() == VT.isVector() &&
5190 "SIGN_EXTEND_INREG type should be vector iff the operand "
5191 "type is vector!");
5192 assert((!EVT.isVector() ||
5193 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
5194 "Vector element counts must match in SIGN_EXTEND_INREG");
5195 assert(EVT.bitsLE(VT) && "Not extending!");
5196 if (EVT == VT) return N1; // Not actually extending
5198 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
5199 unsigned FromBits = EVT.getScalarSizeInBits();
5200 Val <<= Val.getBitWidth() - FromBits;
5201 Val.ashrInPlace(Val.getBitWidth() - FromBits);
5202 return getConstant(Val, DL, ConstantVT);
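// Worked example of the shift pair above: sign-extending the low 8 bits of
// an i32. For Val = 0x000000AB and FromBits = 8:
//   Val <<= 24;          // 0xAB000000: sign bit of the small type now at MSB
//   Val.ashrInPlace(24); // 0xFFFFFFAB: arithmetic shift replicates the sign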
5205 if (N1C) {
5206 const APInt &Val = N1C->getAPIntValue();
5207 return SignExtendInReg(Val, VT);
5209 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
5210 SmallVector<SDValue, 8> Ops;
5211 llvm::EVT OpVT = N1.getOperand(0).getValueType();
5212 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
5213 SDValue Op = N1.getOperand(i);
5214 if (Op.isUndef()) {
5215 Ops.push_back(getUNDEF(OpVT));
5216 continue;
5218 ConstantSDNode *C = cast<ConstantSDNode>(Op);
5219 APInt Val = C->getAPIntValue();
5220 Ops.push_back(SignExtendInReg(Val, OpVT));
5222 return getBuildVector(VT, DL, Ops);
5224 break;
5226 case ISD::EXTRACT_VECTOR_ELT:
5227 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
5228 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
5229 element type of the vector.");
5231 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
5232 if (N1.isUndef())
5233 return getUNDEF(VT);
5235 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF
5236 if (N2C && N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
5237 return getUNDEF(VT);
5239 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
5240 // expanding copies of large vectors from registers.
5241 if (N2C &&
5242 N1.getOpcode() == ISD::CONCAT_VECTORS &&
5243 N1.getNumOperands() > 0) {
5244 unsigned Factor =
5245 N1.getOperand(0).getValueType().getVectorNumElements();
5246 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
5247 N1.getOperand(N2C->getZExtValue() / Factor),
5248 getConstant(N2C->getZExtValue() % Factor, DL,
5249 N2.getValueType()));
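// Index arithmetic sketch: extracting element 5 from a CONCAT_VECTORS of
// two v4i32 parts gives Factor = 4, so the fold reads element 5 % 4 = 1
// from operand 5 / 4 = 1, i.e. lane 1 of the second concatenated vector.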
5252 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
5253 // expanding large vector constants.
5254 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
5255 SDValue Elt = N1.getOperand(N2C->getZExtValue());
5257 if (VT != Elt.getValueType())
5258 // If the vector element type is not legal, the BUILD_VECTOR operands
5259 // are promoted and implicitly truncated, and the result implicitly
5260 // extended. Make that explicit here.
5261 Elt = getAnyExtOrTrunc(Elt, DL, VT);
5263 return Elt;
5266 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
5267 // operations are lowered to scalars.
5268 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
5269 // If the indices are the same, return the inserted element; otherwise,
5270 // if the indices are known to differ, extract the element from
5271 // the original vector.
5272 SDValue N1Op2 = N1.getOperand(2);
5273 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
5275 if (N1Op2C && N2C) {
5276 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
5277 if (VT == N1.getOperand(1).getValueType())
5278 return N1.getOperand(1);
5279 else
5280 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
5283 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
5287 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
5288 // when vector types are scalarized and v1iX is legal.
5289 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx)
5290 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5291 N1.getValueType().getVectorNumElements() == 1) {
5292 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
5293 N1.getOperand(1));
5295 break;
5296 case ISD::EXTRACT_ELEMENT:
5297 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
5298 assert(!N1.getValueType().isVector() && !VT.isVector() &&
5299 (N1.getValueType().isInteger() == VT.isInteger()) &&
5300 N1.getValueType() != VT &&
5301 "Wrong types for EXTRACT_ELEMENT!");
5303 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
5304 // 64-bit integers into 32-bit parts. Instead of building the extract of
5305 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
5306 if (N1.getOpcode() == ISD::BUILD_PAIR)
5307 return N1.getOperand(N2C->getZExtValue());
5309 // EXTRACT_ELEMENT of a constant int is also very common.
5310 if (N1C) {
5311 unsigned ElementSize = VT.getSizeInBits();
5312 unsigned Shift = ElementSize * N2C->getZExtValue();
5313 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
5314 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
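// Worked example: splitting the i64 constant 0x1122334455667788 into i32
// halves. Element 0 shifts by 0 and truncates to 0x55667788; element 1
// shifts right by 32 and truncates to 0x11223344.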
5316 break;
5317 case ISD::EXTRACT_SUBVECTOR:
5318 if (VT.isSimple() && N1.getValueType().isSimple()) {
5319 assert(VT.isVector() && N1.getValueType().isVector() &&
5320 "Extract subvector VTs must be vectors!");
5321 assert(VT.getVectorElementType() ==
5322 N1.getValueType().getVectorElementType() &&
5323 "Extract subvector VTs must have the same element type!");
5324 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
5325 "Extract subvector must be from larger vector to smaller vector!");
5327 if (N2C) {
5328 assert((VT.getVectorNumElements() + N2C->getZExtValue()
5329 <= N1.getValueType().getVectorNumElements())
5330 && "Extract subvector overflow!");
5333 // Trivial extraction.
5334 if (VT.getSimpleVT() == N1.getSimpleValueType())
5335 return N1;
5337 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
5338 if (N1.isUndef())
5339 return getUNDEF(VT);
5341 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
5342 // the concat have the same type as the extract.
5343 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
5344 N1.getNumOperands() > 0 &&
5345 VT == N1.getOperand(0).getValueType()) {
5346 unsigned Factor = VT.getVectorNumElements();
5347 return N1.getOperand(N2C->getZExtValue() / Factor);
5350 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
5351 // during shuffle legalization.
5352 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
5353 VT == N1.getOperand(1).getValueType())
5354 return N1.getOperand(1);
5356 break;
5359 // Perform trivial constant folding.
5360 if (SDValue SV =
5361 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
5362 return SV;
5364 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2))
5365 return V;
5367 // Canonicalize an UNDEF to the RHS, even over a constant.
5368 if (N1.isUndef()) {
5369 if (TLI->isCommutativeBinOp(Opcode)) {
5370 std::swap(N1, N2);
5371 } else {
5372 switch (Opcode) {
5373 case ISD::SIGN_EXTEND_INREG:
5374 case ISD::SUB:
5375 return getUNDEF(VT); // fold op(undef, arg2) -> undef
5376 case ISD::UDIV:
5377 case ISD::SDIV:
5378 case ISD::UREM:
5379 case ISD::SREM:
5380 case ISD::SSUBSAT:
5381 case ISD::USUBSAT:
5382 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
5387 // Fold a bunch of operators when the RHS is undef.
5388 if (N2.isUndef()) {
5389 switch (Opcode) {
5390 case ISD::XOR:
5391 if (N1.isUndef())
5392 // Handle undef ^ undef -> 0 special case. This is a common
5393 // idiom (misuse).
5394 return getConstant(0, DL, VT);
5395 LLVM_FALLTHROUGH;
5396 case ISD::ADD:
5397 case ISD::SUB:
5398 case ISD::UDIV:
5399 case ISD::SDIV:
5400 case ISD::UREM:
5401 case ISD::SREM:
5402 return getUNDEF(VT); // fold op(arg1, undef) -> undef
5403 case ISD::MUL:
5404 case ISD::AND:
5405 case ISD::SSUBSAT:
5406 case ISD::USUBSAT:
5407 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
5408 case ISD::OR:
5409 case ISD::SADDSAT:
5410 case ISD::UADDSAT:
5411 return getAllOnesConstant(DL, VT);
5415 // Memoize this node if possible.
5416 SDNode *N;
5417 SDVTList VTs = getVTList(VT);
5418 SDValue Ops[] = {N1, N2};
5419 if (VT != MVT::Glue) {
5420 FoldingSetNodeID ID;
5421 AddNodeIDNode(ID, Opcode, VTs, Ops);
5422 void *IP = nullptr;
5423 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5424 E->intersectFlagsWith(Flags);
5425 return SDValue(E, 0);
5428 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5429 N->setFlags(Flags);
5430 createOperands(N, Ops);
5431 CSEMap.InsertNode(N, IP);
5432 } else {
5433 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5434 createOperands(N, Ops);
5437 InsertNode(N);
5438 SDValue V = SDValue(N, 0);
5439 NewSDValueDbgMsg(V, "Creating new node: ", this);
5440 return V;
5443 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5444 SDValue N1, SDValue N2, SDValue N3,
5445 const SDNodeFlags Flags) {
5446 // Perform various simplifications.
5447 switch (Opcode) {
5448 case ISD::FMA: {
5449 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5450 assert(N1.getValueType() == VT && N2.getValueType() == VT &&
5451 N3.getValueType() == VT && "FMA types must match!");
5452 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5453 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5454 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
5455 if (N1CFP && N2CFP && N3CFP) {
5456 APFloat V1 = N1CFP->getValueAPF();
5457 const APFloat &V2 = N2CFP->getValueAPF();
5458 const APFloat &V3 = N3CFP->getValueAPF();
5459 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
5460 return getConstantFP(V1, DL, VT);
5462 break;
5464 case ISD::BUILD_VECTOR: {
5465 // Attempt to simplify BUILD_VECTOR.
5466 SDValue Ops[] = {N1, N2, N3};
5467 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5468 return V;
5469 break;
5471 case ISD::CONCAT_VECTORS: {
5472 SDValue Ops[] = {N1, N2, N3};
5473 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
5474 return V;
5475 break;
5477 case ISD::SETCC: {
5478 assert(VT.isInteger() && "SETCC result type must be an integer!");
5479 assert(N1.getValueType() == N2.getValueType() &&
5480 "SETCC operands must have the same type!");
5481 assert(VT.isVector() == N1.getValueType().isVector() &&
5482 "SETCC type should be vector iff the operand type is vector!");
5483 assert((!VT.isVector() ||
5484 VT.getVectorNumElements() == N1.getValueType().getVectorNumElements()) &&
5485 "SETCC vector element counts must match!");
5486 // Use FoldSetCC to simplify SETCC's.
5487 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
5488 return V;
5489 // Vector constant folding.
5490 SDValue Ops[] = {N1, N2, N3};
5491 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) {
5492 NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
5493 return V;
5495 break;
5497 case ISD::SELECT:
5498 case ISD::VSELECT:
5499 if (SDValue V = simplifySelect(N1, N2, N3))
5500 return V;
5501 break;
5502 case ISD::VECTOR_SHUFFLE:
5503 llvm_unreachable("should use getVectorShuffle constructor!");
5504 case ISD::INSERT_VECTOR_ELT: {
5505 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
5506 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF
5507 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
5508 return getUNDEF(VT);
5509 break;
5511 case ISD::INSERT_SUBVECTOR: {
5512 // Inserting undef into undef is still undef.
5513 if (N1.isUndef() && N2.isUndef())
5514 return getUNDEF(VT);
5515 SDValue Index = N3;
5516 if (VT.isSimple() && N1.getValueType().isSimple()
5517 && N2.getValueType().isSimple()) {
5518 assert(VT.isVector() && N1.getValueType().isVector() &&
5519 N2.getValueType().isVector() &&
5520 "Insert subvector VTs must be vectors");
5521 assert(VT == N1.getValueType() &&
5522 "Dest and insert subvector source types must match!");
5523 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
5524 "Insert subvector must be from smaller vector to larger vector!");
5525 if (isa<ConstantSDNode>(Index)) {
5526 assert((N2.getValueType().getVectorNumElements() +
5527 cast<ConstantSDNode>(Index)->getZExtValue()
5528 <= VT.getVectorNumElements())
5529 && "Insert subvector overflow!");
5532 // Trivial insertion.
5533 if (VT.getSimpleVT() == N2.getSimpleValueType())
5534 return N2;
5536 // If this is an insert of an extracted vector into an undef vector, we
5537 // can just use the input to the extract.
5538 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5539 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
5540 return N2.getOperand(0);
5542 break;
5544 case ISD::BITCAST:
5545 // Fold bit_convert nodes from a type to themselves.
5546 if (N1.getValueType() == VT)
5547 return N1;
5548 break;
5551 // Memoize node if it doesn't produce a flag.
5552 SDNode *N;
5553 SDVTList VTs = getVTList(VT);
5554 SDValue Ops[] = {N1, N2, N3};
5555 if (VT != MVT::Glue) {
5556 FoldingSetNodeID ID;
5557 AddNodeIDNode(ID, Opcode, VTs, Ops);
5558 void *IP = nullptr;
5559 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5560 E->intersectFlagsWith(Flags);
5561 return SDValue(E, 0);
5564 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5565 N->setFlags(Flags);
5566 createOperands(N, Ops);
5567 CSEMap.InsertNode(N, IP);
5568 } else {
5569 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5570 createOperands(N, Ops);
5573 InsertNode(N);
5574 SDValue V = SDValue(N, 0);
5575 NewSDValueDbgMsg(V, "Creating new node: ", this);
5576 return V;
5579 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5580 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
5581 SDValue Ops[] = { N1, N2, N3, N4 };
5582 return getNode(Opcode, DL, VT, Ops);
5585 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5586 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
5587 SDValue N5) {
5588 SDValue Ops[] = { N1, N2, N3, N4, N5 };
5589 return getNode(Opcode, DL, VT, Ops);
5592 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
5593 /// the incoming stack arguments to be loaded from the stack.
5594 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
5595 SmallVector<SDValue, 8> ArgChains;
5597 // Include the original chain at the beginning of the list. When this is
5598 // used by target LowerCall hooks, this helps legalize find the
5599 // CALLSEQ_BEGIN node.
5600 ArgChains.push_back(Chain);
5602 // Add a chain value for each stack argument.
5603 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
5604 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
5605 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
5606 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
5607 if (FI->getIndex() < 0)
5608 ArgChains.push_back(SDValue(L, 1));
5610 // Build a tokenfactor for all the chains.
5611 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
5614 /// getMemsetValue - Vectorized representation of the memset value
5615 /// operand.
5616 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
5617 const SDLoc &dl) {
5618 assert(!Value.isUndef());
5620 unsigned NumBits = VT.getScalarSizeInBits();
5621 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
5622 assert(C->getAPIntValue().getBitWidth() == 8);
5623 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
5624 if (VT.isInteger()) {
5625 bool IsOpaque = VT.getSizeInBits() > 64 ||
5626 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
5627 return DAG.getConstant(Val, dl, VT, false, IsOpaque);
5629 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
5630 VT);
5633 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
5634 EVT IntVT = VT.getScalarType();
5635 if (!IntVT.isInteger())
5636 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
5638 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
5639 if (NumBits > 8) {
5640 // Use a multiplication with 0x010101... to extend the input to the
5641 // required length.
5642 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
5643 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
5644 DAG.getConstant(Magic, dl, IntVT));
5647 if (VT != Value.getValueType() && !VT.isInteger())
5648 Value = DAG.getBitcast(VT.getScalarType(), Value);
5649 if (VT != Value.getValueType())
5650 Value = DAG.getSplatBuildVector(VT, dl, Value);
5652 return Value;
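// Sketch of the 0x01 multiplication trick above: for an i8 fill byte 0xAB
// splatted to i32, Magic = APInt::getSplat(32, APInt(8, 0x01)) = 0x01010101
// and 0x000000AB * 0x01010101 = 0xABABABAB, replicating the byte into every
// byte lane of the wider integer.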
5655 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
5656 /// used when a memcpy is turned into a memset because the source is a
5657 /// constant string pointer.
5658 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
5659 const TargetLowering &TLI,
5660 const ConstantDataArraySlice &Slice) {
5661 // Handle vector with all elements zero.
5662 if (Slice.Array == nullptr) {
5663 if (VT.isInteger())
5664 return DAG.getConstant(0, dl, VT);
5665 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
5666 return DAG.getConstantFP(0.0, dl, VT);
5667 else if (VT.isVector()) {
5668 unsigned NumElts = VT.getVectorNumElements();
5669 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
5670 return DAG.getNode(ISD::BITCAST, dl, VT,
5671 DAG.getConstant(0, dl,
5672 EVT::getVectorVT(*DAG.getContext(),
5673 EltVT, NumElts)));
5674 } else
5675 llvm_unreachable("Expected type!");
5678 assert(!VT.isVector() && "Can't handle vector type here!");
5679 unsigned NumVTBits = VT.getSizeInBits();
5680 unsigned NumVTBytes = NumVTBits / 8;
5681 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
5683 APInt Val(NumVTBits, 0);
5684 if (DAG.getDataLayout().isLittleEndian()) {
5685 for (unsigned i = 0; i != NumBytes; ++i)
5686 Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
5687 } else {
5688 for (unsigned i = 0; i != NumBytes; ++i)
5689 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
5692 // If the "cost" of materializing the integer immediate is less than the cost
5693 // of a load, then it is cost effective to turn the load into the immediate.
5694 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
5695 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
5696 return DAG.getConstant(Val, dl, VT);
5697 return SDValue(nullptr, 0);
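// Byte-packing sketch: loading "abcd" into an i32. Little-endian packs
// Val = 'a' | 'b'<<8 | 'c'<<16 | 'd'<<24 = 0x64636261; big-endian packs
// 0x61626364, matching the memory image a 4-byte load would produce.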
5700 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset,
5701 const SDLoc &DL) {
5702 EVT VT = Base.getValueType();
5703 return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT));
5706 /// Returns true if memcpy source is constant data.
5707 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
5708 uint64_t SrcDelta = 0;
5709 GlobalAddressSDNode *G = nullptr;
5710 if (Src.getOpcode() == ISD::GlobalAddress)
5711 G = cast<GlobalAddressSDNode>(Src);
5712 else if (Src.getOpcode() == ISD::ADD &&
5713 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
5714 Src.getOperand(1).getOpcode() == ISD::Constant) {
5715 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
5716 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
5718 if (!G)
5719 return false;
5721 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
5722 SrcDelta + G->getOffset());
5725 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
5726 // On Darwin, -Os means optimize for size without hurting performance, so
5727 // only really optimize for size when -Oz (MinSize) is used.
5728 if (MF.getTarget().getTargetTriple().isOSDarwin())
5729 return MF.getFunction().hasMinSize();
5730 return MF.getFunction().hasOptSize();
5733 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
5734 SmallVector<SDValue, 32> &OutChains, unsigned From,
5735 unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
5736 SmallVector<SDValue, 16> &OutStoreChains) {
5737 assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
5738 assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
5739 SmallVector<SDValue, 16> GluedLoadChains;
5740 for (unsigned i = From; i < To; ++i) {
5741 OutChains.push_back(OutLoadChains[i]);
5742 GluedLoadChains.push_back(OutLoadChains[i]);
5745 // Chain for all loads.
5746 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
5747 GluedLoadChains);
5749 for (unsigned i = From; i < To; ++i) {
5750 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
5751 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
5752 ST->getBasePtr(), ST->getMemoryVT(),
5753 ST->getMemOperand());
5754 OutChains.push_back(NewStore);
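// Resulting chain shape (a sketch): all loads in [From, To) hang off the
// incoming chain and are joined by one TokenFactor; every store is then
// re-issued with that TokenFactor as its chain, so no store in the batch
// can be scheduled before all of the batch's loads complete.
//
//   Chain -> Load0 .. LoadN -> TokenFactor -> Store0 .. StoreN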
5758 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
5759 SDValue Chain, SDValue Dst, SDValue Src,
5760 uint64_t Size, unsigned Alignment,
5761 bool isVol, bool AlwaysInline,
5762 MachinePointerInfo DstPtrInfo,
5763 MachinePointerInfo SrcPtrInfo) {
5764 // Turn a memcpy of undef to nop.
5765 // FIXME: We need to honor volatile even if Src is undef.
5766 if (Src.isUndef())
5767 return Chain;
5769 // Expand memcpy to a series of load and store ops if the size operand falls
5770 // below a certain threshold.
5771 // TODO: In the AlwaysInline case, if the size is big then generate a loop
5772 // rather than maybe a humongous number of loads and stores.
5773 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5774 const DataLayout &DL = DAG.getDataLayout();
5775 LLVMContext &C = *DAG.getContext();
5776 std::vector<EVT> MemOps;
5777 bool DstAlignCanChange = false;
5778 MachineFunction &MF = DAG.getMachineFunction();
5779 MachineFrameInfo &MFI = MF.getFrameInfo();
5780 bool OptSize = shouldLowerMemFuncForSize(MF);
5781 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5782 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5783 DstAlignCanChange = true;
5784 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
5785 if (Alignment > SrcAlign)
5786 SrcAlign = Alignment;
5787 ConstantDataArraySlice Slice;
5788 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
5789 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
5790 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
5792 if (!TLI.findOptimalMemOpLowering(
5793 MemOps, Limit, Size, (DstAlignCanChange ? 0 : Alignment),
5794 (isZeroConstant ? 0 : SrcAlign), /*IsMemset=*/false,
5795 /*ZeroMemset=*/false, /*MemcpyStrSrc=*/CopyFromConstant,
5796 /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(),
5797 SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes()))
5798 return SDValue();
5800 if (DstAlignCanChange) {
5801 Type *Ty = MemOps[0].getTypeForEVT(C);
5802 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
5804 // Don't promote to an alignment that would require dynamic stack
5805 // realignment.
5806 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
5807 if (!TRI->needsStackRealignment(MF))
5808 while (NewAlign > Alignment &&
5809 DL.exceedsNaturalStackAlignment(Align(NewAlign)))
5810 NewAlign /= 2;
5812 if (NewAlign > Alignment) {
5813 // Give the stack frame object a larger alignment if needed.
5814 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
5815 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
5816 Alignment = NewAlign;
5820 MachineMemOperand::Flags MMOFlags =
5821 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
5822 SmallVector<SDValue, 16> OutLoadChains;
5823 SmallVector<SDValue, 16> OutStoreChains;
5824 SmallVector<SDValue, 32> OutChains;
5825 unsigned NumMemOps = MemOps.size();
5826 uint64_t SrcOff = 0, DstOff = 0;
5827 for (unsigned i = 0; i != NumMemOps; ++i) {
5828 EVT VT = MemOps[i];
5829 unsigned VTSize = VT.getSizeInBits() / 8;
5830 SDValue Value, Store;
5832 if (VTSize > Size) {
5833 // Issuing an unaligned load / store pair that overlaps with the previous
5834 // pair. Adjust the offset accordingly.
5835 assert(i == NumMemOps-1 && i != 0);
5836 SrcOff -= VTSize - Size;
5837 DstOff -= VTSize - Size;
5840 if (CopyFromConstant &&
5841 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
5842 // It's unlikely a store of a vector immediate can be done in a single
5843 // instruction. It would require a load from a constant pool first.
5844 // We only handle zero vectors here.
5845 // FIXME: Handle other cases where store of vector immediate is done in
5846 // a single instruction.
5847 ConstantDataArraySlice SubSlice;
5848 if (SrcOff < Slice.Length) {
5849 SubSlice = Slice;
5850 SubSlice.move(SrcOff);
5851 } else {
5852 // This is an out-of-bounds access and hence UB. Pretend we read zero.
5853 SubSlice.Array = nullptr;
5854 SubSlice.Offset = 0;
5855 SubSlice.Length = VTSize;
5857 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
5858 if (Value.getNode()) {
5859 Store = DAG.getStore(
5860 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5861 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
5862 OutChains.push_back(Store);
5866 if (!Store.getNode()) {
5867 // The type might not be legal for the target. This should only happen
5868 // if the type is smaller than a legal type, as on PPC, so the right
5869 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
5870 // to Load/Store if NVT==VT.
5871 // FIXME: does the case above also need this?
5872 EVT NVT = TLI.getTypeToTransformTo(C, VT);
5873 assert(NVT.bitsGE(VT));
5875 bool isDereferenceable =
5876 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
5877 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
5878 if (isDereferenceable)
5879 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
5881 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
5882 DAG.getMemBasePlusOffset(Src, SrcOff, dl),
5883 SrcPtrInfo.getWithOffset(SrcOff), VT,
5884 MinAlign(SrcAlign, SrcOff), SrcMMOFlags);
5885 OutLoadChains.push_back(Value.getValue(1));
5887 Store = DAG.getTruncStore(
5888 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5889 DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
5890 OutStoreChains.push_back(Store);
5892 SrcOff += VTSize;
5893 DstOff += VTSize;
5894 Size -= VTSize;
5897 unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
5898 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
5899 unsigned NumLdStInMemcpy = OutStoreChains.size();
5901 if (NumLdStInMemcpy) {
5902 // A memcpy of constants may have been converted to a memset. In that
5903 // case we won't have loads, only stores, and with no loads there is
5904 // nothing to gang up.
5905 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
5906 // If the target does not care, just leave it as is.
5907 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
5908 OutChains.push_back(OutLoadChains[i]);
5909 OutChains.push_back(OutStoreChains[i]);
5911 } else {
5912 // Ld/st count is less than or equal to the limit set by the target.
5913 if (NumLdStInMemcpy <= GluedLdStLimit) {
5914 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
5915 NumLdStInMemcpy, OutLoadChains,
5916 OutStoreChains);
5917 } else {
5918 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
5919 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
5920 unsigned GlueIter = 0;
5922 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
5923 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
5924 unsigned IndexTo = NumLdStInMemcpy - GlueIter;
5926 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
5927 OutLoadChains, OutStoreChains);
5928 GlueIter += GluedLdStLimit;
5931 // Residual ld/st.
5932 if (RemainingLdStInMemcpy) {
5933 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
5934 RemainingLdStInMemcpy, OutLoadChains,
5935 OutStoreChains);
5940 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
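// Batching arithmetic sketch: with NumLdStInMemcpy = 10 and GluedLdStLimit
// = 4, NumberLdChain = 2 and RemainingLdStInMemcpy = 2. The loop glues the
// trailing batches [6, 10) and [2, 6), walking back from the end, and the
// residual call glues the leading [0, 2).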
5943 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
5944 SDValue Chain, SDValue Dst, SDValue Src,
5945 uint64_t Size, unsigned Align,
5946 bool isVol, bool AlwaysInline,
5947 MachinePointerInfo DstPtrInfo,
5948 MachinePointerInfo SrcPtrInfo) {
5949 // Turn a memmove of undef to nop.
5950 // FIXME: We need to honor volatile even if Src is undef.
5951 if (Src.isUndef())
5952 return Chain;
5954 // Expand memmove to a series of load and store ops if the size operand falls
5955 // below a certain threshold.
5956 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5957 const DataLayout &DL = DAG.getDataLayout();
5958 LLVMContext &C = *DAG.getContext();
5959 std::vector<EVT> MemOps;
5960 bool DstAlignCanChange = false;
5961 MachineFunction &MF = DAG.getMachineFunction();
5962 MachineFrameInfo &MFI = MF.getFrameInfo();
5963 bool OptSize = shouldLowerMemFuncForSize(MF);
5964 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5965 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5966 DstAlignCanChange = true;
5967 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
5968 if (Align > SrcAlign)
5969 SrcAlign = Align;
5970 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
5971 // FIXME: `AllowOverlap` should really be `!isVol` but there is a bug in
5972 // findOptimalMemOpLowering. Meanwhile, setting it to `false` produces the
5973 // correct code.
5974 bool AllowOverlap = false;
5975 if (!TLI.findOptimalMemOpLowering(
5976 MemOps, Limit, Size, (DstAlignCanChange ? 0 : Align), SrcAlign,
5977 /*IsMemset=*/false, /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
5978 AllowOverlap, DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
5979 MF.getFunction().getAttributes()))
5980 return SDValue();
5982 if (DstAlignCanChange) {
5983 Type *Ty = MemOps[0].getTypeForEVT(C);
5984 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
5985 if (NewAlign > Align) {
5986 // Give the stack frame object a larger alignment if needed.
5987 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
5988 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
5989 Align = NewAlign;
5993 MachineMemOperand::Flags MMOFlags =
5994 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
5995 uint64_t SrcOff = 0, DstOff = 0;
5996 SmallVector<SDValue, 8> LoadValues;
5997 SmallVector<SDValue, 8> LoadChains;
5998 SmallVector<SDValue, 8> OutChains;
5999 unsigned NumMemOps = MemOps.size();
6000 for (unsigned i = 0; i < NumMemOps; i++) {
6001 EVT VT = MemOps[i];
6002 unsigned VTSize = VT.getSizeInBits() / 8;
6003 SDValue Value;
6005 bool isDereferenceable =
6006 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6007 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6008 if (isDereferenceable)
6009 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6011 Value =
6012 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
6013 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags);
6014 LoadValues.push_back(Value);
6015 LoadChains.push_back(Value.getValue(1));
6016 SrcOff += VTSize;
6018 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
6019 OutChains.clear();
6020 for (unsigned i = 0; i < NumMemOps; i++) {
6021 EVT VT = MemOps[i];
6022 unsigned VTSize = VT.getSizeInBits() / 8;
6023 SDValue Store;
6025 Store = DAG.getStore(Chain, dl, LoadValues[i],
6026 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
6027 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
6028 OutChains.push_back(Store);
6029 DstOff += VTSize;
6032 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6035 /// Lower the call to 'memset' intrinsic function into a series of store
6036 /// operations.
6038 /// \param DAG Selection DAG where lowered code is placed.
6039 /// \param dl Link to corresponding IR location.
6040 /// \param Chain Control flow dependency.
6041 /// \param Dst Pointer to destination memory location.
6042 /// \param Src Value of byte to write into the memory.
6043 /// \param Size Number of bytes to write.
6044 /// \param Align Alignment of the destination in bytes.
6045 /// \param isVol True if destination is volatile.
6046 /// \param DstPtrInfo IR information on the memory pointer.
6047 /// \returns The new head in the control flow if lowering was successful; an
6048 /// empty SDValue otherwise.
6050 /// The function tries to replace 'llvm.memset' intrinsic with several store
6051 /// operations and value calculation code. This is usually profitable for small
6052 /// memory size.
6053 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
6054 SDValue Chain, SDValue Dst, SDValue Src,
6055 uint64_t Size, unsigned Align, bool isVol,
6056 MachinePointerInfo DstPtrInfo) {
6057 // Turn a memset of undef to nop.
6058 // FIXME: We need to honor volatile even if Src is undef.
6059 if (Src.isUndef())
6060 return Chain;
6062 // Expand memset to a series of load/store ops if the size operand
6063 // falls below a certain threshold.
6064 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6065 std::vector<EVT> MemOps;
6066 bool DstAlignCanChange = false;
6067 MachineFunction &MF = DAG.getMachineFunction();
6068 MachineFrameInfo &MFI = MF.getFrameInfo();
6069 bool OptSize = shouldLowerMemFuncForSize(MF);
6070 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6071 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6072 DstAlignCanChange = true;
6073 bool IsZeroVal =
6074 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
6075 if (!TLI.findOptimalMemOpLowering(
6076 MemOps, TLI.getMaxStoresPerMemset(OptSize), Size,
6077 (DstAlignCanChange ? 0 : Align), 0, /*IsMemset=*/true,
6078 /*ZeroMemset=*/IsZeroVal, /*MemcpyStrSrc=*/false,
6079 /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(), ~0u,
6080 MF.getFunction().getAttributes()))
6081 return SDValue();
6083 if (DstAlignCanChange) {
6084 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
6085 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
6086 if (NewAlign > Align) {
6087 // Give the stack frame object a larger alignment if needed.
6088 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
6089 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6090 Align = NewAlign;
6094 SmallVector<SDValue, 8> OutChains;
6095 uint64_t DstOff = 0;
6096 unsigned NumMemOps = MemOps.size();
6098 // Find the largest store and generate the bit pattern for it.
6099 EVT LargestVT = MemOps[0];
6100 for (unsigned i = 1; i < NumMemOps; i++)
6101 if (MemOps[i].bitsGT(LargestVT))
6102 LargestVT = MemOps[i];
6103 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
6105 for (unsigned i = 0; i < NumMemOps; i++) {
6106 EVT VT = MemOps[i];
6107 unsigned VTSize = VT.getSizeInBits() / 8;
6108 if (VTSize > Size) {
6109 // Issuing an unaligned load / store pair that overlaps with the previous
6110 // pair. Adjust the offset accordingly.
6111 assert(i == NumMemOps-1 && i != 0);
6112 DstOff -= VTSize - Size;
6115 // If this store is smaller than the largest store see whether we can get
6116 // the smaller value for free with a truncate.
6117 SDValue Value = MemSetValue;
6118 if (VT.bitsLT(LargestVT)) {
6119 if (!LargestVT.isVector() && !VT.isVector() &&
6120 TLI.isTruncateFree(LargestVT, VT))
6121 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
6122 else
6123 Value = getMemsetValue(Src, VT, DAG, dl);
6125 assert(Value.getValueType() == VT && "Value with wrong type.");
6126 SDValue Store = DAG.getStore(
6127 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
6128 DstPtrInfo.getWithOffset(DstOff), Align,
6129 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
6130 OutChains.push_back(Store);
6131 DstOff += VT.getSizeInBits() / 8;
6132 Size -= VTSize;
6135 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
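// Sketch of the largest-VT reuse above: a 16-byte memset of the byte 0xAB
// with MemOps = {i64, i64} computes the i64 pattern 0xABABABABABABABAB once
// and stores it twice. A smaller trailing op reuses a truncate of that value
// when TLI.isTruncateFree allows it, and otherwise recomputes the pattern at
// the narrower type.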
6138 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
6139 unsigned AS) {
6140 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
6141 // pointer operands can be losslessly bitcasted to pointers of address space 0
6142 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) {
6143 report_fatal_error("cannot lower memory intrinsic in address space " +
6144 Twine(AS));
6148 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
6149 SDValue Src, SDValue Size, unsigned Align,
6150 bool isVol, bool AlwaysInline, bool isTailCall,
6151 MachinePointerInfo DstPtrInfo,
6152 MachinePointerInfo SrcPtrInfo) {
6153 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
6155 // Check to see if we should lower the memcpy to loads and stores first.
6156 // For cases within the target-specified limits, this is the best choice.
6157 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6158 if (ConstantSize) {
6159 // Memcpy with size zero? Just return the original chain.
6160 if (ConstantSize->isNullValue())
6161 return Chain;
6163 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
6164 ConstantSize->getZExtValue(),Align,
6165 isVol, false, DstPtrInfo, SrcPtrInfo);
6166 if (Result.getNode())
6167 return Result;
6170 // Then check to see if we should lower the memcpy with target-specific
6171 // code. If the target chooses to do this, this is the next best.
6172 if (TSI) {
6173 SDValue Result = TSI->EmitTargetCodeForMemcpy(
6174 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
6175 DstPtrInfo, SrcPtrInfo);
6176 if (Result.getNode())
6177 return Result;
6180 // If we really need inline code and the target declined to provide it,
6181 // use a (potentially long) sequence of loads and stores.
6182 if (AlwaysInline) {
6183 assert(ConstantSize && "AlwaysInline requires a constant size!");
6184 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
6185 ConstantSize->getZExtValue(), Align, isVol,
6186 true, DstPtrInfo, SrcPtrInfo);
6189 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6190 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
6192 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
6193 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
6194 // respect volatile, so they may do things like read or write memory
6195 // beyond the given memory regions. But fixing this isn't easy, and most
6196 // people don't care.
6198 // Emit a library call.
6199 TargetLowering::ArgListTy Args;
6200 TargetLowering::ArgListEntry Entry;
6201 Entry.Ty = Type::getInt8PtrTy(*getContext());
6202 Entry.Node = Dst; Args.push_back(Entry);
6203 Entry.Node = Src; Args.push_back(Entry);
6205 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6206 Entry.Node = Size; Args.push_back(Entry);
6207 // FIXME: pass in SDLoc
6208 TargetLowering::CallLoweringInfo CLI(*this);
6209 CLI.setDebugLoc(dl)
6210 .setChain(Chain)
6211 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
6212 Dst.getValueType().getTypeForEVT(*getContext()),
6213 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
6214 TLI->getPointerTy(getDataLayout())),
6215 std::move(Args))
6216 .setDiscardResult()
6217 .setTailCall(isTailCall);
6219 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6220 return CallResult.second;
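// The fallback emitted above is equivalent to the C library call
// memcpy(dst, src, size) with the result discarded; a sketch of the
// signature being synthesized, assuming a standard C runtime:
//
//   void *memcpy(void *dst, const void *src, size_t size);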
6223 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl,
6224 SDValue Dst, unsigned DstAlign,
6225 SDValue Src, unsigned SrcAlign,
6226 SDValue Size, Type *SizeTy,
6227 unsigned ElemSz, bool isTailCall,
6228 MachinePointerInfo DstPtrInfo,
6229 MachinePointerInfo SrcPtrInfo) {
6230 // Emit a library call.
6231 TargetLowering::ArgListTy Args;
6232 TargetLowering::ArgListEntry Entry;
6233 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6234 Entry.Node = Dst;
6235 Args.push_back(Entry);
6237 Entry.Node = Src;
6238 Args.push_back(Entry);
6240 Entry.Ty = SizeTy;
6241 Entry.Node = Size;
6242 Args.push_back(Entry);
6244 RTLIB::Libcall LibraryCall =
6245 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6246 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6247 report_fatal_error("Unsupported element size");
6249 TargetLowering::CallLoweringInfo CLI(*this);
6250 CLI.setDebugLoc(dl)
6251 .setChain(Chain)
6252 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6253 Type::getVoidTy(*getContext()),
6254 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6255 TLI->getPointerTy(getDataLayout())),
6256 std::move(Args))
6257 .setDiscardResult()
6258 .setTailCall(isTailCall);
6260 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6261 return CallResult.second;
6262 }
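// Note on the element-size dispatch above: getMEMCPY_ELEMENT_UNORDERED_ATOMIC
// only has entries for element sizes 1, 2, 4, 8 and 16, each selecting the
// matching __llvm_memcpy_element_unordered_atomic_<N> runtime function; any
// other ElemSz yields UNKNOWN_LIBCALL and reaches report_fatal_error. For
// example:
//
//   RTLIB::Libcall LC = RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(4);
//   assert(LC == RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_4);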
6264 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
6265 SDValue Src, SDValue Size, unsigned Align,
6266 bool isVol, bool isTailCall,
6267 MachinePointerInfo DstPtrInfo,
6268 MachinePointerInfo SrcPtrInfo) {
6269 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
6271 // Check to see if we should lower the memmove to loads and stores first.
6272 // For cases within the target-specified limits, this is the best choice.
6273 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6274 if (ConstantSize) {
6275 // Memmove with size zero? Just return the original chain.
6276 if (ConstantSize->isNullValue())
6277 return Chain;
6279 SDValue Result =
6280 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
6281 ConstantSize->getZExtValue(), Align, isVol,
6282 false, DstPtrInfo, SrcPtrInfo);
6283 if (Result.getNode())
6284 return Result;
6285 }
6287 // Then check to see if we should lower the memmove with target-specific
6288 // code. If the target chooses to do this, this is the next best.
6289 if (TSI) {
6290 SDValue Result = TSI->EmitTargetCodeForMemmove(
6291 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
6292 if (Result.getNode())
6293 return Result;
6294 }
6296 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6297 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
6299 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
6300 // not be safe. See memcpy above for more details.
6302 // Emit a library call.
6303 TargetLowering::ArgListTy Args;
6304 TargetLowering::ArgListEntry Entry;
6305 Entry.Ty = Type::getInt8PtrTy(*getContext());
6306 Entry.Node = Dst; Args.push_back(Entry);
6307 Entry.Node = Src; Args.push_back(Entry);
6309 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6310 Entry.Node = Size; Args.push_back(Entry);
6311 // FIXME: pass in SDLoc
6312 TargetLowering::CallLoweringInfo CLI(*this);
6313 CLI.setDebugLoc(dl)
6314 .setChain(Chain)
6315 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
6316 Dst.getValueType().getTypeForEVT(*getContext()),
6317 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
6318 TLI->getPointerTy(getDataLayout())),
6319 std::move(Args))
6320 .setDiscardResult()
6321 .setTailCall(isTailCall);
6323 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6324 return CallResult.second;
6325 }
6327 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
6328 SDValue Dst, unsigned DstAlign,
6329 SDValue Src, unsigned SrcAlign,
6330 SDValue Size, Type *SizeTy,
6331 unsigned ElemSz, bool isTailCall,
6332 MachinePointerInfo DstPtrInfo,
6333 MachinePointerInfo SrcPtrInfo) {
6334 // Emit a library call.
6335 TargetLowering::ArgListTy Args;
6336 TargetLowering::ArgListEntry Entry;
6337 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6338 Entry.Node = Dst;
6339 Args.push_back(Entry);
6341 Entry.Node = Src;
6342 Args.push_back(Entry);
6344 Entry.Ty = SizeTy;
6345 Entry.Node = Size;
6346 Args.push_back(Entry);
6348 RTLIB::Libcall LibraryCall =
6349 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6350 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6351 report_fatal_error("Unsupported element size");
6353 TargetLowering::CallLoweringInfo CLI(*this);
6354 CLI.setDebugLoc(dl)
6355 .setChain(Chain)
6356 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6357 Type::getVoidTy(*getContext()),
6358 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6359 TLI->getPointerTy(getDataLayout())),
6360 std::move(Args))
6361 .setDiscardResult()
6362 .setTailCall(isTailCall);
6364 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6365 return CallResult.second;
6366 }
6368 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
6369 SDValue Src, SDValue Size, unsigned Align,
6370 bool isVol, bool isTailCall,
6371 MachinePointerInfo DstPtrInfo) {
6372 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
6374 // Check to see if we should lower the memset to stores first.
6375 // For cases within the target-specified limits, this is the best choice.
6376 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6377 if (ConstantSize) {
6378 // Memset with size zero? Just return the original chain.
6379 if (ConstantSize->isNullValue())
6380 return Chain;
6382 SDValue Result =
6383 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
6384 Align, isVol, DstPtrInfo);
6386 if (Result.getNode())
6387 return Result;
6388 }
6390 // Then check to see if we should lower the memset with target-specific
6391 // code. If the target chooses to do this, this is the next best.
6392 if (TSI) {
6393 SDValue Result = TSI->EmitTargetCodeForMemset(
6394 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
6395 if (Result.getNode())
6396 return Result;
6397 }
6399 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6401 // Emit a library call.
6402 TargetLowering::ArgListTy Args;
6403 TargetLowering::ArgListEntry Entry;
6404 Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext());
6405 Args.push_back(Entry);
6406 Entry.Node = Src;
6407 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
6408 Args.push_back(Entry);
6409 Entry.Node = Size;
6410 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6411 Args.push_back(Entry);
6413 // FIXME: pass in SDLoc
6414 TargetLowering::CallLoweringInfo CLI(*this);
6415 CLI.setDebugLoc(dl)
6416 .setChain(Chain)
6417 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
6418 Dst.getValueType().getTypeForEVT(*getContext()),
6419 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
6420 TLI->getPointerTy(getDataLayout())),
6421 std::move(Args))
6422 .setDiscardResult()
6423 .setTailCall(isTailCall);
6425 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6426 return CallResult.second;
6427 }
6429 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl,
6430 SDValue Dst, unsigned DstAlign,
6431 SDValue Value, SDValue Size, Type *SizeTy,
6432 unsigned ElemSz, bool isTailCall,
6433 MachinePointerInfo DstPtrInfo) {
6434 // Emit a library call.
6435 TargetLowering::ArgListTy Args;
6436 TargetLowering::ArgListEntry Entry;
6437 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6438 Entry.Node = Dst;
6439 Args.push_back(Entry);
6441 Entry.Ty = Type::getInt8Ty(*getContext());
6442 Entry.Node = Value;
6443 Args.push_back(Entry);
6445 Entry.Ty = SizeTy;
6446 Entry.Node = Size;
6447 Args.push_back(Entry);
6449 RTLIB::Libcall LibraryCall =
6450 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6451 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6452 report_fatal_error("Unsupported element size");
6454 TargetLowering::CallLoweringInfo CLI(*this);
6455 CLI.setDebugLoc(dl)
6456 .setChain(Chain)
6457 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6458 Type::getVoidTy(*getContext()),
6459 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6460 TLI->getPointerTy(getDataLayout())),
6461 std::move(Args))
6462 .setDiscardResult()
6463 .setTailCall(isTailCall);
6465 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6466 return CallResult.second;
6467 }
6469 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6470 SDVTList VTList, ArrayRef<SDValue> Ops,
6471 MachineMemOperand *MMO) {
6472 FoldingSetNodeID ID;
6473 ID.AddInteger(MemVT.getRawBits());
6474 AddNodeIDNode(ID, Opcode, VTList, Ops);
6475 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6476 void* IP = nullptr;
6477 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6478 cast<AtomicSDNode>(E)->refineAlignment(MMO);
6479 return SDValue(E, 0);
6480 }
6482 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6483 VTList, MemVT, MMO);
6484 createOperands(N, Ops);
6486 CSEMap.InsertNode(N, IP);
6487 InsertNode(N);
6488 return SDValue(N, 0);
6489 }
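// The FoldingSetNodeID lookup above gives atomics normal CSE behavior: two
// structurally identical requests yield one node. Non-compiled sketch
// (assumes Chain, Ptr, Val and MMO are in scope):
//
//   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
//   SDValue Ops[] = {Chain, Ptr, Val};
//   SDValue A = DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MVT::i32, VTs, Ops, MMO);
//   SDValue B = DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MVT::i32, VTs, Ops, MMO);
//   assert(A.getNode() == B.getNode() && "second request CSE'd to the first");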
6491 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
6492 EVT MemVT, SDVTList VTs, SDValue Chain,
6493 SDValue Ptr, SDValue Cmp, SDValue Swp,
6494 MachineMemOperand *MMO) {
6495 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
6496 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
6497 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
6499 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
6500 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6501 }
6503 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6504 SDValue Chain, SDValue Ptr, SDValue Val,
6505 MachineMemOperand *MMO) {
6506 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
6507 Opcode == ISD::ATOMIC_LOAD_SUB ||
6508 Opcode == ISD::ATOMIC_LOAD_AND ||
6509 Opcode == ISD::ATOMIC_LOAD_CLR ||
6510 Opcode == ISD::ATOMIC_LOAD_OR ||
6511 Opcode == ISD::ATOMIC_LOAD_XOR ||
6512 Opcode == ISD::ATOMIC_LOAD_NAND ||
6513 Opcode == ISD::ATOMIC_LOAD_MIN ||
6514 Opcode == ISD::ATOMIC_LOAD_MAX ||
6515 Opcode == ISD::ATOMIC_LOAD_UMIN ||
6516 Opcode == ISD::ATOMIC_LOAD_UMAX ||
6517 Opcode == ISD::ATOMIC_LOAD_FADD ||
6518 Opcode == ISD::ATOMIC_LOAD_FSUB ||
6519 Opcode == ISD::ATOMIC_SWAP ||
6520 Opcode == ISD::ATOMIC_STORE) &&
6521 "Invalid Atomic Op");
6523 EVT VT = Val.getValueType();
6525 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
6526 getVTList(VT, MVT::Other);
6527 SDValue Ops[] = {Chain, Ptr, Val};
6528 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6529 }
6531 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6532 EVT VT, SDValue Chain, SDValue Ptr,
6533 MachineMemOperand *MMO) {
6534 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
6536 SDVTList VTs = getVTList(VT, MVT::Other);
6537 SDValue Ops[] = {Chain, Ptr};
6538 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6539 }
6541 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
6542 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
6543 if (Ops.size() == 1)
6544 return Ops[0];
6546 SmallVector<EVT, 4> VTs;
6547 VTs.reserve(Ops.size());
6548 for (unsigned i = 0; i < Ops.size(); ++i)
6549 VTs.push_back(Ops[i].getValueType());
6550 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
6551 }
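// Minimal usage sketch (assumes Lo and Hi are i32 SDValues in scope):
//
//   SDValue Parts[] = {Lo, Hi};
//   SDValue Merged = DAG.getMergeValues(Parts, DL);
//   // Merged.getValue(0) is Lo and Merged.getValue(1) is Hi; a one-element
//   // array is returned directly without creating a MERGE_VALUES node.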
6553 SDValue SelectionDAG::getMemIntrinsicNode(
6554 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
6555 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align,
6556 MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
6557 if (Align == 0) // Ensure that codegen never sees alignment 0
6558 Align = getEVTAlignment(MemVT);
6560 if (!Size)
6561 Size = MemVT.getStoreSize();
6563 MachineFunction &MF = getMachineFunction();
6564 MachineMemOperand *MMO =
6565 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align, AAInfo);
6567 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
6568 }
6570 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
6571 SDVTList VTList,
6572 ArrayRef<SDValue> Ops, EVT MemVT,
6573 MachineMemOperand *MMO) {
6574 assert((Opcode == ISD::INTRINSIC_VOID ||
6575 Opcode == ISD::INTRINSIC_W_CHAIN ||
6576 Opcode == ISD::PREFETCH ||
6577 Opcode == ISD::LIFETIME_START ||
6578 Opcode == ISD::LIFETIME_END ||
6579 ((int)Opcode <= std::numeric_limits<int>::max() &&
6580 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
6581 "Opcode is not a memory-accessing opcode!");
6583 // Memoize the node unless it returns a flag.
6584 MemIntrinsicSDNode *N;
6585 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
6586 FoldingSetNodeID ID;
6587 AddNodeIDNode(ID, Opcode, VTList, Ops);
6588 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
6589 Opcode, dl.getIROrder(), VTList, MemVT, MMO));
6590 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6591 void *IP = nullptr;
6592 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6593 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
6594 return SDValue(E, 0);
6595 }
6597 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6598 VTList, MemVT, MMO);
6599 createOperands(N, Ops);
6601 CSEMap.InsertNode(N, IP);
6602 } else {
6603 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6604 VTList, MemVT, MMO);
6605 createOperands(N, Ops);
6606 }
6607 InsertNode(N);
6608 SDValue V(N, 0);
6609 NewSDValueDbgMsg(V, "Creating new node: ", this);
6610 return V;
6611 }
6613 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl,
6614 SDValue Chain, int FrameIndex,
6615 int64_t Size, int64_t Offset) {
6616 const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
6617 const auto VTs = getVTList(MVT::Other);
6618 SDValue Ops[2] = {
6619 Chain,
6620 getFrameIndex(FrameIndex,
6621 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()),
6622 true)};
6624 FoldingSetNodeID ID;
6625 AddNodeIDNode(ID, Opcode, VTs, Ops);
6626 ID.AddInteger(FrameIndex);
6627 ID.AddInteger(Size);
6628 ID.AddInteger(Offset);
6629 void *IP = nullptr;
6630 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
6631 return SDValue(E, 0);
6633 LifetimeSDNode *N = newSDNode<LifetimeSDNode>(
6634 Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset);
6635 createOperands(N, Ops);
6636 CSEMap.InsertNode(N, IP);
6637 InsertNode(N);
6638 SDValue V(N, 0);
6639 NewSDValueDbgMsg(V, "Creating new node: ", this);
6640 return V;
6641 }
6643 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
6644 /// MachinePointerInfo record from it. This is particularly useful because the
6645 /// code generator has many cases where it doesn't bother passing in a
6646 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
6647 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
6648 SelectionDAG &DAG, SDValue Ptr,
6649 int64_t Offset = 0) {
6650 // If this is FI+Offset, we can model it.
6651 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
6652 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
6653 FI->getIndex(), Offset);
6655 // If this is (FI+Offset1)+Offset2, we can model it.
6656 if (Ptr.getOpcode() != ISD::ADD ||
6657 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
6658 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
6659 return Info;
6661 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6662 return MachinePointerInfo::getFixedStack(
6663 DAG.getMachineFunction(), FI,
6664 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
6665 }
6667 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
6668 /// MachinePointerInfo record from it. This is particularly useful because the
6669 /// code generator has many cases where it doesn't bother passing in a
6670 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
6671 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
6672 SelectionDAG &DAG, SDValue Ptr,
6673 SDValue OffsetOp) {
6674 // If the 'Offset' value isn't a constant, we can't handle this.
6675 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
6676 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
6677 if (OffsetOp.isUndef())
6678 return InferPointerInfo(Info, DAG, Ptr);
6679 return Info;
6680 }
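// Worked example of the inference above (hypothetical frame index 3; PtrVT
// is the pointer type): a memory access whose address is (FI3 + 8) and whose
// caller passed a null MachinePointerInfo is modeled as a fixed-stack access.
//
//   SDValue FI = DAG.getFrameIndex(3, PtrVT);
//   SDValue Addr = DAG.getNode(ISD::ADD, DL, PtrVT, FI,
//                              DAG.getConstant(8, DL, PtrVT));
//   // InferPointerInfo recovers
//   // MachinePointerInfo::getFixedStack(MF, /*FI=*/3, /*Offset=*/8),
//   // so alias analysis still knows which stack slot is touched.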
6682 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
6683 EVT VT, const SDLoc &dl, SDValue Chain,
6684 SDValue Ptr, SDValue Offset,
6685 MachinePointerInfo PtrInfo, EVT MemVT,
6686 unsigned Alignment,
6687 MachineMemOperand::Flags MMOFlags,
6688 const AAMDNodes &AAInfo, const MDNode *Ranges) {
6689 assert(Chain.getValueType() == MVT::Other &&
6690 "Invalid chain type");
6691 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6692 Alignment = getEVTAlignment(MemVT);
6694 MMOFlags |= MachineMemOperand::MOLoad;
6695 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
6696 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
6697 // clients.
6698 if (PtrInfo.V.isNull())
6699 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
6701 MachineFunction &MF = getMachineFunction();
6702 MachineMemOperand *MMO = MF.getMachineMemOperand(
6703 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges);
6704 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
6705 }
6707 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
6708 EVT VT, const SDLoc &dl, SDValue Chain,
6709 SDValue Ptr, SDValue Offset, EVT MemVT,
6710 MachineMemOperand *MMO) {
6711 if (VT == MemVT) {
6712 ExtType = ISD::NON_EXTLOAD;
6713 } else if (ExtType == ISD::NON_EXTLOAD) {
6714 assert(VT == MemVT && "Non-extending load from different memory type!");
6715 } else {
6716 // Extending load.
6717 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
6718 "Should only be an extending load, not truncating!");
6719 assert(VT.isInteger() == MemVT.isInteger() &&
6720 "Cannot convert from FP to Int or Int -> FP!");
6721 assert(VT.isVector() == MemVT.isVector() &&
6722 "Cannot use an ext load to convert to or from a vector!");
6723 assert((!VT.isVector() ||
6724 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
6725 "Cannot use an ext load to change the number of vector elements!");
6728 bool Indexed = AM != ISD::UNINDEXED;
6729 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
6731 SDVTList VTs = Indexed ?
6732 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
6733 SDValue Ops[] = { Chain, Ptr, Offset };
6734 FoldingSetNodeID ID;
6735 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
6736 ID.AddInteger(MemVT.getRawBits());
6737 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
6738 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
6739 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6740 void *IP = nullptr;
6741 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6742 cast<LoadSDNode>(E)->refineAlignment(MMO);
6743 return SDValue(E, 0);
6744 }
6745 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
6746 ExtType, MemVT, MMO);
6747 createOperands(N, Ops);
6749 CSEMap.InsertNode(N, IP);
6750 InsertNode(N);
6751 SDValue V(N, 0);
6752 NewSDValueDbgMsg(V, "Creating new node: ", this);
6753 return V;
6754 }
6756 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
6757 SDValue Ptr, MachinePointerInfo PtrInfo,
6758 unsigned Alignment,
6759 MachineMemOperand::Flags MMOFlags,
6760 const AAMDNodes &AAInfo, const MDNode *Ranges) {
6761 SDValue Undef = getUNDEF(Ptr.getValueType());
6762 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
6763 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
6764 }
6766 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
6767 SDValue Ptr, MachineMemOperand *MMO) {
6768 SDValue Undef = getUNDEF(Ptr.getValueType());
6769 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
6770 VT, MMO);
6771 }
6773 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
6774 EVT VT, SDValue Chain, SDValue Ptr,
6775 MachinePointerInfo PtrInfo, EVT MemVT,
6776 unsigned Alignment,
6777 MachineMemOperand::Flags MMOFlags,
6778 const AAMDNodes &AAInfo) {
6779 SDValue Undef = getUNDEF(Ptr.getValueType());
6780 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
6781 MemVT, Alignment, MMOFlags, AAInfo);
6782 }
6784 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
6785 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
6786 MachineMemOperand *MMO) {
6787 SDValue Undef = getUNDEF(Ptr.getValueType());
6788 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
6789 MemVT, MMO);
6790 }
6792 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
6793 SDValue Base, SDValue Offset,
6794 ISD::MemIndexedMode AM) {
6795 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
6796 assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
6797 // Don't propagate the invariant or dereferenceable flags.
6798 auto MMOFlags =
6799 LD->getMemOperand()->getFlags() &
6800 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
6801 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
6802 LD->getChain(), Base, Offset, LD->getPointerInfo(),
6803 LD->getMemoryVT(), LD->getAlignment(), MMOFlags,
6804 LD->getAAInfo());
6805 }
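// Usage sketch (assumes OrigLoad is an unindexed load and Base/Offset are in
// scope): converting a plain load into its pre-incremented form.
//
//   SDValue Pre = DAG.getIndexedLoad(OrigLoad, DL, Base, Offset, ISD::PRE_INC);
//   // Result 0 is the loaded value, result 1 the updated pointer
//   // (Base + Offset), and result 2 the output chain.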
6807 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6808 SDValue Ptr, MachinePointerInfo PtrInfo,
6809 unsigned Alignment,
6810 MachineMemOperand::Flags MMOFlags,
6811 const AAMDNodes &AAInfo) {
6812 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
6813 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6814 Alignment = getEVTAlignment(Val.getValueType());
6816 MMOFlags |= MachineMemOperand::MOStore;
6817 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
6819 if (PtrInfo.V.isNull())
6820 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
6822 MachineFunction &MF = getMachineFunction();
6823 MachineMemOperand *MMO = MF.getMachineMemOperand(
6824 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo);
6825 return getStore(Chain, dl, Val, Ptr, MMO);
6826 }
6828 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6829 SDValue Ptr, MachineMemOperand *MMO) {
6830 assert(Chain.getValueType() == MVT::Other &&
6831 "Invalid chain type");
6832 EVT VT = Val.getValueType();
6833 SDVTList VTs = getVTList(MVT::Other);
6834 SDValue Undef = getUNDEF(Ptr.getValueType());
6835 SDValue Ops[] = { Chain, Val, Ptr, Undef };
6836 FoldingSetNodeID ID;
6837 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6838 ID.AddInteger(VT.getRawBits());
6839 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
6840 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
6841 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6842 void *IP = nullptr;
6843 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6844 cast<StoreSDNode>(E)->refineAlignment(MMO);
6845 return SDValue(E, 0);
6846 }
6847 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6848 ISD::UNINDEXED, false, VT, MMO);
6849 createOperands(N, Ops);
6851 CSEMap.InsertNode(N, IP);
6852 InsertNode(N);
6853 SDValue V(N, 0);
6854 NewSDValueDbgMsg(V, "Creating new node: ", this);
6855 return V;
6856 }
6858 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6859 SDValue Ptr, MachinePointerInfo PtrInfo,
6860 EVT SVT, unsigned Alignment,
6861 MachineMemOperand::Flags MMOFlags,
6862 const AAMDNodes &AAInfo) {
6863 assert(Chain.getValueType() == MVT::Other &&
6864 "Invalid chain type");
6865 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6866 Alignment = getEVTAlignment(SVT);
6868 MMOFlags |= MachineMemOperand::MOStore;
6869 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
6871 if (PtrInfo.V.isNull())
6872 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
6874 MachineFunction &MF = getMachineFunction();
6875 MachineMemOperand *MMO = MF.getMachineMemOperand(
6876 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
6877 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
6878 }
6880 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6881 SDValue Ptr, EVT SVT,
6882 MachineMemOperand *MMO) {
6883 EVT VT = Val.getValueType();
6885 assert(Chain.getValueType() == MVT::Other &&
6886 "Invalid chain type");
6887 if (VT == SVT)
6888 return getStore(Chain, dl, Val, Ptr, MMO);
6890 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
6891 "Should only be a truncating store, not extending!");
6892 assert(VT.isInteger() == SVT.isInteger() &&
6893 "Can't do FP-INT conversion!");
6894 assert(VT.isVector() == SVT.isVector() &&
6895 "Cannot use trunc store to convert to or from a vector!");
6896 assert((!VT.isVector() ||
6897 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
6898 "Cannot use trunc store to change the number of vector elements!");
6900 SDVTList VTs = getVTList(MVT::Other);
6901 SDValue Undef = getUNDEF(Ptr.getValueType());
6902 SDValue Ops[] = { Chain, Val, Ptr, Undef };
6903 FoldingSetNodeID ID;
6904 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6905 ID.AddInteger(SVT.getRawBits());
6906 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
6907 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
6908 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6909 void *IP = nullptr;
6910 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6911 cast<StoreSDNode>(E)->refineAlignment(MMO);
6912 return SDValue(E, 0);
6913 }
6914 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6915 ISD::UNINDEXED, true, SVT, MMO);
6916 createOperands(N, Ops);
6918 CSEMap.InsertNode(N, IP);
6919 InsertNode(N);
6920 SDValue V(N, 0);
6921 NewSDValueDbgMsg(V, "Creating new node: ", this);
6922 return V;
6923 }
6925 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
6926 SDValue Base, SDValue Offset,
6927 ISD::MemIndexedMode AM) {
6928 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
6929 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
6930 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
6931 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
6932 FoldingSetNodeID ID;
6933 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6934 ID.AddInteger(ST->getMemoryVT().getRawBits());
6935 ID.AddInteger(ST->getRawSubclassData());
6936 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
6937 void *IP = nullptr;
6938 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
6939 return SDValue(E, 0);
6941 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
6942 ST->isTruncatingStore(), ST->getMemoryVT(),
6943 ST->getMemOperand());
6944 createOperands(N, Ops);
6946 CSEMap.InsertNode(N, IP);
6947 InsertNode(N);
6948 SDValue V(N, 0);
6949 NewSDValueDbgMsg(V, "Creating new node: ", this);
6950 return V;
6951 }
6953 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
6954 SDValue Ptr, SDValue Mask, SDValue PassThru,
6955 EVT MemVT, MachineMemOperand *MMO,
6956 ISD::LoadExtType ExtTy, bool isExpanding) {
6957 SDVTList VTs = getVTList(VT, MVT::Other);
6958 SDValue Ops[] = { Chain, Ptr, Mask, PassThru };
6959 FoldingSetNodeID ID;
6960 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
6961 ID.AddInteger(MemVT.getRawBits());
6962 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
6963 dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO));
6964 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6965 void *IP = nullptr;
6966 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6967 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
6968 return SDValue(E, 0);
6969 }
6970 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6971 ExtTy, isExpanding, MemVT, MMO);
6972 createOperands(N, Ops);
6974 CSEMap.InsertNode(N, IP);
6975 InsertNode(N);
6976 SDValue V(N, 0);
6977 NewSDValueDbgMsg(V, "Creating new node: ", this);
6978 return V;
6979 }
6981 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
6982 SDValue Val, SDValue Ptr, SDValue Mask,
6983 EVT MemVT, MachineMemOperand *MMO,
6984 bool IsTruncating, bool IsCompressing) {
6985 assert(Chain.getValueType() == MVT::Other &&
6986 "Invalid chain type");
6987 SDVTList VTs = getVTList(MVT::Other);
6988 SDValue Ops[] = { Chain, Val, Ptr, Mask };
6989 FoldingSetNodeID ID;
6990 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
6991 ID.AddInteger(MemVT.getRawBits());
6992 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
6993 dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO));
6994 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6995 void *IP = nullptr;
6996 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6997 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
6998 return SDValue(E, 0);
6999 }
7000 auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7001 IsTruncating, IsCompressing, MemVT, MMO);
7002 createOperands(N, Ops);
7004 CSEMap.InsertNode(N, IP);
7005 InsertNode(N);
7006 SDValue V(N, 0);
7007 NewSDValueDbgMsg(V, "Creating new node: ", this);
7008 return V;
7009 }
7011 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
7012 ArrayRef<SDValue> Ops,
7013 MachineMemOperand *MMO,
7014 ISD::MemIndexType IndexType) {
7015 assert(Ops.size() == 6 && "Incompatible number of operands");
7017 FoldingSetNodeID ID;
7018 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
7019 ID.AddInteger(VT.getRawBits());
7020 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
7021 dl.getIROrder(), VTs, VT, MMO, IndexType));
7022 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7023 void *IP = nullptr;
7024 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7025 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
7026 return SDValue(E, 0);
7027 }
7029 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
7030 VTs, VT, MMO, IndexType);
7031 createOperands(N, Ops);
7033 assert(N->getPassThru().getValueType() == N->getValueType(0) &&
7034 "Incompatible type of the PassThru value in MaskedGatherSDNode");
7035 assert(N->getMask().getValueType().getVectorNumElements() ==
7036 N->getValueType(0).getVectorNumElements() &&
7037 "Vector width mismatch between mask and data");
7038 assert(N->getIndex().getValueType().getVectorNumElements() >=
7039 N->getValueType(0).getVectorNumElements() &&
7040 "Vector width mismatch between index and data");
7041 assert(isa<ConstantSDNode>(N->getScale()) &&
7042 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
7043 "Scale should be a constant power of 2");
7045 CSEMap.InsertNode(N, IP);
7046 InsertNode(N);
7047 SDValue V(N, 0);
7048 NewSDValueDbgMsg(V, "Creating new node: ", this);
7049 return V;
7050 }
7052 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
7053 ArrayRef<SDValue> Ops,
7054 MachineMemOperand *MMO,
7055 ISD::MemIndexType IndexType) {
7056 assert(Ops.size() == 6 && "Incompatible number of operands");
7058 FoldingSetNodeID ID;
7059 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
7060 ID.AddInteger(VT.getRawBits());
7061 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
7062 dl.getIROrder(), VTs, VT, MMO, IndexType));
7063 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7064 void *IP = nullptr;
7065 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7066 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
7067 return SDValue(E, 0);
7068 }
7069 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
7070 VTs, VT, MMO, IndexType);
7071 createOperands(N, Ops);
7073 assert(N->getMask().getValueType().getVectorNumElements() ==
7074 N->getValue().getValueType().getVectorNumElements() &&
7075 "Vector width mismatch between mask and data");
7076 assert(N->getIndex().getValueType().getVectorNumElements() >=
7077 N->getValue().getValueType().getVectorNumElements() &&
7078 "Vector width mismatch between index and data");
7079 assert(isa<ConstantSDNode>(N->getScale()) &&
7080 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
7081 "Scale should be a constant power of 2");
7083 CSEMap.InsertNode(N, IP);
7084 InsertNode(N);
7085 SDValue V(N, 0);
7086 NewSDValueDbgMsg(V, "Creating new node: ", this);
7087 return V;
7088 }
7090 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
7091 // select undef, T, F --> T (if T is a constant), otherwise F
7092 // select ?, undef, F --> F
7093 // select ?, T, undef --> T
7094 if (Cond.isUndef())
7095 return isConstantValueOfAnyType(T) ? T : F;
7096 if (T.isUndef())
7097 return F;
7098 if (F.isUndef())
7099 return T;
7101 // select true, T, F --> T
7102 // select false, T, F --> F
7103 if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
7104 return CondC->isNullValue() ? F : T;
7106 // TODO: This should simplify VSELECT with constant condition using something
7107 // like this (but check boolean contents to be complete?):
7108 // if (ISD::isBuildVectorAllOnes(Cond.getNode()))
7109 // return T;
7110 // if (ISD::isBuildVectorAllZeros(Cond.getNode()))
7111 // return F;
7113 // select ?, T, T --> T
7114 if (T == F)
7115 return T;
7117 return SDValue();
7118 }
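// The folds above in concrete form, as a non-compiled sketch (T and F are
// SDValues of matching type):
//
//   SDValue True = DAG.getConstant(1, DL, MVT::i1);
//   DAG.simplifySelect(True, T, F);                  // returns T
//   DAG.simplifySelect(DAG.getUNDEF(MVT::i1), T, F); // T if T is constant,
//                                                    // otherwise F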
7120 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
7121 // shift undef, Y --> 0 (can always assume that the undef value is 0)
7122 if (X.isUndef())
7123 return getConstant(0, SDLoc(X.getNode()), X.getValueType());
7124 // shift X, undef --> undef (because it may shift by the bitwidth)
7125 if (Y.isUndef())
7126 return getUNDEF(X.getValueType());
7128 // shift 0, Y --> 0
7129 // shift X, 0 --> X
7130 if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
7131 return X;
7133 // shift X, C >= bitwidth(X) --> undef
7134 // All vector elements must be too big (or undef) to avoid partial undefs.
7135 auto isShiftTooBig = [X](ConstantSDNode *Val) {
7136 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
7137 };
7138 if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
7139 return getUNDEF(X.getValueType());
7141 return SDValue();
7142 }
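// Example of the oversized-shift rule (X is an i32 SDValue): a constant
// shift amount of 40 is >= the 32-bit width, so the shift folds to undef.
//
//   SDValue Amt = DAG.getConstant(40, DL, MVT::i32);
//   DAG.simplifyShift(X, Amt);   // returns getUNDEF(MVT::i32)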
7144 // TODO: Use fast-math-flags to enable more simplifications.
7145 SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y) {
7146 ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true);
7147 if (!YC)
7148 return SDValue();
7150 // X + -0.0 --> X
7151 if (Opcode == ISD::FADD)
7152 if (YC->getValueAPF().isNegZero())
7153 return X;
7155 // X - +0.0 --> X
7156 if (Opcode == ISD::FSUB)
7157 if (YC->getValueAPF().isPosZero())
7158 return X;
7160 // X * 1.0 --> X
7161 // X / 1.0 --> X
7162 if (Opcode == ISD::FMUL || Opcode == ISD::FDIV)
7163 if (YC->getValueAPF().isExactlyValue(1.0))
7164 return X;
7166 return SDValue();
7167 }
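// The identities above in concrete form (X is an f32 SDValue); the sign of
// zero matters, which is why FADD checks -0.0 while FSUB checks +0.0:
//
//   DAG.simplifyFPBinop(ISD::FADD, X, DAG.getConstantFP(-0.0, DL, MVT::f32));
//   DAG.simplifyFPBinop(ISD::FMUL, X, DAG.getConstantFP(1.0, DL, MVT::f32));
//   // Both return X; any unhandled pairing returns the empty SDValue().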
7169 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
7170 SDValue Ptr, SDValue SV, unsigned Align) {
7171 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
7172 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
7173 }
7175 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7176 ArrayRef<SDUse> Ops) {
7177 switch (Ops.size()) {
7178 case 0: return getNode(Opcode, DL, VT);
7179 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
7180 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
7181 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
7182 default: break;
7183 }
7185 // Copy from an SDUse array into an SDValue array for use with
7186 // the regular getNode logic.
7187 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
7188 return getNode(Opcode, DL, VT, NewOps);
7189 }
7191 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7192 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
7193 unsigned NumOps = Ops.size();
7194 switch (NumOps) {
7195 case 0: return getNode(Opcode, DL, VT);
7196 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
7197 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
7198 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
7199 default: break;
7200 }
7202 switch (Opcode) {
7203 default: break;
7204 case ISD::BUILD_VECTOR:
7205 // Attempt to simplify BUILD_VECTOR.
7206 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
7207 return V;
7208 break;
7209 case ISD::CONCAT_VECTORS:
7210 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
7211 return V;
7212 break;
7213 case ISD::SELECT_CC:
7214 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
7215 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
7216 "LHS and RHS of condition must have same type!");
7217 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
7218 "True and False arms of SelectCC must have same type!");
7219 assert(Ops[2].getValueType() == VT &&
7220 "select_cc node must be of same type as true and false value!");
7221 break;
7222 case ISD::BR_CC:
7223 assert(NumOps == 5 && "BR_CC takes 5 operands!");
7224 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
7225 "LHS/RHS of comparison should match types!");
7226 break;
7227 }
7229 // Memoize nodes.
7230 SDNode *N;
7231 SDVTList VTs = getVTList(VT);
7233 if (VT != MVT::Glue) {
7234 FoldingSetNodeID ID;
7235 AddNodeIDNode(ID, Opcode, VTs, Ops);
7236 void *IP = nullptr;
7238 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
7239 return SDValue(E, 0);
7241 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7242 createOperands(N, Ops);
7244 CSEMap.InsertNode(N, IP);
7245 } else {
7246 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7247 createOperands(N, Ops);
7248 }
7250 InsertNode(N);
7251 SDValue V(N, 0);
7252 NewSDValueDbgMsg(V, "Creating new node: ", this);
7253 return V;
7254 }
7256 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
7257 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
7258 return getNode(Opcode, DL, getVTList(ResultTys), Ops);
7259 }
7261 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7262 ArrayRef<SDValue> Ops) {
7263 if (VTList.NumVTs == 1)
7264 return getNode(Opcode, DL, VTList.VTs[0], Ops);
7266 #if 0
7267 switch (Opcode) {
7268 // FIXME: figure out how to safely handle things like
7269 // int foo(int x) { return 1 << (x & 255); }
7270 // int bar() { return foo(256); }
7271 case ISD::SRA_PARTS:
7272 case ISD::SRL_PARTS:
7273 case ISD::SHL_PARTS:
7274 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
7275 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
7276 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
7277 else if (N3.getOpcode() == ISD::AND)
7278 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
7279 // If the and is only masking out bits that cannot affect the shift,
7280 // eliminate the and.
7281 unsigned NumBits = VT.getScalarSizeInBits()*2;
7282 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
7283 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
7284 }
7285 break;
7286 }
7287 #endif
7289 // Memoize the node unless it returns a flag.
7290 SDNode *N;
7291 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
7292 FoldingSetNodeID ID;
7293 AddNodeIDNode(ID, Opcode, VTList, Ops);
7294 void *IP = nullptr;
7295 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
7296 return SDValue(E, 0);
7298 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
7299 createOperands(N, Ops);
7300 CSEMap.InsertNode(N, IP);
7301 } else {
7302 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
7303 createOperands(N, Ops);
7304 }
7305 InsertNode(N);
7306 SDValue V(N, 0);
7307 NewSDValueDbgMsg(V, "Creating new node: ", this);
7308 return V;
7309 }
7311 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
7312 SDVTList VTList) {
7313 return getNode(Opcode, DL, VTList, None);
7314 }
7316 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7317 SDValue N1) {
7318 SDValue Ops[] = { N1 };
7319 return getNode(Opcode, DL, VTList, Ops);
7320 }
7322 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7323 SDValue N1, SDValue N2) {
7324 SDValue Ops[] = { N1, N2 };
7325 return getNode(Opcode, DL, VTList, Ops);
7326 }
7328 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7329 SDValue N1, SDValue N2, SDValue N3) {
7330 SDValue Ops[] = { N1, N2, N3 };
7331 return getNode(Opcode, DL, VTList, Ops);
7332 }
7334 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7335 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
7336 SDValue Ops[] = { N1, N2, N3, N4 };
7337 return getNode(Opcode, DL, VTList, Ops);
7338 }
7340 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7341 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
7342 SDValue N5) {
7343 SDValue Ops[] = { N1, N2, N3, N4, N5 };
7344 return getNode(Opcode, DL, VTList, Ops);
7345 }
7347 SDVTList SelectionDAG::getVTList(EVT VT) {
7348 return makeVTList(SDNode::getValueTypeList(VT), 1);
7349 }
7351 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
7352 FoldingSetNodeID ID;
7353 ID.AddInteger(2U);
7354 ID.AddInteger(VT1.getRawBits());
7355 ID.AddInteger(VT2.getRawBits());
7357 void *IP = nullptr;
7358 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7359 if (!Result) {
7360 EVT *Array = Allocator.Allocate<EVT>(2);
7361 Array[0] = VT1;
7362 Array[1] = VT2;
7363 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
7364 VTListMap.InsertNode(Result, IP);
7365 }
7366 return Result->getSDVTList();
7367 }
7369 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
7370 FoldingSetNodeID ID;
7371 ID.AddInteger(3U);
7372 ID.AddInteger(VT1.getRawBits());
7373 ID.AddInteger(VT2.getRawBits());
7374 ID.AddInteger(VT3.getRawBits());
7376 void *IP = nullptr;
7377 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7378 if (!Result) {
7379 EVT *Array = Allocator.Allocate<EVT>(3);
7380 Array[0] = VT1;
7381 Array[1] = VT2;
7382 Array[2] = VT3;
7383 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
7384 VTListMap.InsertNode(Result, IP);
7385 }
7386 return Result->getSDVTList();
7387 }
7389 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
7390 FoldingSetNodeID ID;
7391 ID.AddInteger(4U);
7392 ID.AddInteger(VT1.getRawBits());
7393 ID.AddInteger(VT2.getRawBits());
7394 ID.AddInteger(VT3.getRawBits());
7395 ID.AddInteger(VT4.getRawBits());
7397 void *IP = nullptr;
7398 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7399 if (!Result) {
7400 EVT *Array = Allocator.Allocate<EVT>(4);
7401 Array[0] = VT1;
7402 Array[1] = VT2;
7403 Array[2] = VT3;
7404 Array[3] = VT4;
7405 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
7406 VTListMap.InsertNode(Result, IP);
7407 }
7408 return Result->getSDVTList();
7409 }
7411 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
7412 unsigned NumVTs = VTs.size();
7413 FoldingSetNodeID ID;
7414 ID.AddInteger(NumVTs);
7415 for (unsigned index = 0; index < NumVTs; index++) {
7416 ID.AddInteger(VTs[index].getRawBits());
7417 }
7419 void *IP = nullptr;
7420 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7421 if (!Result) {
7422 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
7423 llvm::copy(VTs, Array);
7424 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
7425 VTListMap.InsertNode(Result, IP);
7426 }
7427 return Result->getSDVTList();
7428 }
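// Because every getVTList overload interns its result in VTListMap, equal
// type lists share one backing array and SDVTLists may be compared by
// pointer. Sketch:
//
//   SDVTList A = DAG.getVTList(MVT::i32, MVT::Other);
//   SDVTList B = DAG.getVTList(MVT::i32, MVT::Other);
//   assert(A.VTs == B.VTs && "identical lists share one interned array");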
7431 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
7432 /// specified operands. If the resultant node already exists in the DAG,
7433 /// this does not modify the specified node; instead it returns the node that
7434 /// already exists. If the resultant node does not exist in the DAG, the
7435 /// input node is returned. As a degenerate case, if you specify the same
7436 /// input operands as the node already has, the input node is returned.
7437 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
7438 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
7440 // Check to see if there is no change.
7441 if (Op == N->getOperand(0)) return N;
7443 // See if the modified node already exists.
7444 void *InsertPos = nullptr;
7445 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
7446 return Existing;
7448 // Nope it doesn't. Remove the node from its current place in the maps.
7449 if (InsertPos)
7450 if (!RemoveNodeFromCSEMaps(N))
7451 InsertPos = nullptr;
7453 // Now we update the operands.
7454 N->OperandList[0].set(Op);
7456 updateDivergence(N);
7457 // If this gets put into a CSE map, add it.
7458 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7459 return N;
7460 }
7462 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
7463 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
7465 // Check to see if there is no change.
7466 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
7467 return N; // No operands changed, just return the input node.
7469 // See if the modified node already exists.
7470 void *InsertPos = nullptr;
7471 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
7472 return Existing;
7474 // Nope it doesn't. Remove the node from its current place in the maps.
7475 if (InsertPos)
7476 if (!RemoveNodeFromCSEMaps(N))
7477 InsertPos = nullptr;
7479 // Now we update the operands.
7480 if (N->OperandList[0] != Op1)
7481 N->OperandList[0].set(Op1);
7482 if (N->OperandList[1] != Op2)
7483 N->OperandList[1].set(Op2);
7485 updateDivergence(N);
7486 // If this gets put into a CSE map, add it.
7487 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7488 return N;
7489 }
7491 SDNode *SelectionDAG::
7492 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
7493 SDValue Ops[] = { Op1, Op2, Op3 };
7494 return UpdateNodeOperands(N, Ops);
7495 }
7497 SDNode *SelectionDAG::
7498 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
7499 SDValue Op3, SDValue Op4) {
7500 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
7501 return UpdateNodeOperands(N, Ops);
7502 }
7504 SDNode *SelectionDAG::
7505 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
7506 SDValue Op3, SDValue Op4, SDValue Op5) {
7507 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
7508 return UpdateNodeOperands(N, Ops);
7509 }
7511 SDNode *SelectionDAG::
7512 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
7513 unsigned NumOps = Ops.size();
7514 assert(N->getNumOperands() == NumOps &&
7515 "Update with wrong number of operands");
7517 // If no operands changed just return the input node.
7518 if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
7519 return N;
7521 // See if the modified node already exists.
7522 void *InsertPos = nullptr;
7523 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
7524 return Existing;
7526 // Nope it doesn't. Remove the node from its current place in the maps.
7527 if (InsertPos)
7528 if (!RemoveNodeFromCSEMaps(N))
7529 InsertPos = nullptr;
7531 // Now we update the operands.
7532 for (unsigned i = 0; i != NumOps; ++i)
7533 if (N->OperandList[i] != Ops[i])
7534 N->OperandList[i].set(Ops[i]);
7536 updateDivergence(N);
7537 // If this gets put into a CSE map, add it.
7538 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7539 return N;
7540 }
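// Caller-side contract for all UpdateNodeOperands overloads: use the
// returned node, since a CSE hit leaves N itself unmodified. Sketch:
//
//   SDNode *Res = DAG.UpdateNodeOperands(N, NewOp0, NewOp1);
//   if (Res != N) {
//     // An identical node already existed; redirect N's uses to it.
//     DAG.ReplaceAllUsesWith(N, Res);
//   }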
7542 /// DropOperands - Release the operands and set this node to have
7543 /// zero operands.
7544 void SDNode::DropOperands() {
7545 // Unlike the code in MorphNodeTo that does this, we don't need to
7546 // watch for dead nodes here.
7547 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
7548 SDUse &Use = *I++;
7549 Use.set(SDValue());
7550 }
7551 }
7553 void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
7554 ArrayRef<MachineMemOperand *> NewMemRefs) {
7555 if (NewMemRefs.empty()) {
7556 N->clearMemRefs();
7557 return;
7558 }
7560 // Check if we can avoid allocating by storing a single reference directly.
7561 if (NewMemRefs.size() == 1) {
7562 N->MemRefs = NewMemRefs[0];
7563 N->NumMemRefs = 1;
7564 return;
7565 }
7567 MachineMemOperand **MemRefsBuffer =
7568 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
7569 llvm::copy(NewMemRefs, MemRefsBuffer);
7570 N->MemRefs = MemRefsBuffer;
7571 N->NumMemRefs = static_cast<int>(NewMemRefs.size());
7572 }
7574 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
7575 /// machine opcode.
7577 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7578 EVT VT) {
7579 SDVTList VTs = getVTList(VT);
7580 return SelectNodeTo(N, MachineOpc, VTs, None);
7581 }
7583 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7584 EVT VT, SDValue Op1) {
7585 SDVTList VTs = getVTList(VT);
7586 SDValue Ops[] = { Op1 };
7587 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7588 }
7590 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7591 EVT VT, SDValue Op1,
7592 SDValue Op2) {
7593 SDVTList VTs = getVTList(VT);
7594 SDValue Ops[] = { Op1, Op2 };
7595 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7596 }
7598 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7599 EVT VT, SDValue Op1,
7600 SDValue Op2, SDValue Op3) {
7601 SDVTList VTs = getVTList(VT);
7602 SDValue Ops[] = { Op1, Op2, Op3 };
7603 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7604 }
7606 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7607 EVT VT, ArrayRef<SDValue> Ops) {
7608 SDVTList VTs = getVTList(VT);
7609 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7610 }
7612 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7613 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
7614 SDVTList VTs = getVTList(VT1, VT2);
7615 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7616 }
7618 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7619 EVT VT1, EVT VT2) {
7620 SDVTList VTs = getVTList(VT1, VT2);
7621 return SelectNodeTo(N, MachineOpc, VTs, None);
7622 }
7624 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7625 EVT VT1, EVT VT2, EVT VT3,
7626 ArrayRef<SDValue> Ops) {
7627 SDVTList VTs = getVTList(VT1, VT2, VT3);
7628 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7629 }
7631 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7632 EVT VT1, EVT VT2,
7633 SDValue Op1, SDValue Op2) {
7634 SDVTList VTs = getVTList(VT1, VT2);
7635 SDValue Ops[] = { Op1, Op2 };
7636 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7637 }
7639 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7640 SDVTList VTs,ArrayRef<SDValue> Ops) {
7641 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
7642 // Reset the NodeID to -1.
7643 New->setNodeId(-1);
7644 if (New != N) {
7645 ReplaceAllUsesWith(N, New);
7646 RemoveDeadNode(N);
7647 }
7648 return New;
7649 }
7651 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
7652 /// the line number information on the merged node since it is not possible to
7653 /// preserve the information that the operation is associated with multiple lines.
7654 /// This will make the debugger work better at -O0, where there is a higher
7655 /// probability of having other instructions associated with that line.
7657 /// For IROrder, we keep the smaller of the two
7658 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
7659 DebugLoc NLoc = N->getDebugLoc();
7660 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
7661 N->setDebugLoc(DebugLoc());
7662 }
7663 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
7664 N->setIROrder(Order);
7665 return N;
7666 }
7668 /// MorphNodeTo - This *mutates* the specified node to have the specified
7669 /// return type, opcode, and operands.
7671 /// Note that MorphNodeTo returns the resultant node. If there is already a
7672 /// node of the specified opcode and operands, it returns that node instead of
7673 /// the current one. Note that the SDLoc need not be the same.
7675 /// Using MorphNodeTo is faster than creating a new node and swapping it in
7676 /// with ReplaceAllUsesWith both because it often avoids allocating a new
7677 /// node, and because it doesn't require CSE recalculation for any of
7678 /// the node's users.
7680 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
7681 /// As a consequence it isn't appropriate to use from within the DAG combiner or
7682 /// the legalizer which maintain worklists that would need to be updated when
7683 /// deleting things.
7684 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
7685 SDVTList VTs, ArrayRef<SDValue> Ops) {
7686 // If an identical node already exists, use it.
7687 void *IP = nullptr;
7688 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
7689 FoldingSetNodeID ID;
7690 AddNodeIDNode(ID, Opc, VTs, Ops);
7691 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
7692 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
7693 }
7695 if (!RemoveNodeFromCSEMaps(N))
7696 IP = nullptr;
7698 // Start the morphing.
7699 N->NodeType = Opc;
7700 N->ValueList = VTs.VTs;
7701 N->NumValues = VTs.NumVTs;
7703 // Clear the operands list, updating used nodes to remove this from their
7704 // use list. Keep track of any operands that become dead as a result.
7705 SmallPtrSet<SDNode*, 16> DeadNodeSet;
7706 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
7707 SDUse &Use = *I++;
7708 SDNode *Used = Use.getNode();
7709 Use.set(SDValue());
7710 if (Used->use_empty())
7711 DeadNodeSet.insert(Used);
7712 }
7714 // For MachineNode, initialize the memory references information.
7715 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
7716 MN->clearMemRefs();
7718 // Swap for an appropriately sized array from the recycler.
7719 removeOperands(N);
7720 createOperands(N, Ops);
7722 // Delete any nodes that are still dead after adding the uses for the
7723 // new operands.
7724 if (!DeadNodeSet.empty()) {
7725 SmallVector<SDNode *, 16> DeadNodes;
7726 for (SDNode *N : DeadNodeSet)
7727 if (N->use_empty())
7728 DeadNodes.push_back(N);
7729 RemoveDeadNodes(DeadNodes);
7730 }
7732 if (IP)
7733 CSEMap.InsertNode(N, IP); // Memoize the new node.
7734 return N;
7735 }
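// Typical caller pattern (it mirrors SelectNodeTo above): morph, then clean
// up if an existing node was returned instead of N being mutated in place.
//
//   SDNode *Res = DAG.MorphNodeTo(N, NewOpc, VTs, Ops);
//   if (Res != N) {
//     DAG.ReplaceAllUsesWith(N, Res);
//     DAG.RemoveDeadNode(N);
//   }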
7737 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
7738 unsigned OrigOpc = Node->getOpcode();
7739 unsigned NewOpc;
7740 switch (OrigOpc) {
7741 default:
7742 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
7743 case ISD::STRICT_FADD: NewOpc = ISD::FADD; break;
7744 case ISD::STRICT_FSUB: NewOpc = ISD::FSUB; break;
7745 case ISD::STRICT_FMUL: NewOpc = ISD::FMUL; break;
7746 case ISD::STRICT_FDIV: NewOpc = ISD::FDIV; break;
7747 case ISD::STRICT_FREM: NewOpc = ISD::FREM; break;
7748 case ISD::STRICT_FMA: NewOpc = ISD::FMA; break;
7749 case ISD::STRICT_FSQRT: NewOpc = ISD::FSQRT; break;
7750 case ISD::STRICT_FPOW: NewOpc = ISD::FPOW; break;
7751 case ISD::STRICT_FPOWI: NewOpc = ISD::FPOWI; break;
7752 case ISD::STRICT_FSIN: NewOpc = ISD::FSIN; break;
7753 case ISD::STRICT_FCOS: NewOpc = ISD::FCOS; break;
7754 case ISD::STRICT_FEXP: NewOpc = ISD::FEXP; break;
7755 case ISD::STRICT_FEXP2: NewOpc = ISD::FEXP2; break;
7756 case ISD::STRICT_FLOG: NewOpc = ISD::FLOG; break;
7757 case ISD::STRICT_FLOG10: NewOpc = ISD::FLOG10; break;
7758 case ISD::STRICT_FLOG2: NewOpc = ISD::FLOG2; break;
7759 case ISD::STRICT_LRINT: NewOpc = ISD::LRINT; break;
7760 case ISD::STRICT_LLRINT: NewOpc = ISD::LLRINT; break;
7761 case ISD::STRICT_FRINT: NewOpc = ISD::FRINT; break;
7762 case ISD::STRICT_FNEARBYINT: NewOpc = ISD::FNEARBYINT; break;
7763 case ISD::STRICT_FMAXNUM: NewOpc = ISD::FMAXNUM; break;
7764 case ISD::STRICT_FMINNUM: NewOpc = ISD::FMINNUM; break;
7765 case ISD::STRICT_FCEIL: NewOpc = ISD::FCEIL; break;
7766 case ISD::STRICT_FFLOOR: NewOpc = ISD::FFLOOR; break;
7767 case ISD::STRICT_LROUND: NewOpc = ISD::LROUND; break;
7768 case ISD::STRICT_LLROUND: NewOpc = ISD::LLROUND; break;
7769 case ISD::STRICT_FROUND: NewOpc = ISD::FROUND; break;
7770 case ISD::STRICT_FTRUNC: NewOpc = ISD::FTRUNC; break;
7771 case ISD::STRICT_FP_ROUND: NewOpc = ISD::FP_ROUND; break;
7772 case ISD::STRICT_FP_EXTEND: NewOpc = ISD::FP_EXTEND; break;
7773 case ISD::STRICT_FP_TO_SINT: NewOpc = ISD::FP_TO_SINT; break;
7774 case ISD::STRICT_FP_TO_UINT: NewOpc = ISD::FP_TO_UINT; break;
7775 }
7777 assert(Node->getNumValues() == 2 && "Unexpected number of results!");
7779 // We're taking this node out of the chain, so we need to re-link things.
7780 SDValue InputChain = Node->getOperand(0);
7781 SDValue OutputChain = SDValue(Node, 1);
7782 ReplaceAllUsesOfValueWith(OutputChain, InputChain);
7784 SmallVector<SDValue, 3> Ops;
7785 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
7786 Ops.push_back(Node->getOperand(i));
7788 SDVTList VTs = getVTList(Node->getValueType(0));
7789 SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops);
7791 // MorphNodeTo can operate in two ways: if an existing node with the
7792 // specified operands exists, it can just return it. Otherwise, it
7793 // updates the node in place to have the requested operands.
7794 if (Res == Node) {
7795 // If we updated the node in place, reset the node ID. To the isel,
7796 // this should be just like a newly allocated machine node.
7797 Res->setNodeId(-1);
7798 } else {
7799 ReplaceAllUsesWith(Node, Res);
7800 RemoveDeadNode(Node);
7801 }
7803 return Res;
7804 }
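// The chain surgery above, pictured for a strict add (illustrative only):
//
//   before: InChain -> (STRICT_FADD x, y) -> {result, OutChain} -> users
//   after:  (FADD x, y) -> result; OutChain's users now use InChain directly,
//           so the operation no longer orders against other chained nodes.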
7806 /// getMachineNode - These are used for target selectors to create a new node
7807 /// with specified return type(s), MachineInstr opcode, and operands.
7809 /// Note that getMachineNode returns the resultant node. If there is already a
7810 /// node of the specified opcode and operands, it returns that node instead of
7811 /// the current one.
7812 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7813 EVT VT) {
7814 SDVTList VTs = getVTList(VT);
7815 return getMachineNode(Opcode, dl, VTs, None);
7818 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7819 EVT VT, SDValue Op1) {
7820 SDVTList VTs = getVTList(VT);
7821 SDValue Ops[] = { Op1 };
7822 return getMachineNode(Opcode, dl, VTs, Ops);
7825 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7826 EVT VT, SDValue Op1, SDValue Op2) {
7827 SDVTList VTs = getVTList(VT);
7828 SDValue Ops[] = { Op1, Op2 };
7829 return getMachineNode(Opcode, dl, VTs, Ops);
7832 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7833 EVT VT, SDValue Op1, SDValue Op2,
7834 SDValue Op3) {
7835 SDVTList VTs = getVTList(VT);
7836 SDValue Ops[] = { Op1, Op2, Op3 };
7837 return getMachineNode(Opcode, dl, VTs, Ops);
7840 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7841 EVT VT, ArrayRef<SDValue> Ops) {
7842 SDVTList VTs = getVTList(VT);
7843 return getMachineNode(Opcode, dl, VTs, Ops);
7846 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7847 EVT VT1, EVT VT2, SDValue Op1,
7848 SDValue Op2) {
7849 SDVTList VTs = getVTList(VT1, VT2);
7850 SDValue Ops[] = { Op1, Op2 };
7851 return getMachineNode(Opcode, dl, VTs, Ops);
7854 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7855 EVT VT1, EVT VT2, SDValue Op1,
7856 SDValue Op2, SDValue Op3) {
7857 SDVTList VTs = getVTList(VT1, VT2);
7858 SDValue Ops[] = { Op1, Op2, Op3 };
7859 return getMachineNode(Opcode, dl, VTs, Ops);
7862 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7863 EVT VT1, EVT VT2,
7864 ArrayRef<SDValue> Ops) {
7865 SDVTList VTs = getVTList(VT1, VT2);
7866 return getMachineNode(Opcode, dl, VTs, Ops);
7869 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7870 EVT VT1, EVT VT2, EVT VT3,
7871 SDValue Op1, SDValue Op2) {
7872 SDVTList VTs = getVTList(VT1, VT2, VT3);
7873 SDValue Ops[] = { Op1, Op2 };
7874 return getMachineNode(Opcode, dl, VTs, Ops);
7877 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7878 EVT VT1, EVT VT2, EVT VT3,
7879 SDValue Op1, SDValue Op2,
7880 SDValue Op3) {
7881 SDVTList VTs = getVTList(VT1, VT2, VT3);
7882 SDValue Ops[] = { Op1, Op2, Op3 };
7883 return getMachineNode(Opcode, dl, VTs, Ops);
7886 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7887 EVT VT1, EVT VT2, EVT VT3,
7888 ArrayRef<SDValue> Ops) {
7889 SDVTList VTs = getVTList(VT1, VT2, VT3);
7890 return getMachineNode(Opcode, dl, VTs, Ops);
7893 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7894 ArrayRef<EVT> ResultTys,
7895 ArrayRef<SDValue> Ops) {
7896 SDVTList VTs = getVTList(ResultTys);
7897 return getMachineNode(Opcode, dl, VTs, Ops);
7900 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
7901 SDVTList VTs,
7902 ArrayRef<SDValue> Ops) {
7903 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
7904 MachineSDNode *N;
7905 void *IP = nullptr;
7907 if (DoCSE) {
7908 FoldingSetNodeID ID;
7909 AddNodeIDNode(ID, ~Opcode, VTs, Ops);
7910 IP = nullptr;
7911 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
7912 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
7916 // Allocate a new MachineSDNode.
7917 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7918 createOperands(N, Ops);
7920 if (DoCSE)
7921 CSEMap.InsertNode(N, IP);
7923 InsertNode(N);
7924 NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this);
7925 return N;
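// Illustrative usage, with MyTarget::ADDrr standing in as a hypothetical
// machine opcode: a target's selector builds machine nodes through these
// overloads and relies on the CSE behavior implemented above.
//
//   SDValue Ops[] = { LHS, RHS };
//   MachineSDNode *Add =
//       DAG.getMachineNode(MyTarget::ADDrr, DL, MVT::i32, Ops);
//   return SDValue(Add, 0);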
7928 /// getTargetExtractSubreg - A convenience function for creating
7929 /// TargetOpcode::EXTRACT_SUBREG nodes.
7930 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
7931 SDValue Operand) {
7932 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
7933 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
7934 VT, Operand, SRIdxVal);
7935 return SDValue(Subreg, 0);
7938 /// getTargetInsertSubreg - A convenience function for creating
7939 /// TargetOpcode::INSERT_SUBREG nodes.
7940 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
7941 SDValue Operand, SDValue Subreg) {
7942 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
7943 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
7944 VT, Operand, Subreg, SRIdxVal);
7945 return SDValue(Result, 0);
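// Illustrative sketch, assuming a hypothetical subregister index
// MyTarget::sub_lo32: the helpers wrap the raw index in an MVT::i32
// target constant themselves, so callers pass the plain integer.
//
//   SDValue Lo = DAG.getTargetExtractSubreg(MyTarget::sub_lo32, DL,
//                                           MVT::i32, Val64);
//   SDValue New64 = DAG.getTargetInsertSubreg(MyTarget::sub_lo32, DL,
//                                             MVT::i64, Val64, NewLo);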
7948 /// getNodeIfExists - Get the specified node if it's already available, or
7949 /// else return NULL.
7950 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
7951 ArrayRef<SDValue> Ops,
7952 const SDNodeFlags Flags) {
7953 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
7954 FoldingSetNodeID ID;
7955 AddNodeIDNode(ID, Opcode, VTList, Ops);
7956 void *IP = nullptr;
7957 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
7958 E->intersectFlagsWith(Flags);
7959 return E;
7962 return nullptr;
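// Illustrative sketch: a combine can probe the CSE map without building
// the node itself, e.g. to check whether (sub 0, X) already exists
// before committing to a rewrite that would reuse it.
//
//   SDValue Ops[] = { DAG.getConstant(0, DL, VT), X };
//   if (SDNode *Neg = DAG.getNodeIfExists(ISD::SUB, DAG.getVTList(VT), Ops))
//     /* reuse SDValue(Neg, 0) instead of creating a new node */;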
7965 /// getDbgValue - Creates an SDDbgValue node.
7967 /// SDNode
7968 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
7969 SDNode *N, unsigned R, bool IsIndirect,
7970 const DebugLoc &DL, unsigned O) {
7971 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
7972 "Expected inlined-at fields to agree");
7973 return new (DbgInfo->getAlloc())
7974 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O);
7977 /// Constant
7978 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
7979 DIExpression *Expr,
7980 const Value *C,
7981 const DebugLoc &DL, unsigned O) {
7982 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
7983 "Expected inlined-at fields to agree");
7984 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O);
7987 /// FrameIndex
7988 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
7989 DIExpression *Expr, unsigned FI,
7990 bool IsIndirect,
7991 const DebugLoc &DL,
7992 unsigned O) {
7993 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
7994 "Expected inlined-at fields to agree");
7995 return new (DbgInfo->getAlloc())
7996 SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX);
7999 /// VReg
8000 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var,
8001 DIExpression *Expr,
8002 unsigned VReg, bool IsIndirect,
8003 const DebugLoc &DL, unsigned O) {
8004 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
8005 "Expected inlined-at fields to agree");
8006 return new (DbgInfo->getAlloc())
8007 SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG);
8010 void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
8011 unsigned OffsetInBits, unsigned SizeInBits,
8012 bool InvalidateDbg) {
8013 SDNode *FromNode = From.getNode();
8014 SDNode *ToNode = To.getNode();
8015 assert(FromNode && ToNode && "Can't modify dbg values");
8017 // PR35338
8018 // TODO: assert(From != To && "Redundant dbg value transfer");
8019 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
8020 if (From == To || FromNode == ToNode)
8021 return;
8023 if (!FromNode->getHasDebugValue())
8024 return;
8026 SmallVector<SDDbgValue *, 2> ClonedDVs;
8027 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
8028 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated())
8029 continue;
8031 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");
8033 // Just transfer the dbg value attached to From.
8034 if (Dbg->getResNo() != From.getResNo())
8035 continue;
8037 DIVariable *Var = Dbg->getVariable();
8038 auto *Expr = Dbg->getExpression();
8039 // If a fragment is requested, update the expression.
8040 if (SizeInBits) {
8041 // When splitting a larger (e.g., sign-extended) value whose
8042 // lower bits are described with an SDDbgValue, do not attempt
8043 // to transfer the SDDbgValue to the upper bits.
8044 if (auto FI = Expr->getFragmentInfo())
8045 if (OffsetInBits + SizeInBits > FI->SizeInBits)
8046 continue;
8047 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
8048 SizeInBits);
8049 if (!Fragment)
8050 continue;
8051 Expr = *Fragment;
8053 // Clone the SDDbgValue and move it to To.
8054 SDDbgValue *Clone =
8055 getDbgValue(Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(),
8056 Dbg->getDebugLoc(), Dbg->getOrder());
8057 ClonedDVs.push_back(Clone);
8059 if (InvalidateDbg) {
8060 // Invalidate value and indicate the SDDbgValue should not be emitted.
8061 Dbg->setIsInvalidated();
8062 Dbg->setIsEmitted();
8066 for (SDDbgValue *Dbg : ClonedDVs)
8067 AddDbgValue(Dbg, ToNode, false);
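// Illustrative sketch: when a transform replaces one value with an
// equivalent one, debug values should follow it. The RAUW helpers below
// call transferDbgValues themselves; an explicit call is only needed on
// paths that bypass them:
//
//   // NewV computes the same value as OldV.
//   DAG.transferDbgValues(OldV, NewV);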
8070 void SelectionDAG::salvageDebugInfo(SDNode &N) {
8071 if (!N.getHasDebugValue())
8072 return;
8074 SmallVector<SDDbgValue *, 2> ClonedDVs;
8075 for (auto DV : GetDbgValues(&N)) {
8076 if (DV->isInvalidated())
8077 continue;
8078 switch (N.getOpcode()) {
8079 default:
8080 break;
8081 case ISD::ADD:
8082 SDValue N0 = N.getOperand(0);
8083 SDValue N1 = N.getOperand(1);
8084 if (!isConstantIntBuildVectorOrConstantInt(N0) &&
8085 isConstantIntBuildVectorOrConstantInt(N1)) {
8086 uint64_t Offset = N.getConstantOperandVal(1);
8087 // Rewrite an ADD constant node into a DIExpression. Since we are
8088 // performing arithmetic to compute the variable's *value* in the
8089 // DIExpression, we need to mark the expression with a
8090 // DW_OP_stack_value.
8091 auto *DIExpr = DV->getExpression();
8092 DIExpr =
8093 DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset);
8094 SDDbgValue *Clone =
8095 getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(),
8096 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
8097 ClonedDVs.push_back(Clone);
8098 DV->setIsInvalidated();
8099 DV->setIsEmitted();
8100 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
8101 N0.getNode()->dumprFull(this);
8102 dbgs() << " into " << *DIExpr << '\n');
8107 for (SDDbgValue *Dbg : ClonedDVs)
8108 AddDbgValue(Dbg, Dbg->getSDNode(), false);
8111 /// Creates an SDDbgLabel node.
8112 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label,
8113 const DebugLoc &DL, unsigned O) {
8114 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
8115 "Expected inlined-at fields to agree");
8116 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
8119 namespace {
8121 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
8122 /// pointed to by a use iterator is deleted, increment the use iterator
8123 /// so that it doesn't dangle.
8125 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
8126 SDNode::use_iterator &UI;
8127 SDNode::use_iterator &UE;
8129 void NodeDeleted(SDNode *N, SDNode *E) override {
8130 // Increment the iterator as needed.
8131 while (UI != UE && N == *UI)
8132 ++UI;
8135 public:
8136 RAUWUpdateListener(SelectionDAG &d,
8137 SDNode::use_iterator &ui,
8138 SDNode::use_iterator &ue)
8139 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
8142 } // end anonymous namespace
8144 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8145 /// This can cause recursive merging of nodes in the DAG.
8147 /// This version assumes From has a single result value.
8149 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
8150 SDNode *From = FromN.getNode();
8151 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
8152 "Cannot replace with this method!");
8153 assert(From != To.getNode() && "Cannot replace uses of a value with itself");
8155 // Preserve Debug Values
8156 transferDbgValues(FromN, To);
8158 // Iterate over all the existing uses of From. New uses will be added
8159 // to the beginning of the use list, which we avoid visiting.
8160 // This specifically avoids visiting uses of From that arise while the
8161 // replacement is happening, because any such uses would be the result
8162 // of CSE: If an existing node looks like From after one of its operands
8163 // is replaced by To, we don't want to replace all of its users with To
8164 // too. See PR3018 for more info.
8165 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8166 RAUWUpdateListener Listener(*this, UI, UE);
8167 while (UI != UE) {
8168 SDNode *User = *UI;
8170 // This node is about to morph, remove its old self from the CSE maps.
8171 RemoveNodeFromCSEMaps(User);
8173 // A user can appear in a use list multiple times, and when this
8174 // happens the uses are usually next to each other in the list.
8175 // To help reduce the number of CSE recomputations, process all
8176 // the uses of this user that we can find this way.
8177 do {
8178 SDUse &Use = UI.getUse();
8179 ++UI;
8180 Use.set(To);
8181 if (To->isDivergent() != From->isDivergent())
8182 updateDivergence(User);
8183 } while (UI != UE && *UI == User);
8184 // Now that we have modified User, add it back to the CSE maps. If it
8185 // already exists there, recursively merge the results together.
8186 AddModifiedNodeToCSEMaps(User);
8189 // If we just RAUW'd the root, take note.
8190 if (FromN == getRoot())
8191 setRoot(To);
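// Illustrative usage: a DAGCombiner-style rewrite builds the replacement
// value first and then redirects every user in one step, e.g. replacing
// (shl X, 1) with (add X, X):
//
//   SDValue NewV = DAG.getNode(ISD::ADD, DL, VT, X, X);
//   DAG.ReplaceAllUsesWith(SDValue(N, 0), NewV);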
8194 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8195 /// This can cause recursive merging of nodes in the DAG.
8197 /// This version assumes that for each value of From, there is a
8198 /// corresponding value in To in the same position with the same type.
8200 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
8201 #ifndef NDEBUG
8202 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8203 assert((!From->hasAnyUseOfValue(i) ||
8204 From->getValueType(i) == To->getValueType(i)) &&
8205 "Cannot use this version of ReplaceAllUsesWith!");
8206 #endif
8208 // Handle the trivial case.
8209 if (From == To)
8210 return;
8212 // Preserve Debug Info. Only do this if there's a use.
8213 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8214 if (From->hasAnyUseOfValue(i)) {
8215 assert((i < To->getNumValues()) && "Invalid To location");
8216 transferDbgValues(SDValue(From, i), SDValue(To, i));
8219 // Iterate over just the existing users of From. See the comments in
8220 // the ReplaceAllUsesWith above.
8221 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8222 RAUWUpdateListener Listener(*this, UI, UE);
8223 while (UI != UE) {
8224 SDNode *User = *UI;
8226 // This node is about to morph, remove its old self from the CSE maps.
8227 RemoveNodeFromCSEMaps(User);
8229 // A user can appear in a use list multiple times, and when this
8230 // happens the uses are usually next to each other in the list.
8231 // To help reduce the number of CSE recomputations, process all
8232 // the uses of this user that we can find this way.
8233 do {
8234 SDUse &Use = UI.getUse();
8235 ++UI;
8236 Use.setNode(To);
8237 if (To->isDivergent() != From->isDivergent())
8238 updateDivergence(User);
8239 } while (UI != UE && *UI == User);
8241 // Now that we have modified User, add it back to the CSE maps. If it
8242 // already exists there, recursively merge the results together.
8243 AddModifiedNodeToCSEMaps(User);
8246 // If we just RAUW'd the root, take note.
8247 if (From == getRoot().getNode())
8248 setRoot(SDValue(To, getRoot().getResNo()));
8251 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8252 /// This can cause recursive merging of nodes in the DAG.
8254 /// This version can replace From with any result values. To must match the
8255 /// number and types of values returned by From.
8256 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
8257 if (From->getNumValues() == 1) // Handle the simple case efficiently.
8258 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
8260 // Preserve Debug Info.
8261 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8262 transferDbgValues(SDValue(From, i), To[i]);
8264 // Iterate over just the existing users of From. See the comments in
8265 // the ReplaceAllUsesWith above.
8266 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8267 RAUWUpdateListener Listener(*this, UI, UE);
8268 while (UI != UE) {
8269 SDNode *User = *UI;
8271 // This node is about to morph, remove its old self from the CSE maps.
8272 RemoveNodeFromCSEMaps(User);
8274 // A user can appear in a use list multiple times, and when this happens the
8275 // uses are usually next to each other in the list. To help reduce the
8276 // number of CSE and divergence recomputations, process all the uses of this
8277 // user that we can find this way.
8278 bool To_IsDivergent = false;
8279 do {
8280 SDUse &Use = UI.getUse();
8281 const SDValue &ToOp = To[Use.getResNo()];
8282 ++UI;
8283 Use.set(ToOp);
8284 To_IsDivergent |= ToOp->isDivergent();
8285 } while (UI != UE && *UI == User);
8287 if (To_IsDivergent != From->isDivergent())
8288 updateDivergence(User);
8290 // Now that we have modified User, add it back to the CSE maps. If it
8291 // already exists there, recursively merge the results together.
8292 AddModifiedNodeToCSEMaps(User);
8295 // If we just RAUW'd the root, take note.
8296 if (From == getRoot().getNode())
8297 setRoot(SDValue(To[getRoot().getResNo()]));
8300 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
8301 /// uses of other values produced by From.getNode() alone. This can cause
8302 /// recursive merging of nodes in the DAG, as for ReplaceAllUsesWith.
8303 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
8304 // Handle the really simple, really trivial case efficiently.
8305 if (From == To) return;
8307 // Handle the simple, trivial case efficiently.
8308 if (From.getNode()->getNumValues() == 1) {
8309 ReplaceAllUsesWith(From, To);
8310 return;
8313 // Preserve Debug Info.
8314 transferDbgValues(From, To);
8316 // Iterate over just the existing users of From. See the comments in
8317 // the ReplaceAllUsesWith above.
8318 SDNode::use_iterator UI = From.getNode()->use_begin(),
8319 UE = From.getNode()->use_end();
8320 RAUWUpdateListener Listener(*this, UI, UE);
8321 while (UI != UE) {
8322 SDNode *User = *UI;
8323 bool UserRemovedFromCSEMaps = false;
8325 // A user can appear in a use list multiple times, and when this
8326 // happens the uses are usually next to each other in the list.
8327 // To help reduce the number of CSE recomputations, process all
8328 // the uses of this user that we can find this way.
8329 do {
8330 SDUse &Use = UI.getUse();
8332 // Skip uses of different values from the same node.
8333 if (Use.getResNo() != From.getResNo()) {
8334 ++UI;
8335 continue;
8338 // If this node hasn't been modified yet, it's still in the CSE maps,
8339 // so remove its old self from the CSE maps.
8340 if (!UserRemovedFromCSEMaps) {
8341 RemoveNodeFromCSEMaps(User);
8342 UserRemovedFromCSEMaps = true;
8345 ++UI;
8346 Use.set(To);
8347 if (To->isDivergent() != From->isDivergent())
8348 updateDivergence(User);
8349 } while (UI != UE && *UI == User);
8350 // We are iterating over all uses of the From node, so if a use
8351 // doesn't use the specific value, no changes are made.
8352 if (!UserRemovedFromCSEMaps)
8353 continue;
8355 // Now that we have modified User, add it back to the CSE maps. If it
8356 // already exists there, recursively merge the results together.
8357 AddModifiedNodeToCSEMaps(User);
8360 // If we just RAUW'd the root, take note.
8361 if (From == getRoot())
8362 setRoot(To);
8365 namespace {
8367 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
8368 /// to record information about a use.
8369 struct UseMemo {
8370 SDNode *User;
8371 unsigned Index;
8372 SDUse *Use;
8375 /// operator< - Sort Memos by User.
8376 bool operator<(const UseMemo &L, const UseMemo &R) {
8377 return (intptr_t)L.User < (intptr_t)R.User;
8380 } // end anonymous namespace
8382 void SelectionDAG::updateDivergence(SDNode *N) {
8384 if (TLI->isSDNodeAlwaysUniform(N))
8385 return;
8386 bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
8387 for (auto &Op : N->ops()) {
8388 if (Op.Val.getValueType() != MVT::Other)
8389 IsDivergent |= Op.getNode()->isDivergent();
8391 if (N->SDNodeBits.IsDivergent != IsDivergent) {
8392 N->SDNodeBits.IsDivergent = IsDivergent;
8393 for (auto U : N->uses()) {
8394 updateDivergence(U);
8399 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
8400 DenseMap<SDNode *, unsigned> Degree;
8401 Order.reserve(AllNodes.size());
8402 for (auto &N : allnodes()) {
8403 unsigned NOps = N.getNumOperands();
8404 Degree[&N] = NOps;
8405 if (0 == NOps)
8406 Order.push_back(&N);
8408 for (size_t I = 0; I != Order.size(); ++I) {
8409 SDNode *N = Order[I];
8410 for (auto U : N->uses()) {
8411 unsigned &UnsortedOps = Degree[U];
8412 if (0 == --UnsortedOps)
8413 Order.push_back(U);
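// This is Kahn's algorithm: a node is appended once all of its operands
// have been appended. An illustrative consumer:
//
//   std::vector<SDNode *> Order;
//   DAG.CreateTopologicalOrder(Order);
//   for (SDNode *N : Order)
//     ; // every operand of N appears in Order before N itself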
8418 #ifndef NDEBUG
8419 void SelectionDAG::VerifyDAGDiverence() {
8420 std::vector<SDNode *> TopoOrder;
8421 CreateTopologicalOrder(TopoOrder);
8422 const TargetLowering &TLI = getTargetLoweringInfo();
8423 DenseMap<const SDNode *, bool> DivergenceMap;
8424 for (auto &N : allnodes()) {
8425 DivergenceMap[&N] = false;
8427 for (auto N : TopoOrder) {
8428 bool IsDivergent = DivergenceMap[N];
8429 bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA);
8430 for (auto &Op : N->ops()) {
8431 if (Op.Val.getValueType() != MVT::Other)
8432 IsSDNodeDivergent |= DivergenceMap[Op.getNode()];
8434 if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N)) {
8435 DivergenceMap[N] = true;
8438 for (auto &N : allnodes()) {
8439 (void)N;
8440 assert(DivergenceMap[&N] == N.isDivergent() &&
8441 "Divergence bit inconsistency detected\n");
8444 #endif
8446 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
8447 /// uses of other values produced by From.getNode() alone. The same value
8448 /// may appear in both the From and To list. This can cause recursive
8449 /// merging of nodes in the DAG, as for ReplaceAllUsesWith.
8450 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
8451 const SDValue *To,
8452 unsigned Num){
8453 // Handle the simple, trivial case efficiently.
8454 if (Num == 1)
8455 return ReplaceAllUsesOfValueWith(*From, *To);
8457 transferDbgValues(*From, *To);
8459 // Record all the existing uses up front, so that uses introduced
8460 // while the replacement is underway are not visited. See the
8461 // comments in ReplaceAllUsesWith above.
8462 SmallVector<UseMemo, 4> Uses;
8463 for (unsigned i = 0; i != Num; ++i) {
8464 unsigned FromResNo = From[i].getResNo();
8465 SDNode *FromNode = From[i].getNode();
8466 for (SDNode::use_iterator UI = FromNode->use_begin(),
8467 E = FromNode->use_end(); UI != E; ++UI) {
8468 SDUse &Use = UI.getUse();
8469 if (Use.getResNo() == FromResNo) {
8470 UseMemo Memo = { *UI, i, &Use };
8471 Uses.push_back(Memo);
8476 // Sort the uses, so that all the uses from a given User are together.
8477 llvm::sort(Uses);
8479 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
8480 UseIndex != UseIndexEnd; ) {
8481 // We know that this user uses some value of From. If it is the right
8482 // value, update it.
8483 SDNode *User = Uses[UseIndex].User;
8485 // This node is about to morph, remove its old self from the CSE maps.
8486 RemoveNodeFromCSEMaps(User);
8488 // The Uses array is sorted, so all the uses for a given User
8489 // are next to each other in the list.
8490 // To help reduce the number of CSE recomputations, process all
8491 // the uses of this user that we can find this way.
8492 do {
8493 unsigned i = Uses[UseIndex].Index;
8494 SDUse &Use = *Uses[UseIndex].Use;
8495 ++UseIndex;
8497 Use.set(To[i]);
8498 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
8500 // Now that we have modified User, add it back to the CSE maps. If it
8501 // already exists there, recursively merge the results together.
8502 AddModifiedNodeToCSEMaps(User);
8506 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
8507 /// based on their topological order. It returns the maximum id; the nodes
8508 /// themselves end up in the DAG's node list in the assigned order.
8509 unsigned SelectionDAG::AssignTopologicalOrder() {
8510 unsigned DAGSize = 0;
8512 // SortedPos tracks the progress of the algorithm. Nodes before it are
8513 // sorted, nodes after it are unsorted. When the algorithm completes
8514 // it is at the end of the list.
8515 allnodes_iterator SortedPos = allnodes_begin();
8517 // Visit all the nodes. Move nodes with no operands to the front of
8518 // the list immediately. Annotate nodes that do have operands with their
8519 // operand count. Before we do this, the Node Id fields of the nodes
8520 // may contain arbitrary values. After, the Node Id fields for nodes
8521 // before SortedPos will contain the topological sort index, and the
8522 // Node Id fields for nodes at SortedPos and after will contain the
8523 // count of outstanding operands.
8524 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
8525 SDNode *N = &*I++;
8526 checkForCycles(N, this);
8527 unsigned Degree = N->getNumOperands();
8528 if (Degree == 0) {
8529 // A node with no operands, add it to the result array immediately.
8530 N->setNodeId(DAGSize++);
8531 allnodes_iterator Q(N);
8532 if (Q != SortedPos)
8533 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
8534 assert(SortedPos != AllNodes.end() && "Overran node list");
8535 ++SortedPos;
8536 } else {
8537 // Temporarily use the Node Id as scratch space for the degree count.
8538 N->setNodeId(Degree);
8542 // Visit all the nodes. As we iterate, move nodes into sorted order,
8543 // such that by the time the end is reached all nodes will be sorted.
8544 for (SDNode &Node : allnodes()) {
8545 SDNode *N = &Node;
8546 checkForCycles(N, this);
8547 // N is in sorted position, so all its uses have one less operand
8548 // that needs to be sorted.
8549 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
8550 UI != UE; ++UI) {
8551 SDNode *P = *UI;
8552 unsigned Degree = P->getNodeId();
8553 assert(Degree != 0 && "Invalid node degree");
8554 --Degree;
8555 if (Degree == 0) {
8556 // All of P's operands are sorted, so P may be sorted now.
8557 P->setNodeId(DAGSize++);
8558 if (P->getIterator() != SortedPos)
8559 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
8560 assert(SortedPos != AllNodes.end() && "Overran node list");
8561 ++SortedPos;
8562 } else {
8563 // Update P's outstanding operand count.
8564 P->setNodeId(Degree);
8567 if (Node.getIterator() == SortedPos) {
8568 #ifndef NDEBUG
8569 allnodes_iterator I(N);
8570 SDNode *S = &*++I;
8571 dbgs() << "Overran sorted position:\n";
8572 S->dumprFull(this); dbgs() << "\n";
8573 dbgs() << "Checking if this is due to cycles\n";
8574 checkForCycles(this, true);
8575 #endif
8576 llvm_unreachable(nullptr);
8580 assert(SortedPos == AllNodes.end() &&
8581 "Topological sort incomplete!");
8582 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
8583 "First node in topological sort is not the entry token!");
8584 assert(AllNodes.front().getNodeId() == 0 &&
8585 "First node in topological sort has non-zero id!");
8586 assert(AllNodes.front().getNumOperands() == 0 &&
8587 "First node in topological sort has operands!");
8588 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
8589 "Last node in topological sort has unexpected id!");
8590 assert(AllNodes.back().use_empty() &&
8591 "Last node in topological sort has users!");
8592 assert(DAGSize == allnodes_size() && "Node count mismatch!");
8593 return DAGSize;
8596 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null, that means the
8597 /// value is produced by SD.
8598 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
8599 if (SD) {
8600 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
8601 SD->setHasDebugValue(true);
8603 DbgInfo->add(DB, SD, isParameter);
8606 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
8607 DbgInfo->add(DB);
8610 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
8611 SDValue NewMemOp) {
8612 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
8613 // The new memory operation must have the same position as the old load in
8614 // terms of memory dependency. Create a TokenFactor for the old load and new
8615 // memory operation and update uses of the old load's output chain to use that
8616 // TokenFactor.
8617 SDValue OldChain = SDValue(OldLoad, 1);
8618 SDValue NewChain = SDValue(NewMemOp.getNode(), 1);
8619 if (OldChain == NewChain || !OldLoad->hasAnyUseOfValue(1))
8620 return NewChain;
8622 SDValue TokenFactor =
8623 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain);
8624 ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
8625 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain);
8626 return TokenFactor;
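// Illustrative sketch: when OldLoad is being replaced by a new memory
// node (NewMemOp here is assumed to be any MemSDNode whose result 1 is
// its chain), threading the chains keeps dependent operations ordered:
//
//   SDValue Chain = DAG.makeEquivalentMemoryOrdering(OldLoad, NewMemOp);
//   // Chain is either NewMemOp's chain, or a TokenFactor of the old and
//   // new chains when the old chain had other uses.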
8629 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op,
8630 Function **OutFunction) {
8631 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");
8633 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
8634 auto *Module = MF->getFunction().getParent();
8635 auto *Function = Module->getFunction(Symbol);
8637 if (OutFunction != nullptr)
8638 *OutFunction = Function;
8640 if (Function != nullptr) {
8641 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
8642 return getGlobalAddress(Function, SDLoc(Op), PtrTy);
8645 std::string ErrorStr;
8646 raw_string_ostream ErrorFormatter(ErrorStr);
8648 ErrorFormatter << "Undefined external symbol ";
8649 ErrorFormatter << '"' << Symbol << '"';
8650 ErrorFormatter.flush();
8652 report_fatal_error(ErrorStr);
8655 //===----------------------------------------------------------------------===//
8656 // SDNode Class
8657 //===----------------------------------------------------------------------===//
8659 bool llvm::isNullConstant(SDValue V) {
8660 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
8661 return Const != nullptr && Const->isNullValue();
8664 bool llvm::isNullFPConstant(SDValue V) {
8665 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
8666 return Const != nullptr && Const->isZero() && !Const->isNegative();
8669 bool llvm::isAllOnesConstant(SDValue V) {
8670 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
8671 return Const != nullptr && Const->isAllOnesValue();
8674 bool llvm::isOneConstant(SDValue V) {
8675 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
8676 return Const != nullptr && Const->isOne();
8679 SDValue llvm::peekThroughBitcasts(SDValue V) {
8680 while (V.getOpcode() == ISD::BITCAST)
8681 V = V.getOperand(0);
8682 return V;
8685 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) {
8686 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
8687 V = V.getOperand(0);
8688 return V;
8691 SDValue llvm::peekThroughExtractSubvectors(SDValue V) {
8692 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
8693 V = V.getOperand(0);
8694 return V;
8697 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
8698 if (V.getOpcode() != ISD::XOR)
8699 return false;
8700 V = peekThroughBitcasts(V.getOperand(1));
8701 unsigned NumBits = V.getScalarValueSizeInBits();
8702 ConstantSDNode *C =
8703 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true);
8704 return C && (C->getAPIntValue().countTrailingOnes() >= NumBits);
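// For example, (xor X, -1) matches, as does a vector (xor X, (splat -1))
// or a bitcast of such an all-ones splat as operand 1; a partial mask
// such as (xor X, 15) does not, since 15 has too few trailing ones.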
8707 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs,
8708 bool AllowTruncation) {
8709 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
8710 return CN;
8712 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
8713 BitVector UndefElements;
8714 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
8716 // BuildVectors can truncate their operands. Ignore that case here unless
8717 // AllowTruncation is set.
8718 if (CN && (UndefElements.none() || AllowUndefs)) {
8719 EVT CVT = CN->getValueType(0);
8720 EVT NSVT = N.getValueType().getScalarType();
8721 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
8722 if (AllowTruncation || (CVT == NSVT))
8723 return CN;
8727 return nullptr;
8730 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
8731 bool AllowUndefs,
8732 bool AllowTruncation) {
8733 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
8734 return CN;
8736 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
8737 BitVector UndefElements;
8738 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
8740 // BuildVectors can truncate their operands. Ignore that case here unless
8741 // AllowTruncation is set.
8742 if (CN && (UndefElements.none() || AllowUndefs)) {
8743 EVT CVT = CN->getValueType(0);
8744 EVT NSVT = N.getValueType().getScalarType();
8745 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
8746 if (AllowTruncation || (CVT == NSVT))
8747 return CN;
8751 return nullptr;
8754 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) {
8755 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
8756 return CN;
8758 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
8759 BitVector UndefElements;
8760 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
8761 if (CN && (UndefElements.none() || AllowUndefs))
8762 return CN;
8765 return nullptr;
8768 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N,
8769 const APInt &DemandedElts,
8770 bool AllowUndefs) {
8771 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
8772 return CN;
8774 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
8775 BitVector UndefElements;
8776 ConstantFPSDNode *CN =
8777 BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
8778 if (CN && (UndefElements.none() || AllowUndefs))
8779 return CN;
8782 return nullptr;
8785 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
8786 // TODO: may want to use peekThroughBitcast() here.
8787 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
8788 return C && C->isNullValue();
8791 bool llvm::isOneOrOneSplat(SDValue N) {
8792 // TODO: may want to use peekThroughBitcast() here.
8793 unsigned BitWidth = N.getScalarValueSizeInBits();
8794 ConstantSDNode *C = isConstOrConstSplat(N);
8795 return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth;
8798 bool llvm::isAllOnesOrAllOnesSplat(SDValue N) {
8799 N = peekThroughBitcasts(N);
8800 unsigned BitWidth = N.getScalarValueSizeInBits();
8801 ConstantSDNode *C = isConstOrConstSplat(N);
8802 return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth;
8805 HandleSDNode::~HandleSDNode() {
8806 DropOperands();
8809 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
8810 const DebugLoc &DL,
8811 const GlobalValue *GA, EVT VT,
8812 int64_t o, unsigned TF)
8813 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
8814 TheGlobal = GA;
8817 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
8818 EVT VT, unsigned SrcAS,
8819 unsigned DestAS)
8820 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
8821 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
8823 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
8824 SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
8825 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
8826 MemSDNodeBits.IsVolatile = MMO->isVolatile();
8827 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
8828 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
8829 MemSDNodeBits.IsInvariant = MMO->isInvariant();
8831 // We check here that the size of the memory operand fits within the size of
8832 // the MMO. This is because the MMO might indicate only a possible address
8833 // range instead of specifying the affected memory addresses precisely.
8834 assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
8837 /// Profile - Gather unique data for the node.
8839 void SDNode::Profile(FoldingSetNodeID &ID) const {
8840 AddNodeIDNode(ID, this);
8843 namespace {
8845 struct EVTArray {
8846 std::vector<EVT> VTs;
8848 EVTArray() {
8849 VTs.reserve(MVT::LAST_VALUETYPE);
8850 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
8851 VTs.push_back(MVT((MVT::SimpleValueType)i));
8855 } // end anonymous namespace
8857 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
8858 static ManagedStatic<EVTArray> SimpleVTArray;
8859 static ManagedStatic<sys::SmartMutex<true>> VTMutex;
8861 /// getValueTypeList - Return a pointer to the specified value type.
8863 const EVT *SDNode::getValueTypeList(EVT VT) {
8864 if (VT.isExtended()) {
8865 sys::SmartScopedLock<true> Lock(*VTMutex);
8866 return &(*EVTs->insert(VT).first);
8867 } else {
8868 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
8869 "Value type out of range!");
8870 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
8874 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
8875 /// indicated value. This method ignores uses of other values defined by this
8876 /// operation.
8877 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
8878 assert(Value < getNumValues() && "Bad value!");
8880 // TODO: Only iterate over uses of a given value of the node
8881 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
8882 if (UI.getUse().getResNo() == Value) {
8883 if (NUses == 0)
8884 return false;
8885 --NUses;
8889 // Found exactly the right number of uses?
8890 return NUses == 0;
8893 /// hasAnyUseOfValue - Return true if there is any use of the indicated
8894 /// value. This method ignores uses of other values defined by this operation.
8895 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
8896 assert(Value < getNumValues() && "Bad value!");
8898 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
8899 if (UI.getUse().getResNo() == Value)
8900 return true;
8902 return false;
8905 /// isOnlyUserOf - Return true if this node is the only user of N.
8906 bool SDNode::isOnlyUserOf(const SDNode *N) const {
8907 bool Seen = false;
8908 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
8909 SDNode *User = *I;
8910 if (User == this)
8911 Seen = true;
8912 else
8913 return false;
8916 return Seen;
8919 /// Return true if the only users of N are contained in Nodes.
8920 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
8921 bool Seen = false;
8922 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
8923 SDNode *User = *I;
8924 if (llvm::any_of(Nodes,
8925 [&User](const SDNode *Node) { return User == Node; }))
8926 Seen = true;
8927 else
8928 return false;
8931 return Seen;
8934 /// isOperandOf - Return true if this value is an operand of N.
8935 bool SDValue::isOperandOf(const SDNode *N) const {
8936 return any_of(N->op_values(), [this](SDValue Op) { return *this == Op; });
8939 bool SDNode::isOperandOf(const SDNode *N) const {
8940 return any_of(N->op_values(),
8941 [this](SDValue Op) { return this == Op.getNode(); });
8944 /// reachesChainWithoutSideEffects - Return true if this operand (which must
8945 /// be a chain) reaches the specified operand without crossing any
8946 /// side-effecting instructions on any chain path. In practice, this looks
8947 /// through token factors and non-volatile loads. In order to remain efficient,
8948 /// this only looks a couple of nodes in; it does not do an exhaustive search.
8950 /// Note that we only need to examine chains when we're searching for
8951 /// side-effects; SelectionDAG requires that all side-effects are represented
8952 /// by chains, even if another operand would force a specific ordering. This
8953 /// constraint is necessary to allow transformations like splitting loads.
8954 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
8955 unsigned Depth) const {
8956 if (*this == Dest) return true;
8958 // Don't search too deeply, we just want to be able to see through
8959 // TokenFactor's etc.
8960 if (Depth == 0) return false;
8962 // If this is a token factor, all inputs to the TF happen in parallel.
8963 if (getOpcode() == ISD::TokenFactor) {
8964 // First, try a shallow search.
8965 if (is_contained((*this)->ops(), Dest)) {
8966 // We found the chain we want as an operand of this TokenFactor.
8967 // Essentially, we reach the chain without side-effects if we could
8968 // serialize the TokenFactor into a simple chain of operations with
8969 // Dest as the last operation. This is automatically true if the
8970 // chain has one use: there are no other ordering constraints.
8971 // If the chain has more than one use, we give up: some other
8972 // use of Dest might force a side-effect between Dest and the current
8973 // node.
8974 if (Dest.hasOneUse())
8975 return true;
8977 // Next, try a deep search: check whether every operand of the TokenFactor
8978 // reaches Dest.
8979 return llvm::all_of((*this)->ops(), [=](SDValue Op) {
8980 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
8984 // Loads don't have side effects, look through them.
8985 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
8986 if (Ld->isUnordered())
8987 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
8989 return false;
8992 bool SDNode::hasPredecessor(const SDNode *N) const {
8993 SmallPtrSet<const SDNode *, 32> Visited;
8994 SmallVector<const SDNode *, 16> Worklist;
8995 Worklist.push_back(this);
8996 return hasPredecessorHelper(N, Visited, Worklist);
8999 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
9000 this->Flags.intersectWith(Flags);
9003 SDValue
9004 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
9005 ArrayRef<ISD::NodeType> CandidateBinOps,
9006 bool AllowPartials) {
9007 // The pattern must end in an extract from index 0.
9008 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9009 !isNullConstant(Extract->getOperand(1)))
9010 return SDValue();
9012 // Match against one of the candidate binary ops.
9013 SDValue Op = Extract->getOperand(0);
9014 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
9015 return Op.getOpcode() == unsigned(BinOp);
9017 return SDValue();
9019 // Floating-point reductions may require relaxed constraints on the final step
9020 // of the reduction because they may reorder intermediate operations.
9021 unsigned CandidateBinOp = Op.getOpcode();
9022 if (Op.getValueType().isFloatingPoint()) {
9023 SDNodeFlags Flags = Op->getFlags();
9024 switch (CandidateBinOp) {
9025 case ISD::FADD:
9026 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
9027 return SDValue();
9028 break;
9029 default:
9030 llvm_unreachable("Unhandled FP opcode for binop reduction");
9034 // Matching failed - attempt to see if we did enough stages that a partial
9035 // reduction from a subvector is possible.
9036 auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
9037 if (!AllowPartials || !Op)
9038 return SDValue();
9039 EVT OpVT = Op.getValueType();
9040 EVT OpSVT = OpVT.getScalarType();
9041 EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts);
9042 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
9043 return SDValue();
9044 BinOp = (ISD::NodeType)CandidateBinOp;
9045 return getNode(
9046 ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op,
9047 getConstant(0, SDLoc(Op), TLI->getVectorIdxTy(getDataLayout())));
9050 // At each stage, we're looking for something that looks like:
9051 // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
9052 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
9053 // i32 undef, i32 undef, i32 undef, i32 undef>
9054 // %a = binop <8 x i32> %op, %s
9055 // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
9056 // we expect something like:
9057 // <4,5,6,7,u,u,u,u>
9058 // <2,3,u,u,u,u,u,u>
9059 // <1,u,u,u,u,u,u,u>
9060 // While a partial reduction match would be:
9061 // <2,3,u,u,u,u,u,u>
9062 // <1,u,u,u,u,u,u,u>
9063 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
9064 SDValue PrevOp;
9065 for (unsigned i = 0; i < Stages; ++i) {
9066 unsigned MaskEnd = (1 << i);
9068 if (Op.getOpcode() != CandidateBinOp)
9069 return PartialReduction(PrevOp, MaskEnd);
9071 SDValue Op0 = Op.getOperand(0);
9072 SDValue Op1 = Op.getOperand(1);
9074 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0);
9075 if (Shuffle) {
9076 Op = Op1;
9077 } else {
9078 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
9079 Op = Op0;
9082 // The first operand of the shuffle should be the same as the other operand
9083 // of the binop.
9084 if (!Shuffle || Shuffle->getOperand(0) != Op)
9085 return PartialReduction(PrevOp, MaskEnd);
9087 // Verify the shuffle has the expected (at this stage of the pyramid) mask.
9088 for (int Index = 0; Index < (int)MaskEnd; ++Index)
9089 if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
9090 return PartialReduction(PrevOp, MaskEnd);
9092 PrevOp = Op;
9095 BinOp = (ISD::NodeType)CandidateBinOp;
9096 return Op;
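// Illustrative usage (a hedged sketch; targets pass their own candidate
// opcodes): recognizing a horizontal integer-add reduction feeding an
// extract of element 0.
//
//   ISD::NodeType BinOp;
//   if (SDValue Src = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD}))
//     ; // Src is the unreduced source vector and BinOp == ISD::ADD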
9099 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
9100 assert(N->getNumValues() == 1 &&
9101 "Can't unroll a vector with multiple results!");
9103 EVT VT = N->getValueType(0);
9104 unsigned NE = VT.getVectorNumElements();
9105 EVT EltVT = VT.getVectorElementType();
9106 SDLoc dl(N);
9108 SmallVector<SDValue, 8> Scalars;
9109 SmallVector<SDValue, 4> Operands(N->getNumOperands());
9111 // If ResNE is 0, fully unroll the vector op.
9112 if (ResNE == 0)
9113 ResNE = NE;
9114 else if (NE > ResNE)
9115 NE = ResNE;
9117 unsigned i;
9118 for (i = 0; i != NE; ++i) {
9119 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
9120 SDValue Operand = N->getOperand(j);
9121 EVT OperandVT = Operand.getValueType();
9122 if (OperandVT.isVector()) {
9123 // A vector operand; extract a single element.
9124 EVT OperandEltVT = OperandVT.getVectorElementType();
9125 Operands[j] =
9126 getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
9127 getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
9128 } else {
9129 // A scalar operand; just use it as is.
9130 Operands[j] = Operand;
9134 switch (N->getOpcode()) {
9135 default: {
9136 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
9137 N->getFlags()));
9138 break;
9140 case ISD::VSELECT:
9141 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
9142 break;
9143 case ISD::SHL:
9144 case ISD::SRA:
9145 case ISD::SRL:
9146 case ISD::ROTL:
9147 case ISD::ROTR:
9148 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
9149 getShiftAmountOperand(Operands[0].getValueType(),
9150 Operands[1])));
9151 break;
9152 case ISD::SIGN_EXTEND_INREG: {
9153 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
9154 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
9155 Operands[0],
9156 getValueType(ExtVT)));
9161 for (; i < ResNE; ++i)
9162 Scalars.push_back(getUNDEF(EltVT));
9164 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
9165 return getBuildVector(VecVT, dl, Scalars);
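// Worked example: unrolling (add v4i32 A, B) yields
//
//   e_i    = add (extract_vector_elt A, i), (extract_vector_elt B, i)
//   result = BUILD_VECTOR e0, e1, e2, e3
//
// and a ResNE wider than the source element count pads the BUILD_VECTOR
// with undef elements.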
9168 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
9169 SDNode *N, unsigned ResNE) {
9170 unsigned Opcode = N->getOpcode();
9171 assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO ||
9172 Opcode == ISD::USUBO || Opcode == ISD::SSUBO ||
9173 Opcode == ISD::UMULO || Opcode == ISD::SMULO) &&
9174 "Expected an overflow opcode");
9176 EVT ResVT = N->getValueType(0);
9177 EVT OvVT = N->getValueType(1);
9178 EVT ResEltVT = ResVT.getVectorElementType();
9179 EVT OvEltVT = OvVT.getVectorElementType();
9180 SDLoc dl(N);
9182 // If ResNE is 0, fully unroll the vector op.
9183 unsigned NE = ResVT.getVectorNumElements();
9184 if (ResNE == 0)
9185 ResNE = NE;
9186 else if (NE > ResNE)
9187 NE = ResNE;
9189 SmallVector<SDValue, 8> LHSScalars;
9190 SmallVector<SDValue, 8> RHSScalars;
9191 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE);
9192 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE);
9194 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT);
9195 SDVTList VTs = getVTList(ResEltVT, SVT);
9196 SmallVector<SDValue, 8> ResScalars;
9197 SmallVector<SDValue, 8> OvScalars;
9198 for (unsigned i = 0; i < NE; ++i) {
9199 SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
9200 SDValue Ov =
9201 getSelect(dl, OvEltVT, Res.getValue(1),
9202 getBoolConstant(true, dl, OvEltVT, ResVT),
9203 getConstant(0, dl, OvEltVT));
9205 ResScalars.push_back(Res);
9206 OvScalars.push_back(Ov);
9209 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT));
9210 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT));
9212 EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE);
9213 EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE);
9214 return std::make_pair(getBuildVector(NewResVT, dl, ResScalars),
9215 getBuildVector(NewOvVT, dl, OvScalars));
9218 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
9219 LoadSDNode *Base,
9220 unsigned Bytes,
9221 int Dist) const {
9222 if (LD->isVolatile() || Base->isVolatile())
9223 return false;
9224 // TODO: probably too restrictive for atomics, revisit
9225 if (!LD->isSimple())
9226 return false;
9227 if (LD->isIndexed() || Base->isIndexed())
9228 return false;
9229 if (LD->getChain() != Base->getChain())
9230 return false;
9231 EVT VT = LD->getValueType(0);
9232 if (VT.getSizeInBits() / 8 != Bytes)
9233 return false;
9235 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
9236 auto LocDecomp = BaseIndexOffset::match(LD, *this);
9238 int64_t Offset = 0;
9239 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
9240 return (Dist * Bytes == Offset);
9241 return false;
9244 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
9245 /// it cannot be inferred.
9246 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
9247 // If this is a GlobalAddress + cst, return the alignment.
9248 const GlobalValue *GV;
9249 int64_t GVOffset = 0;
9250 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
9251 unsigned IdxWidth = getDataLayout().getIndexTypeSizeInBits(GV->getType());
9252 KnownBits Known(IdxWidth);
9253 llvm::computeKnownBits(GV, Known, getDataLayout());
9254 unsigned AlignBits = Known.countMinTrailingZeros();
9255 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
9256 if (Align)
9257 return MinAlign(Align, GVOffset);
9260 // If this is a direct reference to a stack slot, use information about the
9261 // stack slot's alignment.
9262 int FrameIdx = INT_MIN;
9263 int64_t FrameOffset = 0;
9264 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
9265 FrameIdx = FI->getIndex();
9266 } else if (isBaseWithConstantOffset(Ptr) &&
9267 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
9268 // Handle FI+Cst
9269 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
9270 FrameOffset = Ptr.getConstantOperandVal(1);
9273 if (FrameIdx != INT_MIN) {
9274 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
9275 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
9276 FrameOffset);
9277 return FIInfoAlign;
9280 return 0;
9283 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
9284 /// which is split (or expanded) into two not necessarily identical pieces.
9285 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
9286 // Currently all types are split in half.
9287 EVT LoVT, HiVT;
9288 if (!VT.isVector())
9289 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
9290 else
9291 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());
9293 return std::make_pair(LoVT, HiVT);
9296 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
9297 /// low/high part.
9298 std::pair<SDValue, SDValue>
9299 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
9300 const EVT &HiVT) {
9301 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
9302 N.getValueType().getVectorNumElements() &&
9303 "More vector elements requested than available!");
9304 SDValue Lo, Hi;
9305 Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
9306 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
9307 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
9308 getConstant(LoVT.getVectorNumElements(), DL,
9309 TLI->getVectorIdxTy(getDataLayout())));
9310 return std::make_pair(Lo, Hi);
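// Worked example: with the default types from GetSplitDestVTs, splitting
// a v8i32 value N yields
//
//   Lo = extract_subvector N, 0   ; v4i32
//   Hi = extract_subvector N, 4   ; v4i32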
9313 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
9314 SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
9315 EVT VT = N.getValueType();
9316 EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
9317 NextPowerOf2(VT.getVectorNumElements()));
9318 return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
9319 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
9322 void SelectionDAG::ExtractVectorElements(SDValue Op,
9323 SmallVectorImpl<SDValue> &Args,
9324 unsigned Start, unsigned Count) {
9325 EVT VT = Op.getValueType();
9326 if (Count == 0)
9327 Count = VT.getVectorNumElements();
9329 EVT EltVT = VT.getVectorElementType();
9330 EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
9331 SDLoc SL(Op);
9332 for (unsigned i = Start, e = Start + Count; i != e; ++i) {
9333 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
9334 Op, getConstant(i, SL, IdxTy)));
9338 // getAddressSpace - Return the address space this GlobalAddress belongs to.
9339 unsigned GlobalAddressSDNode::getAddressSpace() const {
9340 return getGlobal()->getType()->getAddressSpace();
9343 Type *ConstantPoolSDNode::getType() const {
9344 if (isMachineConstantPoolEntry())
9345 return Val.MachineCPVal->getType();
9346 return Val.ConstVal->getType();
9349 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
9350 unsigned &SplatBitSize,
9351 bool &HasAnyUndefs,
9352 unsigned MinSplatBits,
9353 bool IsBigEndian) const {
9354 EVT VT = getValueType(0);
9355 assert(VT.isVector() && "Expected a vector type");
9356 unsigned VecWidth = VT.getSizeInBits();
9357 if (MinSplatBits > VecWidth)
9358 return false;
9360 // FIXME: The widths are based on this node's type, but build vectors can
9361 // truncate their operands.
9362 SplatValue = APInt(VecWidth, 0);
9363 SplatUndef = APInt(VecWidth, 0);
9365 // Get the bits. Bits with undefined values (when the corresponding element
9366 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
9367 // in SplatValue. If any of the values are not constant, give up and return
9368 // false.
9369 unsigned int NumOps = getNumOperands();
9370 assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
9371 unsigned EltWidth = VT.getScalarSizeInBits();
9373 for (unsigned j = 0; j < NumOps; ++j) {
9374 unsigned i = IsBigEndian ? NumOps - 1 - j : j;
9375 SDValue OpVal = getOperand(i);
9376 unsigned BitPos = j * EltWidth;
9378 if (OpVal.isUndef())
9379 SplatUndef.setBits(BitPos, BitPos + EltWidth);
9380 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
9381 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
9382 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
9383 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
9384 else
9385 return false;
9388 // The build_vector is all constants or undefs. Find the smallest element
9389 // size that splats the vector.
9390 HasAnyUndefs = (SplatUndef != 0);
9392 // FIXME: This does not work for vectors with elements less than 8 bits.
9393 while (VecWidth > 8) {
9394 unsigned HalfSize = VecWidth / 2;
9395 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
9396 APInt LowValue = SplatValue.trunc(HalfSize);
9397 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
9398 APInt LowUndef = SplatUndef.trunc(HalfSize);
9400 // If the two halves do not match (ignoring undef bits), stop here.
9401 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
9402 MinSplatBits > HalfSize)
9403 break;
9405 SplatValue = HighValue | LowValue;
9406 SplatUndef = HighUndef & LowUndef;
9408 VecWidth = HalfSize;
9411 SplatBitSize = VecWidth;
9412 return true;
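// Worked example: for a v4i32 build_vector splatting 0x01010101, the
// 128-bit pattern keeps halving while both halves agree, so this reports
// SplatBitSize == 8 with SplatValue == 0x01. Passing MinSplatBits == 32
// stops the halving early and reports a 32-bit splat instead.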
SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
                                         BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  assert(getNumOperands() == DemandedElts.getBitWidth() &&
         "Unexpected vector size");
  if (!DemandedElts)
    return SDValue();
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
    assert(getOperand(FirstDemandedIdx).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(FirstDemandedIdx);
  }

  return Splatted;
}

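// Worked example (illustrative): for build_vector <C, undef, C, D> with
// DemandedElts = 0b0111 (elements 0-2), the walk above records element 1 in
// UndefElements and returns C; demanding element 3 as well would mix C and D
// and yield an empty SDValue.
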
SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
  return getSplatValue(DemandedElts, UndefElements);
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
                                        BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
                                          BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

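// Worked example (illustrative): a splat of the FP constant 8.0 converts
// exactly to the integer 8, so this returns log2(8) == 3; splats of 6.0
// (not a power of two) or 0.5 (not an exact integer) return -1. Targets
// can use the result, for example, to rewrite a multiply or divide by a
// power-of-two splat as a cheaper shift-style operation.
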
bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  // If all elements are undefined, this shuffle can be considered a splat
  // (although it should eventually get simplified away completely).
  if (i == e)
    return true;

  // Make sure all remaining elements are either undef or the same as the
  // first non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

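// Worked example (illustrative): the mask <2, 2, -1, 2> is a splat of source
// element 2 (the -1 lane is undef and matches anything), while <0, 1, 0, 1>
// mixes two source elements and is rejected.
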
// Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  return nullptr;
}

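// Usage sketch (illustrative): predicates like this one support the usual
// canonicalization of constants to the right-hand side of commutative
// operations, along the lines of:
//
//   if (isConstantIntBuildVectorOrConstantInt(N1) &&
//       !isConstantIntBuildVectorOrConstantInt(N2))
//     return getNode(Opcode, DL, VT, N2, N1, Flags); // swap operands
//
// so later folds only need to look for a constant on the RHS.
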
SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence.
      IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
  if (!TLI->isSDNodeAlwaysUniform(Node))
    Node->SDNodeBits.IsDivergent = IsDivergent;
  checkForCycles(Node);
}

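// Illustrative note: divergence here is a simple union over the non-chain
// value operands. If any such operand is divergent (e.g. it derives from a
// GPU thread id), the new node is marked divergent as well, unless the
// target declares the node always uniform via isSDNodeAlwaysUniform.
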
SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
                                     SmallVectorImpl<SDValue> &Vals) {
  size_t Limit = SDNode::getMaxNumOperands();
  while (Vals.size() > Limit) {
    unsigned SliceIdx = Vals.size() - Limit;
    auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
    SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
    Vals.erase(Vals.begin() + SliceIdx, Vals.end());
    Vals.emplace_back(NewTF);
  }
  return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}

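// Worked example (illustrative): with a hypothetical operand limit of 4 and
// six chains {a, b, c, d, e, f}, the loop folds the trailing four into
// T1 = TokenFactor(c, d, e, f), leaving {a, b, T1}, which now fits and
// becomes the final TokenFactor(a, b, T1).
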
#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

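// Illustrative note: this is the classic two-set depth-first cycle check.
// Visited holds the nodes on the current recursion stack, so re-entering one
// of them means a back edge (a cycle); Checked caches nodes whose whole
// operand subgraph has already been proven acyclic, so shared subtrees are
// not re-walked.
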
void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}