//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;
// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}
namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
};

} // end anonymous namespace
// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static const Instruction *safeCxtI(const Value *V1, const Value *V2,
                                   const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V1);
  if (CxtI && CxtI->getParent())
    return CxtI;

  CxtI = dyn_cast<Instruction>(V2);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}
static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                   const APInt &DemandedElts,
                                   APInt &DemandedLHS, APInt &DemandedRHS) {
  if (isa<ScalableVectorType>(Shuf->getType())) {
    assert(DemandedElts == APInt(1, 1));
    DemandedLHS = DemandedRHS = DemandedElts;
    return true;
  }

  int NumElts =
      cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
  return llvm::getShuffleDemandedElts(NumElts, Shuf->getShuffleMask(),
                                      DemandedElts, DemandedLHS, DemandedRHS);
}
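// Illustrative example for getShuffleDemandedElts: for
//   %s = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 3>
// demanding both result elements demands element 0 of the LHS (%a) and
// element 1 of the RHS (%b); an undef (-1) mask element demands nothing.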
static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth, const Query &Q);

static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const Query &Q) {
  // Since the number of lanes in a scalable vector is unknown at compile time,
  // we track one bit which is implicitly broadcast to all lanes. This means
  // that all lanes in a scalable vector are considered demanded.
  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
}

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                            KnownBits &Known, const DataLayout &DL,
                            unsigned Depth, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, DemandedElts, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                  unsigned Depth, const Query &Q);

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, DemandedElts, Depth,
      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}
bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");

  // Look for an inverted mask: (X & ~M) op (Y & M).
  {
    Value *M;
    if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
        match(RHS, m_c_And(m_Specific(M), m_Value())))
      return true;
    if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
        match(LHS, m_c_And(m_Specific(M), m_Value())))
      return true;
  }
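  // Illustrative example of the inverted-mask pattern:
  //   %lhs = and i8 %x, %notm   ; where %notm = xor i8 %m, -1
  //   %rhs = and i8 %y, %m
  // No bit can be set in both %lhs and %rhs, so e.g. %lhs + %rhs == %lhs | %rhs.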
  // X op (Y & ~X)
  if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())) ||
      match(LHS, m_c_And(m_Not(m_Specific(RHS)), m_Value())))
    return true;

  // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
  // for constant Y.
  Value *Y;
  if (match(RHS,
            m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) ||
      match(LHS, m_c_Xor(m_c_And(m_Specific(RHS), m_Value(Y)), m_Deferred(Y))))
    return true;

  // Peek through extends to find a 'not' of the other side:
  // (ext Y) op ext(~Y)
  // (ext ~Y) op ext(Y)
  if ((match(LHS, m_ZExtOrSExt(m_Value(Y))) &&
       match(RHS, m_ZExtOrSExt(m_Not(m_Specific(Y))))) ||
      (match(RHS, m_ZExtOrSExt(m_Value(Y))) &&
       match(LHS, m_ZExtOrSExt(m_Not(m_Specific(Y))))))
    return true;

  // Look for: (A & B) op ~(A | B)
  {
    Value *A, *B;
    if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
        match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return true;
    if (match(RHS, m_And(m_Value(A), m_Value(B))) &&
        match(LHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return true;
  }

  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
}
bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
  return !I->user_empty() && all_of(I->users(), [](const User *U) {
    ICmpInst::Predicate P;
    return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
  });
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}
bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}
static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
                            const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2, 0,
                           Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
                                         unsigned Depth, AssumptionCache *AC,
                                         const Instruction *CxtI,
                                         const DominatorTree *DT) {
  unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
  return V->getType()->getScalarSizeInBits() - SignBits + 1;
}
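// Illustrative example for ComputeMaxSignificantBits: an i32 produced by
// 'sext i8 %x to i32' has at least 25 sign bits, so it has at most
// 32 - 25 + 1 = 8 significant bits.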
static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW, const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);

  // If one operand is unknown and we have no nowrap information,
  // the result will be unknown independently of the second operand.
  if (KnownOut.isUnknown() && !NSW)
    return;

  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
  KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                const APInt &DemandedElts, KnownBits &Known,
                                KnownBits &Known2, unsigned Depth,
                                const Query &Q) {
  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative =
            (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
             Known2.isNonZero()) ||
            (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
    }
  }

  bool SelfMultiply = Op0 == Op1;
  // TODO: SelfMultiply can be poison, but not undef.
  if (SelfMultiply)
    SelfMultiply &=
        isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1);
  Known = KnownBits::mul(Known, Known2, SelfMultiply);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}
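// Illustrative example for computeKnownBitsMul: for 'mul nsw i8 %x, %x' the
// operands are equal, so the product is known non-negative and the sign bit
// of the result is known zero.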
void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countl_zero();
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
    Known.One &= UnsignedMax & Mask;
    Known.Zero &= ~UnsignedMax & Mask;
  }
}
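// Worked example for computeKnownBitsFromRangeMetadata (illustrative): for
// !range !{i8 96, i8 112} the values span [96, 112), so umin = 96
// (0b01100000) and umax = 111 (0b01101111). umin ^ umax = 0b00001111 has four
// leading zeros, so the top four bits form a common prefix: bits 7..4 of the
// value are known to be 0110.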
static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
          return EphValues.count(U);
        })) {
      if (V == E)
        return true;

      if (V == I || (isa<Instruction>(V) &&
                     !cast<Instruction>(V)->mayHaveSideEffects() &&
                     !cast<Instruction>(V)->isTerminator())) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          append_range(WorkSet, U->operands());
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
    return CI->isAssumeLikeIntrinsic();

  return false;
}
bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Inv->getParent() == CxtI->getParent()) {
    // If Inv and CxtI are in the same block, check if the assume (Inv) is first
    // in the BB.
    if (Inv->comesBefore(CxtI))
      return true;

    // Don't let an assume affect itself - this would cause the problems
    // `isEphemeralValueOf` is trying to prevent, and it would also make
    // the loop below go out of bounds.
    if (Inv == CxtI)
      return false;

    // The context comes first, but they're both in the same block.
    // Make sure there is nothing in between that might interrupt
    // the control flow, not even CxtI itself.
    // We limit the scan distance between the assume and its context instruction
    // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
    // it can be adjusted if needed (could be turned into a cl::opt).
    auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
    if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
      return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  // Inv and CxtI are in different blocks.
  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  return false;
}
static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
  // v u> y implies v != 0.
  if (Pred == ICmpInst::ICMP_UGT)
    return true;

  // Special-case v != 0 to also handle v != null.
  if (Pred == ICmpInst::ICMP_NE)
    return match(RHS, m_Zero());

  // All other predicates - rely on generic ConstantRange handling.
  const APInt *C;
  if (!match(RHS, m_APInt(C)))
    return false;

  ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
  return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
}
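// Illustrative examples for cmpExcludesZero: 'v s< 0' yields the range
// [INT_MIN, 0), which does not contain 0, so v is known nonzero; 'v u< 8'
// yields [0, 8), which does contain 0, so nothing is proven.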
static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return false;

  if (Q.CxtI && V->getType()->isPointerTy()) {
    SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
    if (!NullPointerIsDefined(Q.CxtI->getFunction(),
                              V->getType()->getPointerAddressSpace()))
      AttrKinds.push_back(Attribute::Dereferenceable);

    if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
      return true;
  }

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getFunction() == Q.CxtI->getFunction() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *RHS;
    CmpInst::Predicate Pred;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
    if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
      return false;

    if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
      return true;
  }

  return false;
}
static void computeKnownBitsFromCmp(const Value *V, const ICmpInst *Cmp,
                                    KnownBits &Known, unsigned Depth,
                                    const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  // We are attempting to compute known bits for the operands of an assume.
  // Do not try to use other assumptions for those recursive calls because
  // that can lead to mutual recursion and a compile-time explosion.
  // An example of the mutual recursion: computeKnownBits can call
  // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
  // and so on.
  Query QueryNoAC = Q;
  QueryNoAC.AC = nullptr;

  // Note that ptrtoint may change the bitwidth.
  Value *A, *B;
  auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

  CmpInst::Predicate Pred;
  uint64_t C;
  switch (Cmp->getPredicate()) {
  default:
    break;
  case ICmpInst::ICMP_EQ:
    // assume(v = a)
    if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      Known = Known.unionWith(RHSKnown);
      // assume(v & b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits MaskKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
      // assume(~(v & b) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits MaskKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
      // assume(v | b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits BKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      // assume(~(v | b) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits BKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      // assume(v ^ b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits BKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
      // assume(~(v ^ b) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits BKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One |= RHSKnown.One & BKnown.One;
      // assume(v << c = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C);
      RHSKnown.One.lshrInPlace(C);
      Known = Known.unionWith(RHSKnown);
      // assume(~(v << c) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One |= RHSKnown.Zero;
      // assume(v >> c = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.Zero << C;
      Known.One |= RHSKnown.One << C;
      // assume(~(v >> c) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.One << C;
      Known.One |= RHSKnown.Zero << C;
    }
    break;
  case ICmpInst::ICMP_SGE:
    // assume(v >=_s c) where c is non-negative
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    }
    break;
  case ICmpInst::ICMP_SGT:
    // assume(v >_s c) where c is at least -1.
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    }
    break;
  case ICmpInst::ICMP_SLE:
    // assume(v <=_s c) where c is negative
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    }
    break;
  case ICmpInst::ICMP_SLT:
    // assume(v <_s c) where c is non-positive
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    }
    break;
  case ICmpInst::ICMP_ULE:
    // assume(v <=_u c)
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
    break;
  case ICmpInst::ICMP_ULT:
    // assume(v <_u c)
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // If the RHS is known zero, then this assumption must be wrong (nothing
      // is unsigned less than zero). Signal a conflict and get out of here.
      if (RHSKnown.isZero()) {
        Known.Zero.setAllBits();
        Known.One.setAllBits();
        break;
      }

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
    break;
  case ICmpInst::ICMP_NE: {
    // assume (v & b != 0) where b is a power of 2
    const APInt *BPow2;
    if (match(Cmp, m_ICmp(Pred, m_c_And(m_V, m_Power2(BPow2)), m_Zero()))) {
      Known.One |= BPow2->zextOrTrunc(BitWidth);
    }
  } break;
  }
}
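// Illustrative example for computeKnownBitsFromCmp: given
//   %m = and i8 %v, -16             ; mask 0b11110000
//   %c = icmp eq i8 %m, -96         ; RHS  0b10100000
//   call void @llvm.assume(i1 %c)
// the ICMP_EQ "assume(v & b = a)" case copies the RHS bits covered by the
// mask's known-one bits into %v, so the high nibble of %v is known 1010.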
static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Refine Known set if the pointer alignment is set by assume bundles.
  if (V->getType()->isPointerTy()) {
    if (RetainedKnowledge RK = getKnowledgeValidInContext(
            V, { Attribute::Alignment }, Q.CxtI, Q.DT, Q.AC)) {
      if (isPowerOf2_64(RK.ArgValue))
        Known.Zero.setLowBits(Log2_64(RK.ArgValue));
    }
  }

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      (void)BitWidth;
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      (void)BitWidth;
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxAnalysisRecursionDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
      continue;

    computeKnownBitsFromCmp(V, Cmp, Known, Depth, Q);
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}
/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known and on return
/// contains the known bits of the shift value source. KF is an
/// operator-specific function that, given the known bits and a shift amount,
/// computes the implied known bits of the shift operator's result for that
/// shift amount. The results from calling KF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, const APInt &DemandedElts, KnownBits &Known,
    KnownBits &Known2, unsigned Depth, const Query &Q,
    function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
  computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
  bool ShiftAmtIsConstant = Known.isConstant();
  bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);

  if (ShiftAmtIsConstant) {
    Known = KF(Known2, Known);

    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive.
  // TODO: Should we just carry on?
  if (MaxShiftAmtIsOutOfRange) {
    Known.resetAll();
    return;
  }

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  std::optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero)
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known = Known.intersectWith(
        KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}
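// Worked example for computeKnownBitsFromShiftOperator (illustrative): for an
// i8 shift whose amount has Known.Zero = 0b11111001 (only bits 1 and 2
// unknown), the loop above intersects KF's results for just the feasible
// amounts {0, 2, 4, 6}.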
static KnownBits getKnownBitsFromAndXorOr(const Operator *I,
                                          const APInt &DemandedElts,
                                          const KnownBits &KnownLHS,
                                          const KnownBits &KnownRHS,
                                          unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownLHS.getBitWidth();
  KnownBits KnownOut(BitWidth);
  bool IsAnd = false;
  bool HasKnownOne = !KnownLHS.One.isZero() || !KnownRHS.One.isZero();
  Value *X = nullptr, *Y = nullptr;

  switch (I->getOpcode()) {
  case Instruction::And:
    KnownOut = KnownLHS & KnownRHS;
    IsAnd = true;
    // and(x, -x) is a common idiom that will clear all but the lowest set
    // bit. If we have a single known bit in x, we can clear all bits
    // above it.
    // TODO: instcombine often reassociates independent `and` which can hide
    // this pattern. Try to match and(x, and(-x, y)) / and(and(x, y), -x).
    if (HasKnownOne && match(I, m_c_And(m_Value(X), m_Neg(m_Deferred(X))))) {
      // -(-x) == x, so use whichever (LHS/RHS) gets us a better result.
      if (KnownLHS.countMaxTrailingZeros() <= KnownRHS.countMaxTrailingZeros())
        KnownOut = KnownLHS.blsi();
      else
        KnownOut = KnownRHS.blsi();
    }
    break;
  case Instruction::Or:
    KnownOut = KnownLHS | KnownRHS;
    break;
  case Instruction::Xor:
    KnownOut = KnownLHS ^ KnownRHS;
    // xor(x, x-1) is a common idiom that will clear all but the lowest set
    // bit. If we have a single known bit in x, we can clear all bits
    // above it.
    // TODO: xor(x, x-1) is often rewritten as xor(x, x-C) where C !=
    // -1 but for the purpose of demanded bits (xor(x, x-C) &
    // Demanded) == (xor(x, x-1) & Demanded). Extend the xor pattern
    // to use arbitrary C if xor(x, x-C) is the same as xor(x, x-1).
    if (HasKnownOne &&
        match(I, m_c_Xor(m_Value(X), m_c_Add(m_Deferred(X), m_AllOnes())))) {
      const KnownBits &XBits = I->getOperand(0) == X ? KnownLHS : KnownRHS;
      KnownOut = XBits.blsmsk();
    }
    break;
  default:
    llvm_unreachable("Invalid Op used in 'analyzeKnownBitsFromAndXorOr'");
  }

  // and(x, add (x, -1)) is a common idiom that always clears the low bit;
  // xor/or(x, add (x, -1)) is an idiom that will always set the low bit.
  // here we handle the more general case of adding any odd number by
  // matching the form and/xor/or(x, add(x, y)) where y is odd.
  // TODO: This could be generalized to clearing any bit set in y where the
  // following bit is known to be unset in y.
  if (!KnownOut.Zero[0] && !KnownOut.One[0] &&
      (match(I, m_c_BinOp(m_Value(X), m_c_Add(m_Deferred(X), m_Value(Y)))) ||
       match(I, m_c_BinOp(m_Value(X), m_Sub(m_Deferred(X), m_Value(Y)))) ||
       match(I, m_c_BinOp(m_Value(X), m_Sub(m_Value(Y), m_Deferred(X)))))) {
    KnownBits KnownY(BitWidth);
    computeKnownBits(Y, DemandedElts, KnownY, Depth + 1, Q);
    if (KnownY.countMinTrailingOnes() > 0) {
      if (IsAnd)
        KnownOut.Zero.setBit(0);
      else
        KnownOut.One.setBit(0);
    }
  }
  return KnownOut;
}
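// Illustrative example for the blsi/blsmsk folds above: if the low bit of x
// is known one, then and(x, -x) == 1 and xor(x, x - 1) == 1, so all higher
// bits of the result are known zero.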
// Public so this can be used in `SimplifyDemandedUseBits`.
KnownBits llvm::analyzeKnownBitsFromAndXorOr(
    const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS,
    unsigned Depth, const DataLayout &DL, AssumptionCache *AC,
    const Instruction *CxtI, const DominatorTree *DT,
    OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  auto *FVTy = dyn_cast<FixedVectorType>(I->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);

  return getKnownBitsFromAndXorOr(
      I, DemandedElts, KnownLHS, KnownRHS, Depth,
      Query(DL, AC, safeCxtI(I, CxtI), DT, UseInstrInfo, ORE));
}
ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) {
  Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
  // Without vscale_range, we only know that vscale is non-zero.
  if (!Attr.isValid())
    return ConstantRange(APInt(BitWidth, 1), APInt::getZero(BitWidth));

  unsigned AttrMin = Attr.getVScaleRangeMin();
  // Minimum is larger than vscale width, result is always poison.
  if ((unsigned)llvm::bit_width(AttrMin) > BitWidth)
    return ConstantRange::getEmpty(BitWidth);

  APInt Min(BitWidth, AttrMin);
  std::optional<unsigned> AttrMax = Attr.getVScaleRangeMax();
  if (!AttrMax || (unsigned)llvm::bit_width(*AttrMax) > BitWidth)
    return ConstantRange(Min, APInt::getZero(BitWidth));

  return ConstantRange(Min, APInt(BitWidth, *AttrMax) + 1);
}
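// Illustrative example for getVScaleRange: with 'vscale_range(2,16)' and a
// 32-bit width this returns [2, 17); without the attribute it returns the
// wrapped range [1, 0), i.e. any nonzero value.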
static void computeKnownBitsFromOperator(const Operator *I,
                                         const APInt &DemandedElts,
                                         KnownBits &Known, unsigned Depth,
                                         const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(BitWidth);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
    break;
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
    break;
  case Instruction::Xor:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
    break;
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
                        Known, Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known =
        KnownBits::udiv(Known, Known2, Q.IIQ.isExact(cast<BinaryOperator>(I)));
    break;
  }
  case Instruction::SDiv: {
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known =
        KnownBits::sdiv(Known, Known2, Q.IIQ.isExact(cast<BinaryOperator>(I)));
    break;
  }
  case Instruction::Select: {
    const Value *LHS = nullptr, *RHS = nullptr;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
      switch (SPF) {
      default:
        llvm_unreachable("Unhandled select pattern flavor!");
      case SPF_SMAX:
        Known = KnownBits::smax(Known, Known2);
        break;
      case SPF_SMIN:
        Known = KnownBits::smin(Known, Known2);
        break;
      case SPF_UMAX:
        Known = KnownBits::umax(Known, Known2);
        break;
      case SPF_UMIN:
        Known = KnownBits::umin(Known, Known2);
        break;
      }
      break;
    }

    computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    // Only known if known in both the LHS and RHS.
    Known = Known.intersectWith(Known2);

    if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
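      // Illustrative abs idiom:
      //   %neg = sub nsw i8 0, %x
      //   %cmp = icmp sgt i8 %x, -1
      //   %abs = select i1 %cmp, i8 %x, i8 %neg
      // Here the nsw on %neg lets us mark the sign bit of %abs known zero.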
      if (match(RHS, m_Neg(m_Specific(LHS))) &&
          Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RHS)))
        Known.Zero.setSignBit();
    }

    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    [[fallthrough]];
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.anyextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }

    // Handle cast from vector integer type to scalar or vector integer.
    auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
    if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
        !I->getType()->isIntOrIntVectorTy() ||
        isa<ScalableVectorType>(I->getType()))
      break;

    // Look through a cast from narrow vector elements to wider type.
    // Examples: v4i32 -> v2i64, v3i8 -> i24
    unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
    if (BitWidth % SubBitWidth == 0) {
      // Known bits are automatically intersected across demanded elements of a
      // vector. So for example, if a bit is computed as known zero, it must be
      // zero across all demanded elements of the vector.
      //
      // For this bitcast, each demanded element of the output is sub-divided
      // across a set of smaller vector elements in the source vector. To get
      // the known bits for an entire element of the output, compute the known
      // bits for each sub-element sequentially. This is done by shifting the
      // one-set-bit demanded elements parameter across the sub-elements for
      // consecutive calls to computeKnownBits. We are using the demanded
      // elements parameter as a mask operator.
      //
      // The known bits of each sub-element are then inserted into place
      // (dependent on endian) to form the full result of known bits.
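      //
      // Illustrative example: for 'bitcast <8 x i8> %v to <2 x i32>' on a
      // little-endian target, SubScale is 4, and each demanded i32 element
      // combines the known bits of four consecutive i8 source elements.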
      unsigned NumElts = DemandedElts.getBitWidth();
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);
      }

      KnownBits KnownSrc(SubBitWidth);
      for (unsigned i = 0; i != SubScale; ++i) {
        computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
                         Depth + 1, Q);
        unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
        Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
      }
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW) {
        if (KnownVal.Zero.isSignBitSet())
          Result.Zero.setSignBit();
        if (KnownVal.One.isSignBitSet())
          Result.One.setSignBit();
      }
      return Result;
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Trailing zeros of a left-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setLowBits(C->countr_zero());
    break;
  }
  case Instruction::LShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::lshr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Leading zeros of a right-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setHighBits(C->countl_zero());
    break;
  }
  case Instruction::AShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::ashr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::srem(Known, Known2);
    break;

  case Instruction::URem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::urem(Known, Known2);
    break;
  case Instruction::Alloca:
    Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
    break;
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // Accumulate the constant indices in a separate variable
    // to minimize the number of calls to computeForAddSub.
    APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      // TrailZ can only become smaller, short-circuit if we hit zero.
      if (Known.isUnknown())
        break;

      Value *Index = I->getOperand(i);

      // Handle case when index is zero.
      Constant *CIndex = dyn_cast<Constant>(Index);
      if (CIndex && CIndex->isZeroValue())
        continue;

      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        assert(CIndex &&
               "Access to structure field must be known at compile time");

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        AccConstIndices += Offset;
        continue;
      }

      // Handle array index arithmetic.
      Type *IndexedTy = GTI.getIndexedType();
      if (!IndexedTy->isSized()) {
        Known.resetAll();
        break;
      }

      unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
      KnownBits IndexBits(IndexBitWidth);
      computeKnownBits(Index, IndexBits, Depth + 1, Q);
      TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
      uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
      KnownBits ScalingFactor(IndexBitWidth);
      // Multiply by current sizeof type.
      // &A[i] == A + i * sizeof(*A[i]).
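      // Illustrative example: for 'getelementptr i32, ptr %p, i64 %i' the
      // byte offset is %i * 4, so the offset's two low bits are known zero
      // even when nothing is known about %i.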
      if (IndexTypeSize.isScalable()) {
        // For scalable types the only thing we know about sizeof is
        // that this is a multiple of the minimum size.
        ScalingFactor.Zero.setLowBits(llvm::countr_zero(TypeSizeInBytes));
      } else if (IndexBits.isConstant()) {
        APInt IndexConst = IndexBits.getConstant();
        APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
        IndexConst *= ScalingFactor;
        AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
        continue;
      } else {
        ScalingFactor =
            KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
      }
      IndexBits = KnownBits::mul(IndexBits, ScalingFactor);

      // If the offsets have a different width from the pointer, according
      // to the language reference we need to sign-extend or truncate them
      // to the width of the pointer.
      IndexBits = IndexBits.sextOrTrunc(BitWidth);

      // Note that inbounds does *not* guarantee nsw for the addition, as only
      // the offset is signed, while the base address is unsigned.
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, IndexBits);
    }
    if (!Known.isUnknown() && !AccConstIndices.isZero()) {
      KnownBits Index = KnownBits::makeConstant(AccConstIndices);
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, Index);
    }
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    BinaryOperator *BO = nullptr;
    Value *R = nullptr, *L = nullptr;
    if (matchSimpleRecurrence(P, BO, R, L)) {
      // Handle the case of a simple two-predecessor recurrence PHI.
      // There's a lot more that could theoretically be done here, but
      // this is sufficient to catch some interesting cases.
      unsigned Opcode = BO->getOpcode();

      // If this is a shift recurrence, we know the bits being shifted in.
      // We can combine that with information about the start value of the
      // recurrence to conclude facts about the result.
      if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
           Opcode == Instruction::Shl) &&
          BO->getOperand(0) == I) {

        // We have matched a recurrence of the form:
        // %iv = [R, %entry], [%iv.next, %backedge]
        // %iv.next = shift_op %iv, L
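        //
        // e.g. for %iv.next = lshr %iv, %amt, each iteration can only add
        // leading zeros, so the leading zeros of the start value R persist
        // in every value the recurrence produces.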
        // Recurse with the phi context to avoid concern about whether facts
        // inferred hold at original context instruction. TODO: It may be
        // correct to use the original context. If warranted, explore and
        // add sufficient tests to cover.
        Query RecQ = Q;
        RecQ.CxtI = P;
        computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
        switch (Opcode) {
        case Instruction::Shl:
          // A shl recurrence will only increase the trailing zeros.
          Known.Zero.setLowBits(Known2.countMinTrailingZeros());
          break;
        case Instruction::LShr:
          // A lshr recurrence will preserve the leading zeros of the
          // start value.
          Known.Zero.setHighBits(Known2.countMinLeadingZeros());
          break;
        case Instruction::AShr:
          // An ashr recurrence will extend the initial sign bit.
          Known.Zero.setHighBits(Known2.countMinLeadingZeros());
          Known.One.setHighBits(Known2.countMinLeadingOnes());
          break;
        }
      }
      // Check for operations that have the property that if
      // both their operands have low zero bits, the result
      // will have low zero bits.
      if (Opcode == Instruction::Add ||
          Opcode == Instruction::Sub ||
          Opcode == Instruction::And ||
          Opcode == Instruction::Or ||
          Opcode == Instruction::Mul) {
        // Change the context instruction to the "edge" that flows into the
        // phi. This is important because that is where the value is actually
        // "evaluated" even though it is used later somewhere else. (see also
        // D69571).
        Query RecQ = Q;

        unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
        Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
        Instruction *LInst = P->getIncomingBlock(1 - OpNum)->getTerminator();

        // Ok, we have a PHI of the form L op= R. Check for low
        // zero bits.
        RecQ.CxtI = RInst;
        computeKnownBits(R, Known2, Depth + 1, RecQ);

        // We need to take the minimum number of known bits.
        KnownBits Known3(BitWidth);
        RecQ.CxtI = LInst;
        computeKnownBits(L, Known3, Depth + 1, RecQ);

        Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                       Known3.countMinTrailingZeros()));

        auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
        if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
          // If initial value of recurrence is nonnegative, and we are adding
          // a nonnegative number with nsw, the result can only be nonnegative
          // or poison value regardless of the number of times we execute the
          // add in phi recurrence. If initial value is negative and we are
          // adding a negative number with nsw, the result can only be
          // negative or poison value. Similar arguments apply to sub and mul.
          //
          // (add non-negative, non-negative) --> non-negative
          // (add negative, negative) --> negative
          if (Opcode == Instruction::Add) {
            if (Known2.isNonNegative() && Known3.isNonNegative())
              Known.makeNonNegative();
            else if (Known2.isNegative() && Known3.isNegative())
              Known.makeNegative();
          }

          // (sub nsw non-negative, negative) --> non-negative
          // (sub nsw negative, non-negative) --> negative
          else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
            if (Known2.isNonNegative() && Known3.isNegative())
              Known.makeNonNegative();
            else if (Known2.isNegative() && Known3.isNonNegative())
              Known.makeNegative();
          }

          // (mul nsw non-negative, non-negative) --> non-negative
          else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                   Known3.isNonNegative())
            Known.makeNonNegative();
        }

        break;
      }
    }
    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the intersection of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxAnalysisRecursionDepth - 1 && Known.isUnknown()) {
      // Skip if every incoming value references the PHI itself.
      if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
        break;
      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
        Value *IncValue = P->getIncomingValue(u);
        // Skip direct self references.
        if (IncValue == P) continue;

        // Change the context instruction to the "edge" that flows into the
        // phi. This is important because that is where the value is actually
        // "evaluated" even though it is used later somewhere else. (see also
        // D69571).
        Query RecQ = Q;
        RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();

        Known2 = KnownBits(BitWidth);

        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);

        // If this failed, see if we can use a conditional branch into the phi
        // to help us determine the range of the value.
        if (Known2.isUnknown()) {
          ICmpInst::Predicate Pred;
          const APInt *RHSC;
          BasicBlock *TrueSucc, *FalseSucc;
          // TODO: Use RHS Value and compute range from its known bits.
          if (match(RecQ.CxtI,
                    m_Br(m_c_ICmp(Pred, m_Specific(IncValue), m_APInt(RHSC)),
                         m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
            // Check for cases of duplicate successors.
            if ((TrueSucc == P->getParent()) !=
                (FalseSucc == P->getParent())) {
              // If we're using the false successor, invert the predicate.
              if (FalseSucc == P->getParent())
                Pred = CmpInst::getInversePredicate(Pred);

              switch (Pred) {
              case CmpInst::Predicate::ICMP_EQ:
                Known2 = KnownBits::makeConstant(*RHSC);
                break;
              case CmpInst::Predicate::ICMP_ULE:
                Known2.Zero.setHighBits(RHSC->countl_zero());
                break;
              case CmpInst::Predicate::ICMP_ULT:
                Known2.Zero.setHighBits((*RHSC - 1).countl_zero());
                break;
              default:
                // TODO - add additional integer predicate handling.
                break;
              }
            }
          }
        }

        Known = Known.intersectWith(Known2);
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (Known.isUnknown())
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known = Known.unionWith(Known2);
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::abs: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
        Known = Known2.abs(IntMinIsPoison);
        break;
      }
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
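        // e.g. on i32, a known one anywhere in the top 12 bits caps ctlz at
        // 11, so the result fits in bit_width(11) == 4 bits and all higher
        // result bits are known zero.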
        unsigned PossibleLZ = Known2.countMaxLeadingZeros();
        // If this call is poison for 0 input, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = llvm::bit_width(PossibleLZ);
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::cttz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleTZ = Known2.countMaxTrailingZeros();
        // If this call is poison for 0 input, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
        unsigned LowBits = llvm::bit_width(PossibleTZ);
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = Known2.countMaxPopulation();
        unsigned LowBits = llvm::bit_width(BitsPossiblySet);
        Known.Zero.setBitsFrom(LowBits);
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::fshr:
      case Intrinsic::fshl: {
        const APInt *SA;
        if (!match(I->getOperand(2), m_APInt(SA)))
          break;

        // Normalize to funnel shift left.
        uint64_t ShiftAmt = SA->urem(BitWidth);
        if (II->getIntrinsicID() == Intrinsic::fshr)
          ShiftAmt = BitWidth - ShiftAmt;
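        // e.g. fshr(x, y, 3) on i8 takes the low 8 bits of (x:y) >> 3, i.e.
        // (x << 5) | (y >> 3), which is exactly fshl(x, y, 5).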
        KnownBits Known3(BitWidth);
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);

        Known.Zero =
            Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
        Known.One =
            Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
        break;
      }
      case Intrinsic::uadd_sat:
      case Intrinsic::usub_sat: {
        bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

        // Add: Leading ones of either operand are preserved.
        // Sub: Leading zeros of LHS and leading ones of RHS are preserved
        // as leading zeros in the result.
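        // e.g. for usub_sat, if the LHS has n leading zeros, the result
        // either saturates to zero or stays at or below the LHS, so those n
        // leading zeros survive in the result.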
        unsigned LeadingKnown;
        if (IsAdd)
          LeadingKnown = std::max(Known.countMinLeadingOnes(),
                                  Known2.countMinLeadingOnes());
        else
          LeadingKnown = std::max(Known.countMinLeadingZeros(),
                                  Known2.countMinLeadingOnes());

        Known = KnownBits::computeForAddSub(
            IsAdd, /* NSW */ false, Known, Known2);

        // We select between the operation result and all-ones/zero
        // respectively, so we can preserve known ones/zeros.
        if (IsAdd) {
          Known.One.setHighBits(LeadingKnown);
          Known.Zero.clearAllBits();
        } else {
          Known.Zero.setHighBits(LeadingKnown);
          Known.One.clearAllBits();
        }
        break;
      }
      case Intrinsic::umin:
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
        Known = KnownBits::umin(Known, Known2);
        break;
      case Intrinsic::umax:
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
        Known = KnownBits::umax(Known, Known2);
        break;
      case Intrinsic::smin:
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
        Known = KnownBits::smin(Known, Known2);
        break;
      case Intrinsic::smax:
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
        Known = KnownBits::smax(Known, Known2);
        break;
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        break;
      case Intrinsic::riscv_vsetvli:
      case Intrinsic::riscv_vsetvlimax:
        // Assume that VL output is <= 65536, i.e. fits in 17 bits.
        // TODO: Take SEW and LMUL into account.
        if (BitWidth > 17)
          Known.Zero.setBitsFrom(17);
        break;
      case Intrinsic::vscale: {
        if (!II->getParent() || !II->getFunction())
          break;

        Known = getVScaleRange(II->getFunction(), BitWidth).toKnownBits();
        break;
      }
      }
    }
    break;
  case Instruction::ShuffleVector: {
    auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
    // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
    if (!Shuf) {
      Known.resetAll();
      return;
    }
    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    APInt DemandedLHS, DemandedRHS;
    if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
      Known.resetAll();
      return;
    }
    Known.One.setAllBits();
    Known.Zero.setAllBits();
    if (!!DemandedLHS) {
      const Value *LHS = Shuf->getOperand(0);
      computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    if (!!DemandedRHS) {
      const Value *RHS = Shuf->getOperand(1);
      computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
      Known = Known.intersectWith(Known2);
    }
    break;
  }
  case Instruction::InsertElement: {
    if (isa<ScalableVectorType>(I->getType())) {
      Known.resetAll();
      return;
    }
    const Value *Vec = I->getOperand(0);
    const Value *Elt = I->getOperand(1);
    auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
    // Early out if the index is non-constant or out-of-range.
    unsigned NumElts = DemandedElts.getBitWidth();
    if (!CIdx || CIdx->getValue().uge(NumElts)) {
      Known.resetAll();
      return;
    }
    Known.One.setAllBits();
    Known.Zero.setAllBits();
    unsigned EltIdx = CIdx->getZExtValue();
    // Do we demand the inserted element?
    if (DemandedElts[EltIdx]) {
      computeKnownBits(Elt, Known, Depth + 1, Q);
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    // We don't need the base vector element that has been inserted.
    APInt DemandedVecElts = DemandedElts;
    DemandedVecElts.clearBit(EltIdx);
    if (!!DemandedVecElts) {
      computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
      Known = Known.intersectWith(Known2);
    }
    break;
  }
  case Instruction::ExtractElement: {
    // Look through extract element. If the index is non-constant or
    // out-of-range demand all elements, otherwise just the extracted element.
    const Value *Vec = I->getOperand(0);
    const Value *Idx = I->getOperand(1);
    auto *CIdx = dyn_cast<ConstantInt>(Idx);
    if (isa<ScalableVectorType>(Vec->getType())) {
      // FIXME: there's probably *something* we can do with scalable vectors
      Known.resetAll();
      break;
    }
    unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
    APInt DemandedVecElts = APInt::getAllOnes(NumElts);
    if (CIdx && CIdx->getValue().ult(NumElts))
      DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
    computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
    break;
  }
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, DemandedElts,
                                 Known, Known2, Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, DemandedElts,
                                 Known, Known2, Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              DemandedElts, Known, Known2, Depth, Q);
          break;
        }
      }
    }
    break;
  case Instruction::Freeze:
    if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
                                  Depth + 1))
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    break;
  }
}
/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
  return Known;
}

/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, Known, Depth, Q);
  return Known;
}
/// Determine which bits of V are known to be either zero or one and return
/// them in the Known bit set.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and a
/// bit is set only if it is true for all of the demanded elements in the
/// vector specified by DemandedElts.
void computeKnownBits(const Value *V, const APInt &DemandedElts,
                      KnownBits &Known, unsigned Depth, const Query &Q) {
  if (!DemandedElts) {
    // No demanded elts, better to assume we don't know anything.
    Known.resetAll();
    return;
  }

  assert(V && "No Value?");
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

#ifndef NDEBUG
  Type *Ty = V->getType();
  unsigned BitWidth = Known.getBitWidth();

  assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
         "Not integer or pointer type!");

  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
    assert(
        FVTy->getNumElements() == DemandedElts.getBitWidth() &&
        "DemandedElt width should equal the fixed vector number of elements");
  } else {
    assert(DemandedElts == APInt(1, 1) &&
           "DemandedElt width should be 1 for scalars or scalable vectors");
  }

  Type *ScalarTy = Ty->getScalarType();
  if (ScalarTy->isPointerTy()) {
    assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
           "V and Known should have same BitWidth");
  } else {
    assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
           "V and Known should have same BitWidth");
  }
#endif
  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector constant!
    Known = KnownBits::makeConstant(*C);
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
    Known.setAllZero();
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
  if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
    assert(!isa<ScalableVectorType>(V->getType()));
    // We know that CDV must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
      if (!DemandedElts[i])
        continue;
      APInt Elt = CDV->getElementAsAPInt(i);
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  if (const auto *CV = dyn_cast<ConstantVector>(V)) {
    assert(!isa<ScalableVectorType>(V->getType()));
    // We know that CV must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      if (!DemandedElts[i])
        continue;
      Constant *Element = CV->getAggregateElement(i);
      auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
      if (!ElementCI) {
        Known.resetAll();
        return;
      }
      const APInt &Elt = ElementCI->getValue();
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }
  // Start out not knowing anything.
  Known.resetAll();

  // We can't imply anything about undefs.
  if (isa<UndefValue>(V))
    return;

  // There's no point in looking through other users of ConstantData for
  // assumptions. Confirm that we've handled them all.
  assert(!isa<ConstantData>(V) && "Unhandled constant data!");

  // All recursive calls that increase depth must come after this.
  if (Depth == MaxAnalysisRecursionDepth)
    return;

  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
  // the bits of its aliasee.
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (!GA->isInterposable())
      computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
    return;
  }

  if (const Operator *I = dyn_cast<Operator>(V))
    computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);

  // Aligned pointers have trailing zeros - refine Known.Zero set.
  if (isa<PointerType>(V->getType())) {
    Align Alignment = V->getPointerAlignment(Q.DL);
    Known.Zero.setLowBits(Log2(Alignment));
  }

  // computeKnownBitsFromAssume strictly refines Known.
  // Therefore, we run them after computeKnownBitsFromOperator.

  // Check whether a nearby assume intrinsic can determine some known bits.
  computeKnownBitsFromAssume(V, Known, Depth, Q);

  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
}
/// Try to detect a recurrence in which the value of the induction variable
/// is always a power of two (or zero).
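/// e.g. %iv = phi i32 [ 4, %entry ], [ %iv.next, %loop ] with
/// %iv.next = shl nuw i32 %iv, 1 doubles a power of two each iteration, so
/// %iv is always a power of two (or poison once the set bit shifts out).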
static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
                                   unsigned Depth, Query &Q) {
  BinaryOperator *BO = nullptr;
  Value *Start = nullptr, *Step = nullptr;
  if (!matchSimpleRecurrence(PN, BO, Start, Step))
    return false;

  // Initial value must be a power of two.
  for (const Use &U : PN->operands()) {
    if (U.get() == Start) {
      // Initial value comes from a different BB, need to adjust context
      // instruction for analysis.
      Q.CxtI = PN->getIncomingBlock(U)->getTerminator();
      if (!isKnownToBeAPowerOfTwo(Start, OrZero, Depth, Q))
        return false;
    }
  }

  // Except for Mul, the induction variable must be on the left side of the
  // increment expression, otherwise its value can be arbitrary.
  if (BO->getOpcode() != Instruction::Mul && BO->getOperand(1) != Step)
    return false;

  Q.CxtI = BO->getParent()->getTerminator();
  switch (BO->getOpcode()) {
  case Instruction::Mul:
    // Power of two is closed under multiplication.
    return (OrZero || Q.IIQ.hasNoUnsignedWrap(BO) ||
            Q.IIQ.hasNoSignedWrap(BO)) &&
           isKnownToBeAPowerOfTwo(Step, OrZero, Depth, Q);
  case Instruction::SDiv:
    // Start value must not be signmask for signed division, so simply being a
    // power of two is not sufficient, and it has to be a constant.
    if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
      return false;
    [[fallthrough]];
  case Instruction::UDiv:
    // Divisor must be a power of two.
    // If OrZero is false, we cannot guarantee that the induction variable is
    // non-zero after division (same for Shr), unless it is exact division.
    return (OrZero || Q.IIQ.isExact(BO)) &&
           isKnownToBeAPowerOfTwo(Step, false, Depth, Q);
  case Instruction::Shl:
    return OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO);
  case Instruction::AShr:
    if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
      return false;
    [[fallthrough]];
  case Instruction::LShr:
    return OrZero || Q.IIQ.isExact(BO);
  default:
    return false;
  }
}
/// Return true if the given value is known to have exactly one
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                            const Query &Q) {
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  // Attempt to match against constants.
  if (OrZero && match(V, m_Power2OrZero()))
    return true;
  if (match(V, m_Power2()))
    return true;

  // 1 << X is clearly a power of two if the one is not shifted off the end.
  // If it is shifted off the end then the result is undefined.
  if (match(V, m_Shl(m_One(), m_Value())))
    return true;

  // (signmask) >>l X is clearly a power of two if the one is not shifted off
  // the bottom. If it is shifted off the bottom then the result is undefined.
  if (match(V, m_LShr(m_SignMask(), m_Value())))
    return true;

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxAnalysisRecursionDepth)
    return false;

  Value *X = nullptr, *Y = nullptr;
  // A shift left or a logical shift right of a power of two is a power of two
  // or zero.
  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
                 match(V, m_LShr(m_Value(X), m_Value()))))
    return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);

  if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
    return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);

  if (const SelectInst *SI = dyn_cast<SelectInst>(V))
    return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
           isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);

  // Peek through min/max.
  if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
    return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
           isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
  }

  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
    // A power of two and'd with anything is a power of two or zero.
    if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
      return true;
    // X & (-X) is always a power of two or zero.
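    // (Negating X flips every bit above its lowest set bit, so the AND
    // isolates exactly that bit.)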
    if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
      return true;
    return false;
  }

  // Adding a power-of-two or zero to the same power-of-two or zero yields
  // either the original power-of-two, a larger power-of-two or zero.
  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
    if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
        Q.IIQ.hasNoSignedWrap(VOBO)) {
      if (match(X, m_And(m_Specific(Y), m_Value())) ||
          match(X, m_And(m_Value(), m_Specific(Y))))
        if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
          return true;
      if (match(Y, m_And(m_Specific(X), m_Value())) ||
          match(Y, m_And(m_Value(), m_Specific(X))))
        if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
          return true;

      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      KnownBits LHSBits(BitWidth);
      computeKnownBits(X, LHSBits, Depth, Q);

      KnownBits RHSBits(BitWidth);
      computeKnownBits(Y, RHSBits, Depth, Q);
      // If i8 V is a power of two or zero:
      //  ZeroBits: 1 1 1 0 1 1 1 1
      // ~ZeroBits: 0 0 0 1 0 0 0 0
      if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
        // If OrZero isn't set, we cannot give back a zero result.
        // Make sure either the LHS or RHS has a bit set.
        if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
          return true;
    }
  }

  // A PHI node is power of two if all incoming values are power of two, or if
  // it is an induction variable where in each step its value is a power of
  // two.
  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    Query RecQ = Q;

    // Check if it is an induction variable and always power of two.
    if (isPowerOfTwoRecurrence(PN, OrZero, Depth, RecQ))
      return true;

    // Recursively check all incoming values. Limit recursion to 2 levels, so
    // that search complexity is limited to number of operands^2.
    unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
    return llvm::all_of(PN->operands(), [&](const Use &U) {
      // Value is power of 2 if it is coming from PHI node itself by induction.
      if (U.get() == PN)
        return true;

      // Change the context instruction to the incoming block where it is
      // evaluated.
      RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
      return isKnownToBeAPowerOfTwo(U.get(), OrZero, NewDepth, RecQ);
    });
  }

  // An exact divide or right shift can only shift off zero bits, so the result
  // is a power of two only if the first operand is a power of two and not
  // copying a sign bit (sdiv int_min, 2).
  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
      match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
    return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
                                  Depth, Q);
  }

  return false;
}
/// Test whether a GEP's result is known to be non-null.
///
/// Uses properties inherent in a GEP to try to determine whether it is known
/// to be non-null.
///
/// Currently this routine does not support vector GEPs.
static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
                              const Query &Q) {
  const Function *F = nullptr;
  if (const Instruction *I = dyn_cast<Instruction>(GEP))
    F = I->getFunction();

  if (!GEP->isInBounds() ||
      NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
    return false;

  // FIXME: Support vector-GEPs.
  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");

  // If the base pointer is non-null, we cannot walk to a null address with an
  // inbounds GEP in address space zero.
  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
    return true;

  // Walk the GEP operands and see if any operand introduces a non-zero offset.
  // If so, then the GEP cannot produce a null pointer, as doing so would
  // inherently violate the inbounds contract within address space zero.
  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    // Struct types are easy -- they must always be indexed by a constant.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = Q.DL.getStructLayout(STy);
      uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
      if (ElementOffset > 0)
        return true;
      continue;
    }

    // If we have a zero-sized type, the index doesn't matter. Keep looping.
    if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).isZero())
      continue;

    // Fast path the constant operand case both for efficiency and so we don't
    // increment Depth when just zipping down an all-constant GEP.
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
      if (!OpC->isZero())
        return true;
      continue;
    }

    // We post-increment Depth here because while isKnownNonZero increments it
    // as well, when we pop back up that increment won't persist. We don't want
    // to recurse 10k times just because we have 10k GEP operands. We don't
    // bail completely out because we want to handle constant GEPs regardless
    // of depth.
    if (Depth++ >= MaxAnalysisRecursionDepth)
      continue;

    if (isKnownNonZero(GTI.getOperand(), Depth, Q))
      return true;
  }

  return false;
}
static bool isKnownNonNullFromDominatingCondition(const Value *V,
                                                  const Instruction *CtxI,
                                                  const DominatorTree *DT) {
  assert(!isa<Constant>(V) && "Called for constant?");

  if (!CtxI || !DT)
    return false;

  unsigned NumUsesExplored = 0;
  for (const auto *U : V->users()) {
    // Avoid massive lists.
    if (NumUsesExplored >= DomConditionsMaxUses)
      break;
    NumUsesExplored++;

    // If the value is used as an argument to a call or invoke, then argument
    // attributes may provide an answer about null-ness.
    if (const auto *CB = dyn_cast<CallBase>(U))
      if (auto *CalledFunc = CB->getCalledFunction())
        for (const Argument &Arg : CalledFunc->args())
          if (CB->getArgOperand(Arg.getArgNo()) == V &&
              Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
              DT->dominates(CB, CtxI))
            return true;

    // If the value is used as a load/store, then the pointer must be non null.
    if (V == getLoadStorePointerOperand(U)) {
      const Instruction *I = cast<Instruction>(U);
      if (!NullPointerIsDefined(I->getFunction(),
                                V->getType()->getPointerAddressSpace()) &&
          DT->dominates(I, CtxI))
        return true;
    }

    // Consider only compare instructions uniquely controlling a branch.
    Value *RHS;
    CmpInst::Predicate Pred;
    if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
      continue;

    bool NonNullIfTrue;
    if (cmpExcludesZero(Pred, RHS))
      NonNullIfTrue = true;
    else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
      NonNullIfTrue = false;
    else
      continue;

    SmallVector<const User *, 4> WorkList;
    SmallPtrSet<const User *, 4> Visited;
    for (const auto *CmpU : U->users()) {
      assert(WorkList.empty() && "Should be!");
      if (Visited.insert(CmpU).second)
        WorkList.push_back(CmpU);

      while (!WorkList.empty()) {
        auto *Curr = WorkList.pop_back_val();

        // If a user is an AND, add all its users to the work list. We only
        // propagate "pred != null" condition through AND because it is only
        // correct to assume that all conditions of AND are met in true branch.
        // TODO: Support similar logic of OR and EQ predicate?
        if (NonNullIfTrue)
          if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
            for (const auto *CurrU : Curr->users())
              if (Visited.insert(CurrU).second)
                WorkList.push_back(CurrU);
            continue;
          }

        if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
          assert(BI->isConditional() && "uses a comparison!");

          BasicBlock *NonNullSuccessor =
              BI->getSuccessor(NonNullIfTrue ? 0 : 1);
          BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
          if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
            return true;
        } else if (NonNullIfTrue && isGuard(Curr) &&
                   DT->dominates(cast<Instruction>(Curr), CtxI)) {
          return true;
        }
      }
    }
  }

  return false;
}
/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never equal to 'Value'?
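/// e.g. the operand list {i8 1, i8 5, i8 16, i8 32} encodes the half-open
/// ranges [1, 5) and [16, 32); neither contains 0, so a query for 0 returns
/// true.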
static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
  const unsigned NumRanges = Ranges->getNumOperands() / 2;
  assert(NumRanges >= 1);
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.contains(Value))
      return false;
  }
  return true;
}
/// Try to detect a recurrence that monotonically increases/decreases from a
/// non-zero starting value. These are common as induction variables.
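/// e.g. %i = phi i32 [ 1, %entry ], [ %i.next, %loop ] with
/// %i.next = add nuw i32 %i, 2 starts non-zero and can never wrap back to
/// zero, so the PHI is known non-zero.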
static bool isNonZeroRecurrence(const PHINode *PN) {
  BinaryOperator *BO = nullptr;
  Value *Start = nullptr, *Step = nullptr;
  const APInt *StartC, *StepC;
  if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
      !match(Start, m_APInt(StartC)) || StartC->isZero())
    return false;

  switch (BO->getOpcode()) {
  case Instruction::Add:
    // Starting from non-zero and stepping away from zero can never wrap back
    // to zero.
    return BO->hasNoUnsignedWrap() ||
           (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
            StartC->isNegative() == StepC->isNegative());
  case Instruction::Mul:
    return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
           match(Step, m_APInt(StepC)) && !StepC->isZero();
  case Instruction::Shl:
    return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
  case Instruction::AShr:
  case Instruction::LShr:
    return BO->isExact();
  default:
    return false;
  }
}
static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
                         const Query &Q, unsigned BitWidth, Value *X, Value *Y,
                         bool NSW) {
  KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
  KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);

  // If X and Y are both non-negative (as signed values) then their sum is not
  // zero unless both X and Y are zero.
  if (XKnown.isNonNegative() && YKnown.isNonNegative())
    if (isKnownNonZero(Y, DemandedElts, Depth, Q) ||
        isKnownNonZero(X, DemandedElts, Depth, Q))
      return true;

  // If X and Y are both negative (as signed values) then their sum is not
  // zero unless both X and Y equal INT_MIN.
  if (XKnown.isNegative() && YKnown.isNegative()) {
    APInt Mask = APInt::getSignedMaxValue(BitWidth);
    // The sign bit of X is set. If some other bit is set then X is not equal
    // to INT_MIN.
    if (XKnown.One.intersects(Mask))
      return true;
    // The sign bit of Y is set. If some other bit is set then Y is not equal
    // to INT_MIN.
    if (YKnown.One.intersects(Mask))
      return true;
  }

  // The sum of a non-negative number and a power of two is not zero.
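  // (X + Y == 0 would require X == 2^BitWidth - Y, and since Y is a power of
  // two, 2^BitWidth - Y has its sign bit set, contradicting X non-negative.)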
  if (XKnown.isNonNegative() &&
      isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
    return true;
  if (YKnown.isNonNegative() &&
      isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
    return true;

  return KnownBits::computeForAddSub(/*Add*/ true, NSW, XKnown, YKnown)
      .isNonZero();
}
static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth,
                         const Query &Q, unsigned BitWidth, Value *X,
                         Value *Y) {
  if (auto *C = dyn_cast<Constant>(X))
    if (C->isNullValue() && isKnownNonZero(Y, DemandedElts, Depth, Q))
      return true;

  KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
  if (XKnown.isUnknown())
    return false;
  KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
  // If X != Y then X - Y is non-zero.
  std::optional<bool> ne = KnownBits::ne(XKnown, YKnown);
  // If we are unable to compute whether X != Y, we won't be able to prove
  // anything by computing the known bits of the sub expression, so just
  // return here.
  return ne && *ne;
}
static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q,
                           const KnownBits &KnownVal) {
  auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
    switch (I->getOpcode()) {
    case Instruction::Shl:
      return Lhs.shl(Rhs);
    case Instruction::LShr:
      return Lhs.lshr(Rhs);
    case Instruction::AShr:
      return Lhs.ashr(Rhs);
    default:
      llvm_unreachable("Unknown Shift Opcode");
    }
  };

  auto InvShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
    switch (I->getOpcode()) {
    case Instruction::Shl:
      return Lhs.lshr(Rhs);
    case Instruction::LShr:
    case Instruction::AShr:
      return Lhs.shl(Rhs);
    default:
      llvm_unreachable("Unknown Shift Opcode");
    }
  };

  if (KnownVal.isUnknown())
    return false;

  KnownBits KnownCnt =
      computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q);
  APInt MaxShift = KnownCnt.getMaxValue();
  unsigned NumBits = KnownVal.getBitWidth();
  if (MaxShift.uge(NumBits))
    return false;

  if (!ShiftOp(KnownVal.One, MaxShift).isZero())
    return true;

  // If all of the bits shifted out are known to be zero, and Val is known
  // non-zero then at least one non-zero bit must remain.
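  // e.g. for shl on i8 with MaxShift == 3, only the top three bits can be
  // shifted out; if those are known zero and the operand is non-zero, some
  // set bit must survive in the result.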
  if (InvShiftOp(KnownVal.Zero, NumBits - MaxShift)
          .eq(InvShiftOp(APInt::getAllOnes(NumBits), NumBits - MaxShift)) &&
      isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q))
    return true;

  return false;
}
/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every demanded element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
                    const Query &Q) {
#ifndef NDEBUG
  Type *Ty = V->getType();
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
    assert(
        FVTy->getNumElements() == DemandedElts.getBitWidth() &&
        "DemandedElt width should equal the fixed vector number of elements");
  } else {
    assert(DemandedElts == APInt(1, 1) &&
           "DemandedElt width should be 1 for scalars");
  }
#endif

  if (auto *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return false;
    if (isa<ConstantInt>(C))
      // Must be non-zero due to null test above.
      return true;

    // For constant vectors, check that all elements are undefined or known
    // non-zero to determine that the whole vector is known non-zero.
    if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
      for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
        if (!DemandedElts[i])
          continue;
        Constant *Elt = C->getAggregateElement(i);
        if (!Elt || Elt->isNullValue())
          return false;
        if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
          return false;
      }
      return true;
    }

    // A global variable in address space 0 is non null unless extern weak
    // or an absolute symbol reference. Other address spaces may have null as a
    // valid address for a global, so we can't assume anything.
    if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
      if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
          GV->getType()->getAddressSpace() == 0)
        return true;
    }

    // For constant expressions, fall through to the Operator code below.
    if (!isa<ConstantExpr>(V))
      return false;
  }

  if (auto *I = dyn_cast<Instruction>(V)) {
    if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
      // If the possible ranges don't contain zero, then the value is
      // definitely non-zero.
      if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
        const APInt ZeroValue(Ty->getBitWidth(), 0);
        if (rangeMetadataExcludesValue(Ranges, ZeroValue))
          return true;
      }
    }
  }

  if (!isa<Constant>(V) && isKnownNonZeroFromAssume(V, Q))
    return true;

  // Some of the tests below are recursive, so bail out if we hit the limit.
  if (Depth++ >= MaxAnalysisRecursionDepth)
    return false;

  // Check for pointer simplifications.
  if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
    // Alloca never returns null, malloc might.
    if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
      return true;

    // A byval or inalloca argument may not be null in a non-default address
    // space. A nonnull argument is assumed never 0.
    if (const Argument *A = dyn_cast<Argument>(V)) {
      if (((A->hasPassPointeeByValueCopyAttr() &&
            !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
           A->hasNonNullAttr()))
        return true;
    }

    // A Load tagged with nonnull metadata is never null.
    if (const LoadInst *LI = dyn_cast<LoadInst>(V))
      if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
        return true;

    if (const auto *Call = dyn_cast<CallBase>(V)) {
      if (Call->isReturnNonNull())
        return true;
      if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
        return isKnownNonZero(RP, Depth, Q);
    }
  }

  if (!isa<Constant>(V) &&
      isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
    return true;
  const Operator *I = dyn_cast<Operator>(V);
  if (!I)
    return false;

  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    if (I->getType()->isPointerTy())
      return isGEPKnownNonNull(cast<GEPOperator>(I), Depth, Q);
    break;
  case Instruction::BitCast: {
    // We need to be a bit careful here. We can only peek through the bitcast
    // if the scalar size of elements in the operand is smaller than, and a
    // multiple of, the size they are cast to. Take three cases:
    //
    // 1) Unsafe:
    //        bitcast <2 x i16> %NonZero to <4 x i8>
    //
    //    %NonZero can have 2 non-zero i16 elements, but isKnownNonZero on a
    //    <4 x i8> requires that all 4 i8 elements be non-zero, which isn't
    //    guaranteed (imagine just the sign bit set in the 2 i16 elements).
    //
    // 2) Unsafe:
    //        bitcast <4 x i3> %NonZero to <3 x i4>
    //
    //    Even though the scalar size of the src (`i3`) is smaller than the
    //    scalar size of the dst `i4`, because `i4` is not a multiple of `i3`
    //    it's possible for the `3 x i4` elements to be zero, because there
    //    are some elements in the destination that don't contain any full
    //    src element.
    //
    // 3) Safe:
    //        bitcast <4 x i8> %NonZero to <2 x i16>
    //
    //    This is always safe, as non-zero in the 4 i8 elements implies
    //    non-zero in the combination of any two adjacent ones. Since i16 is a
    //    multiple of i8, each i16 is guaranteed to have 2 full i8 elements.
    //    This all implies the 2 i16 elements are non-zero.
    Type *FromTy = I->getOperand(0)->getType();
    if ((FromTy->isIntOrIntVectorTy() || FromTy->isPtrOrPtrVectorTy()) &&
        (BitWidth % getBitWidth(FromTy->getScalarType(), Q.DL)) == 0)
      return isKnownNonZero(I->getOperand(0), Depth, Q);
  } break;
  case Instruction::IntToPtr:
    // Note that we have to take special care to avoid looking through
    // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
    // as casts that can alter the value, e.g., AddrSpaceCasts.
    if (!isa<ScalableVectorType>(I->getType()) &&
        Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <=
            Q.DL.getTypeSizeInBits(I->getType()).getFixedValue())
      return isKnownNonZero(I->getOperand(0), Depth, Q);
    break;
  case Instruction::PtrToInt:
    // Similar to int2ptr above, we can look through ptr2int here if the cast
    // is a no-op or an extend and not a truncate.
    if (!isa<ScalableVectorType>(I->getType()) &&
        Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <=
            Q.DL.getTypeSizeInBits(I->getType()).getFixedValue())
      return isKnownNonZero(I->getOperand(0), Depth, Q);
    break;
  case Instruction::Sub:
    return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, I->getOperand(0),
                        I->getOperand(1));
  case Instruction::Or:
    // X | Y != 0 if X != 0 or Y != 0.
    return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) ||
           isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q);
  case Instruction::SExt:
  case Instruction::ZExt:
    // ext X != 0 if X != 0.
    return isKnownNonZero(I->getOperand(0), Depth, Q);
  case Instruction::Shl: {
    // shl nsw/nuw can't remove any non-zero bits.
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    if (Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO))
      return isKnownNonZero(I->getOperand(0), Depth, Q);

    // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
    // if the lowest bit is shifted off the end.
    KnownBits Known(BitWidth);
    computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth, Q);
    if (Known.One[0])
      return true;

    return isNonZeroShift(I, DemandedElts, Depth, Q, Known);
  }
  case Instruction::LShr:
  case Instruction::AShr: {
    // shr exact can only shift out zero bits.
    const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
    if (BO->isExact())
      return isKnownNonZero(I->getOperand(0), Depth, Q);

    // shr X, Y != 0 if X is negative. Note that the value of the shift is not
    // defined if the sign bit is shifted off the end.
    KnownBits Known =
        computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
    if (Known.isNegative())
      return true;

    return isNonZeroShift(I, DemandedElts, Depth, Q, Known);
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
    // X / Y
    // div exact can only produce a zero if the dividend is zero.
    if (cast<PossiblyExactOperator>(I)->isExact())
      return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q);
    if (I->getOpcode() == Instruction::UDiv) {
      std::optional<bool> XUgeY;
      KnownBits XKnown =
          computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
      if (!XKnown.isUnknown()) {
        KnownBits YKnown =
            computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q);
        // If X u>= Y then div is non zero (0/0 is UB).
        XUgeY = KnownBits::uge(XKnown, YKnown);
      }
      // If X is totally unknown or X u< Y, we won't be able to prove non-zero
      // with compute known bits, so just return early.
      return XUgeY && *XUgeY;
    }
    break;
  case Instruction::Add: {
    // X + Y.

    // If Add has nuw wrap flag, then if either X or Y is non-zero the result
    // is non-zero.
    auto *BO = cast<OverflowingBinaryOperator>(V);
    if (Q.IIQ.hasNoUnsignedWrap(BO))
      return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) ||
             isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q);

    return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, I->getOperand(0),
                        I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO));
  }
  case Instruction::Mul: {
    // If X and Y are non-zero then so is X * Y as long as the multiplication
    // does not overflow.
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    if (Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO))
      return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q) &&
             isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q);

    // If either X or Y is odd, then if the other is non-zero the result can't
    // be zero.
    KnownBits XKnown =
        computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
    if (XKnown.One[0])
      return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q);

    KnownBits YKnown =
        computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q);
    if (YKnown.One[0])
      return XKnown.isNonZero() ||
             isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q);
    // If there exists any subset of X (sX) and subset of Y (sY) s.t. sX * sY
    // is non-zero, then X * Y is non-zero. We can find sX and sY by just
    // taking the lowest known one of X and Y. If they are non-zero, the
    // result must be non-zero. We can check that LSB(X) * LSB(Y) != 0 by
    // checking X.countMaxTrailingZeros() + Y.countMaxTrailingZeros() <
    // BitWidth.
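    // e.g. on i8, if X and Y each have at most three trailing zeros, then
    // LSB(X) * LSB(Y) <= 2^6, which is still non-zero modulo 2^8.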
    return (XKnown.countMaxTrailingZeros() + YKnown.countMaxTrailingZeros()) <
           BitWidth;
  }
  case Instruction::Select:
    // (C ? X : Y) != 0 if X != 0 and Y != 0.
    if (isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) &&
        isKnownNonZero(I->getOperand(2), DemandedElts, Depth, Q))
      return true;
    break;
  case Instruction::PHI: {
    auto *PN = cast<PHINode>(I);
    if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
      return true;

    // Check if all incoming values are non-zero using recursion.
    Query RecQ = Q;
    unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
    return llvm::all_of(PN->operands(), [&](const Use &U) {
      if (U.get() == PN)
        return true;
      RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
      return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
    });
  }
  case Instruction::ExtractElement:
    if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
      const Value *Vec = EEI->getVectorOperand();
      const Value *Idx = EEI->getIndexOperand();
      auto *CIdx = dyn_cast<ConstantInt>(Idx);
      if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
        unsigned NumElts = VecTy->getNumElements();
        APInt DemandedVecElts = APInt::getAllOnes(NumElts);
        if (CIdx && CIdx->getValue().ult(NumElts))
          DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
        return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
      }
    }
    break;
  case Instruction::Freeze:
    return isKnownNonZero(I->getOperand(0), Depth, Q) &&
           isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
                                     Depth);
  case Instruction::Call:
    if (auto *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::sshl_sat:
      case Intrinsic::ushl_sat:
      case Intrinsic::abs:
      case Intrinsic::bitreverse:
      case Intrinsic::bswap:
      case Intrinsic::ctpop:
        return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q);
      case Intrinsic::ssub_sat:
        return isNonZeroSub(DemandedElts, Depth, Q, BitWidth,
                            II->getArgOperand(0), II->getArgOperand(1));
      case Intrinsic::sadd_sat:
        return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth,
                            II->getArgOperand(0), II->getArgOperand(1),
                            /*NSW*/ true);
      case Intrinsic::umax:
      case Intrinsic::uadd_sat:
        return isKnownNonZero(II->getArgOperand(1), DemandedElts, Depth, Q) ||
               isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q);
      case Intrinsic::smin:
      case Intrinsic::smax: {
        auto KnownOpImpliesNonZero = [&](const KnownBits &K) {
          return II->getIntrinsicID() == Intrinsic::smin
                     ? K.isNegative()
                     : K.isStrictlyPositive();
        };
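        // (smin is no larger than either operand, so one negative operand
        // forces a negative, hence non-zero, result; dually, smax is no
        // smaller than either operand, so one strictly positive operand
        // forces a non-zero result.)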
        KnownBits XKnown =
            computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q);
        if (KnownOpImpliesNonZero(XKnown))
          return true;
        KnownBits YKnown =
            computeKnownBits(II->getArgOperand(1), DemandedElts, Depth, Q);
        if (KnownOpImpliesNonZero(YKnown))
          return true;

        if (XKnown.isNonZero() && YKnown.isNonZero())
          return true;
      }
        [[fallthrough]];
      case Intrinsic::umin:
        return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q) &&
               isKnownNonZero(II->getArgOperand(1), DemandedElts, Depth, Q);
      case Intrinsic::cttz:
        return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q)
            .Zero[0];
      case Intrinsic::ctlz:
        return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q)
            .isNonNegative();
      case Intrinsic::fshr:
      case Intrinsic::fshl:
        // If Op0 == Op1, this is a rotate. rotate(x, y) != 0 iff x != 0.
        if (II->getArgOperand(0) == II->getArgOperand(1))
          return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q);
        break;
      case Intrinsic::vscale:
        return true;
      default:
        break;
      }
    }
    break;
  }

  KnownBits Known(BitWidth);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
  return Known.One != 0;
}
bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  return isKnownNonZero(V, DemandedElts, Depth, Q);
}
3019 /// If the pair of operators are the same invertible function, return the
3020 /// operands of the function corresponding to each input. Otherwise,
3021 /// return std::nullopt. An invertible function is one that is 1-to-1 and maps
3022 /// every input value to exactly one output value. This is equivalent to
3023 /// saying that Op1 and Op2 are equal exactly when the specified pair of
3024 /// operands are equal (except that Op1 and Op2 may be poison more often).
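/// For example, (add X, A) and (add X, B) are equal exactly when A == B, so
/// when the first operands match, this returns the pair (A, B).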
3025 static std::optional<std::pair<Value*, Value*>>
3026 getInvertibleOperands(const Operator *Op1,
3027 const Operator *Op2) {
3028 if (Op1->getOpcode() != Op2->getOpcode())
3029 return std::nullopt;
3031 auto getOperands = [&](unsigned OpNum) -> auto {
3032 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
3035 switch (Op1->getOpcode()) {
3036 default:
3037 break;
3038 case Instruction::Add:
3039 case Instruction::Sub:
3040 if (Op1->getOperand(0) == Op2->getOperand(0))
3041 return getOperands(1);
3042 if (Op1->getOperand(1) == Op2->getOperand(1))
3043 return getOperands(0);
3044 break;
3045 case Instruction::Mul: {
3046 // invertible if A * B == (A * B) mod 2^N where A and B are integers
3047 // and N is the bitwidth. The nsw case is non-obvious, but proven by
3048 // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
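// For illustration: if both multiplies are nuw, (mul nuw X, 6) and
// (mul nuw Y, 6) are equal only when X == Y (up to poison), since a
// non-wrapping multiply by a non-zero constant is 1-to-1.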
3049 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
3050 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
3051 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3052 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3053 break;
3055 // Assume operand order has been canonicalized
3056 if (Op1->getOperand(1) == Op2->getOperand(1) &&
3057 isa<ConstantInt>(Op1->getOperand(1)) &&
3058 !cast<ConstantInt>(Op1->getOperand(1))->isZero())
3059 return getOperands(0);
3060 break;
3062 case Instruction::Shl: {
3063 // Same as multiplies, except that we don't need to check for a non-zero
3064 // multiplier: a shift always multiplies by a non-zero power of two.
3065 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
3066 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
3067 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3068 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3069 break;
3071 if (Op1->getOperand(1) == Op2->getOperand(1))
3072 return getOperands(0);
3073 break;
3075 case Instruction::AShr:
3076 case Instruction::LShr: {
3077 auto *PEO1 = cast<PossiblyExactOperator>(Op1);
3078 auto *PEO2 = cast<PossiblyExactOperator>(Op2);
3079 if (!PEO1->isExact() || !PEO2->isExact())
3080 break;
3082 if (Op1->getOperand(1) == Op2->getOperand(1))
3083 return getOperands(0);
3084 break;
3086 case Instruction::SExt:
3087 case Instruction::ZExt:
3088 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
3089 return getOperands(0);
3090 break;
3091 case Instruction::PHI: {
3092 const PHINode *PN1 = cast<PHINode>(Op1);
3093 const PHINode *PN2 = cast<PHINode>(Op2);
3095 // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
3096 // are a single invertible function of the start values? Note that repeated
3097 // application of an invertible function is also invertible
3098 BinaryOperator *BO1 = nullptr;
3099 Value *Start1 = nullptr, *Step1 = nullptr;
3100 BinaryOperator *BO2 = nullptr;
3101 Value *Start2 = nullptr, *Step2 = nullptr;
3102 if (PN1->getParent() != PN2->getParent() ||
3103 !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
3104 !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
3105 break;
3107 auto Values = getInvertibleOperands(cast<Operator>(BO1),
3108 cast<Operator>(BO2));
3109 if (!Values)
3110 break;
3112 // We have to be careful of mutually defined recurrences here. Ex:
3113 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
3114 // * X_i = Y_i = X_(i-1) OP Y_(i-1)
3115 // The invertibility of these is complicated, and not worth reasoning
3116 // about (yet?).
3117 if (Values->first != PN1 || Values->second != PN2)
3118 break;
3120 return std::make_pair(Start1, Start2);
3123 return std::nullopt;
3126 /// Return true if V2 == V1 + X, where X is known non-zero.
3127 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
3128 const Query &Q) {
3129 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
3130 if (!BO || BO->getOpcode() != Instruction::Add)
3131 return false;
3132 Value *Op = nullptr;
3133 if (V2 == BO->getOperand(0))
3134 Op = BO->getOperand(1);
3135 else if (V2 == BO->getOperand(1))
3136 Op = BO->getOperand(0);
3137 else
3138 return false;
3139 return isKnownNonZero(Op, Depth + 1, Q);
3142 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
3143 /// the multiplication is nuw or nsw.
3144 static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
3145 const Query &Q) {
3146 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
3147 const APInt *C;
3148 return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
3149 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3150 !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q);
3152 return false;
3155 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
3156 /// the shift is nuw or nsw.
3157 static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
3158 const Query &Q) {
3159 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
3160 const APInt *C;
3161 return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
3162 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3163 !C->isZero() && isKnownNonZero(V1, Depth + 1, Q);
3165 return false;
3168 static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
3169 unsigned Depth, const Query &Q) {
3170 // Check that the two PHIs are in the same block.
3171 if (PN1->getParent() != PN2->getParent())
3172 return false;
3174 SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
3175 bool UsedFullRecursion = false;
3176 for (const BasicBlock *IncomBB : PN1->blocks()) {
3177 if (!VisitedBBs.insert(IncomBB).second)
3178 continue; // Don't reprocess blocks that we have dealt with already.
3179 const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
3180 const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
3181 const APInt *C1, *C2;
3182 if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
3183 continue;
3185 // Only one pair of phi operands is allowed for full recursion.
3186 if (UsedFullRecursion)
3187 return false;
3189 Query RecQ = Q;
3190 RecQ.CxtI = IncomBB->getTerminator();
3191 if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
3192 return false;
3193 UsedFullRecursion = true;
3195 return true;
3198 /// Return true if it is known that V1 != V2.
3199 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
3200 const Query &Q) {
3201 if (V1 == V2)
3202 return false;
3203 if (V1->getType() != V2->getType())
3204 // We can't look through casts yet.
3205 return false;
3207 if (Depth >= MaxAnalysisRecursionDepth)
3208 return false;
3210 // See if we can recurse through (exactly one of) our operands. This
3211 // requires our operation be 1-to-1 and map every input value to exactly
3212 // one output value. Such an operation is invertible.
3213 auto *O1 = dyn_cast<Operator>(V1);
3214 auto *O2 = dyn_cast<Operator>(V2);
3215 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
3216 if (auto Values = getInvertibleOperands(O1, O2))
3217 return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);
3219 if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
3220 const PHINode *PN2 = cast<PHINode>(V2);
3221 // FIXME: This is missing a generalization to handle the case where one is
3222 // a PHI and another one isn't.
3223 if (isNonEqualPHIs(PN1, PN2, Depth, Q))
3224 return true;
3228 if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
3229 return true;
3231 if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
3232 return true;
3234 if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
3235 return true;
3237 if (V1->getType()->isIntOrIntVectorTy()) {
3238 // Are any known bits in V1 contradictory to known bits in V2? If V1
3239 // has a known zero where V2 has a known one, they must not be equal.
3240 KnownBits Known1 = computeKnownBits(V1, Depth, Q);
3241 KnownBits Known2 = computeKnownBits(V2, Depth, Q);
3243 if (Known1.Zero.intersects(Known2.One) ||
3244 Known2.Zero.intersects(Known1.One))
3245 return true;
3247 return false;
3250 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
3251 /// simplify operations downstream. Mask is known to be zero for bits that V
3252 /// cannot have.
3254 /// This function is defined on values with integer type, values with pointer
3255 /// type, and vectors of integers. In the case
3256 /// where V is a vector, the mask, known zero, and known one values are the
3257 /// same width as the vector element, and the bit is set only if it is true
3258 /// for all of the elements in the vector.
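/// For example, if the low two bits of V are known to be zero, then
/// MaskedValueIsZero(V, 3) returns true.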
3259 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
3260 const Query &Q) {
3261 KnownBits Known(Mask.getBitWidth());
3262 computeKnownBits(V, Known, Depth, Q);
3263 return Mask.isSubsetOf(Known.Zero);
3266 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
3267 // Returns the input and lower/upper bounds.
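// e.g. for smax(smin(In, 100), -50), this returns true with In bound to the
// inner operand, CLow = -50 and CHigh = 100.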
3268 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
3269 const APInt *&CLow, const APInt *&CHigh) {
3270 assert(isa<Operator>(Select) &&
3271 cast<Operator>(Select)->getOpcode() == Instruction::Select &&
3272 "Input should be a Select!");
3274 const Value *LHS = nullptr, *RHS = nullptr;
3275 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
3276 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
3277 return false;
3279 if (!match(RHS, m_APInt(CLow)))
3280 return false;
3282 const Value *LHS2 = nullptr, *RHS2 = nullptr;
3283 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
3284 if (getInverseMinMaxFlavor(SPF) != SPF2)
3285 return false;
3287 if (!match(RHS2, m_APInt(CHigh)))
3288 return false;
3290 if (SPF == SPF_SMIN)
3291 std::swap(CLow, CHigh);
3293 In = LHS2;
3294 return CLow->sle(*CHigh);
3297 static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II,
3298 const APInt *&CLow,
3299 const APInt *&CHigh) {
3300 assert((II->getIntrinsicID() == Intrinsic::smin ||
3301 II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax");
3303 Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
3304 auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
3305 if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
3306 !match(II->getArgOperand(1), m_APInt(CLow)) ||
3307 !match(InnerII->getArgOperand(1), m_APInt(CHigh)))
3308 return false;
3310 if (II->getIntrinsicID() == Intrinsic::smin)
3311 std::swap(CLow, CHigh);
3312 return CLow->sle(*CHigh);
3315 /// For vector constants, loop over the elements and find the constant with the
3316 /// minimum number of sign bits. Return 0 if the value is not a vector constant
3317 /// or if any element was not analyzed; otherwise, return the count for the
3318 /// element with the minimum number of sign bits.
3319 static unsigned computeNumSignBitsVectorConstant(const Value *V,
3320 const APInt &DemandedElts,
3321 unsigned TyBits) {
3322 const auto *CV = dyn_cast<Constant>(V);
3323 if (!CV || !isa<FixedVectorType>(CV->getType()))
3324 return 0;
3326 unsigned MinSignBits = TyBits;
3327 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
3328 for (unsigned i = 0; i != NumElts; ++i) {
3329 if (!DemandedElts[i])
3330 continue;
3331 // If we find a non-ConstantInt, bail out.
3332 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
3333 if (!Elt)
3334 return 0;
3336 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
3339 return MinSignBits;
3342 static unsigned ComputeNumSignBitsImpl(const Value *V,
3343 const APInt &DemandedElts,
3344 unsigned Depth, const Query &Q);
3346 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
3347 unsigned Depth, const Query &Q) {
3348 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
3349 assert(Result > 0 && "At least one sign bit needs to be present!");
3350 return Result;
3353 /// Return the number of times the sign bit of the register is replicated into
3354 /// the other bits. We know that at least 1 bit is always equal to the sign bit
3355 /// (itself), but other cases can give us information. For example, immediately
3356 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
3357 /// other, so we return 3. For vectors, return the number of sign bits for the
3358 /// vector element with the minimum number of known sign bits of the demanded
3359 /// elements in the vector specified by DemandedElts.
3360 static unsigned ComputeNumSignBitsImpl(const Value *V,
3361 const APInt &DemandedElts,
3362 unsigned Depth, const Query &Q) {
3363 Type *Ty = V->getType();
3364 #ifndef NDEBUG
3365 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3367 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
3368 assert(
3369 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
3370 "DemandedElt width should equal the fixed vector number of elements");
3371 } else {
3372 assert(DemandedElts == APInt(1, 1) &&
3373 "DemandedElt width should be 1 for scalars");
3375 #endif
3377 // We return the minimum number of sign bits that are guaranteed to be present
3378 // in V, so for undef we have to conservatively return 1. We don't have the
3379 // same behavior for poison though -- that's a FIXME today.
3381 Type *ScalarTy = Ty->getScalarType();
3382 unsigned TyBits = ScalarTy->isPointerTy() ?
3383 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
3384 Q.DL.getTypeSizeInBits(ScalarTy);
3386 unsigned Tmp, Tmp2;
3387 unsigned FirstAnswer = 1;
3389 // Note that ConstantInt is handled by the general computeKnownBits case
3390 // below.
3392 if (Depth == MaxAnalysisRecursionDepth)
3393 return 1;
3395 if (auto *U = dyn_cast<Operator>(V)) {
3396 switch (Operator::getOpcode(V)) {
3397 default: break;
3398 case Instruction::SExt:
3399 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
3400 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
3402 case Instruction::SDiv: {
3403 const APInt *Denominator;
3404 // sdiv X, C -> adds log(C) sign bits.
3405 if (match(U->getOperand(1), m_APInt(Denominator))) {
3407 // Ignore non-positive denominator.
3408 if (!Denominator->isStrictlyPositive())
3409 break;
3411 // Calculate the incoming numerator bits.
3412 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3414 // Add floor(log(C)) bits to the numerator bits.
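// e.g. an i32 numerator with 10 known sign bits divided by 8 yields
// min(32, 10 + logBase2(8)) = 13 known sign bits.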
3415 return std::min(TyBits, NumBits + Denominator->logBase2());
3417 break;
3420 case Instruction::SRem: {
3421 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3423 const APInt *Denominator;
3424 // srem X, C -> we know that the result is within [-C+1,C) when C is a
3425 // positive constant. This let us put a lower bound on the number of sign
3426 // bits.
3427 if (match(U->getOperand(1), m_APInt(Denominator))) {
3429 // Ignore non-positive denominator.
3430 if (Denominator->isStrictlyPositive()) {
3431 // Calculate the leading sign bit constraints by examining the
3432 // denominator. Given that the denominator is positive, there are two
3433 // cases:
3435 // 1. The numerator is positive. The result range is [0,C) and
3436 // [0,C) u< (1 << ceilLogBase2(C)).
3438 // 2. The numerator is negative. Then the result range is (-C,0] and
3439 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
3441 // Thus a lower bound on the number of sign bits is `TyBits -
3442 // ceilLogBase2(C)`.
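// e.g. for an i32 srem by 6, ceilLogBase2(6) = 3, giving at least
// 32 - 3 = 29 sign bits whatever the numerator.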
3444 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
3445 Tmp = std::max(Tmp, ResBits);
3448 return Tmp;
3451 case Instruction::AShr: {
3452 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3453 // ashr X, C -> adds C sign bits. Vectors too.
3454 const APInt *ShAmt;
3455 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3456 if (ShAmt->uge(TyBits))
3457 break; // Bad shift.
3458 unsigned ShAmtLimited = ShAmt->getZExtValue();
3459 Tmp += ShAmtLimited;
3460 if (Tmp > TyBits) Tmp = TyBits;
3462 return Tmp;
3464 case Instruction::Shl: {
3465 const APInt *ShAmt;
3466 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3467 // shl destroys sign bits.
3468 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3469 if (ShAmt->uge(TyBits) || // Bad shift.
3470 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
3471 Tmp2 = ShAmt->getZExtValue();
3472 return Tmp - Tmp2;
3474 break;
3476 case Instruction::And:
3477 case Instruction::Or:
3478 case Instruction::Xor: // NOT is handled here.
3479 // Logical binary ops preserve at least the minimum of the operands' sign bits.
3480 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3481 if (Tmp != 1) {
3482 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3483 FirstAnswer = std::min(Tmp, Tmp2);
3484 // We computed what we know about the sign bits as our first
3485 // answer. Now proceed to the generic code that uses
3486 // computeKnownBits, and pick whichever answer is better.
3488 break;
3490 case Instruction::Select: {
3491 // If we have a clamp pattern, we know that the number of sign bits will
3492 // be the minimum of the clamp min/max range.
3493 const Value *X;
3494 const APInt *CLow, *CHigh;
3495 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
3496 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3498 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3499 if (Tmp == 1) break;
3500 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
3501 return std::min(Tmp, Tmp2);
3504 case Instruction::Add:
3505 // Add can have at most one carry bit. Thus we know that the output
3506 // is, at worst, one more bit than the inputs.
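// e.g. if both operands of an i16 add have at least 5 sign bits, the
// result is guaranteed min(5, 5) - 1 = 4 sign bits.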
3507 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3508 if (Tmp == 1) break;
3510 // Special case decrementing a value (ADD X, -1):
3511 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3512 if (CRHS->isAllOnesValue()) {
3513 KnownBits Known(TyBits);
3514 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
3516 // If the input is known to be 0 or 1, the output is 0/-1, which is
3517 // all sign bits set.
3518 if ((Known.Zero | 1).isAllOnes())
3519 return TyBits;
3521 // If we are subtracting one from a positive number, there is no carry
3522 // out of the result.
3523 if (Known.isNonNegative())
3524 return Tmp;
3527 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3528 if (Tmp2 == 1) break;
3529 return std::min(Tmp, Tmp2) - 1;
3531 case Instruction::Sub:
3532 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3533 if (Tmp2 == 1) break;
3535 // Handle NEG.
3536 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3537 if (CLHS->isNullValue()) {
3538 KnownBits Known(TyBits);
3539 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
3540 // If the input is known to be 0 or 1, the output is 0/-1, which is
3541 // all sign bits set.
3542 if ((Known.Zero | 1).isAllOnes())
3543 return TyBits;
3545 // If the input is known to be positive (the sign bit is known clear),
3546 // the output of the NEG has the same number of sign bits as the
3547 // input.
3548 if (Known.isNonNegative())
3549 return Tmp2;
3551 // Otherwise, we treat this like a SUB.
3554 // Sub can have at most one carry bit. Thus we know that the output
3555 // is, at worst, one more bit than the inputs.
3556 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3557 if (Tmp == 1) break;
3558 return std::min(Tmp, Tmp2) - 1;
3560 case Instruction::Mul: {
3561 // The output of the Mul can be at most twice the valid bits in the
3562 // inputs.
3563 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3564 if (SignBitsOp0 == 1) break;
3565 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3566 if (SignBitsOp1 == 1) break;
3567 unsigned OutValidBits =
3568 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
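// e.g. two i8 operands with 4 sign bits each occupy 5 valid bits, so the
// product may need 10 > 8 valid bits and only 1 sign bit is guaranteed.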
3569 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3572 case Instruction::PHI: {
3573 const PHINode *PN = cast<PHINode>(U);
3574 unsigned NumIncomingValues = PN->getNumIncomingValues();
3575 // Don't analyze large in-degree PHIs.
3576 if (NumIncomingValues > 4) break;
3577 // Unreachable blocks may have zero-operand PHI nodes.
3578 if (NumIncomingValues == 0) break;
3580 // Take the minimum of all incoming values. This can't infinitely loop
3581 // because of our depth threshold.
3582 Query RecQ = Q;
3583 Tmp = TyBits;
3584 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
3585 if (Tmp == 1) return Tmp;
3586 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
3587 Tmp = std::min(
3588 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
3590 return Tmp;
3593 case Instruction::Trunc: {
3594 // If the input contained enough sign bits that some remain after the
3595 // truncation, then we can make use of that. Otherwise we don't know
3596 // anything.
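// e.g. an i64 input with 40 sign bits truncated to i32 still has
// 40 - (64 - 32) = 8 sign bits.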
3597 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3598 unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits();
3599 if (Tmp > (OperandTyBits - TyBits))
3600 return Tmp - (OperandTyBits - TyBits);
3602 return 1;
3605 case Instruction::ExtractElement:
3606 // Look through extract element. At the moment we keep this simple and
3607 // skip tracking the specific element. But at least we might find
3608 // information valid for all elements of the vector (for example if the
3609 // vector is sign extended, shifted, etc).
3610 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3612 case Instruction::ShuffleVector: {
3613 // Collect the minimum number of sign bits that are shared by every vector
3614 // element referenced by the shuffle.
3615 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3616 if (!Shuf) {
3617 // FIXME: Add support for shufflevector constant expressions.
3618 return 1;
3620 APInt DemandedLHS, DemandedRHS;
3621 // For undef elements, we don't know anything about the common state of
3622 // the shuffle result.
3623 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3624 return 1;
3625 Tmp = std::numeric_limits<unsigned>::max();
3626 if (!!DemandedLHS) {
3627 const Value *LHS = Shuf->getOperand(0);
3628 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3630 // If we don't know anything, early out and try computeKnownBits
3631 // fall-back.
3632 if (Tmp == 1)
3633 break;
3634 if (!!DemandedRHS) {
3635 const Value *RHS = Shuf->getOperand(1);
3636 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3637 Tmp = std::min(Tmp, Tmp2);
3639 // If we don't know anything, early out and try computeKnownBits
3640 // fall-back.
3641 if (Tmp == 1)
3642 break;
3643 assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3644 return Tmp;
3646 case Instruction::Call: {
3647 if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3648 switch (II->getIntrinsicID()) {
3649 default: break;
3650 case Intrinsic::abs:
3651 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3652 if (Tmp == 1) break;
3654 // Absolute value reduces number of sign bits by at most 1.
3655 return Tmp - 1;
3656 case Intrinsic::smin:
3657 case Intrinsic::smax: {
3658 const APInt *CLow, *CHigh;
3659 if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh))
3660 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3668 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3669 // use this information.
3671 // If we can examine all elements of a vector constant successfully, we're
3672 // done (we can't do any better than that). If not, keep trying.
3673 if (unsigned VecSignBits =
3674 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3675 return VecSignBits;
3677 KnownBits Known(TyBits);
3678 computeKnownBits(V, DemandedElts, Known, Depth, Q);
3680 // If we know that the sign bit is either zero or one, determine the number of
3681 // identical bits in the top of the input value.
3682 return std::max(FirstAnswer, Known.countMinSignBits());
3685 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3686 const TargetLibraryInfo *TLI) {
3687 const Function *F = CB.getCalledFunction();
3688 if (!F)
3689 return Intrinsic::not_intrinsic;
3691 if (F->isIntrinsic())
3692 return F->getIntrinsicID();
3694 // We are going to infer semantics of a library function based on mapping it
3695 // to an LLVM intrinsic. Check that the library function is available from
3696 // this callbase and in this environment.
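// e.g. a call to sinf that only reads memory, in an environment that
// provides it, is mapped to Intrinsic::sin by the table below.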
3697 LibFunc Func;
3698 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3699 !CB.onlyReadsMemory())
3700 return Intrinsic::not_intrinsic;
3702 switch (Func) {
3703 default:
3704 break;
3705 case LibFunc_sin:
3706 case LibFunc_sinf:
3707 case LibFunc_sinl:
3708 return Intrinsic::sin;
3709 case LibFunc_cos:
3710 case LibFunc_cosf:
3711 case LibFunc_cosl:
3712 return Intrinsic::cos;
3713 case LibFunc_exp:
3714 case LibFunc_expf:
3715 case LibFunc_expl:
3716 return Intrinsic::exp;
3717 case LibFunc_exp2:
3718 case LibFunc_exp2f:
3719 case LibFunc_exp2l:
3720 return Intrinsic::exp2;
3721 case LibFunc_log:
3722 case LibFunc_logf:
3723 case LibFunc_logl:
3724 return Intrinsic::log;
3725 case LibFunc_log10:
3726 case LibFunc_log10f:
3727 case LibFunc_log10l:
3728 return Intrinsic::log10;
3729 case LibFunc_log2:
3730 case LibFunc_log2f:
3731 case LibFunc_log2l:
3732 return Intrinsic::log2;
3733 case LibFunc_fabs:
3734 case LibFunc_fabsf:
3735 case LibFunc_fabsl:
3736 return Intrinsic::fabs;
3737 case LibFunc_fmin:
3738 case LibFunc_fminf:
3739 case LibFunc_fminl:
3740 return Intrinsic::minnum;
3741 case LibFunc_fmax:
3742 case LibFunc_fmaxf:
3743 case LibFunc_fmaxl:
3744 return Intrinsic::maxnum;
3745 case LibFunc_copysign:
3746 case LibFunc_copysignf:
3747 case LibFunc_copysignl:
3748 return Intrinsic::copysign;
3749 case LibFunc_floor:
3750 case LibFunc_floorf:
3751 case LibFunc_floorl:
3752 return Intrinsic::floor;
3753 case LibFunc_ceil:
3754 case LibFunc_ceilf:
3755 case LibFunc_ceill:
3756 return Intrinsic::ceil;
3757 case LibFunc_trunc:
3758 case LibFunc_truncf:
3759 case LibFunc_truncl:
3760 return Intrinsic::trunc;
3761 case LibFunc_rint:
3762 case LibFunc_rintf:
3763 case LibFunc_rintl:
3764 return Intrinsic::rint;
3765 case LibFunc_nearbyint:
3766 case LibFunc_nearbyintf:
3767 case LibFunc_nearbyintl:
3768 return Intrinsic::nearbyint;
3769 case LibFunc_round:
3770 case LibFunc_roundf:
3771 case LibFunc_roundl:
3772 return Intrinsic::round;
3773 case LibFunc_roundeven:
3774 case LibFunc_roundevenf:
3775 case LibFunc_roundevenl:
3776 return Intrinsic::roundeven;
3777 case LibFunc_pow:
3778 case LibFunc_powf:
3779 case LibFunc_powl:
3780 return Intrinsic::pow;
3781 case LibFunc_sqrt:
3782 case LibFunc_sqrtf:
3783 case LibFunc_sqrtl:
3784 return Intrinsic::sqrt;
3787 return Intrinsic::not_intrinsic;
3790 /// Return true if we can prove that the specified FP value is never equal to
3791 /// -0.0.
3792 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3793 /// that a value is not -0.0. It only guarantees that -0.0 may be treated
3794 /// the same as +0.0 in floating-point ops.
3795 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3796 unsigned Depth) {
3797 if (auto *CFP = dyn_cast<ConstantFP>(V))
3798 return !CFP->getValueAPF().isNegZero();
3800 if (Depth == MaxAnalysisRecursionDepth)
3801 return false;
3803 auto *Op = dyn_cast<Operator>(V);
3804 if (!Op)
3805 return false;
3807 // (fadd x, 0.0) can never produce -0.0: if the sum is zero, it is +0.0.
3808 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3809 return true;
3811 // sitofp and uitofp turn into +0.0 for zero.
3812 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3813 return true;
3815 if (auto *Call = dyn_cast<CallInst>(Op)) {
3816 Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3817 switch (IID) {
3818 default:
3819 break;
3820 // sqrt(-0.0) = -0.0, no other negative results are possible.
3821 case Intrinsic::sqrt:
3822 case Intrinsic::canonicalize:
3823 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3824 case Intrinsic::experimental_constrained_sqrt: {
3825 // NOTE: This rounding mode restriction may be too strict.
3826 const auto *CI = cast<ConstrainedFPIntrinsic>(Call);
3827 if (CI->getRoundingMode() == RoundingMode::NearestTiesToEven)
3828 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3829 else
3830 return false;
3832 // fabs(x) != -0.0
3833 case Intrinsic::fabs:
3834 return true;
3835 // sitofp and uitofp turn into +0.0 for zero.
3836 case Intrinsic::experimental_constrained_sitofp:
3837 case Intrinsic::experimental_constrained_uitofp:
3838 return true;
3842 return false;
3845 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3846 /// standard ordered compare. e.g. treat -0.0 olt 0.0 as true because of the
3847 /// sign bit, even though the two values compare equal.
3848 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3849 const DataLayout &DL,
3850 const TargetLibraryInfo *TLI,
3851 bool SignBitOnly, unsigned Depth) {
3852 // TODO: This function does not do the right thing when SignBitOnly is true
3853 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3854 // which flips the sign bits of NaNs. See
3855 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3857 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3858 return !CFP->getValueAPF().isNegative() ||
3859 (!SignBitOnly && CFP->getValueAPF().isZero());
3862 // Handle vector of constants.
3863 if (auto *CV = dyn_cast<Constant>(V)) {
3864 if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3865 unsigned NumElts = CVFVTy->getNumElements();
3866 for (unsigned i = 0; i != NumElts; ++i) {
3867 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3868 if (!CFP)
3869 return false;
3870 if (CFP->getValueAPF().isNegative() &&
3871 (SignBitOnly || !CFP->getValueAPF().isZero()))
3872 return false;
3875 // All non-negative ConstantFPs.
3876 return true;
3880 if (Depth == MaxAnalysisRecursionDepth)
3881 return false;
3883 const Operator *I = dyn_cast<Operator>(V);
3884 if (!I)
3885 return false;
3887 switch (I->getOpcode()) {
3888 default:
3889 break;
3890 // Unsigned integers are always non-negative.
3891 case Instruction::UIToFP:
3892 return true;
3893 case Instruction::FDiv:
3894 // X / X is always exactly 1.0 or a NaN.
3895 if (I->getOperand(0) == I->getOperand(1) &&
3896 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3897 return true;
3899 // Set SignBitOnly for RHS, because X / -0.0 is -Inf (or NaN).
3900 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3901 SignBitOnly, Depth + 1) &&
3902 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI,
3903 /*SignBitOnly*/ true, Depth + 1);
3904 case Instruction::FMul:
3905 // X * X is always non-negative or a NaN.
3906 if (I->getOperand(0) == I->getOperand(1) &&
3907 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3908 return true;
3910 [[fallthrough]];
3911 case Instruction::FAdd:
3912 case Instruction::FRem:
3913 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3914 SignBitOnly, Depth + 1) &&
3915 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI,
3916 SignBitOnly, Depth + 1);
3917 case Instruction::Select:
3918 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI,
3919 SignBitOnly, Depth + 1) &&
3920 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), DL, TLI,
3921 SignBitOnly, Depth + 1);
3922 case Instruction::FPExt:
3923 case Instruction::FPTrunc:
3924 // Widening/narrowing never change sign.
3925 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3926 SignBitOnly, Depth + 1);
3927 case Instruction::ExtractElement:
3928 // Look through extract element. At the moment we keep this simple and skip
3929 // tracking the specific element. But at least we might find information
3930 // valid for all elements of the vector.
3931 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3932 SignBitOnly, Depth + 1);
3933 case Instruction::Call:
3934 const auto *CI = cast<CallInst>(I);
3935 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3936 switch (IID) {
3937 default:
3938 break;
3939 case Intrinsic::canonicalize:
3940 case Intrinsic::arithmetic_fence:
3941 case Intrinsic::floor:
3942 case Intrinsic::ceil:
3943 case Intrinsic::trunc:
3944 case Intrinsic::rint:
3945 case Intrinsic::nearbyint:
3946 case Intrinsic::round:
3947 case Intrinsic::roundeven:
3948 case Intrinsic::fptrunc_round:
3949 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3950 SignBitOnly, Depth + 1);
3951 case Intrinsic::maxnum: {
3952 Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3953 auto isPositiveNum = [&](Value *V) {
3954 if (SignBitOnly) {
3955 // With SignBitOnly, this is tricky because the result of
3956 // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3957 // a constant strictly greater than 0.0.
3958 const APFloat *C;
3959 return match(V, m_APFloat(C)) &&
3960 *C > APFloat::getZero(C->getSemantics());
3963 // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3964 // maxnum can't be ordered-less-than-zero.
3965 return isKnownNeverNaN(V, DL, TLI) &&
3966 cannotBeOrderedLessThanZeroImpl(V, DL, TLI, false, Depth + 1);
3969 // TODO: This could be improved. We could also check that neither operand
3970 // has its sign bit set (and at least 1 is not-NAN?).
3971 return isPositiveNum(V0) || isPositiveNum(V1);
3974 case Intrinsic::maximum:
3975 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3976 SignBitOnly, Depth + 1) ||
3977 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI,
3978 SignBitOnly, Depth + 1);
3979 case Intrinsic::minnum:
3980 case Intrinsic::minimum:
3981 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3982 SignBitOnly, Depth + 1) &&
3983 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI,
3984 SignBitOnly, Depth + 1);
3985 case Intrinsic::exp:
3986 case Intrinsic::exp2:
3987 case Intrinsic::fabs:
3988 return true;
3989 case Intrinsic::copysign:
3990 // Only the sign operand matters.
3991 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI, true,
3992 Depth + 1);
3993 case Intrinsic::sqrt:
3994 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3995 if (!SignBitOnly)
3996 return true;
3997 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3998 CannotBeNegativeZero(CI->getOperand(0), TLI));
4000 case Intrinsic::powi:
4001 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
4002 // powi(x,n) is non-negative if n is even.
4003 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
4004 return true;
4006 // TODO: This is not correct. Given that exp is an integer, here are the
4007 // ways that pow can return a negative value:
4009 // pow(x, exp) --> negative if exp is odd and x is negative.
4010 // pow(-0, exp) --> -inf if exp is negative odd.
4011 // pow(-0, exp) --> -0 if exp is positive odd.
4012 // pow(-inf, exp) --> -0 if exp is negative odd.
4013 // pow(-inf, exp) --> -inf if exp is positive odd.
4015 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
4016 // but we must return false if x == -0. Unfortunately we do not currently
4017 // have a way of expressing this constraint. See details in
4018 // https://llvm.org/bugs/show_bug.cgi?id=31702.
4019 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
4020 SignBitOnly, Depth + 1);
4022 case Intrinsic::fma:
4023 case Intrinsic::fmuladd:
4024 // x*x+y is non-negative if y is non-negative.
4025 return I->getOperand(0) == I->getOperand(1) &&
4026 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
4027 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), DL, TLI,
4028 SignBitOnly, Depth + 1);
4030 break;
4032 return false;
4035 bool llvm::CannotBeOrderedLessThanZero(const Value *V, const DataLayout &DL,
4036 const TargetLibraryInfo *TLI) {
4037 return cannotBeOrderedLessThanZeroImpl(V, DL, TLI, false, 0);
4040 bool llvm::isKnownNeverInfinity(const Value *V, const DataLayout &DL,
4041 const TargetLibraryInfo *TLI, unsigned Depth,
4042 AssumptionCache *AC, const Instruction *CtxI,
4043 const DominatorTree *DT,
4044 OptimizationRemarkEmitter *ORE,
4045 bool UseInstrInfo) {
4046 assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
4048 // If we're told that infinities won't happen, assume they won't.
4049 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
4050 if (FPMathOp->hasNoInfs())
4051 return true;
4053 if (const auto *Arg = dyn_cast<Argument>(V)) {
4054 if ((Arg->getNoFPClass() & fcInf) == fcInf)
4055 return true;
4058 // TODO: Use fpclass like API for isKnown queries and distinguish +inf from
4059 // -inf.
4060 if (const auto *CB = dyn_cast<CallBase>(V)) {
4061 if ((CB->getRetNoFPClass() & fcInf) == fcInf)
4062 return true;
4065 // Handle scalar constants.
4066 if (auto *CFP = dyn_cast<ConstantFP>(V))
4067 return !CFP->isInfinity();
4069 if (Depth == MaxAnalysisRecursionDepth)
4070 return false;
4072 if (auto *Inst = dyn_cast<Instruction>(V)) {
4073 switch (Inst->getOpcode()) {
4074 case Instruction::Select: {
4075 return isKnownNeverInfinity(Inst->getOperand(1), DL, TLI, Depth + 1) &&
4076 isKnownNeverInfinity(Inst->getOperand(2), DL, TLI, Depth + 1);
4078 case Instruction::SIToFP:
4079 case Instruction::UIToFP: {
4080 // Get width of largest magnitude integer (remove a bit if signed).
4081 // This still works for a signed minimum value because the largest FP
4082 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
4083 int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
4084 if (Inst->getOpcode() == Instruction::SIToFP)
4085 --IntSize;
4087 // If the exponent of the largest finite FP value can hold the largest
4088 // integer, the result of the cast must be finite.
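// e.g. sitofp i32 to float: IntSize is 31 and the largest finite float has
// exponent 127 >= 31, so the result can never be infinity.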
4089 Type *FPTy = Inst->getType()->getScalarType();
4090 return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
4092 case Instruction::FNeg:
4093 case Instruction::FPExt: {
4094 // Peek through to source op. If it is not infinity, this is not infinity.
4095 return isKnownNeverInfinity(Inst->getOperand(0), DL, TLI, Depth + 1);
4097 case Instruction::FPTrunc: {
4098 // Need a range check.
4099 return false;
4101 default:
4102 break;
4105 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
4106 switch (II->getIntrinsicID()) {
4107 case Intrinsic::sin:
4108 case Intrinsic::cos:
4109 // sin and cos return NaN on infinite inputs and finite values otherwise.
4110 return true;
4111 case Intrinsic::fabs:
4112 case Intrinsic::sqrt:
4113 case Intrinsic::canonicalize:
4114 case Intrinsic::copysign:
4115 case Intrinsic::arithmetic_fence:
4116 case Intrinsic::trunc:
4117 return isKnownNeverInfinity(Inst->getOperand(0), DL, TLI, Depth + 1);
4118 case Intrinsic::floor:
4119 case Intrinsic::ceil:
4120 case Intrinsic::rint:
4121 case Intrinsic::nearbyint:
4122 case Intrinsic::round:
4123 case Intrinsic::roundeven:
4124 // PPC_FP128 is a special case.
4125 if (V->getType()->isMultiUnitFPType())
4126 return false;
4127 return isKnownNeverInfinity(Inst->getOperand(0), DL, TLI, Depth + 1);
4128 case Intrinsic::fptrunc_round:
4129 // Requires knowing the value range.
4130 return false;
4131 case Intrinsic::minnum:
4132 case Intrinsic::maxnum:
4133 case Intrinsic::minimum:
4134 case Intrinsic::maximum:
4135 return isKnownNeverInfinity(Inst->getOperand(0), DL, TLI, Depth + 1) &&
4136 isKnownNeverInfinity(Inst->getOperand(1), DL, TLI, Depth + 1);
4137 case Intrinsic::log:
4138 case Intrinsic::log10:
4139 case Intrinsic::log2:
4140 // log(+inf) -> +inf
4141 // log([+-]0.0) -> -inf
4142 // log(-inf) -> nan
4143 // log(-x) -> nan
4144 // TODO: We lack API to check the == 0 case.
4145 return false;
4146 case Intrinsic::exp:
4147 case Intrinsic::exp2:
4148 case Intrinsic::pow:
4149 case Intrinsic::powi:
4150 case Intrinsic::fma:
4151 case Intrinsic::fmuladd:
4152 // These can return infinities on overflow cases, so it's hard to prove
4153 // anything about it.
4154 return false;
4155 default:
4156 break;
4161 // Try to handle fixed width vector constants
4162 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
4163 if (VFVTy && isa<Constant>(V)) {
4164 // For vectors, verify that each element is not infinity.
4165 unsigned NumElts = VFVTy->getNumElements();
4166 for (unsigned i = 0; i != NumElts; ++i) {
4167 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
4168 if (!Elt)
4169 return false;
4170 if (isa<UndefValue>(Elt))
4171 continue;
4172 auto *CElt = dyn_cast<ConstantFP>(Elt);
4173 if (!CElt || CElt->isInfinity())
4174 return false;
4176 // All elements were confirmed non-infinity or undefined.
4177 return true;
4180 // Was not able to prove that V never contains infinity
4181 return false;
4184 bool llvm::SignBitMustBeZero(const Value *V, const DataLayout &DL,
4185 const TargetLibraryInfo *TLI) {
4186 return cannotBeOrderedLessThanZeroImpl(V, DL, TLI, true, 0);
4189 bool llvm::isKnownNeverNaN(const Value *V, const DataLayout &DL,
4190 const TargetLibraryInfo *TLI, unsigned Depth,
4191 AssumptionCache *AC, const Instruction *CtxI,
4192 const DominatorTree *DT,
4193 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
4194 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
4196 // If we're told that NaNs won't happen, assume they won't.
4197 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
4198 if (FPMathOp->hasNoNaNs())
4199 return true;
4201 if (const auto *Arg = dyn_cast<Argument>(V)) {
4202 if ((Arg->getNoFPClass() & fcNan) == fcNan)
4203 return true;
4206 // TODO: Use fpclass like API for isKnown queries and distinguish snan from
4207 // qnan.
4208 if (const auto *CB = dyn_cast<CallBase>(V)) {
4209 FPClassTest Mask = CB->getRetNoFPClass();
4210 if ((Mask & fcNan) == fcNan)
4211 return true;
4214 // Handle scalar constants.
4215 if (auto *CFP = dyn_cast<ConstantFP>(V))
4216 return !CFP->isNaN();
4218 if (Depth == MaxAnalysisRecursionDepth)
4219 return false;
4221 if (auto *Inst = dyn_cast<Instruction>(V)) {
4222 switch (Inst->getOpcode()) {
4223 case Instruction::FAdd:
4224 case Instruction::FSub:
4225 // Adding positive and negative infinity produces NaN.
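// e.g. (+inf) + (-inf) is NaN, so in addition to both operands being
// known non-NaN, at least one must also be known non-infinite.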
4226 return isKnownNeverNaN(Inst->getOperand(0), DL, TLI, Depth + 1) &&
4227 isKnownNeverNaN(Inst->getOperand(1), DL, TLI, Depth + 1) &&
4228 (isKnownNeverInfinity(Inst->getOperand(0), DL, TLI, Depth + 1) ||
4229 isKnownNeverInfinity(Inst->getOperand(1), DL, TLI, Depth + 1));
4231 case Instruction::FMul:
4232 // Zero multiplied with infinity produces NaN.
4233 // FIXME: If neither side can be zero, fmul never produces NaN.
4234 return isKnownNeverNaN(Inst->getOperand(0), DL, TLI, Depth + 1) &&
4235 isKnownNeverInfinity(Inst->getOperand(0), DL, TLI, Depth + 1) &&
4236 isKnownNeverNaN(Inst->getOperand(1), DL, TLI, Depth + 1) &&
4237 isKnownNeverInfinity(Inst->getOperand(1), DL, TLI, Depth + 1);
4239 case Instruction::FDiv:
4240 case Instruction::FRem:
4241 // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
4242 return false;
4244 case Instruction::Select: {
4245 return isKnownNeverNaN(Inst->getOperand(1), DL, TLI, Depth + 1) &&
4246 isKnownNeverNaN(Inst->getOperand(2), DL, TLI, Depth + 1);
4248 case Instruction::SIToFP:
4249 case Instruction::UIToFP:
4250 return true;
4251 case Instruction::FPTrunc:
4252 case Instruction::FPExt:
4253 case Instruction::FNeg:
4254 return isKnownNeverNaN(Inst->getOperand(0), DL, TLI, Depth + 1);
4255 default:
4256 break;
4260 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
4261 switch (II->getIntrinsicID()) {
4262 case Intrinsic::canonicalize:
4263 case Intrinsic::fabs:
4264 case Intrinsic::copysign:
4265 case Intrinsic::exp:
4266 case Intrinsic::exp2:
4267 case Intrinsic::floor:
4268 case Intrinsic::ceil:
4269 case Intrinsic::trunc:
4270 case Intrinsic::rint:
4271 case Intrinsic::nearbyint:
4272 case Intrinsic::round:
4273 case Intrinsic::roundeven:
4274 case Intrinsic::arithmetic_fence:
4275 return isKnownNeverNaN(II->getArgOperand(0), DL, TLI, Depth + 1);
4276 case Intrinsic::sqrt:
4277 return isKnownNeverNaN(II->getArgOperand(0), DL, TLI, Depth + 1) &&
4278 CannotBeOrderedLessThanZero(II->getArgOperand(0), DL, TLI);
4279 case Intrinsic::minnum:
4280 case Intrinsic::maxnum:
4281 // If either operand is not NaN, the result is not NaN.
4282 return isKnownNeverNaN(II->getArgOperand(0), DL, TLI, Depth + 1) ||
4283 isKnownNeverNaN(II->getArgOperand(1), DL, TLI, Depth + 1);
4284 default:
4285 return false;
4289 // Try to handle fixed width vector constants
4290 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
4291 if (VFVTy && isa<Constant>(V)) {
4292 // For vectors, verify that each element is not NaN.
4293 unsigned NumElts = VFVTy->getNumElements();
4294 for (unsigned i = 0; i != NumElts; ++i) {
4295 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
4296 if (!Elt)
4297 return false;
4298 if (isa<UndefValue>(Elt))
4299 continue;
4300 auto *CElt = dyn_cast<ConstantFP>(Elt);
4301 if (!CElt || CElt->isNaN())
4302 return false;
4304 // All elements were confirmed not-NaN or undefined.
4305 return true;
4308 // Was not able to prove that V never contains NaN
4309 return false;
4312 /// Return true if it's possible to assume IEEE treatment of input denormals in
4313 /// \p F for values of type \p Ty.
4314 static bool inputDenormalIsIEEE(const Function &F, const Type *Ty) {
4315 Ty = Ty->getScalarType();
4316 return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE;
4319 static bool inputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty) {
4320 Ty = Ty->getScalarType();
4321 DenormalMode Mode = F.getDenormalMode(Ty->getFltSemantics());
4322 return Mode.Input == DenormalMode::IEEE ||
4323 Mode.Input == DenormalMode::PositiveZero;
4326 static bool outputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty) {
4327 Ty = Ty->getScalarType();
4328 DenormalMode Mode = F.getDenormalMode(Ty->getFltSemantics());
4329 return Mode.Output == DenormalMode::IEEE ||
4330 Mode.Output == DenormalMode::PositiveZero;
4333 bool KnownFPClass::isKnownNeverLogicalZero(const Function &F, Type *Ty) const {
4334 return isKnownNeverZero() &&
4335 (isKnownNeverSubnormal() || inputDenormalIsIEEE(F, Ty));
4338 bool KnownFPClass::isKnownNeverLogicalNegZero(const Function &F,
4339 Type *Ty) const {
4340 return isKnownNeverNegZero() &&
4341 (isKnownNeverNegSubnormal() || inputDenormalIsIEEEOrPosZero(F, Ty));
4344 bool KnownFPClass::isKnownNeverLogicalPosZero(const Function &F,
4345 Type *Ty) const {
4346 if (!isKnownNeverPosZero())
4347 return false;
4349 // If we know there are no denormals, nothing can be flushed to zero.
4350 if (isKnownNeverSubnormal())
4351 return true;
4353 DenormalMode Mode = F.getDenormalMode(Ty->getScalarType()->getFltSemantics());
4354 switch (Mode.Input) {
4355 case DenormalMode::IEEE:
4356 return true;
4357 case DenormalMode::PreserveSign:
4358 // Negative subnormal won't flush to +0
4359 return isKnownNeverPosSubnormal();
4360 case DenormalMode::PositiveZero:
4361 default:
4362 // Both positive and negative subnormal could flush to +0
4363 return false;
4366 llvm_unreachable("covered switch over denormal mode");
4369 /// Returns a (Value, FPClassTest) pair which, if passed to llvm.is.fpclass,
4370 /// gives the same result as an fcmp with the given operands.
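/// For example, (fcmp oeq x, 0.0) corresponds to llvm.is.fpclass(x, fcZero)
/// when input denormals are not flushed to zero.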
4371 std::pair<Value *, FPClassTest> llvm::fcmpToClassTest(FCmpInst::Predicate Pred,
4372 const Function &F,
4373 Value *LHS, Value *RHS,
4374 bool LookThroughSrc) {
4375 const APFloat *ConstRHS;
4376 if (!match(RHS, m_APFloat(ConstRHS)))
4377 return {nullptr, fcNone};
4379 if (ConstRHS->isZero()) {
4380 // A compare with zero maps exactly to a class test of fcZero only if input
4381 // denormals are not flushed.
4382 // TODO: Handle DAZ by expanding masks to cover subnormal cases.
4383 if (Pred != FCmpInst::FCMP_ORD && Pred != FCmpInst::FCMP_UNO &&
4384 !inputDenormalIsIEEE(F, LHS->getType()))
4385 return {nullptr, fcNone};
4387 switch (Pred) {
4388 case FCmpInst::FCMP_OEQ: // Match x == 0.0
4389 return {LHS, fcZero};
4390 case FCmpInst::FCMP_UEQ: // Match isnan(x) || (x == 0.0)
4391 return {LHS, fcZero | fcNan};
4392 case FCmpInst::FCMP_UNE: // Match (x != 0.0)
4393 return {LHS, ~fcZero};
4394 case FCmpInst::FCMP_ONE: // Match !isnan(x) && x != 0.0
4395 return {LHS, ~fcNan & ~fcZero};
4396 case FCmpInst::FCMP_ORD:
4397 // Canonical form of ord/uno is with a zero. We could also handle
4398 // non-canonical other non-NaN constants or LHS == RHS.
4399 return {LHS, ~fcNan};
4400 case FCmpInst::FCMP_UNO:
4401 return {LHS, fcNan};
4402 case FCmpInst::FCMP_OGT: // x > 0
4403 return {LHS, fcPosSubnormal | fcPosNormal | fcPosInf};
4404 case FCmpInst::FCMP_UGT: // isnan(x) || x > 0
4405 return {LHS, fcPosSubnormal | fcPosNormal | fcPosInf | fcNan};
4406 case FCmpInst::FCMP_OGE: // x >= 0
4407 return {LHS, fcPositive | fcNegZero};
4408 case FCmpInst::FCMP_UGE: // isnan(x) || x >= 0
4409 return {LHS, fcPositive | fcNegZero | fcNan};
4410 case FCmpInst::FCMP_OLT: // x < 0
4411 return {LHS, fcNegSubnormal | fcNegNormal | fcNegInf};
4412 case FCmpInst::FCMP_ULT: // isnan(x) || x < 0
4413 return {LHS, fcNegSubnormal | fcNegNormal | fcNegInf | fcNan};
4414 case FCmpInst::FCMP_OLE: // x <= 0
4415 return {LHS, fcNegative | fcPosZero};
4416 case FCmpInst::FCMP_ULE: // isnan(x) || x <= 0
4417 return {LHS, fcNegative | fcPosZero | fcNan};
4418 default:
4419 break;
4422 return {nullptr, fcNone};
4425 Value *Src = LHS;
4426 const bool IsFabs = LookThroughSrc && match(LHS, m_FAbs(m_Value(Src)));
4428 // Compute the test mask that would return true for the ordered comparisons.
4429 FPClassTest Mask;
4431 if (ConstRHS->isInfinity()) {
4432 switch (Pred) {
4433 case FCmpInst::FCMP_OEQ:
4434 case FCmpInst::FCMP_UNE: {
4435 // Match __builtin_isinf patterns
4437 // fcmp oeq x, +inf -> is_fpclass x, fcPosInf
4438 // fcmp oeq fabs(x), +inf -> is_fpclass x, fcInf
4439 // fcmp oeq x, -inf -> is_fpclass x, fcNegInf
4440 // fcmp oeq fabs(x), -inf -> is_fpclass x, 0 -> false
4442 // fcmp une x, +inf -> is_fpclass x, ~fcPosInf
4443 // fcmp une fabs(x), +inf -> is_fpclass x, ~fcInf
4444 // fcmp une x, -inf -> is_fpclass x, ~fcNegInf
4445 // fcmp une fabs(x), -inf -> is_fpclass x, fcAllFlags -> true
4447 if (ConstRHS->isNegative()) {
4448 Mask = fcNegInf;
4449 if (IsFabs)
4450 Mask = fcNone;
4451 } else {
4452 Mask = fcPosInf;
4453 if (IsFabs)
4454 Mask |= fcNegInf;
4457 break;
4459 case FCmpInst::FCMP_ONE:
4460 case FCmpInst::FCMP_UEQ: {
4461 // Match __builtin_isinf patterns
4462 // fcmp one x, -inf -> is_fpclass x, ~fcNegInf & ~fcNan
4463 // fcmp one fabs(x), -inf -> is_fpclass x, ~fcNan
4464 // fcmp one x, +inf -> is_fpclass x, ~fcPosInf & ~fcNan
4465 // fcmp one fabs(x), +inf -> is_fpclass x, ~fcInf & ~fcNan
4467 // fcmp ueq x, +inf -> is_fpclass x, fcPosInf|fcNan
4468 // fcmp ueq (fabs x), +inf -> is_fpclass x, fcInf|fcNan
4469 // fcmp ueq x, -inf -> is_fpclass x, fcNegInf|fcNan
4470 // fcmp ueq fabs(x), -inf -> is_fpclass x, fcNan
4471 if (ConstRHS->isNegative()) {
4472 Mask = ~fcNegInf & ~fcNan;
4473 if (IsFabs)
4474 Mask = ~fcNan;
4475 } else {
4476 Mask = ~fcPosInf & ~fcNan;
4477 if (IsFabs)
4478 Mask &= ~fcNegInf;
4481 break;
4483 case FCmpInst::FCMP_OLT:
4484 case FCmpInst::FCMP_UGE: {
4485 if (ConstRHS->isNegative()) // TODO
4486 return {nullptr, fcNone};
4488 // fcmp olt fabs(x), +inf -> fcFinite
4489 // fcmp uge fabs(x), +inf -> ~fcFinite
4490 // fcmp olt x, +inf -> fcFinite|fcNegInf
4491 // fcmp uge x, +inf -> ~(fcFinite|fcNegInf)
4492 Mask = fcFinite;
4493 if (!IsFabs)
4494 Mask |= fcNegInf;
4495 break;
4497 case FCmpInst::FCMP_OGE:
4498 case FCmpInst::FCMP_ULT: {
4499 if (ConstRHS->isNegative()) // TODO
4500 return {nullptr, fcNone};
4502 // fcmp oge fabs(x), +inf -> fcInf
4503 // fcmp oge x, +inf -> fcPosInf
4504 // fcmp ult fabs(x), +inf -> ~fcInf
4505 // fcmp ult x, +inf -> ~fcPosInf
4506 Mask = fcPosInf;
4507 if (IsFabs)
4508 Mask |= fcNegInf;
4509 break;
4511 default:
4512 return {nullptr, fcNone};
4514 } else if (ConstRHS->isSmallestNormalized() && !ConstRHS->isNegative()) {
4515 // Match pattern that's used in __builtin_isnormal.
4516 switch (Pred) {
4517 case FCmpInst::FCMP_OLT:
4518 case FCmpInst::FCMP_UGE: {
4519 // fcmp olt x, smallest_normal -> fcNegInf|fcNegNormal|fcSubnormal|fcZero
4520 // fcmp olt fabs(x), smallest_normal -> fcSubnormal|fcZero
4521 // fcmp uge x, smallest_normal -> fcNan|fcPosNormal|fcPosInf
4522 // fcmp uge fabs(x), smallest_normal -> ~(fcSubnormal|fcZero)
4523 Mask = fcZero | fcSubnormal;
4524 if (!IsFabs)
4525 Mask |= fcNegNormal | fcNegInf;
4527 break;
4529 case FCmpInst::FCMP_OGE:
4530 case FCmpInst::FCMP_ULT: {
4531 // fcmp oge x, smallest_normal -> fcPosNormal | fcPosInf
4532 // fcmp oge fabs(x), smallest_normal -> fcInf | fcNormal
4533 // fcmp ult x, smallest_normal -> ~(fcPosNormal | fcPosInf)
4534 // fcmp ult fabs(x), smallest_normal -> ~(fcInf | fcNormal)
4535 Mask = fcPosInf | fcPosNormal;
4536 if (IsFabs)
4537 Mask |= fcNegInf | fcNegNormal;
4538 break;
4540 default:
4541 return {nullptr, fcNone};
4543 } else
4544 return {nullptr, fcNone};
4546 // Invert the comparison for the unordered cases.
4547 if (FCmpInst::isUnordered(Pred))
4548 Mask = ~Mask;
4550 return {Src, Mask};
4553 static FPClassTest computeKnownFPClassFromAssumes(const Value *V,
4554 const Query &Q) {
4555 FPClassTest KnownFromAssume = fcAllFlags;
4557 // Try to restrict the floating-point classes based on information from
4558 // assumptions.
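// e.g. a dominating llvm.assume(fcmp oge double %x, 0.0) narrows the known
// classes of %x to fcPositive | fcNegZero (see fcmpToClassTest above).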
4559 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
4560 if (!AssumeVH)
4561 continue;
4562 CallInst *I = cast<CallInst>(AssumeVH);
4563 const Function *F = I->getFunction();
4565 assert(F == Q.CxtI->getParent()->getParent() &&
4566 "Got assumption for the wrong function!");
4567 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
4568 "must be an assume intrinsic");
4570 if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
4571 continue;
4573 CmpInst::Predicate Pred;
4574 Value *LHS, *RHS;
4575 uint64_t ClassVal = 0;
4576 if (match(I->getArgOperand(0), m_FCmp(Pred, m_Value(LHS), m_Value(RHS)))) {
4577 auto [TestedValue, TestedMask] =
4578 fcmpToClassTest(Pred, *F, LHS, RHS, true);
4579 // First see if we can fold fabs/fneg into the test.
4580 if (TestedValue == V)
4581 KnownFromAssume &= TestedMask;
4582 else {
4583 // Try again without the lookthrough if we found a different source
4584 // value.
4585 auto [TestedValue, TestedMask] =
4586 fcmpToClassTest(Pred, *F, LHS, RHS, false);
4587 if (TestedValue == V)
4588 KnownFromAssume &= TestedMask;
4590 } else if (match(I->getArgOperand(0),
4591 m_Intrinsic<Intrinsic::is_fpclass>(
4592 m_Value(LHS), m_ConstantInt(ClassVal)))) {
4593 KnownFromAssume &= static_cast<FPClassTest>(ClassVal);
4597 return KnownFromAssume;
4600 void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
4601 FPClassTest InterestedClasses, KnownFPClass &Known,
4602 unsigned Depth, const Query &Q,
4603 const TargetLibraryInfo *TLI);
4605 static void computeKnownFPClass(const Value *V, KnownFPClass &Known,
4606 FPClassTest InterestedClasses, unsigned Depth,
4607 const Query &Q, const TargetLibraryInfo *TLI) {
4608 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
4609 APInt DemandedElts =
4610 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
4611 computeKnownFPClass(V, DemandedElts, InterestedClasses, Known, Depth, Q, TLI);
4614 static void computeKnownFPClassForFPTrunc(const Operator *Op,
4615 const APInt &DemandedElts,
4616 FPClassTest InterestedClasses,
4617 KnownFPClass &Known, unsigned Depth,
4618 const Query &Q,
4619 const TargetLibraryInfo *TLI) {
4620 if ((InterestedClasses & fcNan) == fcNone)
4621 return;
4623 KnownFPClass KnownSrc;
4624 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
4625 KnownSrc, Depth + 1, Q, TLI);
4626 if (KnownSrc.isKnownNeverNaN())
4627 Known.knownNot(fcNan);
4629 // Infinity needs a range check.
4630 // TODO: Sign bit should be preserved
4633 // TODO: Merge implementations of isKnownNeverNaN, isKnownNeverInfinity,
4634 // CannotBeNegativeZero, cannotBeOrderedLessThanZero into here.
4636 void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
4637 FPClassTest InterestedClasses, KnownFPClass &Known,
4638 unsigned Depth, const Query &Q,
4639 const TargetLibraryInfo *TLI) {
4640 assert(Known.isUnknown() && "should not be called with known information");
4642 if (!DemandedElts) {
4643 // No demanded elts, better to assume we don't know anything.
4644 Known.resetAll();
4645 return;
4648 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
4650 if (auto *CFP = dyn_cast_or_null<ConstantFP>(V)) {
4651 Known.KnownFPClasses = CFP->getValueAPF().classify();
4652 Known.SignBit = CFP->isNegative();
4653 return;
4656 // Try to handle fixed width vector constants
4657 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
4658 const Constant *CV = dyn_cast<Constant>(V);
4659 if (VFVTy && CV) {
4660 Known.KnownFPClasses = fcNone;
4662 // For vectors, merge the known FP classes of all the elements.
4663 unsigned NumElts = VFVTy->getNumElements();
4664 for (unsigned i = 0; i != NumElts; ++i) {
4665 Constant *Elt = CV->getAggregateElement(i);
4666 if (!Elt) {
4667 Known = KnownFPClass();
4668 return;
4670 if (isa<UndefValue>(Elt))
4671 continue;
4672 auto *CElt = dyn_cast<ConstantFP>(Elt);
4673 if (!CElt) {
4674 Known = KnownFPClass();
4675 return;
4678 KnownFPClass KnownElt{CElt->getValueAPF().classify(), CElt->isNegative()};
4679 Known |= KnownElt;
4682 return;
4685 FPClassTest KnownNotFromFlags = fcNone;
4686 if (const auto *CB = dyn_cast<CallBase>(V))
4687 KnownNotFromFlags |= CB->getRetNoFPClass();
4688 else if (const auto *Arg = dyn_cast<Argument>(V))
4689 KnownNotFromFlags |= Arg->getNoFPClass();
4691 const Operator *Op = dyn_cast<Operator>(V);
4692 if (const FPMathOperator *FPOp = dyn_cast_or_null<FPMathOperator>(Op)) {
4693 if (FPOp->hasNoNaNs())
4694 KnownNotFromFlags |= fcNan;
4695 if (FPOp->hasNoInfs())
4696 KnownNotFromFlags |= fcInf;
4699 if (Q.AC) {
4700 FPClassTest AssumedClasses = computeKnownFPClassFromAssumes(V, Q);
4701 KnownNotFromFlags |= ~AssumedClasses;
4704 // We no longer need to find out about these bits from inputs if we can
4705 // assume this from flags/attributes.
4706 InterestedClasses &= ~KnownNotFromFlags;
4708 auto ClearClassesFromFlags = make_scope_exit([=, &Known] {
4709 Known.knownNot(KnownNotFromFlags);
4712 if (!Op)
4713 return;
4715 // All recursive calls that increase depth must come after this.
4716 if (Depth == MaxAnalysisRecursionDepth)
4717 return;
4719 const unsigned Opc = Op->getOpcode();
4720 switch (Opc) {
4721 case Instruction::FNeg: {
4722 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
4723 Known, Depth + 1, Q, TLI);
4724 Known.fneg();
4725 break;
4727 case Instruction::Select: {
4728 KnownFPClass Known2;
4729 computeKnownFPClass(Op->getOperand(1), DemandedElts, InterestedClasses,
4730 Known, Depth + 1, Q, TLI);
4731 computeKnownFPClass(Op->getOperand(2), DemandedElts, InterestedClasses,
4732 Known2, Depth + 1, Q, TLI);
4733 Known |= Known2;
4734 break;
4736 case Instruction::Call: {
4737 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op)) {
4738 const Intrinsic::ID IID = II->getIntrinsicID();
4739 switch (IID) {
4740 case Intrinsic::fabs: {
4741 if ((InterestedClasses & (fcNan | fcPositive)) != fcNone) {
4742 // If we only care about the sign bit we don't need to inspect the
4743 // operand.
4744 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4745 InterestedClasses, Known, Depth + 1, Q, TLI);
4748 Known.fabs();
4749 break;
4751 case Intrinsic::copysign: {
4752 KnownFPClass KnownSign;
4754 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4755 InterestedClasses, Known, Depth + 1, Q, TLI);
4756 computeKnownFPClass(II->getArgOperand(1), DemandedElts,
4757 InterestedClasses, KnownSign, Depth + 1, Q, TLI);
4758 Known.copysign(KnownSign);
4759 break;
4761 case Intrinsic::fma:
4762 case Intrinsic::fmuladd: {
4763 if ((InterestedClasses & fcNegative) == fcNone)
4764 break;
4766 if (II->getArgOperand(0) != II->getArgOperand(1))
4767 break;
4769 // The multiply cannot be -0 and therefore the add can't be -0
4770 Known.knownNot(fcNegZero);
4772 // x * x + y is non-negative if y is non-negative.
4773 KnownFPClass KnownAddend;
4774 computeKnownFPClass(II->getArgOperand(2), DemandedElts,
4775 InterestedClasses, KnownAddend, Depth + 1, Q, TLI);
4777 // TODO: Known sign bit with no nans
4778 if (KnownAddend.cannotBeOrderedLessThanZero())
4779 Known.knownNot(fcNegative);
4780 break;
4782 case Intrinsic::sqrt: {
4783 KnownFPClass KnownSrc;
4784 FPClassTest InterestedSrcs = InterestedClasses;
4785 if (InterestedClasses & fcNan)
4786 InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
4788 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4789 InterestedSrcs, KnownSrc, Depth + 1, Q, TLI);
4791 if (KnownSrc.isKnownNeverPosInfinity())
4792 Known.knownNot(fcPosInf);
4793 if (KnownSrc.isKnownNever(fcSNan))
4794 Known.knownNot(fcSNan);
4796 // Any negative value besides -0 returns a nan.
4797 if (KnownSrc.isKnownNeverNaN() &&
4798 KnownSrc.cannotBeOrderedLessThanZero())
4799 Known.knownNot(fcNan);
4801 // The only negative value that can be returned is -0 for -0 inputs.
4802 Known.knownNot(fcNegInf | fcNegSubnormal | fcNegNormal);
4804 // If the input denormal mode could be PreserveSign, a negative
4805 // subnormal input could produce a negative zero output.
4806 if (KnownSrc.isKnownNeverLogicalNegZero(*II->getFunction(),
4807 II->getType())) {
4808 Known.knownNot(fcNegZero);
4809 if (KnownSrc.isKnownNeverNaN())
4810 Known.SignBit = false;
4813 break;
4815 case Intrinsic::sin:
4816 case Intrinsic::cos: {
4817 // Return NaN on infinite inputs.
4818 KnownFPClass KnownSrc;
4819 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4820 InterestedClasses, KnownSrc, Depth + 1, Q, TLI);
4821 Known.knownNot(fcInf);
4822 if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity())
4823 Known.knownNot(fcNan);
4824 break;
4827 case Intrinsic::maxnum:
4828 case Intrinsic::minnum:
4829 case Intrinsic::minimum:
4830 case Intrinsic::maximum: {
4831 KnownFPClass KnownLHS, KnownRHS;
4832 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4833 InterestedClasses, KnownLHS, Depth + 1, Q, TLI);
4834 computeKnownFPClass(II->getArgOperand(1), DemandedElts,
4835 InterestedClasses, KnownRHS, Depth + 1, Q, TLI);
4837 bool NeverNaN =
4838 KnownLHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNaN();
4839 Known = KnownLHS | KnownRHS;
4841 // If either operand is not NaN, the result is not NaN.
4842 if (NeverNaN && (IID == Intrinsic::minnum || IID == Intrinsic::maxnum))
4843 Known.knownNot(fcNan);
4845 if (IID == Intrinsic::maxnum) {
4846 // If at least one operand is known to be positive, the result must be
4847 // positive.
4848 if ((KnownLHS.cannotBeOrderedLessThanZero() &&
4849 KnownLHS.isKnownNeverNaN()) ||
4850 (KnownRHS.cannotBeOrderedLessThanZero() &&
4851 KnownRHS.isKnownNeverNaN()))
4852 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
4853 } else if (IID == Intrinsic::maximum) {
4854 // If at least one operand is known to be positive, the result must be
4855 // positive.
4856 if (KnownLHS.cannotBeOrderedLessThanZero() ||
4857 KnownRHS.cannotBeOrderedLessThanZero())
4858 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
4859 } else if (IID == Intrinsic::minnum) {
4860 // If at least one operand is known to be negative, the result must be
4861 // negative.
4862 if ((KnownLHS.cannotBeOrderedGreaterThanZero() &&
4863 KnownLHS.isKnownNeverNaN()) ||
4864 (KnownRHS.cannotBeOrderedGreaterThanZero() &&
4865 KnownRHS.isKnownNeverNaN()))
4866 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
4867 } else {
4868 // If at least one operand is known to be negative, the result must be
4869 // negative.
4870 if (KnownLHS.cannotBeOrderedGreaterThanZero() ||
4871 KnownRHS.cannotBeOrderedGreaterThanZero())
4872 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
4875 // Fixup zero handling if denormals could be returned as a zero.
4877 // As there's no spec for denormal flushing, be conservative with the
4878 // treatment of denormals that could be flushed to zero. For older
4879 // subtargets on AMDGPU, the min/max instructions would not flush the
4880 // output and would instead return the original value.
4882 // TODO: This could be refined based on the sign
4883 if ((Known.KnownFPClasses & fcZero) != fcNone &&
4884 !Known.isKnownNeverSubnormal()) {
4885 const Function *Parent = II->getFunction();
4886 DenormalMode Mode = Parent->getDenormalMode(
4887 II->getType()->getScalarType()->getFltSemantics());
4888 if (Mode != DenormalMode::getIEEE())
4889 Known.KnownFPClasses |= fcZero;
4892 break;
4894 case Intrinsic::canonicalize: {
4895 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4896 InterestedClasses, Known, Depth + 1, Q, TLI);
4897 // Canonicalize is guaranteed to quiet signaling nans.
4898 Known.knownNot(fcSNan);
4900 // If the parent function flushes denormals, the canonical output cannot
4901 // be a denormal.
4902 const fltSemantics &FPType =
4903 II->getType()->getScalarType()->getFltSemantics();
4904 DenormalMode DenormMode = II->getFunction()->getDenormalMode(FPType);
4905 if (DenormMode.inputsAreZero() || DenormMode.outputsAreZero())
4906 Known.knownNot(fcSubnormal);
4908 if (DenormMode.Input == DenormalMode::PositiveZero ||
4909 (DenormMode.Output == DenormalMode::PositiveZero &&
4910 DenormMode.Input == DenormalMode::IEEE))
4911 Known.knownNot(fcNegZero);
4913 break;
4915 case Intrinsic::trunc: {
4916 KnownFPClass KnownSrc;
4918 FPClassTest InterestedSrcs = InterestedClasses;
4919 if (InterestedClasses & fcZero)
4920 InterestedSrcs |= fcNormal | fcSubnormal;
4922 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
4923 KnownSrc, Depth + 1, Q, TLI);
4925 // Integer results cannot be subnormal.
4926 Known.knownNot(fcSubnormal);
4928 // trunc passes through infinities.
4929 if (KnownSrc.isKnownNeverPosInfinity())
4930 Known.knownNot(fcPosInf);
4931 if (KnownSrc.isKnownNeverNegInfinity())
4932 Known.knownNot(fcNegInf);
4934 // Non-constrained intrinsics do not guarantee signaling nan quieting.
4935 if (KnownSrc.isKnownNeverNaN())
4936 Known.knownNot(fcNan);
4938 if (KnownSrc.isKnownNever(fcPosNormal))
4939 Known.knownNot(fcPosNormal);
4941 if (KnownSrc.isKnownNever(fcNegNormal))
4942 Known.knownNot(fcNegNormal);
4944 if (KnownSrc.isKnownNever(fcPosZero | fcPosSubnormal | fcPosNormal))
4945 Known.knownNot(fcPosZero);
4947 if (KnownSrc.isKnownNever(fcNegZero | fcNegSubnormal | fcNegNormal))
4948 Known.knownNot(fcNegZero);
4950 // Sign should be preserved
4951 Known.SignBit = KnownSrc.SignBit;
4952 break;
4954 case Intrinsic::exp:
4955 case Intrinsic::exp2: {
4956 Known.knownNot(fcNegative);
4957 if ((InterestedClasses & fcNan) == fcNone)
4958 break;
4960 KnownFPClass KnownSrc;
4961 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4962 InterestedClasses, KnownSrc, Depth + 1, Q, TLI);
4963 if (KnownSrc.isKnownNeverNaN()) {
4964 Known.knownNot(fcNan);
4965 Known.SignBit = false;
4968 break;
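// Illustrative note (a sketch; names are hypothetical): for
//   %y = call float @llvm.exp.f32(float %x)
// the result is never negative (exp maps into [+0.0, +inf]), and once %x
// is known not to be NaN, %y is known not to be NaN and its sign bit is
// known to be zero.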
4970 case Intrinsic::fptrunc_round: {
4971 computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses,
4972 Known, Depth, Q, TLI);
4973 break;
4975 case Intrinsic::log:
4976 case Intrinsic::log10:
4977 case Intrinsic::log2:
4978 case Intrinsic::experimental_constrained_log:
4979 case Intrinsic::experimental_constrained_log10:
4980 case Intrinsic::experimental_constrained_log2: {
4981 // log(+inf) -> +inf
4982 // log([+-]0.0) -> -inf
4983 // log(-inf) -> nan
4984 // log(-x) -> nan
4985 if ((InterestedClasses & (fcNan | fcInf)) == fcNone)
4986 break;
4988 FPClassTest InterestedSrcs = InterestedClasses;
4989 if ((InterestedClasses & fcNegInf) != fcNone)
4990 InterestedSrcs |= fcZero | fcSubnormal;
4991 if ((InterestedClasses & fcNan) != fcNone)
4992 InterestedSrcs |= fcNan | (fcNegative & ~fcNan);
4994 KnownFPClass KnownSrc;
4995 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
4996 KnownSrc, Depth + 1, Q, TLI);
4998 if (KnownSrc.isKnownNeverPosInfinity())
4999 Known.knownNot(fcPosInf);
5001 if (KnownSrc.isKnownNeverNaN() &&
5002 KnownSrc.cannotBeOrderedLessThanZero())
5003 Known.knownNot(fcNan);
5005 if (KnownSrc.isKnownNeverLogicalZero(*II->getFunction(), II->getType()))
5006 Known.knownNot(fcNegInf);
5008 break;
5010 case Intrinsic::powi: {
5011 if ((InterestedClasses & fcNegative) == fcNone)
5012 break;
5014 const Value *Exp = II->getArgOperand(1);
5015 unsigned BitWidth =
5016 Exp->getType()->getScalarType()->getIntegerBitWidth();
5017 KnownBits ExponentKnownBits(BitWidth);
5018 computeKnownBits(Exp, DemandedElts, ExponentKnownBits, Depth + 1, Q);
5020 if (ExponentKnownBits.Zero[0]) { // Is even
5021 Known.knownNot(fcNegative);
5022 break;
5025 // Given that exp is an integer, here are the
5026 // ways that pow can return a negative value:
5028 // pow(-x, exp) --> negative if exp is odd and x is negative.
5029 // pow(-0, exp) --> -inf if exp is negative odd.
5030 // pow(-0, exp) --> -0 if exp is positive odd.
5031 // pow(-inf, exp) --> -0 if exp is negative odd.
5032 // pow(-inf, exp) --> -inf if exp is positive odd.
5033 KnownFPClass KnownSrc;
5034 computeKnownFPClass(II->getArgOperand(0), DemandedElts, fcNegative,
5035 KnownSrc, Depth + 1, Q, TLI);
5036 if (KnownSrc.isKnownNever(fcNegative))
5037 Known.knownNot(fcNegative);
5038 break;
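// Illustrative worked example (a sketch; names are hypothetical): for
//   %r = call double @llvm.powi.f64.i32(double %x, i32 %n)
// if computeKnownBits proves the low bit of %n is zero (say %n comes from
// shl i32 %m, 1), the exponent is even and %r cannot be negative, e.g.
// powi(-2.0, 2) == 4.0. With an odd exponent, powi(-2.0, 3) == -8.0, so
// negative results are ruled out only when %x cannot be negative.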
5040 case Intrinsic::arithmetic_fence: {
5041 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
5042 InterestedClasses, Known, Depth + 1, Q, TLI);
5043 break;
5045 case Intrinsic::experimental_constrained_sitofp:
5046 case Intrinsic::experimental_constrained_uitofp:
5047 // Cannot produce nan
5048 Known.knownNot(fcNan);
5050 // sitofp and uitofp turn into +0.0 for zero.
5051 Known.knownNot(fcNegZero);
5053 // Integers cannot be subnormal
5054 Known.knownNot(fcSubnormal);
5056 if (IID == Intrinsic::experimental_constrained_uitofp)
5057 Known.signBitMustBeZero();
5059 // TODO: Copy inf handling from instructions
5060 break;
5061 default:
5062 break;
5066 break;
5068 case Instruction::FAdd:
5069 case Instruction::FSub: {
5070 KnownFPClass KnownLHS, KnownRHS;
5071 computeKnownFPClass(Op->getOperand(1), DemandedElts, fcNan | fcInf,
5072 KnownRHS, Depth + 1, Q, TLI);
5074 if (KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNegZero() ||
5075 (Opc == Instruction::FSub && KnownRHS.isKnownNeverPosZero())) {
5076 // RHS is canonically cheaper to compute. Skip inspecting the LHS if
5077 // there's no point.
5078 computeKnownFPClass(Op->getOperand(0), DemandedElts, fcNan | fcInf,
5079 KnownLHS, Depth + 1, Q, TLI);
5080 // Adding positive and negative infinity produces NaN.
5081 // TODO: Check sign of infinities.
5082 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
5083 (KnownLHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverInfinity()))
5084 Known.knownNot(fcNan);
5086 const Function *F = cast<Instruction>(Op)->getFunction();
5087 if (Op->getOpcode() == Instruction::FAdd) {
5088 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
5089 if ((KnownLHS.isKnownNeverLogicalNegZero(*F, Op->getType()) ||
5090 KnownRHS.isKnownNeverLogicalNegZero(*F, Op->getType())) &&
5091 // Make sure output negative denormal can't flush to -0
5092 outputDenormalIsIEEEOrPosZero(*F, Op->getType()))
5093 Known.knownNot(fcNegZero);
5094 } else {
5095 // Only fsub -0, +0 can return -0
5096 if ((KnownLHS.isKnownNeverLogicalNegZero(*F, Op->getType()) ||
5097 KnownRHS.isKnownNeverLogicalPosZero(*F, Op->getType())) &&
5098 // Make sure output negative denormal can't flush to -0
5099 outputDenormalIsIEEEOrPosZero(*F, Op->getType()))
5100 Known.knownNot(fcNegZero);
5104 break;
5106 case Instruction::FMul: {
5107 // X * X is always non-negative or a NaN.
5108 if (Op->getOperand(0) == Op->getOperand(1))
5109 Known.knownNot(fcNegative);
5111 if ((InterestedClasses & fcNan) != fcNan)
5112 break;
5114 KnownFPClass KnownLHS, KnownRHS;
5115 computeKnownFPClass(Op->getOperand(1), DemandedElts,
5116 fcNan | fcInf | fcZero | fcSubnormal, KnownRHS,
5117 Depth + 1, Q, TLI);
5118 if (KnownRHS.isKnownNeverNaN() &&
5119 (KnownRHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverZero())) {
5120 computeKnownFPClass(Op->getOperand(0), DemandedElts,
5121 fcNan | fcInf | fcZero, KnownLHS, Depth + 1, Q, TLI);
5122 if (!KnownLHS.isKnownNeverNaN())
5123 break;
5125 const Function *F = cast<Instruction>(Op)->getFunction();
5127 // If neither side can be zero (or NaN), fmul never produces NaN.
5128 // TODO: Check operand combinations.
5129 // e.g. fmul nofpclass(inf nan zero), nofpclass(nan) -> nofpclass(nan)
5130 if ((KnownLHS.isKnownNeverInfinity() ||
5131 KnownLHS.isKnownNeverLogicalZero(*F, Op->getType())) &&
5132 (KnownRHS.isKnownNeverInfinity() ||
5133 KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())))
5134 Known.knownNot(fcNan);
5137 break;
5139 case Instruction::FDiv:
5140 case Instruction::FRem: {
5141 if (Op->getOperand(0) == Op->getOperand(1)) {
5142 // TODO: Could filter out snan if we inspect the operand
5143 if (Op->getOpcode() == Instruction::FDiv) {
5144 // X / X is always exactly 1.0 or a NaN.
5145 Known.KnownFPClasses = fcNan | fcPosNormal;
5146 } else {
5147 // X % X is always exactly [+-]0.0 or a NaN.
5148 Known.KnownFPClasses = fcNan | fcZero;
5151 break;
5154 const bool WantNan = (InterestedClasses & fcNan) != fcNone;
5155 const bool WantNegative = (InterestedClasses & fcNegative) != fcNone;
5156 const bool WantPositive =
5157 Opc == Instruction::FRem && (InterestedClasses & fcPositive) != fcNone;
5158 if (!WantNan && !WantNegative && !WantPositive)
5159 break;
5161 KnownFPClass KnownLHS, KnownRHS;
5163 computeKnownFPClass(Op->getOperand(1), DemandedElts,
5164 fcNan | fcInf | fcZero | fcNegative, KnownRHS,
5165 Depth + 1, Q, TLI);
5167 bool KnowSomethingUseful =
5168 KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcNegative);
5170 if (KnowSomethingUseful || WantPositive) {
5171 const FPClassTest InterestedLHS =
5172 WantPositive ? fcAllFlags
5173 : fcNan | fcInf | fcZero | fcSubnormal | fcNegative;
5175 computeKnownFPClass(Op->getOperand(0), DemandedElts,
5176 InterestedClasses & InterestedLHS, KnownLHS,
5177 Depth + 1, Q, TLI);
5180 const Function *F = cast<Instruction>(Op)->getFunction();
5182 if (Op->getOpcode() == Instruction::FDiv) {
5183 // Only 0/0, Inf/Inf produce NaN.
5184 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
5185 (KnownLHS.isKnownNeverInfinity() ||
5186 KnownRHS.isKnownNeverInfinity()) &&
5187 (KnownLHS.isKnownNeverLogicalZero(*F, Op->getType()) ||
5188 KnownRHS.isKnownNeverLogicalZero(*F, Op->getType()))) {
5189 Known.knownNot(fcNan);
5192 // X / -0.0 is -Inf (or NaN).
5193 // +X / +X is +X
5194 if (KnownLHS.isKnownNever(fcNegative) && KnownRHS.isKnownNever(fcNegative))
5195 Known.knownNot(fcNegative);
5196 } else {
5197 // Inf REM x and x REM 0 produce NaN.
5198 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
5199 KnownLHS.isKnownNeverInfinity() &&
5200 KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())) {
5201 Known.knownNot(fcNan);
5204 // The sign for frem is the same as the first operand.
5205 if (KnownLHS.cannotBeOrderedLessThanZero())
5206 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
5207 if (KnownLHS.cannotBeOrderedGreaterThanZero())
5208 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
5210 // See if we can be more aggressive about the sign of 0.
5211 if (KnownLHS.isKnownNever(fcNegative))
5212 Known.knownNot(fcNegative);
5213 if (KnownLHS.isKnownNever(fcPositive))
5214 Known.knownNot(fcPositive);
5217 break;
5219 case Instruction::FPExt: {
5220 // Infinity, nan and zero propagate from source.
5221 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
5222 Known, Depth + 1, Q, TLI);
5224 const fltSemantics &DstTy =
5225 Op->getType()->getScalarType()->getFltSemantics();
5226 const fltSemantics &SrcTy =
5227 Op->getOperand(0)->getType()->getScalarType()->getFltSemantics();
5229 // All subnormal inputs should be in the normal range in the result type.
5230 if (APFloat::isRepresentableAsNormalIn(SrcTy, DstTy))
5231 Known.knownNot(fcSubnormal);
5233 // Sign bit of a nan isn't guaranteed.
5234 if (!Known.isKnownNeverNaN())
5235 Known.SignBit = std::nullopt;
5236 break;
5238 case Instruction::FPTrunc: {
5239 computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known,
5240 Depth, Q, TLI);
5241 break;
5243 case Instruction::SIToFP:
5244 case Instruction::UIToFP: {
5245 // Cannot produce nan
5246 Known.knownNot(fcNan);
5248 // Integers cannot be subnormal
5249 Known.knownNot(fcSubnormal);
5251 // sitofp and uitofp turn into +0.0 for zero.
5252 Known.knownNot(fcNegZero);
5253 if (Op->getOpcode() == Instruction::UIToFP)
5254 Known.signBitMustBeZero();
5256 if (InterestedClasses & fcInf) {
5257 // Get width of largest magnitude integer (remove a bit if signed).
5258 // This still works for a signed minimum value because the largest FP
5259 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
5260 int IntSize = Op->getOperand(0)->getType()->getScalarSizeInBits();
5261 if (Op->getOpcode() == Instruction::SIToFP)
5262 --IntSize;
5264 // If the exponent of the largest finite FP value can hold the largest
5265 // integer, the result of the cast must be finite.
5266 Type *FPTy = Op->getType()->getScalarType();
5267 if (ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize)
5268 Known.knownNot(fcInf);
5271 break;
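// Illustrative worked example (a sketch): for
//   %f = sitofp i32 %n to float
// IntSize is 31 after dropping the sign bit, and ilogb of the largest
// finite float is 127 >= 31, so fcInf is cleared: no i32 can round to an
// infinite float. A uitofp from i256 (256 > 127) would leave fcInf
// possible.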
5273 case Instruction::ExtractElement: {
5274 // Look through extract element. If the index is non-constant or
5275 // out-of-range, demand all elements, otherwise just the extracted element.
5276 const Value *Vec = Op->getOperand(0);
5277 const Value *Idx = Op->getOperand(1);
5278 auto *CIdx = dyn_cast<ConstantInt>(Idx);
5280 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
5281 unsigned NumElts = VecTy->getNumElements();
5282 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
5283 if (CIdx && CIdx->getValue().ult(NumElts))
5284 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
5285 return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
5286 Depth + 1, Q, TLI);
5289 break;
5291 case Instruction::InsertElement: {
5292 if (isa<ScalableVectorType>(Op->getType()))
5293 return;
5295 const Value *Vec = Op->getOperand(0);
5296 const Value *Elt = Op->getOperand(1);
5297 auto *CIdx = dyn_cast<ConstantInt>(Op->getOperand(2));
5298 // Early out if the index is non-constant or out-of-range.
5299 unsigned NumElts = DemandedElts.getBitWidth();
5300 if (!CIdx || CIdx->getValue().uge(NumElts))
5301 return;
5303 unsigned EltIdx = CIdx->getZExtValue();
5304 // Do we demand the inserted element?
5305 if (DemandedElts[EltIdx]) {
5306 computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1, Q, TLI);
5307 // If we don't know any bits, early out.
5308 if (Known.isUnknown())
5309 break;
5310 } else {
5311 Known.KnownFPClasses = fcNone;
5314 // We don't need the base vector element that has been inserted.
5315 APInt DemandedVecElts = DemandedElts;
5316 DemandedVecElts.clearBit(EltIdx);
5317 if (!!DemandedVecElts) {
5318 KnownFPClass Known2;
5319 computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
5320 Depth + 1, Q, TLI);
5321 Known |= Known2;
5324 break;
5326 case Instruction::ShuffleVector: {
5327 // For undef elements, we don't know anything about the common state of
5328 // the shuffle result.
5329 APInt DemandedLHS, DemandedRHS;
5330 auto *Shuf = dyn_cast<ShuffleVectorInst>(Op);
5331 if (!Shuf || !getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
5332 return;
5334 if (!!DemandedLHS) {
5335 const Value *LHS = Shuf->getOperand(0);
5336 computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known,
5337 Depth + 1, Q, TLI);
5339 // If we don't know any bits, early out.
5340 if (Known.isUnknown())
5341 break;
5342 } else {
5343 Known.KnownFPClasses = fcNone;
5346 if (!!DemandedRHS) {
5347 KnownFPClass Known2;
5348 const Value *RHS = Shuf->getOperand(1);
5349 computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2,
5350 Depth + 1, Q, TLI);
5351 Known |= Known2;
5354 break;
5356 case Instruction::ExtractValue: {
5357 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
5358 Known, Depth + 1, Q, TLI);
5359 break;
5361 default:
5362 break;
5366 KnownFPClass llvm::computeKnownFPClass(
5367 const Value *V, const APInt &DemandedElts, const DataLayout &DL,
5368 FPClassTest InterestedClasses, unsigned Depth, const TargetLibraryInfo *TLI,
5369 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
5370 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
5371 KnownFPClass KnownClasses;
5372 ::computeKnownFPClass(V, DemandedElts, InterestedClasses, KnownClasses, Depth,
5373 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE),
5374 TLI);
5375 return KnownClasses;
5378 KnownFPClass
5379 llvm::computeKnownFPClass(const Value *V, const DataLayout &DL,
5380 FPClassTest InterestedClasses, unsigned Depth,
5381 const TargetLibraryInfo *TLI, AssumptionCache *AC,
5382 const Instruction *CxtI, const DominatorTree *DT,
5383 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
5384 KnownFPClass Known;
5385 ::computeKnownFPClass(V, Known, InterestedClasses, Depth,
5386 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE),
5387 TLI);
5388 return Known;
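// Minimal usage sketch (illustrative, not part of the original source;
// assumes F is a Function whose first argument is floating-point and that
// the header's default arguments cover the trailing analysis parameters):
//
//   KnownFPClass Known =
//       computeKnownFPClass(F.getArg(0), F.getParent()->getDataLayout(),
//                           fcAllFlags, /*Depth=*/0);
//   if (Known.isKnownNeverNaN())
//     ; // the argument is ordered wherever this query's context holds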
5391 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
5393 // All byte-wide stores are splatable, even of arbitrary variables.
5394 if (V->getType()->isIntegerTy(8))
5395 return V;
5397 LLVMContext &Ctx = V->getContext();
5399 // Undefs don't care.
5400 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
5401 if (isa<UndefValue>(V))
5402 return UndefInt8;
5404 // Return Undef for a zero-sized type.
5405 if (!DL.getTypeStoreSize(V->getType()).isNonZero())
5406 return UndefInt8;
5408 Constant *C = dyn_cast<Constant>(V);
5409 if (!C) {
5410 // Conceptually, we could handle things like:
5411 // %a = zext i8 %X to i16
5412 // %b = shl i16 %a, 8
5413 // %c = or i16 %a, %b
5414 // but until there is an example that actually needs this, it doesn't seem
5415 // worth worrying about.
5416 return nullptr;
5419 // Handle 'null' ConstantArrayZero etc.
5420 if (C->isNullValue())
5421 return Constant::getNullValue(Type::getInt8Ty(Ctx));
5423 // Constant floating-point values can be handled as integer values if the
5424 // corresponding integer value is "byteable". An important case is 0.0.
5425 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
5426 Type *Ty = nullptr;
5427 if (CFP->getType()->isHalfTy())
5428 Ty = Type::getInt16Ty(Ctx);
5429 else if (CFP->getType()->isFloatTy())
5430 Ty = Type::getInt32Ty(Ctx);
5431 else if (CFP->getType()->isDoubleTy())
5432 Ty = Type::getInt64Ty(Ctx);
5433 // Don't handle long double formats, which have strange constraints.
5434 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
5435 : nullptr;
5438 // We can handle constant integers whose width is a multiple of 8 bits.
5439 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
5440 if (CI->getBitWidth() % 8 == 0) {
5441 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
5442 if (!CI->getValue().isSplat(8))
5443 return nullptr;
5444 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
5448 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
5449 if (CE->getOpcode() == Instruction::IntToPtr) {
5450 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
5451 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
5452 return isBytewiseValue(
5453 ConstantExpr::getIntegerCast(CE->getOperand(0),
5454 Type::getIntNTy(Ctx, BitWidth), false),
5455 DL);
5460 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
5461 if (LHS == RHS)
5462 return LHS;
5463 if (!LHS || !RHS)
5464 return nullptr;
5465 if (LHS == UndefInt8)
5466 return RHS;
5467 if (RHS == UndefInt8)
5468 return LHS;
5469 return nullptr;
5472 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
5473 Value *Val = UndefInt8;
5474 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
5475 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
5476 return nullptr;
5477 return Val;
5480 if (isa<ConstantAggregate>(C)) {
5481 Value *Val = UndefInt8;
5482 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
5483 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
5484 return nullptr;
5485 return Val;
5488 // Don't try to handle the handful of other constants.
5489 return nullptr;
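// Illustrative examples (a sketch of the cases above):
//   isBytewiseValue(i8 %x)          --> %x       (any byte splats itself)
//   isBytewiseValue(i32 0xABABABAB) --> i8 0xAB  (integer splat)
//   isBytewiseValue(float 0.0)      --> i8 0     (handled as i32 0)
//   isBytewiseValue(i32 0x01020304) --> nullptr  (bytes disagree)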
5492 // This is the recursive version of BuildSubAggregate. It takes a few different
5493 // arguments. Idxs is the index within the nested struct From that we are
5494 // looking at now (which is of type IndexedType). IdxSkip is the number of
5495 // indices from Idxs that should be left out when inserting into the resulting
5496 // struct. To is the result struct built so far, new insertvalue instructions
5497 // build on that.
5498 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
5499 SmallVectorImpl<unsigned> &Idxs,
5500 unsigned IdxSkip,
5501 Instruction *InsertBefore) {
5502 StructType *STy = dyn_cast<StructType>(IndexedType);
5503 if (STy) {
5504 // Save the original To argument so we can modify it
5505 Value *OrigTo = To;
5506 // General case, the type indexed by Idxs is a struct
5507 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5508 // Process each struct element recursively
5509 Idxs.push_back(i);
5510 Value *PrevTo = To;
5511 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
5512 InsertBefore);
5513 Idxs.pop_back();
5514 if (!To) {
5515 // Couldn't find any inserted value for this index? Cleanup
5516 while (PrevTo != OrigTo) {
5517 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
5518 PrevTo = Del->getAggregateOperand();
5519 Del->eraseFromParent();
5521 // Stop processing elements
5522 break;
5525 // If we successfully found a value for each of our subaggregates
5526 if (To)
5527 return To;
5529 // Base case, the type indexed by Idxs is not a struct, or not all of
5530 // the struct's elements had a value that was inserted directly. In the latter
5531 // case, perhaps we can't determine each of the subelements individually, but
5532 // we might be able to find the complete struct somewhere.
5534 // Find the value that is at that particular spot
5535 Value *V = FindInsertedValue(From, Idxs);
5537 if (!V)
5538 return nullptr;
5540 // Insert the value in the new (sub) aggregate
5541 return InsertValueInst::Create(To, V, ArrayRef(Idxs).slice(IdxSkip), "tmp",
5542 InsertBefore);
5545 // This helper takes a nested struct and extracts a part of it (which is again a
5546 // struct) into a new value. For example, given the struct:
5547 // { a, { b, { c, d }, e } }
5548 // and the indices "1, 1" this returns
5549 // { c, d }.
5551 // It does this by inserting an insertvalue for each element in the resulting
5552 // struct, as opposed to just inserting a single struct. This will only work if
5553 // each of the elements of the substruct is known (i.e., inserted into From by an
5554 // insertvalue instruction somewhere).
5556 // All inserted insertvalue instructions are inserted before InsertBefore
5557 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
5558 Instruction *InsertBefore) {
5559 assert(InsertBefore && "Must have someplace to insert!");
5560 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
5561 idx_range);
5562 Value *To = PoisonValue::get(IndexedType);
5563 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
5564 unsigned IdxSkip = Idxs.size();
5566 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
5569 /// Given an aggregate and a sequence of indices, see if the scalar value
5570 /// indexed is already around as a register, for example if it was inserted
5571 /// directly into the aggregate.
5573 /// If InsertBefore is not null, this function will duplicate (modified)
5574 /// insertvalues when a part of a nested struct is extracted.
5575 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
5576 Instruction *InsertBefore) {
5577 // Nothing to index? Just return V then (this is useful at the end of our
5578 // recursion).
5579 if (idx_range.empty())
5580 return V;
5581 // We have indices, so V should have an indexable type.
5582 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
5583 "Not looking at a struct or array?");
5584 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
5585 "Invalid indices for type?");
5587 if (Constant *C = dyn_cast<Constant>(V)) {
5588 C = C->getAggregateElement(idx_range[0]);
5589 if (!C) return nullptr;
5590 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
5593 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
5594 // Loop over the indices of the insertvalue instruction in parallel with the
5595 // requested indices
5596 const unsigned *req_idx = idx_range.begin();
5597 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
5598 i != e; ++i, ++req_idx) {
5599 if (req_idx == idx_range.end()) {
5600 // We can't handle this without inserting insertvalues
5601 if (!InsertBefore)
5602 return nullptr;
5604 // The requested index identifies a part of a nested aggregate. Handle
5605 // this specially. For example,
5606 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
5607 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
5608 // %C = extractvalue {i32, { i32, i32 } } %B, 1
5609 // This can be changed into
5610 // %A = insertvalue {i32, i32 } undef, i32 10, 0
5611 // %C = insertvalue {i32, i32 } %A, i32 11, 1
5612 // which allows the unused 0,0 element from the nested struct to be
5613 // removed.
5614 return BuildSubAggregate(V, ArrayRef(idx_range.begin(), req_idx),
5615 InsertBefore);
5618 // This insertvalue inserts something other than what we are looking for.
5619 // See if the (aggregate) value inserted into has the value we are
5620 // looking for, then.
5621 if (*req_idx != *i)
5622 return FindInsertedValue(I->getAggregateOperand(), idx_range,
5623 InsertBefore);
5625 // If we end up here, the indices of the insertvalue match with those
5626 // requested (though possibly only partially). Now we recursively look at
5627 // the inserted value, passing any remaining indices.
5628 return FindInsertedValue(I->getInsertedValueOperand(),
5629 ArrayRef(req_idx, idx_range.end()), InsertBefore);
5632 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
5633 // If we're extracting a value from an aggregate that was extracted from
5634 // something else, we can extract from that something else directly instead.
5635 // However, we will need to chain I's indices with the requested indices.
5637 // Calculate the number of indices required
5638 unsigned size = I->getNumIndices() + idx_range.size();
5639 // Allocate some space to put the new indices in
5640 SmallVector<unsigned, 5> Idxs;
5641 Idxs.reserve(size);
5642 // Add indices from the extract value instruction
5643 Idxs.append(I->idx_begin(), I->idx_end());
5645 // Add requested indices
5646 Idxs.append(idx_range.begin(), idx_range.end());
5648 assert(Idxs.size() == size
5649 && "Number of indices added not correct?");
5651 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
5653 // Otherwise, we don't know (such as extracting from a function return value
5654 // or load instruction)
5655 return nullptr;
5658 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
5659 unsigned CharSize) {
5660 // Make sure the GEP has exactly three arguments.
5661 if (GEP->getNumOperands() != 3)
5662 return false;
5664 // Make sure the index-ee is a pointer to an array of \p CharSize
5665 // integers.
5666 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
5667 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
5668 return false;
5670 // Check to make sure that the first operand of the GEP is an integer and
5671 // has value 0 so that we are sure we're indexing into the initializer.
5672 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
5673 if (!FirstIdx || !FirstIdx->isZero())
5674 return false;
5676 return true;
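// Illustrative example (a sketch; @str and %idx are hypothetical): with
// CharSize == 8, a GEP of the shape
//   getelementptr [13 x i8], ptr @str, i64 0, i64 %idx
// passes all three checks above, while a GEP whose first index is nonzero
// or whose source element type is not an array of i8 is rejected.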
5679 // If V refers to an initialized global constant, set Slice either to
5680 // its initializer if the size of its elements equals ElementSize, or,
5681 // for ElementSize == 8, to its representation as an array of unsigned
5682 // char. Return true on success.
5683 // Offset is in units of "number of ElementSize-sized elements".
5684 bool llvm::getConstantDataArrayInfo(const Value *V,
5685 ConstantDataArraySlice &Slice,
5686 unsigned ElementSize, uint64_t Offset) {
5687 assert(V && "V should not be null.");
5688 assert((ElementSize % 8) == 0 &&
5689 "ElementSize expected to be a multiple of the size of a byte.");
5690 unsigned ElementSizeInBytes = ElementSize / 8;
5692 // Drill down into the pointer expression V, ignoring any intervening
5693 // casts, and determine the identity of the object it references along
5694 // with the cumulative byte offset into it.
5695 const GlobalVariable *GV =
5696 dyn_cast<GlobalVariable>(getUnderlyingObject(V));
5697 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
5698 // Fail if V is not based on a constant global object.
5699 return false;
5701 const DataLayout &DL = GV->getParent()->getDataLayout();
5702 APInt Off(DL.getIndexTypeSizeInBits(V->getType()), 0);
5704 if (GV != V->stripAndAccumulateConstantOffsets(DL, Off,
5705 /*AllowNonInbounds*/ true))
5706 // Fail if a constant offset could not be determined.
5707 return false;
5709 uint64_t StartIdx = Off.getLimitedValue();
5710 if (StartIdx == UINT64_MAX)
5711 // Fail if the constant offset is excessive.
5712 return false;
5714 // Off/StartIdx is in bytes, so we need to convert it to a number of
5715 // elements. Simply bail out if that isn't possible.
5716 if ((StartIdx % ElementSizeInBytes) != 0)
5717 return false;
5719 Offset += StartIdx / ElementSizeInBytes;
5720 ConstantDataArray *Array = nullptr;
5721 ArrayType *ArrayTy = nullptr;
5723 if (GV->getInitializer()->isNullValue()) {
5724 Type *GVTy = GV->getValueType();
5725 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedValue();
5726 uint64_t Length = SizeInBytes / ElementSizeInBytes;
5728 Slice.Array = nullptr;
5729 Slice.Offset = 0;
5730 // Return an empty Slice for undersized constants to let callers
5731 // transform even undefined library calls into simpler, well-defined
5732 // expressions. This is preferable to making the calls although it
5733 // prevents sanitizers from detecting such calls.
5734 Slice.Length = Length < Offset ? 0 : Length - Offset;
5735 return true;
5738 auto *Init = const_cast<Constant *>(GV->getInitializer());
5739 if (auto *ArrayInit = dyn_cast<ConstantDataArray>(Init)) {
5740 Type *InitElTy = ArrayInit->getElementType();
5741 if (InitElTy->isIntegerTy(ElementSize)) {
5742 // If Init is an initializer for an array of the expected type
5743 // and size, use it as is.
5744 Array = ArrayInit;
5745 ArrayTy = ArrayInit->getType();
5749 if (!Array) {
5750 if (ElementSize != 8)
5751 // TODO: Handle conversions to larger integral types.
5752 return false;
5754 // Otherwise extract the portion of the initializer starting
5755 // at Offset as an array of bytes, and reset Offset.
5756 Init = ReadByteArrayFromGlobal(GV, Offset);
5757 if (!Init)
5758 return false;
5760 Offset = 0;
5761 Array = dyn_cast<ConstantDataArray>(Init);
5762 ArrayTy = dyn_cast<ArrayType>(Init->getType());
5765 uint64_t NumElts = ArrayTy->getArrayNumElements();
5766 if (Offset > NumElts)
5767 return false;
5769 Slice.Array = Array;
5770 Slice.Offset = Offset;
5771 Slice.Length = NumElts - Offset;
5772 return true;
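// Illustrative example (a sketch; @g is hypothetical): for
//   @g = constant [8 x i8] c"abc\00defg"
// querying V == gep(@g, 0, 2) with ElementSize == 8 produces
// Slice.Array == @g's initializer, Slice.Offset == 2, Slice.Length == 6.
// A zeroinitializer global instead reports Slice.Array == nullptr and
// Slice.Offset == 0.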
5775 /// Extract bytes from the initializer of the constant array V, which need
5776 /// not be a nul-terminated string. On success, store the bytes in Str and
5777 /// return true. When TrimAtNul is set, Str will contain only the bytes up
5778 /// to but not including the first nul. Return false on failure.
5779 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
5780 bool TrimAtNul) {
5781 ConstantDataArraySlice Slice;
5782 if (!getConstantDataArrayInfo(V, Slice, 8))
5783 return false;
5785 if (Slice.Array == nullptr) {
5786 if (TrimAtNul) {
5787 // Return a nul-terminated string even for an empty Slice. This is
5788 // safe because all existing SimplifyLibcalls callers require string
5789 // arguments and the behavior of the functions they fold is undefined
5790 // otherwise. Folding the calls this way is preferable to making
5791 // the undefined library calls, even though it prevents sanitizers
5792 // from reporting such calls.
5793 Str = StringRef();
5794 return true;
5796 if (Slice.Length == 1) {
5797 Str = StringRef("", 1);
5798 return true;
5800 // We cannot instantiate a StringRef as we do not have an appropriate string
5801 // of 0s at hand.
5802 return false;
5805 // Start out with the entire array in the StringRef.
5806 Str = Slice.Array->getAsString();
5807 // Skip over 'offset' bytes.
5808 Str = Str.substr(Slice.Offset);
5810 if (TrimAtNul) {
5811 // Trim off the \0 and anything after it. If the array is not nul
5812 // terminated, we just return the rest of the string. The client may know
5813 // some other way that the string is length-bound.
5814 Str = Str.substr(0, Str.find('\0'));
5816 return true;
5819 // These next two are very similar to the above, but also look through PHI
5820 // nodes.
5821 // TODO: See if we can integrate these two together.
5823 /// If we can compute the length of the string pointed to by
5824 /// the specified pointer, return 'len+1'. If we can't, return 0.
5825 static uint64_t GetStringLengthH(const Value *V,
5826 SmallPtrSetImpl<const PHINode*> &PHIs,
5827 unsigned CharSize) {
5828 // Look through noop bitcast instructions.
5829 V = V->stripPointerCasts();
5831 // If this is a PHI node, there are two cases: either we have already seen it
5832 // or we haven't.
5833 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
5834 if (!PHIs.insert(PN).second)
5835 return ~0ULL; // already in the set.
5837 // If it was new, see if all the input strings are the same length.
5838 uint64_t LenSoFar = ~0ULL;
5839 for (Value *IncValue : PN->incoming_values()) {
5840 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
5841 if (Len == 0) return 0; // Unknown length -> unknown.
5843 if (Len == ~0ULL) continue;
5845 if (Len != LenSoFar && LenSoFar != ~0ULL)
5846 return 0; // Disagree -> unknown.
5847 LenSoFar = Len;
5850 // Success, all agree.
5851 return LenSoFar;
5854 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
5855 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
5856 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
5857 if (Len1 == 0) return 0;
5858 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
5859 if (Len2 == 0) return 0;
5860 if (Len1 == ~0ULL) return Len2;
5861 if (Len2 == ~0ULL) return Len1;
5862 if (Len1 != Len2) return 0;
5863 return Len1;
5866 // Otherwise, see if we can read the string.
5867 ConstantDataArraySlice Slice;
5868 if (!getConstantDataArrayInfo(V, Slice, CharSize))
5869 return 0;
5871 if (Slice.Array == nullptr)
5872 // Zeroinitializer (including an empty one).
5873 return 1;
5875 // Search for the first nul character. Return a conservative result even
5876 // when there is no nul. This is safe since otherwise the string function
5877 // being folded, such as strlen, is undefined; folding is preferable to
5878 // making the undefined library call.
5879 unsigned NullIndex = 0;
5880 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
5881 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
5882 break;
5885 return NullIndex + 1;
5888 /// If we can compute the length of the string pointed to by
5889 /// the specified pointer, return 'len+1'. If we can't, return 0.
5890 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
5891 if (!V->getType()->isPointerTy())
5892 return 0;
5894 SmallPtrSet<const PHINode*, 32> PHIs;
5895 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
5896 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
5897 // return the length of an empty string instead.
5898 return Len == ~0ULL ? 1 : Len;
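// Illustrative examples (a sketch; @.str and %p are hypothetical):
//   GetStringLength(@.str) with @.str = c"hi\00"  --> 3 (strlen + 1)
//   GetStringLength of a zeroinitializer array    --> 1 (empty string)
//   GetStringLength(%p) for an unanalyzable %p    --> 0 (unknown)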
5901 const Value *
5902 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
5903 bool MustPreserveNullness) {
5904 assert(Call &&
5905 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
5906 if (const Value *RV = Call->getReturnedArgOperand())
5907 return RV;
5908 // This can be used only as an aliasing property.
5909 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
5910 Call, MustPreserveNullness))
5911 return Call->getArgOperand(0);
5912 return nullptr;
5915 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
5916 const CallBase *Call, bool MustPreserveNullness) {
5917 switch (Call->getIntrinsicID()) {
5918 case Intrinsic::launder_invariant_group:
5919 case Intrinsic::strip_invariant_group:
5920 case Intrinsic::aarch64_irg:
5921 case Intrinsic::aarch64_tagp:
5922 return true;
5923 case Intrinsic::ptrmask:
5924 return !MustPreserveNullness;
5925 default:
5926 return false;
5930 /// \p PN defines a loop-variant pointer to an object. Check if the
5931 /// previous iteration of the loop was referring to the same object as \p PN.
5932 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
5933 const LoopInfo *LI) {
5934 // Find the loop-defined value.
5935 Loop *L = LI->getLoopFor(PN->getParent());
5936 if (PN->getNumIncomingValues() != 2)
5937 return true;
5939 // Find the value from previous iteration.
5940 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
5941 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
5942 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
5943 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
5944 return true;
5946 // If a new pointer is loaded in the loop, the pointer references a different
5947 // object in every iteration. E.g.:
5948 // for (i)
5949 // int *p = a[i];
5950 // ...
5951 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
5952 if (!L->isLoopInvariant(Load->getPointerOperand()))
5953 return false;
5954 return true;
5957 const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
5958 if (!V->getType()->isPointerTy())
5959 return V;
5960 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
5961 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
5962 V = GEP->getPointerOperand();
5963 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
5964 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
5965 V = cast<Operator>(V)->getOperand(0);
5966 if (!V->getType()->isPointerTy())
5967 return V;
5968 } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
5969 if (GA->isInterposable())
5970 return V;
5971 V = GA->getAliasee();
5972 } else {
5973 if (auto *PHI = dyn_cast<PHINode>(V)) {
5974 // Look through single-arg phi nodes created by LCSSA.
5975 if (PHI->getNumIncomingValues() == 1) {
5976 V = PHI->getIncomingValue(0);
5977 continue;
5979 } else if (auto *Call = dyn_cast<CallBase>(V)) {
5980 // CaptureTracking can know about special capturing properties of some
5981 // intrinsics, like launder.invariant.group, that can't be expressed with
5982 // the attributes but that have properties like returning an aliasing
5983 // pointer. Because some analyses may assume that a nocapture pointer is
5984 // not returned from some special intrinsic (the function would have to
5985 // be marked with the "returned" attribute), it is crucial to use this
5986 // function because it should be in sync with CaptureTracking. Not using
5987 // it may cause weird miscompilations where two aliasing pointers are
5988 // assumed not to alias.
5989 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
5990 V = RP;
5991 continue;
5995 return V;
5997 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
5999 return V;
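// Illustrative example (a sketch; names are hypothetical):
//   %a = alloca [16 x i8]
//   %p = getelementptr [16 x i8], ptr %a, i64 0, i64 8
//   %q = call ptr @llvm.launder.invariant.group.p0(ptr %p)
// getUnderlyingObject(%q) steps through the launder call (via
// getArgumentAliasingToReturnedPointer) and then the GEP, returning %a.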
6002 void llvm::getUnderlyingObjects(const Value *V,
6003 SmallVectorImpl<const Value *> &Objects,
6004 LoopInfo *LI, unsigned MaxLookup) {
6005 SmallPtrSet<const Value *, 4> Visited;
6006 SmallVector<const Value *, 4> Worklist;
6007 Worklist.push_back(V);
6008 do {
6009 const Value *P = Worklist.pop_back_val();
6010 P = getUnderlyingObject(P, MaxLookup);
6012 if (!Visited.insert(P).second)
6013 continue;
6015 if (auto *SI = dyn_cast<SelectInst>(P)) {
6016 Worklist.push_back(SI->getTrueValue());
6017 Worklist.push_back(SI->getFalseValue());
6018 continue;
6021 if (auto *PN = dyn_cast<PHINode>(P)) {
6022 // If this PHI changes the underlying object in every iteration of the
6023 // loop, don't look through it. Consider:
6024 // int **A;
6025 // for (i) {
6026 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
6027 // Curr = A[i];
6028 // *Prev, *Curr;
6030 // Prev is tracking Curr one iteration behind so they refer to different
6031 // underlying objects.
6032 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
6033 isSameUnderlyingObjectInLoop(PN, LI))
6034 append_range(Worklist, PN->incoming_values());
6035 continue;
6038 Objects.push_back(P);
6039 } while (!Worklist.empty());
6042 /// This is the function that does the work of looking through basic
6043 /// ptrtoint+arithmetic+inttoptr sequences.
6044 static const Value *getUnderlyingObjectFromInt(const Value *V) {
6045 do {
6046 if (const Operator *U = dyn_cast<Operator>(V)) {
6047 // If we find a ptrtoint, we can transfer control back to the
6048 // regular getUnderlyingObjectFromInt.
6049 if (U->getOpcode() == Instruction::PtrToInt)
6050 return U->getOperand(0);
6051 // If we find an add of a constant, a multiplied value, or a phi, it's
6052 // likely that the other operand will lead us to the base
6053 // object. We don't have to worry about the case where the
6054 // object address is somehow being computed by the multiply,
6055 // because our callers only care when the result is an
6056 // identifiable object.
6057 if (U->getOpcode() != Instruction::Add ||
6058 (!isa<ConstantInt>(U->getOperand(1)) &&
6059 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
6060 !isa<PHINode>(U->getOperand(1))))
6061 return V;
6062 V = U->getOperand(0);
6063 } else {
6064 return V;
6066 assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
6067 } while (true);
6070 /// This is a wrapper around getUnderlyingObjects and adds support for basic
6071 /// ptrtoint+arithmetic+inttoptr sequences.
6072 /// It returns false if an unidentified object is found in getUnderlyingObjects.
6073 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
6074 SmallVectorImpl<Value *> &Objects) {
6075 SmallPtrSet<const Value *, 16> Visited;
6076 SmallVector<const Value *, 4> Working(1, V);
6077 do {
6078 V = Working.pop_back_val();
6080 SmallVector<const Value *, 4> Objs;
6081 getUnderlyingObjects(V, Objs);
6083 for (const Value *V : Objs) {
6084 if (!Visited.insert(V).second)
6085 continue;
6086 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
6087 const Value *O =
6088 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
6089 if (O->getType()->isPointerTy()) {
6090 Working.push_back(O);
6091 continue;
6094 // If getUnderlyingObjects fails to find an identifiable object,
6095 // getUnderlyingObjectsForCodeGen also fails for safety.
6096 if (!isIdentifiedObject(V)) {
6097 Objects.clear();
6098 return false;
6100 Objects.push_back(const_cast<Value *>(V));
6102 } while (!Working.empty());
6103 return true;
6106 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
6107 AllocaInst *Result = nullptr;
6108 SmallPtrSet<Value *, 4> Visited;
6109 SmallVector<Value *, 4> Worklist;
6111 auto AddWork = [&](Value *V) {
6112 if (Visited.insert(V).second)
6113 Worklist.push_back(V);
6116 AddWork(V);
6117 do {
6118 V = Worklist.pop_back_val();
6119 assert(Visited.count(V));
6121 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
6122 if (Result && Result != AI)
6123 return nullptr;
6124 Result = AI;
6125 } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
6126 AddWork(CI->getOperand(0));
6127 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
6128 for (Value *IncValue : PN->incoming_values())
6129 AddWork(IncValue);
6130 } else if (auto *SI = dyn_cast<SelectInst>(V)) {
6131 AddWork(SI->getTrueValue());
6132 AddWork(SI->getFalseValue());
6133 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
6134 if (OffsetZero && !GEP->hasAllZeroIndices())
6135 return nullptr;
6136 AddWork(GEP->getPointerOperand());
6137 } else if (CallBase *CB = dyn_cast<CallBase>(V)) {
6138 Value *Returned = CB->getReturnedArgOperand();
6139 if (Returned)
6140 AddWork(Returned);
6141 else
6142 return nullptr;
6143 } else {
6144 return nullptr;
6146 } while (!Worklist.empty());
6148 return Result;
6151 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
6152 const Value *V, bool AllowLifetime, bool AllowDroppable) {
6153 for (const User *U : V->users()) {
6154 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
6155 if (!II)
6156 return false;
6158 if (AllowLifetime && II->isLifetimeStartOrEnd())
6159 continue;
6161 if (AllowDroppable && II->isDroppable())
6162 continue;
6164 return false;
6166 return true;
6169 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
6170 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
6171 V, /* AllowLifetime */ true, /* AllowDroppable */ false);
6173 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
6174 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
6175 V, /* AllowLifetime */ true, /* AllowDroppable */ true);
6178 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
6179 if (!LI.isUnordered())
6180 return true;
6181 const Function &F = *LI.getFunction();
6182 // A speculative load may create a race that did not exist in the source.
6183 return F.hasFnAttribute(Attribute::SanitizeThread) ||
6184 // A speculative load may read data from dirty regions.
6185 F.hasFnAttribute(Attribute::SanitizeAddress) ||
6186 F.hasFnAttribute(Attribute::SanitizeHWAddress);
6189 bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst,
6190 const Instruction *CtxI,
6191 AssumptionCache *AC,
6192 const DominatorTree *DT,
6193 const TargetLibraryInfo *TLI) {
6194 return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI,
6195 AC, DT, TLI);
6198 bool llvm::isSafeToSpeculativelyExecuteWithOpcode(
6199 unsigned Opcode, const Instruction *Inst, const Instruction *CtxI,
6200 AssumptionCache *AC, const DominatorTree *DT,
6201 const TargetLibraryInfo *TLI) {
6202 #ifndef NDEBUG
6203 if (Inst->getOpcode() != Opcode) {
6204 // Check that the operands are actually compatible with the Opcode override.
6205 auto hasEqualReturnAndLeadingOperandTypes =
6206 [](const Instruction *Inst, unsigned NumLeadingOperands) {
6207 if (Inst->getNumOperands() < NumLeadingOperands)
6208 return false;
6209 const Type *ExpectedType = Inst->getType();
6210 for (unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
6211 if (Inst->getOperand(ItOp)->getType() != ExpectedType)
6212 return false;
6213 return true;
6215 assert(!Instruction::isBinaryOp(Opcode) ||
6216 hasEqualReturnAndLeadingOperandTypes(Inst, 2));
6217 assert(!Instruction::isUnaryOp(Opcode) ||
6218 hasEqualReturnAndLeadingOperandTypes(Inst, 1));
6220 #endif
6222 switch (Opcode) {
6223 default:
6224 return true;
6225 case Instruction::UDiv:
6226 case Instruction::URem: {
6227 // x / y is undefined if y == 0.
6228 const APInt *V;
6229 if (match(Inst->getOperand(1), m_APInt(V)))
6230 return *V != 0;
6231 return false;
6233 case Instruction::SDiv:
6234 case Instruction::SRem: {
6235 // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
6236 const APInt *Numerator, *Denominator;
6237 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
6238 return false;
6239 // We cannot hoist this division if the denominator is 0.
6240 if (*Denominator == 0)
6241 return false;
6242 // It's safe to hoist if the denominator is not 0 or -1.
6243 if (!Denominator->isAllOnes())
6244 return true;
6245 // At this point we know that the denominator is -1. It is safe to hoist as
6246 // long as we know that the numerator is not INT_MIN.
6247 if (match(Inst->getOperand(0), m_APInt(Numerator)))
6248 return !Numerator->isMinSignedValue();
6249 // The numerator *might* be MinSignedValue.
6250 return false;
6252 case Instruction::Load: {
6253 const LoadInst *LI = dyn_cast<LoadInst>(Inst);
6254 if (!LI)
6255 return false;
6256 if (mustSuppressSpeculation(*LI))
6257 return false;
6258 const DataLayout &DL = LI->getModule()->getDataLayout();
6259 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
6260 LI->getType(), LI->getAlign(), DL,
6261 CtxI, AC, DT, TLI);
6263 case Instruction::Call: {
6264 auto *CI = dyn_cast<const CallInst>(Inst);
6265 if (!CI)
6266 return false;
6267 const Function *Callee = CI->getCalledFunction();
6269 // The called function could have undefined behavior or side-effects, even
6270 // if marked readnone nounwind.
6271 return Callee && Callee->isSpeculatable();
6273 case Instruction::VAArg:
6274 case Instruction::Alloca:
6275 case Instruction::Invoke:
6276 case Instruction::CallBr:
6277 case Instruction::PHI:
6278 case Instruction::Store:
6279 case Instruction::Ret:
6280 case Instruction::Br:
6281 case Instruction::IndirectBr:
6282 case Instruction::Switch:
6283 case Instruction::Unreachable:
6284 case Instruction::Fence:
6285 case Instruction::AtomicRMW:
6286 case Instruction::AtomicCmpXchg:
6287 case Instruction::LandingPad:
6288 case Instruction::Resume:
6289 case Instruction::CatchSwitch:
6290 case Instruction::CatchPad:
6291 case Instruction::CatchRet:
6292 case Instruction::CleanupPad:
6293 case Instruction::CleanupRet:
6294 return false; // Misc instructions which have effects
6295 }
6296 }
6298 bool llvm::mayHaveNonDefUseDependency(const Instruction &I) {
6299 if (I.mayReadOrWriteMemory())
6300 // Memory dependency possible
6301 return true;
6302 if (!isSafeToSpeculativelyExecute(&I))
6303 // Can't move above a maythrow call or infinite loop. Or if an
6304 // inalloca alloca, above a stacksave call.
6305 return true;
6306 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
6307 // 1) Can't reorder two inf-loop calls, even if readonly
6308 // 2) Also can't reorder an inf-loop call below an instruction which isn't
6309 // safe to speculatively execute. (Inverse of above.)
6310 return true;
6311 return false;
6312 }
6314 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
6315 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
6316 switch (OR) {
6317 case ConstantRange::OverflowResult::MayOverflow:
6318 return OverflowResult::MayOverflow;
6319 case ConstantRange::OverflowResult::AlwaysOverflowsLow:
6320 return OverflowResult::AlwaysOverflowsLow;
6321 case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
6322 return OverflowResult::AlwaysOverflowsHigh;
6323 case ConstantRange::OverflowResult::NeverOverflows:
6324 return OverflowResult::NeverOverflows;
6325 }
6326 llvm_unreachable("Unknown OverflowResult");
6327 }
6329 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
6330 static ConstantRange computeConstantRangeIncludingKnownBits(
6331 const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
6332 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
6333 OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
6334 KnownBits Known = computeKnownBits(
6335 V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
6336 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
6337 ConstantRange CR2 = computeConstantRange(V, ForSigned, UseInstrInfo);
6338 ConstantRange::PreferredRangeType RangeType =
6339 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
6340 return CR1.intersectWith(CR2, RangeType);
6341 }
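// Illustrative example (added for clarity): if known bits prove the top four
// bits of an i8 value are zero, CR1 = [0, 16); if range analysis proves
// [3, 100), the intersection [3, 16) is tighter than either input alone.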
6343 OverflowResult llvm::computeOverflowForUnsignedMul(
6344 const Value *LHS, const Value *RHS, const DataLayout &DL,
6345 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
6346 bool UseInstrInfo) {
6347 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
6348 nullptr, UseInstrInfo);
6349 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
6350 nullptr, UseInstrInfo);
6351 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
6352 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
6353 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
6354 }
6356 OverflowResult
6357 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
6358 const DataLayout &DL, AssumptionCache *AC,
6359 const Instruction *CxtI,
6360 const DominatorTree *DT, bool UseInstrInfo) {
6361 // Multiplying a value with n significant bits by one with m significant bits
6362 // yields a result with at most n + m significant bits. If that total does not
6363 // exceed the result bit width (minus 1), there is no overflow.
6364 // This means if we have enough leading sign bits in the operands
6365 // we can guarantee that the result does not overflow.
6366 // Ref: "Hacker's Delight" by Henry Warren
6367 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
6369 // Note that underestimating the number of sign bits gives a more
6370 // conservative answer.
6371 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
6372 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
6374 // First handle the easy case: if we have enough sign bits there's
6375 // definitely no overflow.
6376 if (SignBits > BitWidth + 1)
6377 return OverflowResult::NeverOverflows;
6379 // There are two ambiguous cases where there can be no overflow:
6380 // SignBits == BitWidth + 1 and
6381 // SignBits == BitWidth
6382 // The second case is difficult to check, therefore we only handle the
6383 // first case.
6384 if (SignBits == BitWidth + 1) {
6385 // It overflows only when both arguments are negative and the true
6386 // product is exactly the minimum negative number.
6387 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
6388 // For simplicity we just check if at least one side is not negative.
6389 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
6390 nullptr, UseInstrInfo);
6391 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
6392 nullptr, UseInstrInfo);
6393 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
6394 return OverflowResult::NeverOverflows;
6395 }
6396 return OverflowResult::MayOverflow;
6397 }
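// Worked example for the sign-bit argument above (illustrative): for i16
// operands that each have at least 9 sign bits, SignBits = 18 > 16 + 1.
// Each operand then fits in 8 signed bits, i.e. lies in [-128, 127], so the
// product magnitude is at most 128 * 128 = 16384 < 32768 and cannot overflow.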
6399 OverflowResult llvm::computeOverflowForUnsignedAdd(
6400 const Value *LHS, const Value *RHS, const DataLayout &DL,
6401 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
6402 bool UseInstrInfo) {
6403 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
6404 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
6405 nullptr, UseInstrInfo);
6406 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
6407 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
6408 nullptr, UseInstrInfo);
6409 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
6410 }
6412 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
6413 const Value *RHS,
6414 const AddOperator *Add,
6415 const DataLayout &DL,
6416 AssumptionCache *AC,
6417 const Instruction *CxtI,
6418 const DominatorTree *DT) {
6419 if (Add && Add->hasNoSignedWrap()) {
6420 return OverflowResult::NeverOverflows;
6421 }
6423 // If LHS and RHS each have at least two sign bits, the addition will look
6424 // like
6426 // XX..... +
6427 // YY.....
6429 // If the carry into the most significant position is 0, X and Y can't both
6430 // be 1 and therefore the carry out of the addition is also 0.
6432 // If the carry into the most significant position is 1, X and Y can't both
6433 // be 0 and therefore the carry out of the addition is also 1.
6435 // Since the carry into the most significant position is always equal to
6436 // the carry out of the addition, there is no signed overflow.
6437 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
6438 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
6439 return OverflowResult::NeverOverflows;
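// Worked example (illustrative): for i8 operands with two sign bits each,
// both values lie in [-64, 63], so their sum lies in [-128, 126], which
// always fits in the signed i8 range [-128, 127].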
6441 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
6442 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
6443 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
6444 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
6445 OverflowResult OR =
6446 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
6447 if (OR != OverflowResult::MayOverflow)
6448 return OR;
6450 // The remaining code needs Add to be available. Return early if it is not.
6451 if (!Add)
6452 return OverflowResult::MayOverflow;
6454 // If the sign of Add is the same as at least one of the operands, this add
6455 // CANNOT overflow. If this can be determined from the known bits of the
6456 // operands the above signedAddMayOverflow() check will have already done so.
6457 // The only other way to improve on the known bits is from an assumption, so
6458 // call computeKnownBitsFromAssume() directly.
6459 bool LHSOrRHSKnownNonNegative =
6460 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
6461 bool LHSOrRHSKnownNegative =
6462 (LHSRange.isAllNegative() || RHSRange.isAllNegative());
6463 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
6464 KnownBits AddKnown(LHSRange.getBitWidth());
6465 computeKnownBitsFromAssume(
6466 Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
6467 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
6468 (AddKnown.isNegative() && LHSOrRHSKnownNegative))
6469 return OverflowResult::NeverOverflows;
6470 }
6472 return OverflowResult::MayOverflow;
6473 }
6475 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
6476 const Value *RHS,
6477 const DataLayout &DL,
6478 AssumptionCache *AC,
6479 const Instruction *CxtI,
6480 const DominatorTree *DT) {
6481 // X - (X % ?)
6482 // The remainder of a value can't have greater magnitude than itself,
6483 // so the subtraction can't overflow.
6485 // X - (X -nuw ?)
6486 // In the minimal case, this would simplify to "?", so there's no subtract
6487 // at all. But if this analysis is used to peek through casts, for example,
6488 // then determining no-overflow may allow other transforms.
6490 // TODO: There are other patterns like this.
6491 // See simplifyICmpWithBinOpOnLHS() for candidates.
6492 if (match(RHS, m_URem(m_Specific(LHS), m_Value())) ||
6493 match(RHS, m_NUWSub(m_Specific(LHS), m_Value())))
6494 if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT))
6495 return OverflowResult::NeverOverflows;
6497 // Checking for conditions implied by dominating conditions may be expensive.
6498 // Limit it to usub_with_overflow calls for now.
6499 if (match(CxtI,
6500 m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
6501 if (auto C =
6502 isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
6503 if (*C)
6504 return OverflowResult::NeverOverflows;
6505 return OverflowResult::AlwaysOverflowsLow;
6506 }
6507 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
6508 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
6509 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
6510 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
6511 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
6512 }
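// Illustrative IR for the dominating-condition case above (hypothetical
// values, added for clarity):
//   %cmp = icmp uge i32 %x, %y
//   br i1 %cmp, label %then, label %end
// then:
//   %wo = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %x, i32 %y)
// Here the branch implies %x >= %y, so the subtraction never wraps.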
6514 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
6515 const Value *RHS,
6516 const DataLayout &DL,
6517 AssumptionCache *AC,
6518 const Instruction *CxtI,
6519 const DominatorTree *DT) {
6520 // X - (X % ?)
6521 // The remainder of a value can't have greater magnitude than itself,
6522 // so the subtraction can't overflow.
6524 // X - (X -nsw ?)
6525 // In the minimal case, this would simplify to "?", so there's no subtract
6526 // at all. But if this analysis is used to peek through casts, for example,
6527 // then determining no-overflow may allow other transforms.
6528 if (match(RHS, m_SRem(m_Specific(LHS), m_Value())) ||
6529 match(RHS, m_NSWSub(m_Specific(LHS), m_Value())))
6530 if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT))
6531 return OverflowResult::NeverOverflows;
6533 // If LHS and RHS each have at least two sign bits, the subtraction
6534 // cannot overflow.
6535 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
6536 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
6537 return OverflowResult::NeverOverflows;
6539 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
6540 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
6541 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
6542 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
6543 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
6544 }
6546 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
6547 const DominatorTree &DT) {
6548 SmallVector<const BranchInst *, 2> GuardingBranches;
6549 SmallVector<const ExtractValueInst *, 2> Results;
6551 for (const User *U : WO->users()) {
6552 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
6553 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
6555 if (EVI->getIndices()[0] == 0)
6556 Results.push_back(EVI);
6557 else {
6558 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
6560 for (const auto *U : EVI->users())
6561 if (const auto *B = dyn_cast<BranchInst>(U)) {
6562 assert(B->isConditional() && "How else is it using an i1?");
6563 GuardingBranches.push_back(B);
6564 }
6565 }
6566 } else {
6567 // We are using the aggregate directly in a way we don't want to analyze
6568 // here (storing it to a global, say).
6569 return false;
6570 }
6571 }
6573 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
6574 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
6575 if (!NoWrapEdge.isSingleEdge())
6576 return false;
6578 // Check if all users of the add are provably no-wrap.
6579 for (const auto *Result : Results) {
6580 // If the extractvalue itself is not executed on overflow, then we don't
6581 // need to check each use separately, since domination is transitive.
6582 if (DT.dominates(NoWrapEdge, Result->getParent()))
6583 continue;
6585 for (const auto &RU : Result->uses())
6586 if (!DT.dominates(NoWrapEdge, RU))
6587 return false;
6588 }
6590 return true;
6591 };
6593 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
6594 }
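// Illustrative pattern recognized above (added for clarity):
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %val = extractvalue { i32, i1 } %res, 0
//   %ovf = extractvalue { i32, i1 } %res, 1
//   br i1 %ovf, label %trap, label %cont
// If every use of %val is dominated by the edge to %cont, the add provably
// does not wrap wherever %val is used.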
6596 /// Shifts return poison if the shift amount is equal to or exceeds the bitwidth.
6597 static bool shiftAmountKnownInRange(const Value *ShiftAmount) {
6598 auto *C = dyn_cast<Constant>(ShiftAmount);
6599 if (!C)
6600 return false;
6602 // Shifts return poison if the shift amount is equal to or exceeds the bitwidth.
6603 SmallVector<const Constant *, 4> ShiftAmounts;
6604 if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
6605 unsigned NumElts = FVTy->getNumElements();
6606 for (unsigned i = 0; i < NumElts; ++i)
6607 ShiftAmounts.push_back(C->getAggregateElement(i));
6608 } else if (isa<ScalableVectorType>(C->getType()))
6609 return false; // Can't tell, just return false to be safe
6610 else
6611 ShiftAmounts.push_back(C);
6613 bool Safe = llvm::all_of(ShiftAmounts, [](const Constant *C) {
6614 auto *CI = dyn_cast_or_null<ConstantInt>(C);
6615 return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
6616 });
6618 return Safe;
6619 }
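// Illustrative examples (added for clarity): for an i8 shift, a constant
// amount of 7 is in range (returns true), a constant 8 equals the bitwidth
// and would yield poison (returns false), and a non-constant or scalable
// vector amount is conservatively rejected.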
6621 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly,
6622 bool ConsiderFlagsAndMetadata) {
6624 if (ConsiderFlagsAndMetadata && Op->hasPoisonGeneratingFlagsOrMetadata())
6625 return true;
6627 unsigned Opcode = Op->getOpcode();
6629 // Check whether opcode is a poison/undef-generating operation
6630 switch (Opcode) {
6631 case Instruction::Shl:
6632 case Instruction::AShr:
6633 case Instruction::LShr:
6634 return !shiftAmountKnownInRange(Op->getOperand(1));
6635 case Instruction::FPToSI:
6636 case Instruction::FPToUI:
6637 // fptosi/ui yields poison if the resulting value does not fit in the
6638 // destination type.
6639 return true;
6640 case Instruction::Call:
6641 if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
6642 switch (II->getIntrinsicID()) {
6643 // TODO: Add more intrinsics.
6644 case Intrinsic::ctlz:
6645 case Intrinsic::cttz:
6646 case Intrinsic::abs:
6647 if (cast<ConstantInt>(II->getArgOperand(1))->isNullValue())
6648 return false;
6649 break;
6650 case Intrinsic::ctpop:
6651 case Intrinsic::bswap:
6652 case Intrinsic::bitreverse:
6653 case Intrinsic::fshl:
6654 case Intrinsic::fshr:
6655 case Intrinsic::smax:
6656 case Intrinsic::smin:
6657 case Intrinsic::umax:
6658 case Intrinsic::umin:
6659 case Intrinsic::ptrmask:
6660 case Intrinsic::fptoui_sat:
6661 case Intrinsic::fptosi_sat:
6662 case Intrinsic::sadd_with_overflow:
6663 case Intrinsic::ssub_with_overflow:
6664 case Intrinsic::smul_with_overflow:
6665 case Intrinsic::uadd_with_overflow:
6666 case Intrinsic::usub_with_overflow:
6667 case Intrinsic::umul_with_overflow:
6668 case Intrinsic::sadd_sat:
6669 case Intrinsic::uadd_sat:
6670 case Intrinsic::ssub_sat:
6671 case Intrinsic::usub_sat:
6672 return false;
6673 case Intrinsic::sshl_sat:
6674 case Intrinsic::ushl_sat:
6675 return !shiftAmountKnownInRange(II->getArgOperand(1));
6676 case Intrinsic::fma:
6677 case Intrinsic::fmuladd:
6678 case Intrinsic::sqrt:
6679 case Intrinsic::powi:
6680 case Intrinsic::sin:
6681 case Intrinsic::cos:
6682 case Intrinsic::pow:
6683 case Intrinsic::log:
6684 case Intrinsic::log10:
6685 case Intrinsic::log2:
6686 case Intrinsic::exp:
6687 case Intrinsic::exp2:
6688 case Intrinsic::fabs:
6689 case Intrinsic::copysign:
6690 case Intrinsic::floor:
6691 case Intrinsic::ceil:
6692 case Intrinsic::trunc:
6693 case Intrinsic::rint:
6694 case Intrinsic::nearbyint:
6695 case Intrinsic::round:
6696 case Intrinsic::roundeven:
6697 case Intrinsic::fptrunc_round:
6698 case Intrinsic::canonicalize:
6699 case Intrinsic::arithmetic_fence:
6700 case Intrinsic::minnum:
6701 case Intrinsic::maxnum:
6702 case Intrinsic::minimum:
6703 case Intrinsic::maximum:
6704 case Intrinsic::is_fpclass:
6705 return false;
6706 case Intrinsic::lround:
6707 case Intrinsic::llround:
6708 case Intrinsic::lrint:
6709 case Intrinsic::llrint:
6710 // If the value doesn't fit, an unspecified value is returned (but this
6711 // is not poison).
6712 return false;
6713 }
6714 }
6715 [[fallthrough]];
6716 case Instruction::CallBr:
6717 case Instruction::Invoke: {
6718 const auto *CB = cast<CallBase>(Op);
6719 return !CB->hasRetAttr(Attribute::NoUndef);
6720 }
6721 case Instruction::InsertElement:
6722 case Instruction::ExtractElement: {
6723 // If the index exceeds the length of the vector, the result is poison.
6724 auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
6725 unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
6726 auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
6727 if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
6728 return true;
6729 return false;
6730 }
6731 case Instruction::ShuffleVector: {
6732 // shufflevector may return undef.
6733 if (PoisonOnly)
6734 return false;
6735 ArrayRef<int> Mask = isa<ConstantExpr>(Op)
6736 ? cast<ConstantExpr>(Op)->getShuffleMask()
6737 : cast<ShuffleVectorInst>(Op)->getShuffleMask();
6738 return is_contained(Mask, PoisonMaskElem);
6739 }
6740 case Instruction::FNeg:
6741 case Instruction::PHI:
6742 case Instruction::Select:
6743 case Instruction::URem:
6744 case Instruction::SRem:
6745 case Instruction::ExtractValue:
6746 case Instruction::InsertValue:
6747 case Instruction::Freeze:
6748 case Instruction::ICmp:
6749 case Instruction::FCmp:
6750 case Instruction::FAdd:
6751 case Instruction::FSub:
6752 case Instruction::FMul:
6753 case Instruction::FDiv:
6754 case Instruction::FRem:
6755 return false;
6756 case Instruction::GetElementPtr:
6757 // inbounds is handled above
6758 // TODO: what about inrange on constexpr?
6759 return false;
6760 default: {
6761 const auto *CE = dyn_cast<ConstantExpr>(Op);
6762 if (isa<CastInst>(Op) || (CE && CE->isCast()))
6763 return false;
6764 else if (Instruction::isBinaryOp(Opcode))
6765 return false;
6766 // Be conservative and return true.
6767 return true;
6768 }
6769 }
6770 }
6772 bool llvm::canCreateUndefOrPoison(const Operator *Op,
6773 bool ConsiderFlagsAndMetadata) {
6774 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false,
6775 ConsiderFlagsAndMetadata);
6776 }
6778 bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata) {
6779 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true,
6780 ConsiderFlagsAndMetadata);
6781 }
6783 static bool directlyImpliesPoison(const Value *ValAssumedPoison,
6784 const Value *V, unsigned Depth) {
6785 if (ValAssumedPoison == V)
6786 return true;
6788 const unsigned MaxDepth = 2;
6789 if (Depth >= MaxDepth)
6790 return false;
6792 if (const auto *I = dyn_cast<Instruction>(V)) {
6793 if (any_of(I->operands(), [=](const Use &Op) {
6794 return propagatesPoison(Op) &&
6795 directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
6796 }))
6797 return true;
6799 // V = extractvalue V0, idx
6800 // V2 = extractvalue V0, idx2
6801 // V0's elements are all poison or not. (e.g., add_with_overflow)
6802 const WithOverflowInst *II;
6803 if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
6804 (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
6805 llvm::is_contained(II->args(), ValAssumedPoison)))
6806 return true;
6807 }
6808 return false;
6809 }
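// Illustrative example of the extractvalue case above (added for clarity):
//   %wo  = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
//   %sum = extractvalue { i32, i1 } %wo, 0
//   %ovf = extractvalue { i32, i1 } %wo, 1
// If %sum is poison then the whole %wo result is poison, so %ovf is poison
// as well; poison of one extracted element implies poison of the other.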
6811 static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
6812 unsigned Depth) {
6813 if (isGuaranteedNotToBePoison(ValAssumedPoison))
6814 return true;
6816 if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
6817 return true;
6819 const unsigned MaxDepth = 2;
6820 if (Depth >= MaxDepth)
6821 return false;
6823 const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
6824 if (I && !canCreatePoison(cast<Operator>(I))) {
6825 return all_of(I->operands(), [=](const Value *Op) {
6826 return impliesPoison(Op, V, Depth + 1);
6827 });
6828 }
6829 return false;
6830 }
6832 bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
6833 return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
6834 }
6836 static bool programUndefinedIfUndefOrPoison(const Value *V,
6837 bool PoisonOnly);
6839 static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
6840 AssumptionCache *AC,
6841 const Instruction *CtxI,
6842 const DominatorTree *DT,
6843 unsigned Depth, bool PoisonOnly) {
6844 if (Depth >= MaxAnalysisRecursionDepth)
6845 return false;
6847 if (isa<MetadataAsValue>(V))
6848 return false;
6850 if (const auto *A = dyn_cast<Argument>(V)) {
6851 if (A->hasAttribute(Attribute::NoUndef) ||
6852 A->hasAttribute(Attribute::Dereferenceable) ||
6853 A->hasAttribute(Attribute::DereferenceableOrNull))
6854 return true;
6855 }
6857 if (auto *C = dyn_cast<Constant>(V)) {
6858 if (isa<UndefValue>(C))
6859 return PoisonOnly && !isa<PoisonValue>(C);
6861 if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
6862 isa<ConstantPointerNull>(C) || isa<Function>(C))
6863 return true;
6865 if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
6866 return (PoisonOnly ? !C->containsPoisonElement()
6867 : !C->containsUndefOrPoisonElement()) &&
6868 !C->containsConstantExpression();
6869 }
6871 // Strip cast operations from a pointer value.
6872 // Note that stripPointerCastsSameRepresentation can strip off getelementptr
6873 // inbounds with zero offset. To guarantee that the result isn't poison, the
6874 // stripped pointer is checked: it has to point into an allocated object or
6875 // be null, so that `inbounds` getelementptrs with a zero offset cannot
6876 // produce poison.
6877 // It can also strip off addrspacecasts that do not change the bit
6878 // representation; we consider such an addrspacecast equivalent to a no-op.
6879 auto *StrippedV = V->stripPointerCastsSameRepresentation();
6880 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
6881 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
6882 return true;
6884 auto OpCheck = [&](const Value *V) {
6885 return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
6886 PoisonOnly);
6887 };
6889 if (auto *Opr = dyn_cast<Operator>(V)) {
6890 // If the value is a freeze instruction, then it can never
6891 // be undef or poison.
6892 if (isa<FreezeInst>(V))
6893 return true;
6895 if (const auto *CB = dyn_cast<CallBase>(V)) {
6896 if (CB->hasRetAttr(Attribute::NoUndef))
6897 return true;
6898 }
6900 if (const auto *PN = dyn_cast<PHINode>(V)) {
6901 unsigned Num = PN->getNumIncomingValues();
6902 bool IsWellDefined = true;
6903 for (unsigned i = 0; i < Num; ++i) {
6904 auto *TI = PN->getIncomingBlock(i)->getTerminator();
6905 if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
6906 DT, Depth + 1, PoisonOnly)) {
6907 IsWellDefined = false;
6908 break;
6909 }
6910 }
6911 if (IsWellDefined)
6912 return true;
6913 } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
6914 return true;
6915 }
6917 if (auto *I = dyn_cast<LoadInst>(V))
6918 if (I->hasMetadata(LLVMContext::MD_noundef) ||
6919 I->hasMetadata(LLVMContext::MD_dereferenceable) ||
6920 I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
6921 return true;
6923 if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
6924 return true;
6926 // CxtI may be null or a cloned instruction.
6927 if (!CtxI || !CtxI->getParent() || !DT)
6928 return false;
6930 auto *DNode = DT->getNode(CtxI->getParent());
6931 if (!DNode)
6932 // Unreachable block
6933 return false;
6935 // If V is used as a branch condition before reaching CtxI, V cannot be
6936 // undef or poison.
6937 // br V, BB1, BB2
6938 // BB1:
6939 // CtxI ; V cannot be undef or poison here
6940 auto *Dominator = DNode->getIDom();
6941 while (Dominator) {
6942 auto *TI = Dominator->getBlock()->getTerminator();
6944 Value *Cond = nullptr;
6945 if (auto BI = dyn_cast_or_null<BranchInst>(TI)) {
6946 if (BI->isConditional())
6947 Cond = BI->getCondition();
6948 } else if (auto SI = dyn_cast_or_null<SwitchInst>(TI)) {
6949 Cond = SI->getCondition();
6950 }
6952 if (Cond) {
6953 if (Cond == V)
6954 return true;
6955 else if (PoisonOnly && isa<Operator>(Cond)) {
6956 // For poison, we can analyze further
6957 auto *Opr = cast<Operator>(Cond);
6958 if (any_of(Opr->operands(),
6959 [V](const Use &U) { return V == U && propagatesPoison(U); }))
6960 return true;
6961 }
6962 }
6964 Dominator = Dominator->getIDom();
6965 }
6967 if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC))
6968 return true;
6970 return false;
6971 }
6973 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
6974 const Instruction *CtxI,
6975 const DominatorTree *DT,
6976 unsigned Depth) {
6977 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
6978 }
6980 bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
6981 const Instruction *CtxI,
6982 const DominatorTree *DT, unsigned Depth) {
6983 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
6984 }
6986 /// Return true if undefined behavior would provably be executed on the path to
6987 /// OnPathTo if Root produced a poison result. Note that this doesn't say
6988 /// anything about whether OnPathTo is actually executed or whether Root is
6989 /// actually poison. This can be used to assess whether a new use of Root can
6990 /// be added at a location which is control equivalent with OnPathTo (such as
6991 /// immediately before it) without introducing UB which didn't previously
6992 /// exist. Note that a false result conveys no information.
6993 bool llvm::mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
6994 Instruction *OnPathTo,
6995 DominatorTree *DT) {
6996 // Basic approach is to assume Root is poison, propagate poison forward
6997 // through all users we can easily track, and then check whether any of those
6998 // users provably trigger UB and must execute before our exiting block might
6999 // exit.
7001 // The set of all recursive users we've visited (which are assumed to all be
7002 // poison because of said visit)
7003 SmallSet<const Value *, 16> KnownPoison;
7004 SmallVector<const Instruction*, 16> Worklist;
7005 Worklist.push_back(Root);
7006 while (!Worklist.empty()) {
7007 const Instruction *I = Worklist.pop_back_val();
7009 // If we know this must trigger UB on a path leading our target.
7010 if (mustTriggerUB(I, KnownPoison) && DT->dominates(I, OnPathTo))
7011 return true;
7013 // If we can't analyze propagation through this instruction, just skip it
7014 // and transitive users. Safe as false is a conservative result.
7015 if (I != Root && !any_of(I->operands(), [&KnownPoison](const Use &U) {
7016 return KnownPoison.contains(U) && propagatesPoison(U);
7017 }))
7018 continue;
7020 if (KnownPoison.insert(I).second)
7021 for (const User *User : I->users())
7022 Worklist.push_back(cast<Instruction>(User));
7023 }
7025 // Might be non-UB, or might have a path we couldn't prove must execute on
7026 // way to exiting bb.
7027 return false;
7028 }
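// Illustrative use (hypothetical names, added for clarity): if Root is %x and
// a block dominating OnPathTo contains
//   %d = udiv i32 1, %x
// then %x being poison makes the divisor poison, which is immediate UB, so
// the function returns true and a new use of %x may be placed at OnPathTo.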
7030 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
7031 const DataLayout &DL,
7032 AssumptionCache *AC,
7033 const Instruction *CxtI,
7034 const DominatorTree *DT) {
7035 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
7036 Add, DL, AC, CxtI, DT);
7037 }
7039 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
7040 const Value *RHS,
7041 const DataLayout &DL,
7042 AssumptionCache *AC,
7043 const Instruction *CxtI,
7044 const DominatorTree *DT) {
7045 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
7046 }
7048 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
7049 // Note: An atomic operation isn't guaranteed to return in a reasonable amount
7050 // of time because it's possible for another thread to interfere with it for an
7051 // arbitrary length of time, but programs aren't allowed to rely on that.
7053 // If there is no successor, then execution can't transfer to it.
7054 if (isa<ReturnInst>(I))
7055 return false;
7056 if (isa<UnreachableInst>(I))
7057 return false;
7059 // Note: Do not add new checks here; instead, change Instruction::mayThrow or
7060 // Instruction::willReturn.
7062 // FIXME: Move this check into Instruction::willReturn.
7063 if (isa<CatchPadInst>(I)) {
7064 switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
7065 default:
7066 // A catchpad may invoke exception object constructors and such, which
7067 // in some languages can be arbitrary code, so be conservative by default.
7068 return false;
7069 case EHPersonality::CoreCLR:
7070 // For CoreCLR, it just involves a type test.
7071 return true;
7072 }
7073 }
7075 // An instruction that returns without throwing must transfer control flow
7076 // to a successor.
7077 return !I->mayThrow() && I->willReturn();
7078 }
7080 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
7081 // TODO: This is slightly conservative for invoke instruction since exiting
7082 // via an exception *is* normal control for them.
7083 for (const Instruction &I : *BB)
7084 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7085 return false;
7086 return true;
7087 }
7089 bool llvm::isGuaranteedToTransferExecutionToSuccessor(
7090 BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
7091 unsigned ScanLimit) {
7092 return isGuaranteedToTransferExecutionToSuccessor(make_range(Begin, End),
7093 ScanLimit);
7094 }
7096 bool llvm::isGuaranteedToTransferExecutionToSuccessor(
7097 iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) {
7098 assert(ScanLimit && "scan limit must be non-zero");
7099 for (const Instruction &I : Range) {
7100 if (isa<DbgInfoIntrinsic>(I))
7101 continue;
7102 if (--ScanLimit == 0)
7103 return false;
7104 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7105 return false;
7106 }
7107 return true;
7108 }
7110 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
7111 const Loop *L) {
7112 // The loop header is guaranteed to be executed for every iteration.
7114 // FIXME: Relax this constraint to cover all basic blocks that are
7115 // guaranteed to be executed at every iteration.
7116 if (I->getParent() != L->getHeader()) return false;
7118 for (const Instruction &LI : *L->getHeader()) {
7119 if (&LI == I) return true;
7120 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
7121 }
7122 llvm_unreachable("Instruction not contained in its own parent basic block.");
7123 }
7125 bool llvm::propagatesPoison(const Use &PoisonOp) {
7126 const Operator *I = cast<Operator>(PoisonOp.getUser());
7127 switch (I->getOpcode()) {
7128 case Instruction::Freeze:
7129 case Instruction::PHI:
7130 case Instruction::Invoke:
7131 return false;
7132 case Instruction::Select:
7133 return PoisonOp.getOperandNo() == 0;
7134 case Instruction::Call:
7135 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
7136 switch (II->getIntrinsicID()) {
7137 // TODO: Add more intrinsics.
7138 case Intrinsic::sadd_with_overflow:
7139 case Intrinsic::ssub_with_overflow:
7140 case Intrinsic::smul_with_overflow:
7141 case Intrinsic::uadd_with_overflow:
7142 case Intrinsic::usub_with_overflow:
7143 case Intrinsic::umul_with_overflow:
7144 // If an input is a vector containing a poison element, the
7145 // two output vectors (calculated results, overflow bits)'
7146 // corresponding lanes are poison.
7147 return true;
7148 case Intrinsic::ctpop:
7149 return true;
7150 }
7151 }
7152 return false;
7153 case Instruction::ICmp:
7154 case Instruction::FCmp:
7155 case Instruction::GetElementPtr:
7156 return true;
7157 default:
7158 if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
7159 return true;
7161 // Be conservative and return false.
7162 return false;
7163 }
7164 }
7166 void llvm::getGuaranteedWellDefinedOps(
7167 const Instruction *I, SmallVectorImpl<const Value *> &Operands) {
7168 switch (I->getOpcode()) {
7169 case Instruction::Store:
7170 Operands.push_back(cast<StoreInst>(I)->getPointerOperand());
7171 break;
7173 case Instruction::Load:
7174 Operands.push_back(cast<LoadInst>(I)->getPointerOperand());
7175 break;
7177 // Since the dereferenceable attribute implies noundef, atomic operations
7178 // also implicitly have noundef pointers too.
7179 case Instruction::AtomicCmpXchg:
7180 Operands.push_back(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
7181 break;
7183 case Instruction::AtomicRMW:
7184 Operands.push_back(cast<AtomicRMWInst>(I)->getPointerOperand());
7185 break;
7187 case Instruction::Call:
7188 case Instruction::Invoke: {
7189 const CallBase *CB = cast<CallBase>(I);
7190 if (CB->isIndirectCall())
7191 Operands.push_back(CB->getCalledOperand());
7192 for (unsigned i = 0; i < CB->arg_size(); ++i) {
7193 if (CB->paramHasAttr(i, Attribute::NoUndef) ||
7194 CB->paramHasAttr(i, Attribute::Dereferenceable))
7195 Operands.push_back(CB->getArgOperand(i));
7196 }
7197 break;
7198 }
7199 case Instruction::Ret:
7200 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef))
7201 Operands.push_back(I->getOperand(0));
7202 break;
7203 case Instruction::Switch:
7204 Operands.push_back(cast<SwitchInst>(I)->getCondition());
7205 break;
7206 case Instruction::Br: {
7207 auto *BR = cast<BranchInst>(I);
7208 if (BR->isConditional())
7209 Operands.push_back(BR->getCondition());
7210 break;
7211 }
7212 default:
7213 break;
7214 }
7215 }
7217 void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
7218 SmallVectorImpl<const Value *> &Operands) {
7219 getGuaranteedWellDefinedOps(I, Operands);
7220 switch (I->getOpcode()) {
7221 // Divisors of these operations are allowed to be partially undef.
7222 case Instruction::UDiv:
7223 case Instruction::SDiv:
7224 case Instruction::URem:
7225 case Instruction::SRem:
7226 Operands.push_back(I->getOperand(1));
7227 break;
7228 default:
7229 break;
7230 }
7231 }
7233 bool llvm::mustTriggerUB(const Instruction *I,
7234 const SmallPtrSetImpl<const Value *> &KnownPoison) {
7235 SmallVector<const Value *, 4> NonPoisonOps;
7236 getGuaranteedNonPoisonOps(I, NonPoisonOps);
7238 for (const auto *V : NonPoisonOps)
7239 if (KnownPoison.count(V))
7240 return true;
7242 return false;
7243 }
7245 static bool programUndefinedIfUndefOrPoison(const Value *V,
7246 bool PoisonOnly) {
7247 // We currently only look for uses of values within the same basic
7248 // block, as that makes it easier to guarantee that the uses will be
7249 // executed given that Inst is executed.
7251 // FIXME: Expand this to consider uses beyond the same basic block. To do
7252 // this, look out for the distinction between post-dominance and strong
7253 // post-dominance.
7254 const BasicBlock *BB = nullptr;
7255 BasicBlock::const_iterator Begin;
7256 if (const auto *Inst = dyn_cast<Instruction>(V)) {
7257 BB = Inst->getParent();
7258 Begin = Inst->getIterator();
7259 Begin++;
7260 } else if (const auto *Arg = dyn_cast<Argument>(V)) {
7261 BB = &Arg->getParent()->getEntryBlock();
7262 Begin = BB->begin();
7263 } else {
7264 return false;
7265 }
7267 // Limit number of instructions we look at, to avoid scanning through large
7268 // blocks. The current limit is chosen arbitrarily.
7269 unsigned ScanLimit = 32;
7270 BasicBlock::const_iterator End = BB->end();
7272 if (!PoisonOnly) {
7273 // Since undef does not propagate eagerly, be conservative & just check
7274 // whether a value is directly passed to an instruction that must take
7275 // well-defined operands.
7277 for (const auto &I : make_range(Begin, End)) {
7278 if (isa<DbgInfoIntrinsic>(I))
7279 continue;
7280 if (--ScanLimit == 0)
7281 break;
7283 SmallVector<const Value *, 4> WellDefinedOps;
7284 getGuaranteedWellDefinedOps(&I, WellDefinedOps);
7285 if (is_contained(WellDefinedOps, V))
7286 return true;
7288 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7289 break;
7290 }
7291 return false;
7292 }
7294 // Set of instructions that we have proved will yield poison if Inst
7295 // does.
7296 SmallSet<const Value *, 16> YieldsPoison;
7297 SmallSet<const BasicBlock *, 4> Visited;
7299 YieldsPoison.insert(V);
7300 Visited.insert(BB);
7302 while (true) {
7303 for (const auto &I : make_range(Begin, End)) {
7304 if (isa<DbgInfoIntrinsic>(I))
7305 continue;
7306 if (--ScanLimit == 0)
7307 return false;
7308 if (mustTriggerUB(&I, YieldsPoison))
7309 return true;
7310 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7311 return false;
7313 // If an operand is poison and propagates it, mark I as yielding poison.
7314 for (const Use &Op : I.operands()) {
7315 if (YieldsPoison.count(Op) && propagatesPoison(Op)) {
7316 YieldsPoison.insert(&I);
7317 break;
7318 }
7319 }
7321 // Special handling for select, which returns poison if its operand 0 is
7322 // poison (handled in the loop above) *or* if both its true/false operands
7323 // are poison (handled here).
7324 if (I.getOpcode() == Instruction::Select &&
7325 YieldsPoison.count(I.getOperand(1)) &&
7326 YieldsPoison.count(I.getOperand(2))) {
7327 YieldsPoison.insert(&I);
7328 }
7329 }
7331 BB = BB->getSingleSuccessor();
7332 if (!BB || !Visited.insert(BB).second)
7333 break;
7335 Begin = BB->getFirstNonPHI()->getIterator();
7336 End = BB->end();
7337 }
7338 return false;
7339 }
7341 bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
7342 return ::programUndefinedIfUndefOrPoison(Inst, false);
7343 }
7345 bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
7346 return ::programUndefinedIfUndefOrPoison(Inst, true);
7347 }
7349 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
7350 if (FMF.noNaNs())
7351 return true;
7353 if (auto *C = dyn_cast<ConstantFP>(V))
7354 return !C->isNaN();
7356 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
7357 if (!C->getElementType()->isFloatingPointTy())
7358 return false;
7359 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
7360 if (C->getElementAsAPFloat(I).isNaN())
7361 return false;
7362 }
7363 return true;
7364 }
7366 if (isa<ConstantAggregateZero>(V))
7367 return true;
7369 return false;
7370 }
7372 static bool isKnownNonZero(const Value *V) {
7373 if (auto *C = dyn_cast<ConstantFP>(V))
7374 return !C->isZero();
7376 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
7377 if (!C->getElementType()->isFloatingPointTy())
7378 return false;
7379 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
7380 if (C->getElementAsAPFloat(I).isZero())
7381 return false;
7382 }
7383 return true;
7384 }
7386 return false;
7387 }
7389 /// Match a clamp pattern for float types, ignoring NaNs and signed zeros.
7390 /// Given a non-min/max outer cmp/select from the clamp pattern, this
7391 /// function recognizes if it can be substituted by a "canonical" min/max
7392 /// pattern.
7393 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
7394 Value *CmpLHS, Value *CmpRHS,
7395 Value *TrueVal, Value *FalseVal,
7396 Value *&LHS, Value *&RHS) {
7397 // Try to match
7398 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
7399 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
7400 // and return description of the outer Max/Min.
7402 // First, check if select has inverse order:
7403 if (CmpRHS == FalseVal) {
7404 std::swap(TrueVal, FalseVal);
7405 Pred = CmpInst::getInversePredicate(Pred);
7406 }
7408 // Assume success now. If there's no match, callers should not use these anyway.
7409 LHS = TrueVal;
7410 RHS = FalseVal;
7412 const APFloat *FC1;
7413 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
7414 return {SPF_UNKNOWN, SPNB_NA, false};
7416 const APFloat *FC2;
7417 switch (Pred) {
7418 case CmpInst::FCMP_OLT:
7419 case CmpInst::FCMP_OLE:
7420 case CmpInst::FCMP_ULT:
7421 case CmpInst::FCMP_ULE:
7422 if (match(FalseVal,
7423 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
7424 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
7425 *FC1 < *FC2)
7426 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
7427 break;
7428 case CmpInst::FCMP_OGT:
7429 case CmpInst::FCMP_OGE:
7430 case CmpInst::FCMP_UGT:
7431 case CmpInst::FCMP_UGE:
7432 if (match(FalseVal,
7433 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
7434 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
7435 *FC1 > *FC2)
7436 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
7437 break;
7438 default:
7439 break;
7440 }
7442 return {SPF_UNKNOWN, SPNB_NA, false};
7443 }
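// Worked example (illustrative): clamping x to [1.0, 5.0] written as
//   x < 1.0 ? 1.0 : min(x, 5.0)
// is matched as max(1.0, min(x, 5.0)), i.e. SPF_FMAXNUM, because C1 = 1.0
// is less than C2 = 5.0.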
7445 /// Recognize variations of:
7446 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
7447 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
7448 Value *CmpLHS, Value *CmpRHS,
7449 Value *TrueVal, Value *FalseVal) {
7450 // Swap the select operands and predicate to match the patterns below.
7451 if (CmpRHS != TrueVal) {
7452 Pred = ICmpInst::getSwappedPredicate(Pred);
7453 std::swap(TrueVal, FalseVal);
7454 }
7455 const APInt *C1;
7456 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
7457 const APInt *C2;
7458 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
7459 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
7460 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
7461 return {SPF_SMAX, SPNB_NA, false};
7463 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
7464 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
7465 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
7466 return {SPF_SMIN, SPNB_NA, false};
7468 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
7469 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
7470 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
7471 return {SPF_UMAX, SPNB_NA, false};
7473 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
7474 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
7475 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
7476 return {SPF_UMIN, SPNB_NA, false};
7477 }
7478 return {SPF_UNKNOWN, SPNB_NA, false};
7479 }
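// Worked example (illustrative): with C1 = 10 and C2 = 100,
//   (x <s 10) ? 10 : smin(x, 100)
// is recognized as smax(smin(x, 100), 10), which clamps x to [10, 100].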
7481 /// Recognize variations of:
7482 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
7483 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
7484 Value *CmpLHS, Value *CmpRHS,
7485 Value *TVal, Value *FVal,
7486 unsigned Depth) {
7487 // TODO: Allow FP min/max with nnan/nsz.
7488 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
7490 Value *A = nullptr, *B = nullptr;
7491 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
7492 if (!SelectPatternResult::isMinOrMax(L.Flavor))
7493 return {SPF_UNKNOWN, SPNB_NA, false};
7495 Value *C = nullptr, *D = nullptr;
7496 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
7497 if (L.Flavor != R.Flavor)
7498 return {SPF_UNKNOWN, SPNB_NA, false};
7500 // We have something like: x Pred y ? min(a, b) : min(c, d).
7501 // Try to match the compare to the min/max operations of the select operands.
7502 // First, make sure we have the right compare predicate.
7503 switch (L.Flavor) {
7504 case SPF_SMIN:
7505 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
7506 Pred = ICmpInst::getSwappedPredicate(Pred);
7507 std::swap(CmpLHS, CmpRHS);
7508 }
7509 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
7510 break;
7511 return {SPF_UNKNOWN, SPNB_NA, false};
7512 case SPF_SMAX:
7513 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
7514 Pred = ICmpInst::getSwappedPredicate(Pred);
7515 std::swap(CmpLHS, CmpRHS);
7516 }
7517 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
7518 break;
7519 return {SPF_UNKNOWN, SPNB_NA, false};
7520 case SPF_UMIN:
7521 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
7522 Pred = ICmpInst::getSwappedPredicate(Pred);
7523 std::swap(CmpLHS, CmpRHS);
7524 }
7525 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
7526 break;
7527 return {SPF_UNKNOWN, SPNB_NA, false};
7528 case SPF_UMAX:
7529 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
7530 Pred = ICmpInst::getSwappedPredicate(Pred);
7531 std::swap(CmpLHS, CmpRHS);
7532 }
7533 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
7534 break;
7535 return {SPF_UNKNOWN, SPNB_NA, false};
7536 default:
7537 return {SPF_UNKNOWN, SPNB_NA, false};
7538 }
7540 // If there is a common operand in the already matched min/max and the other
7541 // min/max operands match the compare operands (either directly or inverted),
7542 // then this is min/max of the same flavor.
7544 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
7545 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
7546 if (D == B) {
7547 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
7548 match(A, m_Not(m_Specific(CmpRHS)))))
7549 return {L.Flavor, SPNB_NA, false};
7550 }
7551 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
7552 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
7553 if (C == B) {
7554 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
7555 match(A, m_Not(m_Specific(CmpRHS)))))
7556 return {L.Flavor, SPNB_NA, false};
7557 }
7558 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
7559 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
7560 if (D == A) {
7561 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
7562 match(B, m_Not(m_Specific(CmpRHS)))))
7563 return {L.Flavor, SPNB_NA, false};
7564 }
7565 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
7566 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
7567 if (C == A) {
7568 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
7569 match(B, m_Not(m_Specific(CmpRHS)))))
7570 return {L.Flavor, SPNB_NA, false};
7571 }
7573 return {SPF_UNKNOWN, SPNB_NA, false};
7574 }
7576 /// If the input value is the result of a 'not' op, constant integer, or vector
7577 /// splat of a constant integer, return the bitwise-not source value.
7578 /// TODO: This could be extended to handle non-splat vector integer constants.
7579 static Value *getNotValue(Value *V) {
7580 Value *NotV;
7581 if (match(V, m_Not(m_Value(NotV))))
7582 return NotV;
7584 const APInt *C;
7585 if (match(V, m_APInt(C)))
7586 return ConstantInt::get(V->getType(), ~(*C));
7588 return nullptr;
7589 }
7591 /// Match non-obvious integer minimum and maximum sequences.
7592 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
7593 Value *CmpLHS, Value *CmpRHS,
7594 Value *TrueVal, Value *FalseVal,
7595 Value *&LHS, Value *&RHS,
7596 unsigned Depth) {
7597 // Assume success. If there's no match, callers should not use these anyway.
7598 LHS = TrueVal;
7599 RHS = FalseVal;
7601 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
7602 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
7603 return SPR;
7605 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
7606 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
7607 return SPR;
7609 // Look through 'not' ops to find disguised min/max.
7610 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
7611 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
7612 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
7613 switch (Pred) {
7614 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
7615 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
7616 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
7617 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
7618 default: break;
7619 }
7620 }
7622 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
7623 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
7624 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
7625 switch (Pred) {
7626 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
7627 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
7628 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
7629 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
7630 default: break;
7631 }
7632 }
7634 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
7635 return {SPF_UNKNOWN, SPNB_NA, false};
7637 const APInt *C1;
7638 if (!match(CmpRHS, m_APInt(C1)))
7639 return {SPF_UNKNOWN, SPNB_NA, false};
7641 // An unsigned min/max can be written with a signed compare.
7642 const APInt *C2;
7643 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
7644 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
7645 // Is the sign bit set?
7646 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
7647 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
7648 if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue())
7649 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
7651 // Is the sign bit clear?
7652 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
7653 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
7654 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue())
7655 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
7656 }
7658 return {SPF_UNKNOWN, SPNB_NA, false};
7659 }
7661 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
7662 assert(X && Y && "Invalid operand");
7664 // X = sub (0, Y) || X = sub nsw (0, Y)
7665 if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
7666 (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
7667 return true;
7669 // Y = sub (0, X) || Y = sub nsw (0, X)
7670 if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
7671 (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
7672 return true;
7674 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
7675 Value *A, *B;
7676 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
7677 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
7678 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
7679 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
7680 }
7682 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
7683 FastMathFlags FMF,
7684 Value *CmpLHS, Value *CmpRHS,
7685 Value *TrueVal, Value *FalseVal,
7686 Value *&LHS, Value *&RHS,
7687 unsigned Depth) {
7688 bool HasMismatchedZeros = false;
7689 if (CmpInst::isFPPredicate(Pred)) {
7690 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
7691 // 0.0 operand, set the compare's 0.0 operands to that same value for the
7692 // purpose of identifying min/max. Disregard vector constants with undefined
7693 // elements because those cannot be back-propagated for analysis.
7694 Value *OutputZeroVal = nullptr;
7695 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
7696 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
7697 OutputZeroVal = TrueVal;
7698 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
7699 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
7700 OutputZeroVal = FalseVal;
7702 if (OutputZeroVal) {
7703 if (match(CmpLHS, m_AnyZeroFP()) && CmpLHS != OutputZeroVal) {
7704 HasMismatchedZeros = true;
7705 CmpLHS = OutputZeroVal;
7706 }
7707 if (match(CmpRHS, m_AnyZeroFP()) && CmpRHS != OutputZeroVal) {
7708 HasMismatchedZeros = true;
7709 CmpRHS = OutputZeroVal;
7710 }
7711 }
7712 }
7714 LHS = CmpLHS;
7715 RHS = CmpRHS;
7717 // Signed zero may return inconsistent results between implementations.
7718 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
7719 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
7720 // Therefore, we behave conservatively and only proceed if at least one of the
7721 // operands is known to not be zero or if we don't care about signed zero.
7722 switch (Pred) {
7723 default: break;
7724 case CmpInst::FCMP_OGT: case CmpInst::FCMP_OLT:
7725 case CmpInst::FCMP_UGT: case CmpInst::FCMP_ULT:
7726 if (!HasMismatchedZeros)
7727 break;
7728 [[fallthrough]];
7729 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
7730 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
7731 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
7732 !isKnownNonZero(CmpRHS))
7733 return {SPF_UNKNOWN, SPNB_NA, false};
7734 }
7736 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
7737 bool Ordered = false;
7739 // When given one NaN and one non-NaN input:
7740 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
7741 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
7742 // ordered comparison fails), which could be NaN or non-NaN.
7743 // so here we discover exactly what NaN behavior is required/accepted.
7744 if (CmpInst::isFPPredicate(Pred)) {
7745 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
7746 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
7748 if (LHSSafe && RHSSafe) {
7749 // Both operands are known non-NaN.
7750 NaNBehavior = SPNB_RETURNS_ANY;
7751 } else if (CmpInst::isOrdered(Pred)) {
7752 // An ordered comparison will return false when given a NaN, so it
7753 // returns the RHS.
7754 Ordered = true;
7755 if (LHSSafe)
7756 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
7757 NaNBehavior = SPNB_RETURNS_NAN;
7758 else if (RHSSafe)
7759 NaNBehavior = SPNB_RETURNS_OTHER;
7760 else
7761 // Completely unsafe.
7762 return {SPF_UNKNOWN, SPNB_NA, false};
7763 } else {
7764 Ordered = false;
7765 // An unordered comparison will return true when given a NaN, so it
7766 // returns the LHS.
7767 if (LHSSafe)
7768 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
7769 NaNBehavior = SPNB_RETURNS_OTHER;
7770 else if (RHSSafe)
7771 NaNBehavior = SPNB_RETURNS_NAN;
7772 else
7773 // Completely unsafe.
7774 return {SPF_UNKNOWN, SPNB_NA, false};
7775 }
7776 }
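// Illustrative example (added for clarity): in (a olt b) ? a : b with a known
// non-NaN, a NaN b makes the ordered compare false, so the select returns b,
// the NaN operand; that is the SPNB_RETURNS_NAN behavior recorded above.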
7778 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
7779 std::swap(CmpLHS, CmpRHS);
7780 Pred = CmpInst::getSwappedPredicate(Pred);
7781 if (NaNBehavior == SPNB_RETURNS_NAN)
7782 NaNBehavior = SPNB_RETURNS_OTHER;
7783 else if (NaNBehavior == SPNB_RETURNS_OTHER)
7784 NaNBehavior = SPNB_RETURNS_NAN;
7785 Ordered = !Ordered;
7786 }
7788 // ([if]cmp X, Y) ? X : Y
7789 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
7790 switch (Pred) {
7791 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
7792 case ICmpInst::ICMP_UGT:
7793 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
7794 case ICmpInst::ICMP_SGT:
7795 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
7796 case ICmpInst::ICMP_ULT:
7797 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
7798 case ICmpInst::ICMP_SLT:
7799 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
7800 case FCmpInst::FCMP_UGT:
7801 case FCmpInst::FCMP_UGE:
7802 case FCmpInst::FCMP_OGT:
7803 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
7804 case FCmpInst::FCMP_ULT:
7805 case FCmpInst::FCMP_ULE:
7806 case FCmpInst::FCMP_OLT:
7807 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
7808 }
7809 }
7811 if (isKnownNegation(TrueVal, FalseVal)) {
7812 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
7813 // match against either LHS or sext(LHS).
7814 auto MaybeSExtCmpLHS =
7815 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
7816 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
7817 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
7818 if (match(TrueVal, MaybeSExtCmpLHS)) {
7819 // Set the return values. If the compare uses the negated value (-X >s 0),
7820 // swap the return values because the negated value is always 'RHS'.
7821 LHS = TrueVal;
7822 RHS = FalseVal;
7823 if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
7824 std::swap(LHS, RHS);
7826 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
7827 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
7828 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
7829 return {SPF_ABS, SPNB_NA, false};
7831 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
7832 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
7833 return {SPF_ABS, SPNB_NA, false};
7835 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
7836 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
7837 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
7838 return {SPF_NABS, SPNB_NA, false};
7839 }
7840 else if (match(FalseVal, MaybeSExtCmpLHS)) {
7841 // Set the return values. If the compare uses the negated value (-X >s 0),
7842 // swap the return values because the negated value is always 'RHS'.
7843 LHS = FalseVal;
7844 RHS = TrueVal;
7845 if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
7846 std::swap(LHS, RHS);
7848 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
7849 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
7850 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
7851 return {SPF_NABS, SPNB_NA, false};
7853 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
7854 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
7855 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
7856 return {SPF_ABS, SPNB_NA, false};
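  // As a concrete (illustrative) instance of the ABS match above:
  //   %c = icmp sgt i32 %x, -1
  //   %neg = sub i32 0, %x
  //   %s = select i1 %c, i32 %x, i32 %neg
  // yields {SPF_ABS, SPNB_NA, false} with LHS = %x and RHS = %neg.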
  if (CmpInst::isIntPredicate(Pred))
    return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);

  // According to IEEE 754-2008 5.3.1, minNum(0.0, -0.0) and similar may
  // return either -0.0 or 0.0, so the fcmp/select pair has stricter
  // semantics than minNum. Be conservative in such cases.
  if (NaNBehavior != SPNB_RETURNS_ANY ||
      (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
       !isKnownNonZero(CmpRHS)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
}
/// Helps to match a select pattern in case of a type mismatch.
///
/// The function handles the case where the types of the true and false
/// values of a select instruction differ from the types of the cmp
/// instruction's operands because of a cast instruction. It checks whether
/// it is legal to move the cast after the "select". If so, it returns the
/// new second value of the "select" (with the assumption that the cast has
/// been moved):
/// 1. As the operand of the cast instruction when both values of the
///    "select" are the same cast instruction.
/// 2. As the restored constant (by applying the reverse cast operation)
///    when the first value of the "select" is a cast operation and the
///    second value is a constant.
/// NOTE: We return only the new second value because the first value can be
/// obtained as the operand of the cast instruction.
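/// For example (illustrative IR), with
///   %cmp = icmp slt i8 %x, %y
///   %xe = sext i8 %x to i32
///   %ye = sext i8 %y to i32
///   %sel = select i1 %cmp, i32 %xe, i32 %ye
/// the call lookThroughCast(%cmp, %xe, %ye, &Op) returns %y and sets Op to
/// Instruction::SExt.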
static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {
  auto *Cast1 = dyn_cast<CastInst>(V1);
  if (!Cast1)
    return nullptr;

  *CastOp = Cast1->getOpcode();
  Type *SrcTy = Cast1->getSrcTy();
  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, look through V1.
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;
  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    Constant *CmpConst;
    if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
        CmpConst->getType() == SrcTy) {
      // Here we have the following case:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %tr = trunc iN %x to iK
      //   %narrowsel = select i1 %cond, iK %tr, iK C
      //
      // We can always move trunc after select operation:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %widesel = select i1 %cond, iN %x, iN CmpConst
      //   %tr = trunc iN %widesel to iK
      //
      // Note that C could be extended in any way because we don't care about
      // upper bits after truncation. It cannot be an abs pattern, because
      // that would look like:
      //
      //   select i1 %cond, x, -x.
      //
      // So only the min/max pattern can be matched. Such a match requires
      // the widened C == CmpConst, so we set the widened C to CmpConst; the
      // condition trunc(CmpConst) == C is checked below.
      CastedTo = CmpConst;
    } else {
      CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    }
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
  Constant *CastedBack =
      ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
  if (CastedBack != C)
    return nullptr;

  return CastedTo;
}
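// A minimal usage sketch for the public entry point below (the names Sel, L,
// and R are illustrative, not from this file):
//
//   Value *L, *R;
//   SelectPatternResult SPR = matchSelectPattern(Sel, L, R);
//   if (SelectPatternResult::isMinOrMax(SPR.Flavor))
//     ; // Sel computes a min/max of L and R.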
SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp,
                                             unsigned Depth) {
  if (Depth >= MaxAnalysisRecursionDepth)
    return {SPF_UNKNOWN, SPNB_NA, false};

  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};

  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();

  return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
                                            CastOp, Depth);
}
SelectPatternResult llvm::matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp, unsigned Depth) {
  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS, Depth);
    }
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS, Depth);
    }
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS, Depth);
}
CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
  if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
  if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
  if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
  if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
  if (SPF == SPF_FMINNUM)
    return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
  if (SPF == SPF_FMAXNUM)
    return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
  llvm_unreachable("unhandled!");
}
SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
  if (SPF == SPF_SMIN) return SPF_SMAX;
  if (SPF == SPF_UMIN) return SPF_UMAX;
  if (SPF == SPF_SMAX) return SPF_SMIN;
  if (SPF == SPF_UMAX) return SPF_UMIN;
  llvm_unreachable("unhandled!");
}
Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
  switch (MinMaxID) {
  case Intrinsic::smax: return Intrinsic::smin;
  case Intrinsic::smin: return Intrinsic::smax;
  case Intrinsic::umax: return Intrinsic::umin;
  case Intrinsic::umin: return Intrinsic::umax;
  // Note that the next four intrinsics may produce the same result for the
  // original and the inverted case, even when X != Y, because NaN is handled
  // specially.
  case Intrinsic::maximum: return Intrinsic::minimum;
  case Intrinsic::minimum: return Intrinsic::maximum;
  case Intrinsic::maxnum: return Intrinsic::minnum;
  case Intrinsic::minnum: return Intrinsic::maxnum;
  default: llvm_unreachable("Unexpected intrinsic");
  }
}
APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) {
  switch (SPF) {
  case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth);
  case SPF_SMIN: return APInt::getSignedMinValue(BitWidth);
  case SPF_UMAX: return APInt::getMaxValue(BitWidth);
  case SPF_UMIN: return APInt::getMinValue(BitWidth);
  default: llvm_unreachable("Unexpected flavor");
  }
}
std::pair<Intrinsic::ID, bool>
llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
  // Check if VL contains select instructions that can be folded into a min/max
  // vector intrinsic and return the intrinsic if it is possible.
  // TODO: Support floating point min/max.
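  // For example (illustrative), a VL in which every element looks like
  //   %s = select (icmp slt i32 %a, %b), i32 %a, i32 %b
  // matches SPF_SMIN throughout and yields {Intrinsic::smin, true} when each
  // compare has a single use.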
  bool AllCmpSingleUse = true;
  SelectPatternResult SelectPattern;
  SelectPattern.Flavor = SPF_UNKNOWN;
  if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
        Value *LHS, *RHS;
        auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
        if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
            CurrentPattern.Flavor == SPF_FMINNUM ||
            CurrentPattern.Flavor == SPF_FMAXNUM ||
            !I->getType()->isIntOrIntVectorTy())
          return false;
        if (SelectPattern.Flavor != SPF_UNKNOWN &&
            SelectPattern.Flavor != CurrentPattern.Flavor)
          return false;
        SelectPattern = CurrentPattern;
        AllCmpSingleUse &=
            match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
        return true;
      })) {
    switch (SelectPattern.Flavor) {
    case SPF_SMIN:
      return {Intrinsic::smin, AllCmpSingleUse};
    case SPF_UMIN:
      return {Intrinsic::umin, AllCmpSingleUse};
    case SPF_SMAX:
      return {Intrinsic::smax, AllCmpSingleUse};
    case SPF_UMAX:
      return {Intrinsic::umax, AllCmpSingleUse};
    default:
      llvm_unreachable("unexpected select pattern flavor");
    }
  }
  return {Intrinsic::not_intrinsic, false};
}
bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
                                 Value *&Start, Value *&Step) {
  // Handle the case of a simple two-predecessor recurrence PHI.
  // There's a lot more that could theoretically be done here, but
  // this is sufficient to catch some interesting cases.
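  // For example, the canonical loop counter (illustrative IR):
  //   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
  //   %iv.next = add i32 %iv, 1
  // matches with BO = %iv.next, Start = 0, and Step = 1.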
  if (P->getNumIncomingValues() != 2)
    return false;

  for (unsigned i = 0; i != 2; ++i) {
    Value *L = P->getIncomingValue(i);
    Value *R = P->getIncomingValue(!i);
    Operator *LU = dyn_cast<Operator>(L);
    if (!LU)
      continue;
    unsigned Opcode = LU->getOpcode();

    switch (Opcode) {
    default:
      continue;
    // TODO: Expand list -- xor, div, gep, uaddo, etc..
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::Shl:
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Mul:
    case Instruction::FMul: {
      Value *LL = LU->getOperand(0);
      Value *LR = LU->getOperand(1);
      // Find a recurrence.
      if (LL == P)
        L = LR;
      else if (LR == P)
        L = LL;
      else
        continue; // Check for recurrence with L and R flipped.

      break; // Match!
    }
    }

    // We have matched a recurrence of the form:
    //   %iv = [R, %entry], [%iv.next, %backedge]
    //   %iv.next = binop %iv, L
    // OR
    //   %iv = [R, %entry], [%iv.next, %backedge]
    //   %iv.next = binop L, %iv
    BO = cast<BinaryOperator>(LU);
    Start = R;
    Step = L;
    return true;
  }
  return false;
}
bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
                                 Value *&Start, Value *&Step) {
  BinaryOperator *BO = nullptr;
  P = dyn_cast<PHINode>(I->getOperand(0));
  if (!P)
    P = dyn_cast<PHINode>(I->getOperand(1));
  return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
}
/// Return true if "icmp Pred LHS RHS" is always true.
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                            const Value *RHS, const DataLayout &DL,
                            unsigned Depth) {
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
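      // For example (illustrative), if the low four bits of X are known to
      // be zero, then (X | 3) is X +_{nuw} 3 and (X | 12) is X +_{nuw} 12,
      // so "(X | 3) u<= (X | 12)" is always true because 3 u<= 12.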
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        KnownBits Known(CA->getBitWidth());
        computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
                         /*CxtI*/ nullptr, /*DT*/ nullptr);
        if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}
/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return std::nullopt.
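/// For example (illustrative), "x s< y" implies "x s< (y +nsw 1)": taking
/// Pred = ICMP_SLT with ALHS = x, ARHS = y, BLHS = x, BRHS = y +nsw 1, we
/// have x s<= x and y s<= y + 1, so the SLT case below returns true.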
static std::optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS, const Value *BRHS,
                      const DataLayout &DL, unsigned Depth) {
  switch (Pred) {
  default:
    return std::nullopt;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
      return true;
    return std::nullopt;

  case CmpInst::ICMP_SGT:
  case CmpInst::ICMP_SGE:
    if (isTruePredicate(CmpInst::ICMP_SLE, ALHS, BLHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_SLE, BRHS, ARHS, DL, Depth))
      return true;
    return std::nullopt;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
      return true;
    return std::nullopt;

  case CmpInst::ICMP_UGT:
  case CmpInst::ICMP_UGE:
    if (isTruePredicate(CmpInst::ICMP_ULE, ALHS, BLHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_ULE, BRHS, ARHS, DL, Depth))
      return true;
    return std::nullopt;
  }
}
/// Return true if the operands of two compares (expanded as "L0 pred L1" and
/// "R0 pred R1") match. AreSwappedOps is set to true when the operands
/// match, but are swapped.
static bool areMatchingOperands(const Value *L0, const Value *L1,
                                const Value *R0, const Value *R1,
                                bool &AreSwappedOps) {
  bool AreMatchingOps = (L0 == R0 && L1 == R1);
  AreSwappedOps = (L0 == R1 && L1 == R0);
  return AreMatchingOps || AreSwappedOps;
}
/// Return true if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is true.
/// Return false if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is false.
/// Otherwise, return std::nullopt if we can't infer anything.
static std::optional<bool>
isImpliedCondMatchingOperands(CmpInst::Predicate LPred,
                              CmpInst::Predicate RPred, bool AreSwappedOps) {
  // Canonicalize the predicate as if the operands were not commuted.
  if (AreSwappedOps)
    RPred = ICmpInst::getSwappedPredicate(RPred);

  if (CmpInst::isImpliedTrueByMatchingCmp(LPred, RPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(LPred, RPred))
    return false;

  return std::nullopt;
}
/// Return true if "icmp LPred X, LC" implies "icmp RPred X, RC" is true.
/// Return false if "icmp LPred X, LC" implies "icmp RPred X, RC" is false.
/// Otherwise, return std::nullopt if we can't infer anything.
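/// For example (illustrative), "x u< 8" implies "x u< 16" is true: the
/// dominating region [0, 8) lies entirely inside [0, 16), so the difference
/// below is empty. Conversely, "x u< 8" implies "x u> 20" is false, because
/// the intersection of [0, 8) with (20, UINT_MAX] is empty.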
static std::optional<bool> isImpliedCondCommonOperandWithConstants(
    CmpInst::Predicate LPred, const APInt &LC, CmpInst::Predicate RPred,
    const APInt &RC) {
  ConstantRange DomCR = ConstantRange::makeExactICmpRegion(LPred, LC);
  ConstantRange CR = ConstantRange::makeExactICmpRegion(RPred, RC);
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;
  return std::nullopt;
}
/// Return true if LHS implies RHS (expanded to its components as "R0 RPred
/// R1") is true. Return false if LHS implies RHS is false. Otherwise, return
/// std::nullopt if we can't infer anything.
static std::optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
                                              CmpInst::Predicate RPred,
                                              const Value *R0, const Value *R1,
                                              const DataLayout &DL,
                                              bool LHSIsTrue, unsigned Depth) {
  Value *L0 = LHS->getOperand(0);
  Value *L1 = LHS->getOperand(1);

  // The rest of the logic assumes the LHS condition is true. If that's not
  // the case, invert the predicate to make it so.
  CmpInst::Predicate LPred =
      LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();

  // Can we infer anything when the two compares have matching operands?
  bool AreSwappedOps;
  if (areMatchingOperands(L0, L1, R0, R1, AreSwappedOps))
    return isImpliedCondMatchingOperands(LPred, RPred, AreSwappedOps);

  // Can we infer anything when the 0-operands match and the 1-operands are
  // constants (not necessarily matching)?
  const APInt *LC, *RC;
  if (L0 == R0 && match(L1, m_APInt(LC)) && match(R1, m_APInt(RC)))
    return isImpliedCondCommonOperandWithConstants(LPred, *LC, RPred, *RC);

  if (LPred == RPred)
    return isImpliedCondOperands(LPred, L0, L1, R0, R1, DL, Depth);

  return std::nullopt;
}
/// Return true if LHS implies RHS is true. Return false if LHS implies RHS
/// is false. Otherwise, return std::nullopt if we can't infer anything. We
/// expect the RHS to be an icmp and the LHS to be an 'and', 'or', or a
/// 'select' instruction.
static std::optional<bool>
isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
                   const Value *RHSOp0, const Value *RHSOp1,
                   const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
  // The LHS must be an 'or', 'and', or a 'select' instruction.
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or ||
          LHS->getOpcode() == Instruction::Select) &&
         "Expected LHS to be 'and', 'or', or 'select'.");

  assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");

  // If the result of an 'or' is false, then we know both legs of the 'or' are
  // false. Similarly, if the result of an 'and' is true, then we know both
  // legs of the 'and' are true.
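  // For example (illustrative), if "%c = and i1 %a, %b" is known true, then
  // %a is true on its own, so anything %a implies about the RHS holds here
  // as well.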
  const Value *ALHS, *ARHS;
  if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
      (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
    if (std::optional<bool> Implication = isImpliedCondition(
            ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
      return Implication;
    if (std::optional<bool> Implication = isImpliedCondition(
            ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
      return Implication;
    return std::nullopt;
  }
  return std::nullopt;
}
std::optional<bool>
llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
                         const Value *RHSOp0, const Value *RHSOp1,
                         const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
  // Bail out when we hit the limit.
  if (Depth == MaxAnalysisRecursionDepth)
    return std::nullopt;

  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
    return std::nullopt;

  assert(LHS->getType()->isIntOrIntVectorTy(1) &&
         "Expected integer type only!");

  // Both LHS and RHS are icmps.
  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
  if (LHSCmp)
    return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
                              Depth);

  // The LHS should be an 'or', 'and', or a 'select' instruction. We expect
  // the RHS to be an icmp.
  // FIXME: Add support for and/or/select on the RHS.
  if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
    if ((LHSI->getOpcode() == Instruction::And ||
         LHSI->getOpcode() == Instruction::Or ||
         LHSI->getOpcode() == Instruction::Select))
      return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
                                Depth);
  }
  return std::nullopt;
}
std::optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                             const DataLayout &DL,
                                             bool LHSIsTrue, unsigned Depth) {
  // LHS ==> RHS by definition
  if (LHS == RHS)
    return LHSIsTrue;

  if (const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS))
    return isImpliedCondition(LHS, RHSCmp->getPredicate(),
                              RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
                              LHSIsTrue, Depth);

  if (Depth == MaxAnalysisRecursionDepth)
    return std::nullopt;

  // LHS ==> (RHS1 || RHS2) if LHS ==> RHS1 or LHS ==> RHS2
  // LHS ==> !(RHS1 && RHS2) if LHS ==> !RHS1 or LHS ==> !RHS2
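  // For example (illustrative), "x u< 5" implies "(x u< 10) || %other",
  // whatever %other is, because the first disjunct is already implied.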
  const Value *RHS1, *RHS2;
  if (match(RHS, m_LogicalOr(m_Value(RHS1), m_Value(RHS2)))) {
    if (std::optional<bool> Imp =
            isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
      if (*Imp)
        return true;
    if (std::optional<bool> Imp =
            isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
      if (*Imp)
        return true;
  }
  if (match(RHS, m_LogicalAnd(m_Value(RHS1), m_Value(RHS2)))) {
    if (std::optional<bool> Imp =
            isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
      if (!*Imp)
        return false;
    if (std::optional<bool> Imp =
            isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
      if (!*Imp)
        return false;
  }

  return std::nullopt;
}
// Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
// condition dominating ContextI, or nullptr if no such condition is found.
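// For example (illustrative IR), given
//   pred:
//     %cond = icmp ult i32 %x, 10
//     br i1 %cond, label %ctx, label %other
// and a ContextI inside %ctx (whose only predecessor is %pred), the function
// returns {%cond, true}: %ctx only executes when %cond is true.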
static std::pair<Value *, bool>
getDomPredecessorCondition(const Instruction *ContextI) {
  if (!ContextI || !ContextI->getParent())
    return {nullptr, false};

  // TODO: This is a poor/cheap way to determine dominance. Should we use a
  // dominator tree (eg, from a SimplifyQuery) instead?
  const BasicBlock *ContextBB = ContextI->getParent();
  const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
  if (!PredBB)
    return {nullptr, false};

  // We need a conditional branch in the predecessor.
  Value *PredCond;
  BasicBlock *TrueBB, *FalseBB;
  if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
    return {nullptr, false};

  // The branch should get simplified. Don't bother simplifying this condition.
  if (TrueBB == FalseBB)
    return {nullptr, false};

  assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
         "Predecessor block does not point to successor?");

  // Is this condition implied by the predecessor condition?
  return {PredCond, TrueBB == ContextBB};
}
std::optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
                                                  const Instruction *ContextI,
                                                  const DataLayout &DL) {
  assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
  auto PredCond = getDomPredecessorCondition(ContextI);
  if (PredCond.first)
    return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
  return std::nullopt;
}
std::optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
                                                  const Value *LHS,
                                                  const Value *RHS,
                                                  const Instruction *ContextI,
                                                  const DataLayout &DL) {
  auto PredCond = getDomPredecessorCondition(ContextI);
  if (PredCond.first)
    return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
                              PredCond.second);
  return std::nullopt;
}
static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
                              APInt &Upper, const InstrInfoQuery &IIQ,
                              bool PreferSignedRange) {
  unsigned Width = Lower.getBitWidth();
  const APInt *C;
  switch (BO.getOpcode()) {
  case Instruction::Add:
    if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
      bool HasNSW = IIQ.hasNoSignedWrap(&BO);
      bool HasNUW = IIQ.hasNoUnsignedWrap(&BO);

      // If the caller expects a signed compare, then try to use a signed
      // range. Otherwise if both no-wraps are set, use the unsigned range
      // because it is never larger than the signed range. Example:
      // "add nuw nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125].
      if (PreferSignedRange && HasNSW && HasNUW)
        HasNUW = false;

      if (HasNUW) {
        // 'add nuw x, C' produces [C, UINT_MAX].
        Lower = *C;
      } else if (HasNSW) {
        if (C->isNegative()) {
          // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
          Lower = APInt::getSignedMinValue(Width);
          Upper = APInt::getSignedMaxValue(Width) + *C + 1;
        } else {
          // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
          Lower = APInt::getSignedMinValue(Width) + *C;
          Upper = APInt::getSignedMaxValue(Width) + 1;
        }
      }
    }
    break;
  case Instruction::And:
    if (match(BO.getOperand(1), m_APInt(C)))
      // 'and x, C' produces [0, C].
      Upper = *C + 1;
    break;

  case Instruction::Or:
    if (match(BO.getOperand(1), m_APInt(C)))
      // 'or x, C' produces [C, UINT_MAX].
      Lower = *C;
    break;

  case Instruction::AShr:
    if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
      // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
      Lower = APInt::getSignedMinValue(Width).ashr(*C);
      Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      unsigned ShiftAmount = Width - 1;
      if (!C->isZero() && IIQ.isExact(&BO))
        ShiftAmount = C->countr_zero();
      if (C->isNegative()) {
        // 'ashr C, x' produces [C, C >> (Width-1)]
        Lower = *C;
        Upper = C->ashr(ShiftAmount) + 1;
      } else {
        // 'ashr C, x' produces [C >> (Width-1), C]
        Lower = C->ashr(ShiftAmount);
        Upper = *C + 1;
      }
    }
    break;

  case Instruction::LShr:
    if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
      // 'lshr x, C' produces [0, UINT_MAX >> C].
      Upper = APInt::getAllOnes(Width).lshr(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      // 'lshr C, x' produces [C >> (Width-1), C].
      unsigned ShiftAmount = Width - 1;
      if (!C->isZero() && IIQ.isExact(&BO))
        ShiftAmount = C->countr_zero();
      Lower = C->lshr(ShiftAmount);
      Upper = *C + 1;
    }
    break;

  case Instruction::Shl:
    if (match(BO.getOperand(0), m_APInt(C))) {
      if (IIQ.hasNoUnsignedWrap(&BO)) {
        // 'shl nuw C, x' produces [C, C << CLZ(C)]
        Lower = *C;
        Upper = Lower.shl(Lower.countl_zero()) + 1;
      } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
        if (C->isNegative()) {
          // 'shl nsw C, x' produces [C << CLO(C)-1, C]
          unsigned ShiftAmount = C->countl_one() - 1;
          Lower = C->shl(ShiftAmount);
          Upper = *C + 1;
        } else {
          // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
          unsigned ShiftAmount = C->countl_zero() - 1;
          Lower = *C;
          Upper = C->shl(ShiftAmount) + 1;
        }
      }
    }
    break;
  case Instruction::SDiv:
    if (match(BO.getOperand(1), m_APInt(C))) {
      APInt IntMin = APInt::getSignedMinValue(Width);
      APInt IntMax = APInt::getSignedMaxValue(Width);
      if (C->isAllOnes()) {
        // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
        Lower = IntMin + 1;
        Upper = IntMax + 1;
      } else if (C->countl_zero() < Width - 1) {
        // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
        //    where C != -1 and C != 0 and C != 1
        Lower = IntMin.sdiv(*C);
        Upper = IntMax.sdiv(*C);
        if (Lower.sgt(Upper))
          std::swap(Lower, Upper);
        Upper = Upper + 1;
        assert(Upper != Lower && "Upper part of range has wrapped!");
      }
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      if (C->isMinSignedValue()) {
        // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
        Lower = *C;
        Upper = Lower.lshr(1) + 1;
      } else {
        // 'sdiv C, x' produces [-|C|, |C|].
        Upper = C->abs() + 1;
        Lower = (-Upper) + 1;
      }
    }
    break;
  case Instruction::UDiv:
    if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
      // 'udiv x, C' produces [0, UINT_MAX / C].
      Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      // 'udiv C, x' produces [0, C].
      Upper = *C + 1;
    }
    break;

  case Instruction::SRem:
    if (match(BO.getOperand(1), m_APInt(C))) {
      // 'srem x, C' produces (-|C|, |C|).
      Upper = C->abs();
      Lower = (-Upper) + 1;
    }
    break;

  case Instruction::URem:
    if (match(BO.getOperand(1), m_APInt(C)))
      // 'urem x, C' produces [0, C).
      Upper = *C;
    break;

  default:
    break;
  }
}
static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II) {
  unsigned Width = II.getType()->getScalarSizeInBits();
  const APInt *C;
  switch (II.getIntrinsicID()) {
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
    // Maximum of set/clear bits is the bit width.
    return ConstantRange(APInt::getZero(Width), APInt(Width, Width + 1));
  case Intrinsic::uadd_sat:
    // uadd.sat(x, C) produces [C, UINT_MAX].
    if (match(II.getOperand(0), m_APInt(C)) ||
        match(II.getOperand(1), m_APInt(C)))
      return ConstantRange::getNonEmpty(*C, APInt::getZero(Width));
    break;
  case Intrinsic::sadd_sat:
    if (match(II.getOperand(0), m_APInt(C)) ||
        match(II.getOperand(1), m_APInt(C))) {
      if (C->isNegative())
        // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
        return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
                                          APInt::getSignedMaxValue(Width) +
                                              *C + 1);

      // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
      return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) + *C,
                                        APInt::getSignedMaxValue(Width) + 1);
    }
    break;
  case Intrinsic::usub_sat:
    // usub.sat(C, x) produces [0, C].
    if (match(II.getOperand(0), m_APInt(C)))
      return ConstantRange::getNonEmpty(APInt::getZero(Width), *C + 1);

    // usub.sat(x, C) produces [0, UINT_MAX - C].
    if (match(II.getOperand(1), m_APInt(C)))
      return ConstantRange::getNonEmpty(APInt::getZero(Width),
                                        APInt::getMaxValue(Width) - *C + 1);
    break;
  case Intrinsic::ssub_sat:
    if (match(II.getOperand(0), m_APInt(C))) {
      if (C->isNegative())
        // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
        return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
                                          *C -
                                              APInt::getSignedMinValue(Width) +
                                              1);

      // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
      return ConstantRange::getNonEmpty(*C - APInt::getSignedMaxValue(Width),
                                        APInt::getSignedMaxValue(Width) + 1);
    } else if (match(II.getOperand(1), m_APInt(C))) {
      if (C->isNegative())
        // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
        return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) - *C,
                                          APInt::getSignedMaxValue(Width) + 1);

      // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
      return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
                                        APInt::getSignedMaxValue(Width) - *C +
                                            1);
    }
    break;
  case Intrinsic::umin:
  case Intrinsic::umax:
  case Intrinsic::smin:
  case Intrinsic::smax:
    if (!match(II.getOperand(0), m_APInt(C)) &&
        !match(II.getOperand(1), m_APInt(C)))
      break;

    switch (II.getIntrinsicID()) {
    case Intrinsic::umin:
      return ConstantRange::getNonEmpty(APInt::getZero(Width), *C + 1);
    case Intrinsic::umax:
      return ConstantRange::getNonEmpty(*C, APInt::getZero(Width));
    case Intrinsic::smin:
      return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
                                        *C + 1);
    case Intrinsic::smax:
      return ConstantRange::getNonEmpty(*C,
                                        APInt::getSignedMaxValue(Width) + 1);
    default:
      llvm_unreachable("Must be min/max intrinsic");
    }
    break;
  case Intrinsic::abs:
    // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
    // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
    if (match(II.getOperand(1), m_One()))
      return ConstantRange(APInt::getZero(Width),
                           APInt::getSignedMaxValue(Width) + 1);

    return ConstantRange(APInt::getZero(Width),
                         APInt::getSignedMinValue(Width) + 1);
  case Intrinsic::vscale:
    if (!II.getParent() || !II.getFunction())
      break;
    return getVScaleRange(II.getFunction(), Width);
  default:
    break;
  }

  return ConstantRange::getFull(Width);
}
static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
                                      APInt &Upper, const InstrInfoQuery &IIQ) {
  const Value *LHS = nullptr, *RHS = nullptr;
  SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
  if (R.Flavor == SPF_UNKNOWN)
    return;

  unsigned BitWidth = SI.getType()->getScalarSizeInBits();

  if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
    // If the negation part of the abs (in RHS) has the NSW flag,
    // then the result of abs(X) is [0..SIGNED_MAX],
    // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
    Lower = APInt::getZero(BitWidth);
    if (match(RHS, m_Neg(m_Specific(LHS))) &&
        IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
      Upper = APInt::getSignedMaxValue(BitWidth) + 1;
    else
      Upper = APInt::getSignedMinValue(BitWidth) + 1;
    return;
  }

  if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
    // The result of -abs(X) is <= 0.
    Lower = APInt::getSignedMinValue(BitWidth);
    Upper = APInt(BitWidth, 1);
    return;
  }

  const APInt *C;
  if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
    return;

  switch (R.Flavor) {
  case SPF_UMIN:
    Upper = *C + 1;
    break;
  case SPF_UMAX:
    Lower = *C;
    break;
  case SPF_SMIN:
    Lower = APInt::getSignedMinValue(BitWidth);
    Upper = *C + 1;
    break;
  case SPF_SMAX:
    Lower = *C;
    Upper = APInt::getSignedMaxValue(BitWidth) + 1;
    break;
  default:
    break;
  }
}
static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {
  // The maximum representable value of a half is 65504. For floats the
  // maximum value is 3.4e38, which requires roughly 129 bits.
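  // For example (illustrative), "%r = fptosi half %h to i32" can only produce
  // values in [-65504, 65504], so the range recorded below is far narrower
  // than the full i32 range.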
  unsigned BitWidth = I->getType()->getScalarSizeInBits();
  if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
    return;
  if (isa<FPToSIInst>(I) && BitWidth >= 17) {
    Lower = APInt(BitWidth, -65504);
    Upper = APInt(BitWidth, 65505);
  }

  if (isa<FPToUIInst>(I) && BitWidth >= 16) {
    // For a fptoui the lower limit is left as 0.
    Upper = APInt(BitWidth, 65505);
  }
}
ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
                                         bool UseInstrInfo, AssumptionCache *AC,
                                         const Instruction *CtxI,
                                         const DominatorTree *DT,
                                         unsigned Depth) {
  assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");

  if (Depth == MaxAnalysisRecursionDepth)
    return ConstantRange::getFull(V->getType()->getScalarSizeInBits());

  const APInt *C;
  if (match(V, m_APInt(C)))
    return ConstantRange(*C);

  InstrInfoQuery IIQ(UseInstrInfo);
  unsigned BitWidth = V->getType()->getScalarSizeInBits();
  ConstantRange CR = ConstantRange::getFull(BitWidth);
  if (auto *BO = dyn_cast<BinaryOperator>(V)) {
    APInt Lower = APInt(BitWidth, 0);
    APInt Upper = APInt(BitWidth, 0);
    // TODO: Return ConstantRange.
    setLimitsForBinOp(*BO, Lower, Upper, IIQ, ForSigned);
    CR = ConstantRange::getNonEmpty(Lower, Upper);
  } else if (auto *II = dyn_cast<IntrinsicInst>(V))
    CR = getRangeForIntrinsic(*II);
  else if (auto *SI = dyn_cast<SelectInst>(V)) {
    APInt Lower = APInt(BitWidth, 0);
    APInt Upper = APInt(BitWidth, 0);
    // TODO: Return ConstantRange.
    setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
    CR = ConstantRange::getNonEmpty(Lower, Upper);
  } else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V)) {
    APInt Lower = APInt(BitWidth, 0);
    APInt Upper = APInt(BitWidth, 0);
    // TODO: Return ConstantRange.
    setLimitForFPToI(cast<Instruction>(V), Lower, Upper);
    CR = ConstantRange::getNonEmpty(Lower, Upper);
  }

  if (auto *I = dyn_cast<Instruction>(V))
    if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
      CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));

  if (CtxI && AC) {
    // Try to restrict the range based on information from assumptions.
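    // For example (illustrative), a dominating "llvm.assume" of
    // "icmp ult i32 %V, 100" lets us intersect CR with [0, 100).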
    for (auto &AssumeVH : AC->assumptionsFor(V)) {
      if (!AssumeVH)
        continue;
      CallInst *I = cast<CallInst>(AssumeVH);
      assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
             "Got assumption for the wrong function!");
      assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
             "must be an assume intrinsic");

      if (!isValidAssumeForContext(I, CtxI, DT))
        continue;
      Value *Arg = I->getArgOperand(0);
      ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
      // Currently we just use information from comparisons.
      if (!Cmp || Cmp->getOperand(0) != V)
        continue;
      // TODO: Set "ForSigned" parameter via Cmp->isSigned()?
      ConstantRange RHS =
          computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false,
                               UseInstrInfo, AC, I, DT, Depth + 1);
      CR = CR.intersectWith(
          ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
    }
  }

  return CR;