//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Analysis/WithCache.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
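/// For example (illustrative): i32 and <4 x i32> both return 32 here, while a
/// pointer type returns the DataLayout's pointer size in bits.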
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static const Instruction *safeCxtI(const Value *V1, const Value *V2,
                                   const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V1);
  if (CxtI && CxtI->getParent())
    return CxtI;

  CxtI = dyn_cast<Instruction>(V2);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                   const APInt &DemandedElts,
                                   APInt &DemandedLHS, APInt &DemandedRHS) {
  if (isa<ScalableVectorType>(Shuf->getType())) {
    assert(DemandedElts == APInt(1, 1));
    DemandedLHS = DemandedRHS = DemandedElts;
    return true;
  }

  int NumElts =
      cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
  return llvm::getShuffleDemandedElts(NumElts, Shuf->getShuffleMask(),
                                      DemandedElts, DemandedLHS, DemandedRHS);
}

static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth,
                             const SimplifyQuery &Q);

static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const SimplifyQuery &Q) {
  // Since the number of lanes in a scalable vector is unknown at compile time,
  // we track one bit which is implicitly broadcast to all lanes. This means
  // that all lanes in a scalable vector are considered demanded.
  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
}

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT, bool UseInstrInfo) {
  ::computeKnownBits(
      V, Known, Depth,
      SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
}

void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                            KnownBits &Known, const DataLayout &DL,
                            unsigned Depth, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            bool UseInstrInfo) {
  ::computeKnownBits(
      V, DemandedElts, Known, Depth,
      SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
}

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT, bool UseInstrInfo) {
  return computeKnownBits(
      V, Depth, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT, bool UseInstrInfo) {
  return computeKnownBits(
      V, DemandedElts, Depth,
      SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
}

bool llvm::haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
                               const WithCache<const Value *> &RHSCache,
                               const SimplifyQuery &SQ) {
  const Value *LHS = LHSCache.getValue();
  const Value *RHS = RHSCache.getValue();

  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");

  // Look for an inverted mask: (X & ~M) op (Y & M).
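  // For example (illustrative): Y & M can only set bits inside M, while
  // X & ~M can only set bits outside M, so no bit position can be set on
  // both sides.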
  {
    Value *M;
    if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
        match(RHS, m_c_And(m_Specific(M), m_Value())))
      return true;
    if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
        match(LHS, m_c_And(m_Specific(M), m_Value())))
      return true;
  }

  // X op (Y & ~X)
  if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())) ||
      match(LHS, m_c_And(m_Not(m_Specific(RHS)), m_Value())))
    return true;

  // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
  // for constant Y.
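  // For example (illustrative): with Y = 0b0111, ((X & Y) ^ Y) == ~X & 0b0111,
  // i.e. the Y & ~X form above with the mask folded in.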
  Value *Y;
  if (match(RHS,
            m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) ||
      match(LHS, m_c_Xor(m_c_And(m_Specific(RHS), m_Value(Y)), m_Deferred(Y))))
    return true;

  // Peek through extends to find a 'not' of the other side:
  // (ext Y) op ext(~Y)
  // (ext ~Y) op ext(Y)
  if ((match(LHS, m_ZExtOrSExt(m_Value(Y))) &&
       match(RHS, m_ZExtOrSExt(m_Not(m_Specific(Y))))) ||
      (match(RHS, m_ZExtOrSExt(m_Value(Y))) &&
       match(LHS, m_ZExtOrSExt(m_Not(m_Specific(Y))))))
    return true;

  // Look for: (A & B) op ~(A | B)
  {
    Value *A, *B;
    if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
        match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return true;
    if (match(RHS, m_And(m_Value(A), m_Value(B))) &&
        match(LHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return true;
  }

  return KnownBits::haveNoCommonBitsSet(LHSCache.getKnownBits(SQ),
                                        RHSCache.getKnownBits(SQ));
}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
  return !I->user_empty() && all_of(I->users(), [](const User *U) {
    ICmpInst::Predicate P;
    return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
  });
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const SimplifyQuery &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth,
      SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const SimplifyQuery &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth,
                           const SimplifyQuery &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(
      V, Depth, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
                            const SimplifyQuery &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(
      V1, V2, 0,
      SimplifyQuery(DL, DT, AC, safeCxtI(V2, V1, CxtI), UseInstrInfo));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const SimplifyQuery &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth,
      SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const SimplifyQuery &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const SimplifyQuery &Q) {
  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
}

unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
                                         unsigned Depth, AssumptionCache *AC,
                                         const Instruction *CxtI,
                                         const DominatorTree *DT) {
  unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
  return V->getType()->getScalarSizeInBits() - SignBits + 1;
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW, const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const SimplifyQuery &Q) {
  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);

  // If one operand is unknown and we have no nowrap information,
  // the result will be unknown independently of the second operand.
  if (KnownOut.isUnknown() && !NSW)
    return;

  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
  KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                const APInt &DemandedElts, KnownBits &Known,
                                KnownBits &Known2, unsigned Depth,
                                const SimplifyQuery &Q) {
  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
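      // For example (illustrative): with nsw, (-3) * y for y known
      // non-negative and known non-zero must be negative; if y might be
      // zero, the product might be zero as well.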
      if (!isKnownNonNegative)
        isKnownNegative =
            (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
             Known2.isNonZero()) ||
            (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
    }
  }

  bool SelfMultiply = Op0 == Op1;
  // TODO: SelfMultiply can be poison, but not undef.
  if (SelfMultiply)
    SelfMultiply &=
        isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1);
  Known = KnownBits::mul(Known, Known2, SelfMultiply);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
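    // For example (illustrative): for the range [0, 10) on an i64 value,
    // getUnsignedMax() == 9, so the 60 high bits shared by 0..9 become known
    // zero below.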
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countl_zero();
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
    Known.One &= UnsignedMax & Mask;
    Known.Zero &= ~UnsignedMax & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
          return EphValues.count(U);
        })) {
      if (V == E)
        return true;

      if (V == I || (isa<Instruction>(V) &&
                     !cast<Instruction>(V)->mayHaveSideEffects() &&
                     !cast<Instruction>(V)->isTerminator())) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          append_range(WorkSet, U->operands());
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
    return CI->isAssumeLikeIntrinsic();

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Inv->getParent() == CxtI->getParent()) {
    // If Inv and CxtI are in the same block, check if the assume (Inv) is first
    // in the BB.
    if (Inv->comesBefore(CxtI))
      return true;

    // Don't let an assume affect itself - this would cause the problems
    // `isEphemeralValueOf` is trying to prevent, and it would also make
    // the loop below go out of bounds.
    if (Inv == CxtI)
      return false;

    // The context comes first, but they're both in the same block.
    // Make sure there is nothing in between that might interrupt
    // the control flow, not even CxtI itself.
    // We limit the scan distance between the assume and its context instruction
    // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
    // it can be adjusted if needed (could be turned into a cl::opt).
    auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
    if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
      return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  // Inv and CxtI are in different blocks.
  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  return false;
}

// TODO: cmpExcludesZero misses many cases where `RHS` is non-constant but
// we still have enough information about `RHS` to conclude non-zero. For
// example Pred=EQ, RHS=isKnownNonZero. cmpExcludesZero is called in loops
// so the extra compile time may not be worth it, but possibly a second API
// should be created for use outside of loops.
static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
  // v u> y implies v != 0.
  if (Pred == ICmpInst::ICMP_UGT)
    return true;

  // Special-case v != 0 to also handle v != null.
  if (Pred == ICmpInst::ICMP_NE)
    return match(RHS, m_Zero());

  // All other predicates - rely on generic ConstantRange handling.
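  // For example (illustrative): Pred = SLT with C = 0 gives the true region
  // [INT_MIN, 0), which excludes zero; Pred = ULT with C = 8 gives [0, 8),
  // which contains zero, so nothing can be concluded.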
  const APInt *C;
  auto Zero = APInt::getZero(RHS->getType()->getScalarSizeInBits());
  if (match(RHS, m_APInt(C))) {
    ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
    return !TrueValues.contains(Zero);
  }

  auto *VC = dyn_cast<ConstantDataVector>(RHS);
  if (VC == nullptr)
    return false;

  for (unsigned ElemIdx = 0, NElem = VC->getNumElements(); ElemIdx < NElem;
       ++ElemIdx) {
    ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
        Pred, VC->getElementAsAPInt(ElemIdx));
    if (TrueValues.contains(Zero))
      return false;
  }
  return true;
}

static bool isKnownNonZeroFromAssume(const Value *V, const SimplifyQuery &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return false;

  if (Q.CxtI && V->getType()->isPointerTy()) {
    SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
    if (!NullPointerIsDefined(Q.CxtI->getFunction(),
                              V->getType()->getPointerAddressSpace()))
      AttrKinds.push_back(Attribute::Dereferenceable);

    if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
      return true;
  }

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getFunction() == Q.CxtI->getFunction() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *RHS;
    CmpInst::Predicate Pred;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
    if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
      return false;

    if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
      return true;
  }

  return false;
}

static void computeKnownBitsFromCmp(const Value *V, const ICmpInst *Cmp,
                                    KnownBits &Known, unsigned Depth,
                                    const SimplifyQuery &Q) {
  unsigned BitWidth = Known.getBitWidth();
  // We are attempting to compute known bits for the operands of an assume.
  // Do not try to use other assumptions for those recursive calls because
  // that can lead to mutual recursion and a compile-time explosion.
  // An example of the mutual recursion: computeKnownBits can call
  // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
  // and so on.
  SimplifyQuery QueryNoAC = Q;
  QueryNoAC.AC = nullptr;

  // Note that ptrtoint may change the bitwidth.
  Value *A, *B;
  auto m_V =
      m_CombineOr(m_Specific(V), m_PtrToIntSameSize(Q.DL, m_Specific(V)));

  CmpInst::Predicate Pred;
  uint64_t C;
  switch (Cmp->getPredicate()) {
  case ICmpInst::ICMP_EQ:
    // assume(v = a)
    if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      Known = Known.unionWith(RHSKnown);
      // assume(v & b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      KnownBits MaskKnown = computeKnownBits(B, Depth + 1, QueryNoAC);

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
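      // For example (illustrative): assume((v & 0xff) == 0x12) pins the low
      // eight bits of v to 0x12 and says nothing about the remaining bits.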
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
      // assume(~(v & b) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      KnownBits MaskKnown = computeKnownBits(B, Depth + 1, QueryNoAC);

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
      // assume(v | b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      KnownBits BKnown = computeKnownBits(B, Depth + 1, QueryNoAC);

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      // assume(~(v | b) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      KnownBits BKnown = computeKnownBits(B, Depth + 1, QueryNoAC);

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      // assume(v ^ b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      KnownBits BKnown = computeKnownBits(B, Depth + 1, QueryNoAC);

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
      // assume(v << c = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);

      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C);
      RHSKnown.One.lshrInPlace(C);
      Known = Known.unionWith(RHSKnown);
      // assume(~(v << c) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One |= RHSKnown.Zero;
      // assume(v >> c = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.Zero << C;
      Known.One |= RHSKnown.One << C;
      // assume(~(v >> c) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.One << C;
      Known.One |= RHSKnown.Zero << C;
    }
    break;
  case ICmpInst::ICMP_NE: {
    // assume (v & b != 0) where b is a power of 2
    const APInt *BPow2;
    if (match(Cmp, m_ICmp(Pred, m_c_And(m_V, m_Power2(BPow2)), m_Zero()))) {
      Known.One |= *BPow2;
    }
    break;
  }
  default:
    const APInt *Offset = nullptr;
    if (match(Cmp, m_ICmp(Pred, m_CombineOr(m_V, m_Add(m_V, m_APInt(Offset))),
                          m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      ConstantRange RHSRange =
          ConstantRange::fromKnownBits(RHSKnown, Cmp->isSigned());
      ConstantRange LHSRange =
          ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
      if (Offset)
        LHSRange = LHSRange.sub(*Offset);
      Known = Known.unionWith(LHSRange.toKnownBits());
    }
    break;
  }
}

void llvm::computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                      unsigned Depth, const SimplifyQuery &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Refine Known set if the pointer alignment is set by assume bundles.
  if (V->getType()->isPointerTy()) {
    if (RetainedKnowledge RK = getKnowledgeValidInContext(
            V, { Attribute::Alignment }, Q.CxtI, Q.DT, Q.AC)) {
      if (isPowerOf2_64(RK.ArgValue))
        Known.Zero.setLowBits(Log2_64(RK.ArgValue));
    }
  }

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      (void)BitWidth;
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      (void)BitWidth;
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxAnalysisRecursionDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
      continue;

    computeKnownBitsFromCmp(V, Cmp, Known, Depth, Q);
  }

  // Conflicting assumption: Undefined behavior will occur on this execution
  // path.
  if (Known.hasConflict())
    Known.resetAll();
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known and on return
/// contains the known bits of the shift value source. KF is an
/// operator-specific function that, given the known-bits and a shift amount,
/// computes the implied known-bits of the shift operator's result for that
/// shift amount. The results from calling KF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, const APInt &DemandedElts, KnownBits &Known,
    KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q,
    function_ref<KnownBits(const KnownBits &, const KnownBits &, bool)> KF) {
  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
  computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
  // To limit compile-time impact, only query isKnownNonZero() if we know at
  // least something about the shift amount.
  bool ShAmtNonZero =
      Known.isNonZero() ||
      (Known.getMaxValue().ult(Known.getBitWidth()) &&
       isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q));
  Known = KF(Known2, Known, ShAmtNonZero);
}

static KnownBits
getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts,
                         const KnownBits &KnownLHS, const KnownBits &KnownRHS,
                         unsigned Depth, const SimplifyQuery &Q) {
  unsigned BitWidth = KnownLHS.getBitWidth();
  KnownBits KnownOut(BitWidth);
  bool IsAnd = false;
  bool HasKnownOne = !KnownLHS.One.isZero() || !KnownRHS.One.isZero();
  Value *X = nullptr, *Y = nullptr;

  switch (I->getOpcode()) {
  case Instruction::And:
    KnownOut = KnownLHS & KnownRHS;
    IsAnd = true;
    // and(x, -x) is a common idiom that will clear all but the lowest set
    // bit. If we have a single known bit in x, we can clear all bits
    // above it.
    // TODO: instcombine often reassociates independent `and` which can hide
    // this pattern. Try to match and(x, and(-x, y)) / and(and(x, y), -x).
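    // For example (illustrative): if the low three bits of x are known to be
    // 0b100, then x & -x is exactly 0b100, so every higher bit of the result
    // is known zero.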
    if (HasKnownOne && match(I, m_c_And(m_Value(X), m_Neg(m_Deferred(X))))) {
      // -(-x) == x so using whichever (LHS/RHS) gets us a better result.
      if (KnownLHS.countMaxTrailingZeros() <= KnownRHS.countMaxTrailingZeros())
        KnownOut = KnownLHS.blsi();
      else
        KnownOut = KnownRHS.blsi();
    }
    break;
  case Instruction::Or:
    KnownOut = KnownLHS | KnownRHS;
    break;
  case Instruction::Xor:
    KnownOut = KnownLHS ^ KnownRHS;
    // xor(x, x-1) is a common idiom that will clear all but the lowest set
    // bit. If we have a single known bit in x, we can clear all bits
    // above it.
    // TODO: xor(x, x-1) is often rewritten as xor(x, x-C) where C != -1,
    // but for the purpose of demanded bits (xor(x, x-C) & Demanded) ==
    // (xor(x, x-1) & Demanded). Extend the xor pattern to use arbitrary C
    // if xor(x, x-C) is the same as xor(x, x-1).
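    // For example (illustrative): x = 0b101000 gives x - 1 = 0b100111, so
    // x ^ (x - 1) = 0b001111: ones up to and including the lowest set bit.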
    if (HasKnownOne &&
        match(I, m_c_Xor(m_Value(X), m_c_Add(m_Deferred(X), m_AllOnes())))) {
      const KnownBits &XBits = I->getOperand(0) == X ? KnownLHS : KnownRHS;
      KnownOut = XBits.blsmsk();
    }
    break;
  default:
    llvm_unreachable("Invalid Op used in 'analyzeKnownBitsFromAndXorOr'");
  }

  // and(x, add (x, -1)) is a common idiom that always clears the low bit;
  // xor/or(x, add (x, -1)) is an idiom that will always set the low bit.
  // Here we handle the more general case of adding any odd number by
  // matching the form and/xor/or(x, add(x, y)) where y is odd.
  // TODO: This could be generalized to clearing any bit set in y where the
  // following bit is known to be unset in y.
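  // For example (illustrative): x and x + 1 always differ in bit 0, so
  // and(x, x + 1) clears bit 0 while or/xor(x, x + 1) sets it; adding any
  // odd y flips bit 0 in the same way.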
  if (!KnownOut.Zero[0] && !KnownOut.One[0] &&
      (match(I, m_c_BinOp(m_Value(X), m_c_Add(m_Deferred(X), m_Value(Y)))) ||
       match(I, m_c_BinOp(m_Value(X), m_Sub(m_Deferred(X), m_Value(Y)))) ||
       match(I, m_c_BinOp(m_Value(X), m_Sub(m_Value(Y), m_Deferred(X)))))) {
    KnownBits KnownY(BitWidth);
    computeKnownBits(Y, DemandedElts, KnownY, Depth + 1, Q);
    if (KnownY.countMinTrailingOnes() > 0) {
      if (IsAnd)
        KnownOut.Zero.setBit(0);
      else
        KnownOut.One.setBit(0);
    }
  }
  return KnownOut;
}

// Public so this can be used in `SimplifyDemandedUseBits`.
KnownBits llvm::analyzeKnownBitsFromAndXorOr(
    const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS,
    unsigned Depth, const DataLayout &DL, AssumptionCache *AC,
    const Instruction *CxtI, const DominatorTree *DT, bool UseInstrInfo) {
  auto *FVTy = dyn_cast<FixedVectorType>(I->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);

  return getKnownBitsFromAndXorOr(
      I, DemandedElts, KnownLHS, KnownRHS, Depth,
      SimplifyQuery(DL, DT, AC, safeCxtI(I, CxtI), UseInstrInfo));
}

ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) {
  Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
  // Without vscale_range, we only know that vscale is non-zero.
  if (!Attr.isValid())
    return ConstantRange(APInt(BitWidth, 1), APInt::getZero(BitWidth));

  unsigned AttrMin = Attr.getVScaleRangeMin();
  // Minimum is larger than vscale width, result is always poison.
  if ((unsigned)llvm::bit_width(AttrMin) > BitWidth)
    return ConstantRange::getEmpty(BitWidth);

  APInt Min(BitWidth, AttrMin);
  std::optional<unsigned> AttrMax = Attr.getVScaleRangeMax();
  if (!AttrMax || (unsigned)llvm::bit_width(*AttrMax) > BitWidth)
    return ConstantRange(Min, APInt::getZero(BitWidth));

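  // For example (illustrative): vscale_range(2,4) yields the half-open range
  // [2, 5), i.e. vscale is between 2 and 4 inclusive.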
  return ConstantRange(Min, APInt(BitWidth, *AttrMax) + 1);
}

static void computeKnownBitsFromOperator(const Operator *I,
                                         const APInt &DemandedElts,
                                         KnownBits &Known, unsigned Depth,
                                         const SimplifyQuery &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(BitWidth);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
    break;
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
    break;
  case Instruction::Xor:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
    break;
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
                        Known, Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known =
        KnownBits::udiv(Known, Known2, Q.IIQ.isExact(cast<BinaryOperator>(I)));
    break;
  }
  case Instruction::SDiv: {
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known =
        KnownBits::sdiv(Known, Known2, Q.IIQ.isExact(cast<BinaryOperator>(I)));
    break;
  }
  case Instruction::Select: {
    const Value *LHS = nullptr, *RHS = nullptr;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
      switch (SPF) {
      default:
        llvm_unreachable("Unhandled select pattern flavor!");
      case SPF_SMAX:
        Known = KnownBits::smax(Known, Known2);
        break;
      case SPF_SMIN:
        Known = KnownBits::smin(Known, Known2);
        break;
      case SPF_UMAX:
        Known = KnownBits::umax(Known, Known2);
        break;
      case SPF_UMIN:
        Known = KnownBits::umin(Known, Known2);
        break;
      }
      break;
    }

    computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    // Only known if known in both the LHS and RHS.
    Known = Known.intersectWith(Known2);

    if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (match(RHS, m_Neg(m_Specific(LHS))) &&
          Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RHS)))
        Known.Zero.setSignBit();
    }

    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    [[fallthrough]];
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.anyextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }

    // Handle cast from vector integer type to scalar or vector integer.
    auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
    if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
        !I->getType()->isIntOrIntVectorTy() ||
        isa<ScalableVectorType>(I->getType()))
      break;

    // Look through a cast from narrow vector elements to wider type.
    // Examples: v4i32 -> v2i64, v3i8 -> v24
    unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
    if (BitWidth % SubBitWidth == 0) {
      // Known bits are automatically intersected across demanded elements of a
      // vector. So for example, if a bit is computed as known zero, it must be
      // zero across all demanded elements of the vector.
      //
      // For this bitcast, each demanded element of the output is sub-divided
      // across a set of smaller vector elements in the source vector. To get
      // the known bits for an entire element of the output, compute the known
      // bits for each sub-element sequentially. This is done by shifting the
      // one-set-bit demanded elements parameter across the sub-elements for
      // consecutive calls to computeKnownBits. We are using the demanded
      // elements parameter as a mask operator.
      //
      // The known bits of each sub-element are then inserted into place
      // (dependent on endian) to form the full result of known bits.
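      //
      // For example (illustrative): bitcasting <4 x i16> to <2 x i32> with
      // only output element 1 demanded reads source elements 2 and 3 and
      // inserts their known bits at offsets 0 and 16 (little endian).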
      unsigned NumElts = DemandedElts.getBitWidth();
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);
      }

      KnownBits KnownSrc(SubBitWidth);
      for (unsigned i = 0; i != SubScale; ++i) {
        computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
                         Depth + 1, Q);
        unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
        Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
      }
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    bool NUW = Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(I));
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KF = [NUW, NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt,
                         bool ShAmtNonZero) {
      return KnownBits::shl(KnownVal, KnownAmt, NUW, NSW, ShAmtNonZero);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Trailing zeros of a left-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setLowBits(C->countr_zero());
    break;
  }
  case Instruction::LShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt,
                 bool ShAmtNonZero) {
      return KnownBits::lshr(KnownVal, KnownAmt, ShAmtNonZero);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Leading zeros of a right-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setHighBits(C->countl_zero());
    break;
  }
  case Instruction::AShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt,
                 bool ShAmtNonZero) {
      return KnownBits::ashr(KnownVal, KnownAmt, ShAmtNonZero);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::srem(Known, Known2);
    break;

  case Instruction::URem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::urem(Known, Known2);
    break;
  case Instruction::Alloca:
    Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
    break;
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // Accumulate the constant indices in a separate variable
    // to minimize the number of calls to computeForAddSub.
    APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      // TrailZ can only become smaller, short-circuit if we hit zero.
      if (Known.isUnknown())
        break;

      Value *Index = I->getOperand(i);

      // Handle case when index is zero.
      Constant *CIndex = dyn_cast<Constant>(Index);
      if (CIndex && CIndex->isZeroValue())
        continue;

      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        assert(CIndex &&
               "Access to structure field must be known at compile time");

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        AccConstIndices += Offset;
        continue;
      }

      // Handle array index arithmetic.
      Type *IndexedTy = GTI.getIndexedType();
      if (!IndexedTy->isSized()) {
        Known.resetAll();
        break;
      }

      unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
      KnownBits IndexBits(IndexBitWidth);
      computeKnownBits(Index, IndexBits, Depth + 1, Q);
      TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
      uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
      KnownBits ScalingFactor(IndexBitWidth);
      // Multiply by current sizeof type.
      // &A[i] == A + i * sizeof(*A[i]).
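      // For example (illustrative): indexing into an array of i64 scales the
      // index by 8, so the contributed offset always has its low three bits
      // clear.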
      if (IndexTypeSize.isScalable()) {
        // For scalable types the only thing we know about sizeof is
        // that this is a multiple of the minimum size.
        ScalingFactor.Zero.setLowBits(llvm::countr_zero(TypeSizeInBytes));
      } else if (IndexBits.isConstant()) {
        APInt IndexConst = IndexBits.getConstant();
        APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
        IndexConst *= ScalingFactor;
        AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
        continue;
      } else {
        ScalingFactor =
            KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
      }
      IndexBits = KnownBits::mul(IndexBits, ScalingFactor);

      // If the offsets have a different width from the pointer, according
      // to the language reference we need to sign-extend or truncate them
      // to the width of the pointer.
      IndexBits = IndexBits.sextOrTrunc(BitWidth);

      // Note that inbounds does *not* guarantee nsw for the addition, as only
      // the offset is signed, while the base address is unsigned.
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, IndexBits);
    }
    if (!Known.isUnknown() && !AccConstIndices.isZero()) {
      KnownBits Index = KnownBits::makeConstant(AccConstIndices);
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, Index);
    }
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    BinaryOperator *BO = nullptr;
    Value *R = nullptr, *L = nullptr;
    if (matchSimpleRecurrence(P, BO, R, L)) {
      // Handle the case of a simple two-predecessor recurrence PHI.
      // There's a lot more that could theoretically be done here, but
      // this is sufficient to catch some interesting cases.
      unsigned Opcode = BO->getOpcode();

      // If this is a shift recurrence, we know the bits being shifted in.
      // We can combine that with information about the start value of the
      // recurrence to conclude facts about the result.
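      // For example (illustrative): for %iv = phi [8, %entry],
      // [%iv.next, %loop] with %iv.next = lshr i32 %iv, 1, every value of
      // %iv keeps the 28 leading zero bits of the i32 start value 8.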
1334 if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
1335 Opcode == Instruction::Shl) &&
1336 BO->getOperand(0) == I) {
1338 // We have matched a recurrence of the form:
1339 // %iv = [R, %entry], [%iv.next, %backedge]
1340 // %iv.next = shift_op %iv, L
1342 // Recurse with the phi context to avoid concern about whether facts
1343 // inferred hold at original context instruction. TODO: It may be
1344 // correct to use the original context. IF warranted, explore and
1345 // add sufficient tests to cover.
1346 SimplifyQuery RecQ = Q;
1347 RecQ.CxtI = P;
1348 computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
1349 switch (Opcode) {
1350 case Instruction::Shl:
1351 // A shl recurrence will only increase the tailing zeros
1352 Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1353 break;
1354 case Instruction::LShr:
1355 // A lshr recurrence will preserve the leading zeros of the
1356 // start value
1357 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1358 break;
1359 case Instruction::AShr:
1360 // An ashr recurrence will extend the initial sign bit
1361 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1362 Known.One.setHighBits(Known2.countMinLeadingOnes());
1363 break;
1367 // Check for operations that have the property that if
1368 // both their operands have low zero bits, the result
1369 // will have low zero bits.
1370 if (Opcode == Instruction::Add ||
1371 Opcode == Instruction::Sub ||
1372 Opcode == Instruction::And ||
1373 Opcode == Instruction::Or ||
1374 Opcode == Instruction::Mul) {
1375 // Change the context instruction to the "edge" that flows into the
1376 // phi. This is important because that is where the value is actually
1377 // "evaluated" even though it is used later somewhere else. (see also
1378 // D69571).
1379 SimplifyQuery RecQ = Q;
1381 unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
1382 Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1383 Instruction *LInst = P->getIncomingBlock(1-OpNum)->getTerminator();
1385 // Ok, we have a PHI of the form L op= R. Check for low
1386 // zero bits.
1387 RecQ.CxtI = RInst;
1388 computeKnownBits(R, Known2, Depth + 1, RecQ);
1390 // We need to take the minimum number of known bits
1391 KnownBits Known3(BitWidth);
1392 RecQ.CxtI = LInst;
1393 computeKnownBits(L, Known3, Depth + 1, RecQ);
1395 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1396 Known3.countMinTrailingZeros()));
1398 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1399 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1400 // If initial value of recurrence is nonnegative, and we are adding
1401 // a nonnegative number with nsw, the result can only be nonnegative
1402 // or poison value regardless of the number of times we execute the
1403 // add in phi recurrence. If initial value is negative and we are
1404 // adding a negative number with nsw, the result can only be
1405 // negative or poison value. Similar arguments apply to sub and mul.
1407 // (add non-negative, non-negative) --> non-negative
1408 // (add negative, negative) --> negative
1409 if (Opcode == Instruction::Add) {
1410 if (Known2.isNonNegative() && Known3.isNonNegative())
1411 Known.makeNonNegative();
1412 else if (Known2.isNegative() && Known3.isNegative())
1413 Known.makeNegative();
1416 // (sub nsw non-negative, negative) --> non-negative
1417 // (sub nsw negative, non-negative) --> negative
1418 else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1419 if (Known2.isNonNegative() && Known3.isNegative())
1420 Known.makeNonNegative();
1421 else if (Known2.isNegative() && Known3.isNonNegative())
1422 Known.makeNegative();
1425 // (mul nsw non-negative, non-negative) --> non-negative
1426 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1427 Known3.isNonNegative())
1428 Known.makeNonNegative();
1431 break;
1435 // Unreachable blocks may have zero-operand PHI nodes.
1436 if (P->getNumIncomingValues() == 0)
1437 break;
1439 // Otherwise take the unions of the known bit sets of the operands,
1440 // taking conservative care to avoid excessive recursion.
1441 if (Depth < MaxAnalysisRecursionDepth - 1 && Known.isUnknown()) {
1442 // Skip if every incoming value references to ourself.
1443 if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
1444 break;
1446 Known.Zero.setAllBits();
1447 Known.One.setAllBits();
1448 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1449 Value *IncValue = P->getIncomingValue(u);
1450 // Skip direct self references.
1451 if (IncValue == P) continue;
1453 // Change the context instruction to the "edge" that flows into the
1454 // phi. This is important because that is where the value is actually
1455 // "evaluated" even though it is used later somewhere else. (see also
1456 // D69571).
1457 SimplifyQuery RecQ = Q;
1458 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1460 Known2 = KnownBits(BitWidth);
1462 // Recurse, but cap the recursion to one level, because we don't
1463 // want to waste time spinning around in loops.
1464 // TODO: See if we can base recursion limiter on number of incoming phi
1465 // edges so we don't overly clamp analysis.
1466 computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1468 // See if we can further use a conditional branch into the phi
1469 // to help us determine the range of the value.
1470 if (!Known2.isConstant()) {
1471 ICmpInst::Predicate Pred;
1472 const APInt *RHSC;
1473 BasicBlock *TrueSucc, *FalseSucc;
1474 // TODO: Use RHS Value and compute range from its known bits.
1475 if (match(RecQ.CxtI,
1476 m_Br(m_c_ICmp(Pred, m_Specific(IncValue), m_APInt(RHSC)),
1477 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
1478 // Check for cases of duplicate successors.
1479 if ((TrueSucc == P->getParent()) != (FalseSucc == P->getParent())) {
1480 // If we're using the false successor, invert the predicate.
1481 if (FalseSucc == P->getParent())
1482 Pred = CmpInst::getInversePredicate(Pred);
1483 // Get the knownbits implied by the incoming phi condition.
1484 auto CR = ConstantRange::makeExactICmpRegion(Pred, *RHSC);
1485 KnownBits KnownUnion = Known2.unionWith(CR.toKnownBits());
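// Illustrative example (hypothetical IR): a dominating
// `br (icmp ult %inc, 8)` into this phi gives CR = [0, 8), whose known bits
// have everything above bit 2 zero; unioning with Known2 can only add known
// bits, never remove them.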
1486 // We can have conflicts here if we are analyzing dead code (it's
1487 // impossible for us to reach this BB based on the icmp).
1488 if (KnownUnion.hasConflict()) {
1489 // No reason to continue analyzing in a known dead region, so
1490 // just resetAll and break. This will cause us to also exit the
1491 // outer loop.
1492 Known.resetAll();
1493 break;
1495 Known2 = KnownUnion;
1500 Known = Known.intersectWith(Known2);
1501 // If all bits have been ruled out, there's no need to check
1502 // more operands.
1503 if (Known.isUnknown())
1504 break;
1507 break;
1509 case Instruction::Call:
1510 case Instruction::Invoke:
1511 // If range metadata is attached to this call, set known bits from that,
1512 // and then intersect with known bits based on other properties of the
1513 // function.
1514 if (MDNode *MD =
1515 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1516 computeKnownBitsFromRangeMetadata(*MD, Known);
1517 if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1518 computeKnownBits(RV, Known2, Depth + 1, Q);
1519 Known = Known.unionWith(Known2);
1521 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1522 switch (II->getIntrinsicID()) {
1523 default: break;
1524 case Intrinsic::abs: {
1525 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1526 bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1527 Known = Known2.abs(IntMinIsPoison);
1528 break;
1530 case Intrinsic::bitreverse:
1531 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1532 Known.Zero |= Known2.Zero.reverseBits();
1533 Known.One |= Known2.One.reverseBits();
1534 break;
1535 case Intrinsic::bswap:
1536 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1537 Known.Zero |= Known2.Zero.byteSwap();
1538 Known.One |= Known2.One.byteSwap();
1539 break;
1540 case Intrinsic::ctlz: {
1541 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1542 // If we have a known 1, its position is our upper bound.
1543 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1544 // If this call is poison for 0 input, the result will be less than 2^n.
1545 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1546 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1547 unsigned LowBits = llvm::bit_width(PossibleLZ);
1548 Known.Zero.setBitsFrom(LowBits);
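// Worked example (hypothetical values): if bit 28 of an i32 operand is known
// one, countMaxLeadingZeros() == 3, so ctlz returns at most 3, and
// bit_width(3) == 2 leaves only the low 2 result bits possibly set.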
1549 break;
1551 case Intrinsic::cttz: {
1552 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1553 // If we have a known 1, its position is our upper bound.
1554 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1555 // If this call is poison for 0 input, the result will be less than 2^n.
1556 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1557 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1558 unsigned LowBits = llvm::bit_width(PossibleTZ);
1559 Known.Zero.setBitsFrom(LowBits);
1560 break;
1562 case Intrinsic::ctpop: {
1563 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1564 // We can bound the space the count needs. Also, bits known to be zero
1565 // can't contribute to the population.
1566 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1567 unsigned LowBits = llvm::bit_width(BitsPossiblySet);
1568 Known.Zero.setBitsFrom(LowBits);
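// Illustrative example (hypothetical values): an i32 operand with at most 10
// possibly-set bits has countMaxPopulation() == 10, and bit_width(10) == 4,
// so the population count fits in the low 4 bits.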
1569 // TODO: we could bound KnownOne using the lower bound on the number
1570 // of bits that might be set, i.e. Known2.countMinPopulation().
1571 break;
1573 case Intrinsic::fshr:
1574 case Intrinsic::fshl: {
1575 const APInt *SA;
1576 if (!match(I->getOperand(2), m_APInt(SA)))
1577 break;
1579 // Normalize to funnel shift left.
1580 uint64_t ShiftAmt = SA->urem(BitWidth);
1581 if (II->getIntrinsicID() == Intrinsic::fshr)
1582 ShiftAmt = BitWidth - ShiftAmt;
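// Illustrative example (hypothetical values, i8): fshr(x, y, 3) takes the
// low 8 bits of the concatenation x:y shifted right by 3, which is exactly
// fshl(x, y, 8 - 3 = 5).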
1584 KnownBits Known3(BitWidth);
1585 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1586 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1588 Known.Zero =
1589 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1590 Known.One =
1591 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1592 break;
1594 case Intrinsic::uadd_sat:
1595 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1596 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1597 Known = KnownBits::uadd_sat(Known, Known2);
1598 break;
1599 case Intrinsic::usub_sat:
1600 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1601 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1602 Known = KnownBits::usub_sat(Known, Known2);
1603 break;
1604 case Intrinsic::sadd_sat:
1605 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1606 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1607 Known = KnownBits::sadd_sat(Known, Known2);
1608 break;
1609 case Intrinsic::ssub_sat:
1610 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1611 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1612 Known = KnownBits::ssub_sat(Known, Known2);
1613 break;
1614 case Intrinsic::umin:
1615 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1616 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1617 Known = KnownBits::umin(Known, Known2);
1618 break;
1619 case Intrinsic::umax:
1620 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1621 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1622 Known = KnownBits::umax(Known, Known2);
1623 break;
1624 case Intrinsic::smin:
1625 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1626 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1627 Known = KnownBits::smin(Known, Known2);
1628 break;
1629 case Intrinsic::smax:
1630 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1631 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1632 Known = KnownBits::smax(Known, Known2);
1633 break;
1634 case Intrinsic::ptrmask: {
1635 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1637 const Value *Mask = I->getOperand(1);
1638 Known2 = KnownBits(Mask->getType()->getScalarSizeInBits());
1639 computeKnownBits(Mask, Known2, Depth + 1, Q);
1640 // TODO: 1-extend would be more precise.
1641 Known &= Known2.anyextOrTrunc(BitWidth);
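// Illustrative example (hypothetical values): a mask of -16 has its low 4
// bits known zero, so the masked pointer gains at least 4 trailing
// known-zero bits.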
1642 break;
1644 case Intrinsic::x86_sse42_crc32_64_64:
1645 Known.Zero.setBitsFrom(32);
1646 break;
1647 case Intrinsic::riscv_vsetvli:
1648 case Intrinsic::riscv_vsetvlimax:
1649 // Assume that the VL output is <= 65536, i.e. fits in 17 bits.
1650 // TODO: Take SEW and LMUL into account.
1651 if (BitWidth > 17)
1652 Known.Zero.setBitsFrom(17);
1653 break;
1654 case Intrinsic::vscale: {
1655 if (!II->getParent() || !II->getFunction())
1656 break;
1658 Known = getVScaleRange(II->getFunction(), BitWidth).toKnownBits();
1659 break;
1663 break;
1664 case Instruction::ShuffleVector: {
1665 auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1666 // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1667 if (!Shuf) {
1668 Known.resetAll();
1669 return;
1671 // For undef elements, we don't know anything about the common state of
1672 // the shuffle result.
1673 APInt DemandedLHS, DemandedRHS;
1674 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1675 Known.resetAll();
1676 return;
1678 Known.One.setAllBits();
1679 Known.Zero.setAllBits();
1680 if (!!DemandedLHS) {
1681 const Value *LHS = Shuf->getOperand(0);
1682 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1683 // If we don't know any bits, early out.
1684 if (Known.isUnknown())
1685 break;
1687 if (!!DemandedRHS) {
1688 const Value *RHS = Shuf->getOperand(1);
1689 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1690 Known = Known.intersectWith(Known2);
1692 break;
1694 case Instruction::InsertElement: {
1695 if (isa<ScalableVectorType>(I->getType())) {
1696 Known.resetAll();
1697 return;
1699 const Value *Vec = I->getOperand(0);
1700 const Value *Elt = I->getOperand(1);
1701 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1702 // Early out if the index is non-constant or out-of-range.
1703 unsigned NumElts = DemandedElts.getBitWidth();
1704 if (!CIdx || CIdx->getValue().uge(NumElts)) {
1705 Known.resetAll();
1706 return;
1708 Known.One.setAllBits();
1709 Known.Zero.setAllBits();
1710 unsigned EltIdx = CIdx->getZExtValue();
1711 // Do we demand the inserted element?
1712 if (DemandedElts[EltIdx]) {
1713 computeKnownBits(Elt, Known, Depth + 1, Q);
1714 // If we don't know any bits, early out.
1715 if (Known.isUnknown())
1716 break;
1718 // We don't need the base vector element that has been inserted.
1719 APInt DemandedVecElts = DemandedElts;
1720 DemandedVecElts.clearBit(EltIdx);
1721 if (!!DemandedVecElts) {
1722 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1723 Known = Known.intersectWith(Known2);
1725 break;
1727 case Instruction::ExtractElement: {
1728 // Look through extract element. If the index is non-constant or
1729 // out-of-range, demand all elements; otherwise just the extracted element.
1730 const Value *Vec = I->getOperand(0);
1731 const Value *Idx = I->getOperand(1);
1732 auto *CIdx = dyn_cast<ConstantInt>(Idx);
1733 if (isa<ScalableVectorType>(Vec->getType())) {
1734 // FIXME: there's probably *something* we can do with scalable vectors
1735 Known.resetAll();
1736 break;
1738 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1739 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
1740 if (CIdx && CIdx->getValue().ult(NumElts))
1741 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
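// Illustrative example (hypothetical IR): `extractelement <4 x i32> %v,
// i64 2` narrows DemandedVecElts to 0b0100, so only lane 2 contributes
// known bits.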
1742 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1743 break;
1745 case Instruction::ExtractValue:
1746 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1747 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1748 if (EVI->getNumIndices() != 1) break;
1749 if (EVI->getIndices()[0] == 0) {
1750 switch (II->getIntrinsicID()) {
1751 default: break;
1752 case Intrinsic::uadd_with_overflow:
1753 case Intrinsic::sadd_with_overflow:
1754 computeKnownBitsAddSub(true, II->getArgOperand(0),
1755 II->getArgOperand(1), false, DemandedElts,
1756 Known, Known2, Depth, Q);
1757 break;
1758 case Intrinsic::usub_with_overflow:
1759 case Intrinsic::ssub_with_overflow:
1760 computeKnownBitsAddSub(false, II->getArgOperand(0),
1761 II->getArgOperand(1), false, DemandedElts,
1762 Known, Known2, Depth, Q);
1763 break;
1764 case Intrinsic::umul_with_overflow:
1765 case Intrinsic::smul_with_overflow:
1766 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1767 DemandedElts, Known, Known2, Depth, Q);
1768 break;
1772 break;
1773 case Instruction::Freeze:
1774 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1775 Depth + 1))
1776 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1777 break;
1781 /// Determine which bits of V are known to be either zero or one and return
1782 /// them.
1783 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
1784 unsigned Depth, const SimplifyQuery &Q) {
1785 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1786 ::computeKnownBits(V, DemandedElts, Known, Depth, Q);
1787 return Known;
1790 /// Determine which bits of V are known to be either zero or one and return
1791 /// them.
1792 KnownBits llvm::computeKnownBits(const Value *V, unsigned Depth,
1793 const SimplifyQuery &Q) {
1794 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1795 ::computeKnownBits(V, Known, Depth, Q);
1796 return Known;
1799 /// Determine which bits of V are known to be either zero or one and return
1800 /// them in the Known bit set.
1802 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1803 /// we cannot optimize based on the assumption that it is zero without changing
1804 /// it to be an explicit zero. If we don't change it to zero, other code could
1805 /// be optimized based on the contradictory assumption that it is non-zero.
1806 /// Because instcombine aggressively folds operations with undef args anyway,
1807 /// this won't lose us code quality.
1809 /// This function is defined on values with integer type, values with pointer
1810 /// type, and vectors of integers. In the case
1811 /// where V is a vector, the known zero and known one values are the
1812 /// same width as the vector element, and a bit is set only if it is true
1813 /// for all of the demanded elements in the vector specified by DemandedElts.
1814 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1815 KnownBits &Known, unsigned Depth,
1816 const SimplifyQuery &Q) {
1817 if (!DemandedElts) {
1818 // No demanded elts, better to assume we don't know anything.
1819 Known.resetAll();
1820 return;
1823 assert(V && "No Value?");
1824 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1826 #ifndef NDEBUG
1827 Type *Ty = V->getType();
1828 unsigned BitWidth = Known.getBitWidth();
1830 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1831 "Not integer or pointer type!");
1833 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1834 assert(
1835 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1836 "DemandedElt width should equal the fixed vector number of elements");
1837 } else {
1838 assert(DemandedElts == APInt(1, 1) &&
1839 "DemandedElt width should be 1 for scalars or scalable vectors");
1842 Type *ScalarTy = Ty->getScalarType();
1843 if (ScalarTy->isPointerTy()) {
1844 assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1845 "V and Known should have same BitWidth");
1846 } else {
1847 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1848 "V and Known should have same BitWidth");
1850 #endif
1852 const APInt *C;
1853 if (match(V, m_APInt(C))) {
1854 // We know all of the bits for a scalar constant or a splat vector constant!
1855 Known = KnownBits::makeConstant(*C);
1856 return;
1858 // Null and aggregate-zero are all-zeros.
1859 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1860 Known.setAllZero();
1861 return;
1863 // Handle a constant vector by taking the intersection of the known bits of
1864 // each element.
1865 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1866 assert(!isa<ScalableVectorType>(V->getType()));
1867 // We know that CDV must be a vector of integers. Take the intersection of
1868 // each element.
1869 Known.Zero.setAllBits(); Known.One.setAllBits();
1870 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1871 if (!DemandedElts[i])
1872 continue;
1873 APInt Elt = CDV->getElementAsAPInt(i);
1874 Known.Zero &= ~Elt;
1875 Known.One &= Elt;
1877 return;
1880 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1881 assert(!isa<ScalableVectorType>(V->getType()));
1882 // We know that CV must be a vector of integers. Take the intersection of
1883 // each element.
1884 Known.Zero.setAllBits(); Known.One.setAllBits();
1885 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1886 if (!DemandedElts[i])
1887 continue;
1888 Constant *Element = CV->getAggregateElement(i);
1889 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1890 if (!ElementCI) {
1891 Known.resetAll();
1892 return;
1894 const APInt &Elt = ElementCI->getValue();
1895 Known.Zero &= ~Elt;
1896 Known.One &= Elt;
1898 return;
1901 // Start out not knowing anything.
1902 Known.resetAll();
1904 // We can't infer anything about undefs.
1905 if (isa<UndefValue>(V))
1906 return;
1908 // There's no point in looking through other users of ConstantData for
1909 // assumptions. Confirm that we've handled them all.
1910 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1912 // All recursive calls that increase depth must come after this.
1913 if (Depth == MaxAnalysisRecursionDepth)
1914 return;
1916 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1917 // the bits of its aliasee.
1918 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1919 if (!GA->isInterposable())
1920 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1921 return;
1924 if (const Operator *I = dyn_cast<Operator>(V))
1925 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
1926 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1927 if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange())
1928 Known = CR->toKnownBits();
1931 // Aligned pointers have trailing zeros -- refine the Known.Zero set.
1932 if (isa<PointerType>(V->getType())) {
1933 Align Alignment = V->getPointerAlignment(Q.DL);
1934 Known.Zero.setLowBits(Log2(Alignment));
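// Illustrative example (hypothetical values): a pointer known to be 16-byte
// aligned has Log2(16) = 4 trailing known-zero bits.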
1937 // computeKnownBitsFromAssume strictly refines Known.
1938 // Therefore, we run it after computeKnownBitsFromOperator.
1940 // Check whether a nearby assume intrinsic can determine some known bits.
1941 computeKnownBitsFromAssume(V, Known, Depth, Q);
1943 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1946 /// Try to detect a recurrence in which the value of the induction variable is
1947 /// always a power of two (or zero).
1948 static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
1949 unsigned Depth, SimplifyQuery &Q) {
1950 BinaryOperator *BO = nullptr;
1951 Value *Start = nullptr, *Step = nullptr;
1952 if (!matchSimpleRecurrence(PN, BO, Start, Step))
1953 return false;
1955 // Initial value must be a power of two.
1956 for (const Use &U : PN->operands()) {
1957 if (U.get() == Start) {
1958 // Initial value comes from a different BB, need to adjust context
1959 // instruction for analysis.
1960 Q.CxtI = PN->getIncomingBlock(U)->getTerminator();
1961 if (!isKnownToBeAPowerOfTwo(Start, OrZero, Depth, Q))
1962 return false;
1966 // Except for Mul, the induction variable must be on the left side of the
1967 // increment expression; otherwise its value can be arbitrary.
1968 if (BO->getOpcode() != Instruction::Mul && BO->getOperand(1) != Step)
1969 return false;
1971 Q.CxtI = BO->getParent()->getTerminator();
1972 switch (BO->getOpcode()) {
1973 case Instruction::Mul:
1974 // Power of two is closed under multiplication.
1975 return (OrZero || Q.IIQ.hasNoUnsignedWrap(BO) ||
1976 Q.IIQ.hasNoSignedWrap(BO)) &&
1977 isKnownToBeAPowerOfTwo(Step, OrZero, Depth, Q);
1978 case Instruction::SDiv:
1979 // Start value must not be signmask for signed division, so simply being a
1980 // power of two is not sufficient, and it has to be a constant.
1981 if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
1982 return false;
1983 [[fallthrough]];
1984 case Instruction::UDiv:
1985 // Divisor must be a power of two.
1986 // If OrZero is false, we cannot guarantee that the induction variable is
1987 // non-zero after division (same for Shr), unless it is an exact division.
1988 return (OrZero || Q.IIQ.isExact(BO)) &&
1989 isKnownToBeAPowerOfTwo(Step, false, Depth, Q);
1990 case Instruction::Shl:
1991 return OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO);
1992 case Instruction::AShr:
1993 if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
1994 return false;
1995 [[fallthrough]];
1996 case Instruction::LShr:
1997 return OrZero || Q.IIQ.isExact(BO);
1998 default:
1999 return false;
2003 /// Return true if the given value is known to have exactly one
2004 /// bit set when defined. For vectors return true if every element is known to
2005 /// be a power of two when defined. Supports values with integer or pointer
2006 /// types and vectors of integers.
2007 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
2008 const SimplifyQuery &Q) {
2009 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2011 if (isa<Constant>(V))
2012 return OrZero ? match(V, m_Power2OrZero()) : match(V, m_Power2());
2014 // i1 is by definition a power of 2 or zero.
2015 if (OrZero && V->getType()->getScalarSizeInBits() == 1)
2016 return true;
2018 auto *I = dyn_cast<Instruction>(V);
2019 if (!I)
2020 return false;
2022 if (Q.CxtI && match(V, m_VScale())) {
2023 const Function *F = Q.CxtI->getFunction();
2024 // The vscale_range indicates vscale is a power-of-two.
2025 return F->hasFnAttribute(Attribute::VScaleRange);
2028 // 1 << X is clearly a power of two if the one is not shifted off the end. If
2029 // it is shifted off the end then the result is undefined.
2030 if (match(I, m_Shl(m_One(), m_Value())))
2031 return true;
2033 // (signmask) >>l X is clearly a power of two if the one is not shifted off
2034 // the bottom. If it is shifted off the bottom then the result is undefined.
2035 if (match(I, m_LShr(m_SignMask(), m_Value())))
2036 return true;
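// Illustrative example (hypothetical values, i8): `shl i8 1, %x` yields
// 1, 2, ..., 128 for in-range %x, and `lshr i8 -128, %x` yields
// 128, 64, ..., 1 -- all powers of two.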
2038 // The remaining tests are all recursive, so bail out if we hit the limit.
2039 if (Depth++ == MaxAnalysisRecursionDepth)
2040 return false;
2042 switch (I->getOpcode()) {
2043 case Instruction::ZExt:
2044 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
2045 case Instruction::Trunc:
2046 return OrZero && isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
2047 case Instruction::Shl:
2048 if (OrZero || Q.IIQ.hasNoUnsignedWrap(I) || Q.IIQ.hasNoSignedWrap(I))
2049 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
2050 return false;
2051 case Instruction::LShr:
2052 if (OrZero || Q.IIQ.isExact(cast<BinaryOperator>(I)))
2053 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
2054 return false;
2055 case Instruction::UDiv:
2056 if (Q.IIQ.isExact(cast<BinaryOperator>(I)))
2057 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
2058 return false;
2059 case Instruction::Mul:
2060 return isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q) &&
2061 isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q) &&
2062 (OrZero || isKnownNonZero(I, Depth, Q));
2063 case Instruction::And:
2064 // A power of two and'd with anything is a power of two or zero.
2065 if (OrZero &&
2066 (isKnownToBeAPowerOfTwo(I->getOperand(1), /*OrZero*/ true, Depth, Q) ||
2067 isKnownToBeAPowerOfTwo(I->getOperand(0), /*OrZero*/ true, Depth, Q)))
2068 return true;
2069 // X & (-X) is always a power of two or zero.
2070 if (match(I->getOperand(0), m_Neg(m_Specific(I->getOperand(1)))) ||
2071 match(I->getOperand(1), m_Neg(m_Specific(I->getOperand(0)))))
2072 return OrZero || isKnownNonZero(I->getOperand(0), Depth, Q);
2073 return false;
2074 case Instruction::Add: {
2075 // Adding a power-of-two or zero to the same power-of-two or zero yields
2076 // either the original power-of-two, a larger power-of-two or zero.
2077 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2078 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2079 Q.IIQ.hasNoSignedWrap(VOBO)) {
2080 if (match(I->getOperand(0),
2081 m_c_And(m_Specific(I->getOperand(1)), m_Value())) &&
2082 isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q))
2083 return true;
2084 if (match(I->getOperand(1),
2085 m_c_And(m_Specific(I->getOperand(0)), m_Value())) &&
2086 isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q))
2087 return true;
2089 unsigned BitWidth = V->getType()->getScalarSizeInBits();
2090 KnownBits LHSBits(BitWidth);
2091 computeKnownBits(I->getOperand(0), LHSBits, Depth, Q);
2093 KnownBits RHSBits(BitWidth);
2094 computeKnownBits(I->getOperand(1), RHSBits, Depth, Q);
2095 // If i8 V is a power of two or zero:
2096 // ZeroBits: 1 1 1 0 1 1 1 1
2097 // ~ZeroBits: 0 0 0 1 0 0 0 0
2098 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2099 // If OrZero isn't set, we cannot give back a zero result.
2100 // Make sure either the LHS or RHS has a bit set.
2101 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2102 return true;
2104 return false;
2106 case Instruction::Select:
2107 return isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q) &&
2108 isKnownToBeAPowerOfTwo(I->getOperand(2), OrZero, Depth, Q);
2109 case Instruction::PHI: {
2110 // A PHI node is a power of two if all incoming values are powers of two,
2111 // or if it is an induction variable whose value at every step is a power
2112 // of two.
2113 auto *PN = cast<PHINode>(I);
2114 SimplifyQuery RecQ = Q;
2116 // Check if it is an induction variable and always power of two.
2117 if (isPowerOfTwoRecurrence(PN, OrZero, Depth, RecQ))
2118 return true;
2120 // Recursively check all incoming values. Limit recursion to 2 levels, so
2121 // that search complexity is limited to number of operands^2.
2122 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2123 return llvm::all_of(PN->operands(), [&](const Use &U) {
2124 // A value coming from the PHI node itself is a power of 2 by induction.
2125 if (U.get() == PN)
2126 return true;
2128 // Change the context instruction to the incoming block where it is
2129 // evaluated.
2130 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2131 return isKnownToBeAPowerOfTwo(U.get(), OrZero, NewDepth, RecQ);
2134 case Instruction::Invoke:
2135 case Instruction::Call: {
2136 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
2137 switch (II->getIntrinsicID()) {
2138 case Intrinsic::umax:
2139 case Intrinsic::smax:
2140 case Intrinsic::umin:
2141 case Intrinsic::smin:
2142 return isKnownToBeAPowerOfTwo(II->getArgOperand(1), OrZero, Depth, Q) &&
2143 isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q);
2144 // bswap/bitreverse just move bits around without changing any 1s/0s,
2145 // and thus don't change pow2/non-pow2 status.
2146 case Intrinsic::bitreverse:
2147 case Intrinsic::bswap:
2148 return isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q);
2149 case Intrinsic::fshr:
2150 case Intrinsic::fshl:
2151 // If Op0 == Op1, this is a rotate. is_pow2(rotate(x, y)) == is_pow2(x)
2152 if (II->getArgOperand(0) == II->getArgOperand(1))
2153 return isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q);
2154 break;
2155 default:
2156 break;
2159 return false;
2161 default:
2162 return false;
2166 /// Test whether a GEP's result is known to be non-null.
2168 /// Uses properties inherent in a GEP to try to determine whether it is known
2169 /// to be non-null.
2171 /// Currently this routine does not support vector GEPs.
2172 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2173 const SimplifyQuery &Q) {
2174 const Function *F = nullptr;
2175 if (const Instruction *I = dyn_cast<Instruction>(GEP))
2176 F = I->getFunction();
2178 if (!GEP->isInBounds() ||
2179 NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2180 return false;
2182 // FIXME: Support vector-GEPs.
2183 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2185 // If the base pointer is non-null, we cannot walk to a null address with an
2186 // inbounds GEP in address space zero.
2187 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2188 return true;
2190 // Walk the GEP operands and see if any operand introduces a non-zero offset.
2191 // If so, then the GEP cannot produce a null pointer, as doing so would
2192 // inherently violate the inbounds contract within address space zero.
2193 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2194 GTI != GTE; ++GTI) {
2195 // Struct types are easy -- they must always be indexed by a constant.
2196 if (StructType *STy = GTI.getStructTypeOrNull()) {
2197 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2198 unsigned ElementIdx = OpC->getZExtValue();
2199 const StructLayout *SL = Q.DL.getStructLayout(STy);
2200 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2201 if (ElementOffset > 0)
2202 return true;
2203 continue;
2206 // If we have a zero-sized type, the index doesn't matter. Keep looping.
2207 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).isZero())
2208 continue;
2210 // Fast path the constant operand case both for efficiency and so we don't
2211 // increment Depth when just zipping down an all-constant GEP.
2212 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2213 if (!OpC->isZero())
2214 return true;
2215 continue;
2218 // We post-increment Depth here because while isKnownNonZero increments it
2219 // as well, when we pop back up that increment won't persist. We don't want
2220 // to recurse 10k times just because we have 10k GEP operands. We don't
2221 // bail out completely because we want to handle constant GEPs regardless
2222 // of depth.
2223 if (Depth++ >= MaxAnalysisRecursionDepth)
2224 continue;
2226 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2227 return true;
2230 return false;
2233 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2234 const Instruction *CtxI,
2235 const DominatorTree *DT) {
2236 assert(!isa<Constant>(V) && "Called for constant?");
2238 if (!CtxI || !DT)
2239 return false;
2241 unsigned NumUsesExplored = 0;
2242 for (const auto *U : V->users()) {
2243 // Avoid massive lists
2244 if (NumUsesExplored >= DomConditionsMaxUses)
2245 break;
2246 NumUsesExplored++;
2248 // If the value is used as an argument to a call or invoke, then argument
2249 // attributes may provide an answer about null-ness.
2250 if (const auto *CB = dyn_cast<CallBase>(U))
2251 if (auto *CalledFunc = CB->getCalledFunction())
2252 for (const Argument &Arg : CalledFunc->args())
2253 if (CB->getArgOperand(Arg.getArgNo()) == V &&
2254 Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2255 DT->dominates(CB, CtxI))
2256 return true;
2258 // If the value is used as a load/store pointer, then it must be non-null.
2259 if (V == getLoadStorePointerOperand(U)) {
2260 const Instruction *I = cast<Instruction>(U);
2261 if (!NullPointerIsDefined(I->getFunction(),
2262 V->getType()->getPointerAddressSpace()) &&
2263 DT->dominates(I, CtxI))
2264 return true;
2267 if (match(U, m_IDiv(m_Value(), m_Specific(V))) &&
2268 isValidAssumeForContext(cast<Instruction>(U), CtxI, DT))
2269 return true;
2271 // Consider only compare instructions uniquely controlling a branch
2272 Value *RHS;
2273 CmpInst::Predicate Pred;
2274 if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2275 continue;
2277 bool NonNullIfTrue;
2278 if (cmpExcludesZero(Pred, RHS))
2279 NonNullIfTrue = true;
2280 else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2281 NonNullIfTrue = false;
2282 else
2283 continue;
2285 SmallVector<const User *, 4> WorkList;
2286 SmallPtrSet<const User *, 4> Visited;
2287 for (const auto *CmpU : U->users()) {
2288 assert(WorkList.empty() && "Should be!");
2289 if (Visited.insert(CmpU).second)
2290 WorkList.push_back(CmpU);
2292 while (!WorkList.empty()) {
2293 auto *Curr = WorkList.pop_back_val();
2295 // If a user is an AND, add all its users to the work list. We only
2296 // propagate the "pred != null" condition through AND because it is only
2297 // correct to assume that all conditions of an AND are met in the true
2298 // branch. TODO: Support similar logic for OR and the EQ predicate?
2299 if (NonNullIfTrue)
2300 if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2301 for (const auto *CurrU : Curr->users())
2302 if (Visited.insert(CurrU).second)
2303 WorkList.push_back(CurrU);
2304 continue;
2307 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2308 assert(BI->isConditional() && "uses a comparison!");
2310 BasicBlock *NonNullSuccessor =
2311 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2312 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2313 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2314 return true;
2315 } else if (NonNullIfTrue && isGuard(Curr) &&
2316 DT->dominates(cast<Instruction>(Curr), CtxI)) {
2317 return true;
2323 return false;
2326 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2327 /// ensure that the value it's attached to is never 'Value'? 'RangeType' is
2328 /// the type of the value described by the range.
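/// E.g. (illustrative): range metadata !{i32 1, i32 256} describes [1, 256),
/// which excludes 0, so this returns true when Value is 0.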
2329 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2330 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2331 assert(NumRanges >= 1);
2332 for (unsigned i = 0; i < NumRanges; ++i) {
2333 ConstantInt *Lower =
2334 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2335 ConstantInt *Upper =
2336 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2337 ConstantRange Range(Lower->getValue(), Upper->getValue());
2338 if (Range.contains(Value))
2339 return false;
2341 return true;
2344 /// Try to detect a recurrence that monotonically increases/decreases from a
2345 /// non-zero starting value. These are common as induction variables.
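/// E.g. (illustrative): `%iv = phi i32 [ 1, %entry ], [ %next, %loop ]` with
/// `%next = add nuw i32 %iv, 2` starts non-zero, and with nuw the stepping
/// can never wrap back to zero.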
2346 static bool isNonZeroRecurrence(const PHINode *PN) {
2347 BinaryOperator *BO = nullptr;
2348 Value *Start = nullptr, *Step = nullptr;
2349 const APInt *StartC, *StepC;
2350 if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
2351 !match(Start, m_APInt(StartC)) || StartC->isZero())
2352 return false;
2354 switch (BO->getOpcode()) {
2355 case Instruction::Add:
2356 // Starting from non-zero and stepping away from zero can never wrap back
2357 // to zero.
2358 return BO->hasNoUnsignedWrap() ||
2359 (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
2360 StartC->isNegative() == StepC->isNegative());
2361 case Instruction::Mul:
2362 return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2363 match(Step, m_APInt(StepC)) && !StepC->isZero();
2364 case Instruction::Shl:
2365 return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
2366 case Instruction::AShr:
2367 case Instruction::LShr:
2368 return BO->isExact();
2369 default:
2370 return false;
2374 static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
2375 const SimplifyQuery &Q, unsigned BitWidth, Value *X,
2376 Value *Y, bool NSW) {
2377 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2378 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2380 // If X and Y are both non-negative (as signed values) then their sum is not
2381 // zero unless both X and Y are zero.
2382 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2383 if (isKnownNonZero(Y, DemandedElts, Depth, Q) ||
2384 isKnownNonZero(X, DemandedElts, Depth, Q))
2385 return true;
2387 // If X and Y are both negative (as signed values) then their sum is not
2388 // zero unless both X and Y equal INT_MIN.
2389 if (XKnown.isNegative() && YKnown.isNegative()) {
2390 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2391 // The sign bit of X is set. If some other bit is set then X is not equal
2392 // to INT_MIN.
2393 if (XKnown.One.intersects(Mask))
2394 return true;
2395 // The sign bit of Y is set. If some other bit is set then Y is not equal
2396 // to INT_MIN.
2397 if (YKnown.One.intersects(Mask))
2398 return true;
2401 // The sum of a non-negative number and a power of two is not zero.
2402 if (XKnown.isNonNegative() &&
2403 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2404 return true;
2405 if (YKnown.isNonNegative() &&
2406 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2407 return true;
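// Illustrative example (hypothetical values): if X's low two bits are known
// to be 0b10 and Y's are known to be 0b01, the sum's low bit is known one,
// so computeForAddSub below reports a non-zero result.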
2409 return KnownBits::computeForAddSub(/*Add*/ true, NSW, XKnown, YKnown)
2410 .isNonZero();
2413 static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth,
2414 const SimplifyQuery &Q, unsigned BitWidth, Value *X,
2415 Value *Y) {
2416 if (auto *C = dyn_cast<Constant>(X))
2417 if (C->isNullValue() && isKnownNonZero(Y, DemandedElts, Depth, Q))
2418 return true;
2420 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2421 if (XKnown.isUnknown())
2422 return false;
2423 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2424 // If X != Y then X - Y is non-zero.
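// Illustrative example (hypothetical values): if X's low bit is known one
// and Y's low bit is known zero, KnownBits::ne proves X != Y.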
2425 std::optional<bool> ne = KnownBits::ne(XKnown, YKnown);
2426 // If we are unable to compute whether X != Y, we won't learn anything more
2427 // by computing the known bits of the sub expression, so just return here.
2428 return ne && *ne;
2431 static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts,
2432 unsigned Depth, const SimplifyQuery &Q,
2433 const KnownBits &KnownVal) {
2434 auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
2435 switch (I->getOpcode()) {
2436 case Instruction::Shl:
2437 return Lhs.shl(Rhs);
2438 case Instruction::LShr:
2439 return Lhs.lshr(Rhs);
2440 case Instruction::AShr:
2441 return Lhs.ashr(Rhs);
2442 default:
2443 llvm_unreachable("Unknown Shift Opcode");
2447 auto InvShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
2448 switch (I->getOpcode()) {
2449 case Instruction::Shl:
2450 return Lhs.lshr(Rhs);
2451 case Instruction::LShr:
2452 case Instruction::AShr:
2453 return Lhs.shl(Rhs);
2454 default:
2455 llvm_unreachable("Unknown Shift Opcode");
2459 if (KnownVal.isUnknown())
2460 return false;
2462 KnownBits KnownCnt =
2463 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q);
2464 APInt MaxShift = KnownCnt.getMaxValue();
2465 unsigned NumBits = KnownVal.getBitWidth();
2466 if (MaxShift.uge(NumBits))
2467 return false;
2469 if (!ShiftOp(KnownVal.One, MaxShift).isZero())
2470 return true;
2472 // If all of the bits shifted out are known to be zero, and Val is known
2473 // non-zero, then at least one non-zero bit must remain.
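// Illustrative example (hypothetical values, shl i8): if the value's top two
// bits are known zero and the shift amount is known to be at most 2, no set
// bit can be discarded.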
2474 if (InvShiftOp(KnownVal.Zero, NumBits - MaxShift)
2475 .eq(InvShiftOp(APInt::getAllOnes(NumBits), NumBits - MaxShift)) &&
2476 isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q))
2477 return true;
2479 return false;
2482 static bool isKnownNonZeroFromOperator(const Operator *I,
2483 const APInt &DemandedElts,
2484 unsigned Depth, const SimplifyQuery &Q) {
2485 unsigned BitWidth = getBitWidth(I->getType()->getScalarType(), Q.DL);
2486 switch (I->getOpcode()) {
2487 case Instruction::Alloca:
2488 // Alloca never returns null, malloc might.
2489 return I->getType()->getPointerAddressSpace() == 0;
2490 case Instruction::GetElementPtr:
2491 if (I->getType()->isPointerTy())
2492 return isGEPKnownNonNull(cast<GEPOperator>(I), Depth, Q);
2493 break;
2494 case Instruction::BitCast: {
2495 // We need to be a bit careful here. We can only peek through the bitcast
2496 // if the scalar size of the operand's elements is smaller than, and evenly
2497 // divides, the scalar size they are cast to. Take three cases:
2499 // 1) Unsafe:
2500 // bitcast <2 x i16> %NonZero to <4 x i8>
2502 // %NonZero can have 2 non-zero i16 elements, but isKnownNonZero on a
2503 // <4 x i8> requires that all 4 i8 elements be non-zero, which isn't
2504 // guaranteed (imagine just the sign bit set in the 2 i16 elements).
2506 // 2) Unsafe:
2507 // bitcast <4 x i3> %NonZero to <3 x i4>
2509 // Even though the scalar size of the src (`i3`) is smaller than the
2510 // scalar size of the dst (`i4`), because `i4` is not a multiple of `i3`,
2511 // it's possible for the `3 x i4` elements to be zero because there are
2512 // some elements in the destination that don't contain any full src
2513 // element.
2515 // 3) Safe:
2516 // bitcast <4 x i8> %NonZero to <2 x i16>
2518 // This is always safe as non-zero in the 4 i8 elements implies
2519 // non-zero in the combination of any two adjacent ones. Since i16 is a
2520 // multiple of i8, each i16 is guaranteed to contain 2 full i8 elements.
2521 // This all implies the 2 i16 elements are non-zero.
2522 Type *FromTy = I->getOperand(0)->getType();
2523 if ((FromTy->isIntOrIntVectorTy() || FromTy->isPtrOrPtrVectorTy()) &&
2524 (BitWidth % getBitWidth(FromTy->getScalarType(), Q.DL)) == 0)
2525 return isKnownNonZero(I->getOperand(0), Depth, Q);
2526 } break;
2527 case Instruction::IntToPtr:
2528 // Note that we have to take special care to avoid looking through
2529 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2530 // as casts that can alter the value, e.g., AddrSpaceCasts.
2531 if (!isa<ScalableVectorType>(I->getType()) &&
2532 Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <=
2533 Q.DL.getTypeSizeInBits(I->getType()).getFixedValue())
2534 return isKnownNonZero(I->getOperand(0), Depth, Q);
2535 break;
2536 case Instruction::PtrToInt:
2537 // Similar to int2ptr above, we can look through ptr2int here if the cast
2538 // is a no-op or an extend and not a truncate.
2539 if (!isa<ScalableVectorType>(I->getType()) &&
2540 Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <=
2541 Q.DL.getTypeSizeInBits(I->getType()).getFixedValue())
2542 return isKnownNonZero(I->getOperand(0), Depth, Q);
2543 break;
2544 case Instruction::Sub:
2545 return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, I->getOperand(0),
2546 I->getOperand(1));
2547 case Instruction::Or:
2548 // X | Y != 0 if X != 0 or Y != 0.
2549 return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) ||
2550 isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q);
2551 case Instruction::SExt:
2552 case Instruction::ZExt:
2553 // ext X != 0 if X != 0.
2554 return isKnownNonZero(I->getOperand(0), Depth, Q);
2556 case Instruction::Shl: {
2557 // shl nsw/nuw can't remove any non-zero bits.
2558 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(I);
2559 if (Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO))
2560 return isKnownNonZero(I->getOperand(0), Depth, Q);
2562 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2563 // if the lowest bit is shifted off the end.
2564 KnownBits Known(BitWidth);
2565 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth, Q);
2566 if (Known.One[0])
2567 return true;
2569 return isNonZeroShift(I, DemandedElts, Depth, Q, Known);
2571 case Instruction::LShr:
2572 case Instruction::AShr: {
2573 // shr exact can only shift out zero bits.
2574 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(I);
2575 if (BO->isExact())
2576 return isKnownNonZero(I->getOperand(0), Depth, Q);
2578 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2579 // defined if the sign bit is shifted off the end.
2580 KnownBits Known =
2581 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
2582 if (Known.isNegative())
2583 return true;
2585 return isNonZeroShift(I, DemandedElts, Depth, Q, Known);
2587 case Instruction::UDiv:
2588 case Instruction::SDiv: {
2589 // X / Y
2590 // div exact can only produce a zero if the dividend is zero.
2591 if (cast<PossiblyExactOperator>(I)->isExact())
2592 return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q);
2594 std::optional<bool> XUgeY;
2595 KnownBits XKnown =
2596 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
2597 // If X is fully unknown, we won't be able to figure anything out, so don't
2598 // bother computing known bits for Y.
2599 if (XKnown.isUnknown())
2600 return false;
2602 KnownBits YKnown =
2603 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q);
2604 if (I->getOpcode() == Instruction::SDiv) {
2605 // For signed division, we need to compare the absolute values of the operands.
2606 XKnown = XKnown.abs(/*IntMinIsPoison*/ false);
2607 YKnown = YKnown.abs(/*IntMinIsPoison*/ false);
2609 // If X u>= Y, then the div is non-zero (0/0 is UB).
2610 XUgeY = KnownBits::uge(XKnown, YKnown);
2611 // If X is totally unknown, or X u< Y, we won't be able to prove non-zero
2612 // via computeKnownBits, so just return early.
2613 return XUgeY && *XUgeY;
2615 case Instruction::Add: {
2616 // X + Y.
2618 // If Add has the nuw flag, then if either X or Y is non-zero the result is
2619 // non-zero.
2620 auto *BO = cast<OverflowingBinaryOperator>(I);
2621 if (Q.IIQ.hasNoUnsignedWrap(BO))
2622 return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) ||
2623 isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q);
2625 return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, I->getOperand(0),
2626 I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO));
2628 case Instruction::Mul: {
2629 // If X and Y are non-zero then so is X * Y as long as the multiplication
2630 // does not overflow.
2631 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(I);
2632 if (Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO))
2633 return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q) &&
2634 isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q);
2636 // If either X or Y is odd, then if the other is non-zero the result can't
2637 // be zero.
2638 KnownBits XKnown =
2639 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
2640 if (XKnown.One[0])
2641 return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q);
2643 KnownBits YKnown =
2644 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q);
2645 if (YKnown.One[0])
2646 return XKnown.isNonZero() ||
2647 isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q);
2649 // If there exists any subset of X (sX) and subset of Y (sY) s.t. sX * sY is
2650 // non-zero, then X * Y is non-zero. We can find sX and sY by just taking
2651 // the lowest known One of X and Y. If they are non-zero, the result
2652 // must be non-zero. We can check that LSB(X) * LSB(Y) != 0 by checking
2653 // X.CountTrailingZeros + Y.CountTrailingZeros < BitWidth.
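// Illustrative example (hypothetical values, i8): X known odd (at most 0
// trailing zeros) and Y with at most 3 trailing zeros gives 0 + 3 < 8, so
// the product of their lowest set bits survives and X * Y is non-zero.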
2654 return (XKnown.countMaxTrailingZeros() + YKnown.countMaxTrailingZeros()) <
2655 BitWidth;
2657 case Instruction::Select: {
2658 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2660 // First check if the arm is non-zero using `isKnownNonZero`. If that fails,
2661 // then see if the select condition implies the arm is non-zero. For example
2662 // (X != 0 ? X : Y), we know the true arm is non-zero as the `X` "return" is
2663 // dominated by `X != 0`.
2664 auto SelectArmIsNonZero = [&](bool IsTrueArm) {
2665 Value *Op;
2666 Op = IsTrueArm ? I->getOperand(1) : I->getOperand(2);
2667 // Op is trivially non-zero.
2668 if (isKnownNonZero(Op, DemandedElts, Depth, Q))
2669 return true;
2671 // The condition of the select dominates the true/false arm. Check if the
2672 // condition implies that a given arm is non-zero.
2673 Value *X;
2674 CmpInst::Predicate Pred;
2675 if (!match(I->getOperand(0), m_c_ICmp(Pred, m_Specific(Op), m_Value(X))))
2676 return false;
2678 if (!IsTrueArm)
2679 Pred = ICmpInst::getInversePredicate(Pred);
2681 return cmpExcludesZero(Pred, X);
2684 if (SelectArmIsNonZero(/* IsTrueArm */ true) &&
2685 SelectArmIsNonZero(/* IsTrueArm */ false))
2686 return true;
2687 break;
2689 case Instruction::PHI: {
2690 auto *PN = cast<PHINode>(I);
2691 if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
2692 return true;
2694 // Check if all incoming values are non-zero using recursion.
2695 SimplifyQuery RecQ = Q;
2696 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2697 return llvm::all_of(PN->operands(), [&](const Use &U) {
2698 if (U.get() == PN)
2699 return true;
2700 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2701 // Check if the branch on the phi excludes zero.
2702 ICmpInst::Predicate Pred;
2703 Value *X;
2704 BasicBlock *TrueSucc, *FalseSucc;
2705 if (match(RecQ.CxtI,
2706 m_Br(m_c_ICmp(Pred, m_Specific(U.get()), m_Value(X)),
2707 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
2708 // Check for cases of duplicate successors.
2709 if ((TrueSucc == PN->getParent()) != (FalseSucc == PN->getParent())) {
2710 // If we're using the false successor, invert the predicate.
2711 if (FalseSucc == PN->getParent())
2712 Pred = CmpInst::getInversePredicate(Pred);
2713 if (cmpExcludesZero(Pred, X))
2714 return true;
2717 // Finally recurse on the edge and check it directly.
2718 return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2721 case Instruction::ExtractElement:
2722 if (const auto *EEI = dyn_cast<ExtractElementInst>(I)) {
2723 const Value *Vec = EEI->getVectorOperand();
2724 const Value *Idx = EEI->getIndexOperand();
2725 auto *CIdx = dyn_cast<ConstantInt>(Idx);
2726 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2727 unsigned NumElts = VecTy->getNumElements();
2728 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
2729 if (CIdx && CIdx->getValue().ult(NumElts))
2730 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2731 return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2734 break;
2735 case Instruction::Freeze:
2736 return isKnownNonZero(I->getOperand(0), Depth, Q) &&
2737 isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
2738 Depth);
2739 case Instruction::Load: {
2740 auto *LI = cast<LoadInst>(I);
2741 // A load tagged with nonnull, or with dereferenceable when null pointers
2742 // are undefined, is never null.
2743 if (auto *PtrT = dyn_cast<PointerType>(I->getType()))
2744 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull) ||
2745 (Q.IIQ.getMetadata(LI, LLVMContext::MD_dereferenceable) &&
2746 !NullPointerIsDefined(LI->getFunction(), PtrT->getAddressSpace())))
2747 return true;
2749 // No need to fall through to computeKnownBits as range metadata is already
2750 // handled in isKnownNonZero.
2751 return false;
2753 case Instruction::Call:
2754 case Instruction::Invoke:
2755 if (I->getType()->isPointerTy()) {
2756 const auto *Call = cast<CallBase>(I);
2757 if (Call->isReturnNonNull())
2758 return true;
2759 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2760 return isKnownNonZero(RP, Depth, Q);
2761 } else if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
2762 if (isKnownNonZero(RV, Depth, Q))
2763 return true;
2766 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
2767 switch (II->getIntrinsicID()) {
2768 case Intrinsic::sshl_sat:
2769 case Intrinsic::ushl_sat:
2770 case Intrinsic::abs:
2771 case Intrinsic::bitreverse:
2772 case Intrinsic::bswap:
2773 case Intrinsic::ctpop:
2774 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q);
2775 case Intrinsic::ssub_sat:
2776 return isNonZeroSub(DemandedElts, Depth, Q, BitWidth,
2777 II->getArgOperand(0), II->getArgOperand(1));
2778 case Intrinsic::sadd_sat:
2779 return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth,
2780 II->getArgOperand(0), II->getArgOperand(1),
2781 /*NSW*/ true);
2782 case Intrinsic::umax:
2783 case Intrinsic::uadd_sat:
2784 return isKnownNonZero(II->getArgOperand(1), DemandedElts, Depth, Q) ||
2785 isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q);
2786 case Intrinsic::smin:
2787 case Intrinsic::smax: {
2788 auto KnownOpImpliesNonZero = [&](const KnownBits &K) {
2789 return II->getIntrinsicID() == Intrinsic::smin
2790 ? K.isNegative()
2791 : K.isStrictlyPositive();
2793 KnownBits XKnown =
2794 computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q);
2795 if (KnownOpImpliesNonZero(XKnown))
2796 return true;
2797 KnownBits YKnown =
2798 computeKnownBits(II->getArgOperand(1), DemandedElts, Depth, Q);
2799 if (KnownOpImpliesNonZero(YKnown))
2800 return true;
2802 if (XKnown.isNonZero() && YKnown.isNonZero())
2803 return true;
2805 [[fallthrough]];
2806 case Intrinsic::umin:
2807 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q) &&
2808 isKnownNonZero(II->getArgOperand(1), DemandedElts, Depth, Q);
2809 case Intrinsic::cttz:
2810 return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q)
2811 .Zero[0];
2812 case Intrinsic::ctlz:
2813 return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q)
2814 .isNonNegative();
2815 case Intrinsic::fshr:
2816 case Intrinsic::fshl:
2817 // If Op0 == Op1, this is a rotate. rotate(x, y) != 0 iff x != 0.
2818 if (II->getArgOperand(0) == II->getArgOperand(1))
2819 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q);
2820 break;
2821 case Intrinsic::vscale:
2822 return true;
2823 default:
2824 break;
2826 break;
2829 return false;
2832 KnownBits Known(BitWidth);
2833 computeKnownBits(I, DemandedElts, Known, Depth, Q);
2834 return Known.One != 0;
2837 /// Return true if the given value is known to be non-zero when defined. For
2838 /// vectors, return true if every demanded element is known to be non-zero when
2839 /// defined. For pointers, if the context instruction and dominator tree are
2840 /// specified, perform context-sensitive analysis and return true if the
2841 /// pointer couldn't possibly be null at the specified instruction.
2842 /// Supports values with integer or pointer type and vectors of integers.
2843 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2844 const SimplifyQuery &Q) {
2846 #ifndef NDEBUG
2847 Type *Ty = V->getType();
2848 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2850 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2851 assert(
2852 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2853 "DemandedElt width should equal the fixed vector number of elements");
2854 } else {
2855 assert(DemandedElts == APInt(1, 1) &&
2856 "DemandedElt width should be 1 for scalars");
2858 #endif
2860 if (auto *C = dyn_cast<Constant>(V)) {
2861 if (C->isNullValue())
2862 return false;
2863 if (isa<ConstantInt>(C))
2864 // Must be non-zero due to null test above.
2865 return true;
2867 // For constant vectors, check that all elements are undefined or known
2868 // non-zero to determine that the whole vector is known non-zero.
2869 if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2870 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2871 if (!DemandedElts[i])
2872 continue;
2873 Constant *Elt = C->getAggregateElement(i);
2874 if (!Elt || Elt->isNullValue())
2875 return false;
2876 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2877 return false;
2879 return true;
2882 // A global variable in address space 0 is non-null unless it is extern weak
2883 // or an absolute symbol reference. Other address spaces may have null as a
2884 // valid address for a global, so we can't assume anything.
2885 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2886 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2887 GV->getType()->getAddressSpace() == 0)
2888 return true;
2891 // For constant expressions, fall through to the Operator code below.
2892 if (!isa<ConstantExpr>(V))
2893 return false;
2896 if (auto *I = dyn_cast<Instruction>(V)) {
2897 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2898 // If the possible ranges don't contain zero, then the value is
2899 // definitely non-zero.
2900 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2901 const APInt ZeroValue(Ty->getBitWidth(), 0);
2902 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2903 return true;
2908 if (!isa<Constant>(V) && isKnownNonZeroFromAssume(V, Q))
2909 return true;
2911 // Some of the tests below are recursive, so bail out if we hit the limit.
2912 if (Depth++ >= MaxAnalysisRecursionDepth)
2913 return false;
2915 // Check for pointer simplifications.
2917 if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2918 // A byval or inalloca argument is never null, unless the null pointer is
2919 // defined in its address space. A nonnull argument is assumed never 0.
2920 if (const Argument *A = dyn_cast<Argument>(V)) {
2921 if (((A->hasPassPointeeByValueCopyAttr() &&
2922 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2923 A->hasNonNullAttr()))
2924 return true;
2928 if (const auto *I = dyn_cast<Operator>(V))
2929 if (isKnownNonZeroFromOperator(I, DemandedElts, Depth, Q))
2930 return true;
2932 if (!isa<Constant>(V) &&
2933 isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2934 return true;
2936 return false;
2939 bool isKnownNonZero(const Value *V, unsigned Depth, const SimplifyQuery &Q) {
2940 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2941 APInt DemandedElts =
2942 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
2943 return isKnownNonZero(V, DemandedElts, Depth, Q);
2946 /// If the pair of operators are the same invertible function, return the
2947 /// operands of the function corresponding to each input. Otherwise,
2948 /// return std::nullopt. An invertible function is one that is 1-to-1 and maps
2949 /// every input value to exactly one output value. This is equivalent to
2950 /// saying that Op1 and Op2 are equal exactly when the specified pair of
2951 /// operands are equal, (except that Op1 and Op2 may be poison more often.)
2952 static std::optional<std::pair<Value*, Value*>>
2953 getInvertibleOperands(const Operator *Op1,
2954 const Operator *Op2) {
2955 if (Op1->getOpcode() != Op2->getOpcode())
2956 return std::nullopt;
2958 auto getOperands = [&](unsigned OpNum) -> auto {
2959 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
2962 switch (Op1->getOpcode()) {
2963 default:
2964 break;
2965 case Instruction::Add:
2966 case Instruction::Sub:
2967 if (Op1->getOperand(0) == Op2->getOperand(0))
2968 return getOperands(1);
2969 if (Op1->getOperand(1) == Op2->getOperand(1))
2970 return getOperands(0);
2971 break;
2972 case Instruction::Mul: {
2973 // Invertible if A * B == (A * B) mod 2^N, where A and B are integers
2974 // and N is the bitwidth. The nsw case is non-obvious, but proven by
2975 // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
2976 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2977 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2978 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2979 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2980 break;
2982 // Assume operand order has been canonicalized
2983 if (Op1->getOperand(1) == Op2->getOperand(1) &&
2984 isa<ConstantInt>(Op1->getOperand(1)) &&
2985 !cast<ConstantInt>(Op1->getOperand(1))->isZero())
2986 return getOperands(0);
2987 break;
2989 case Instruction::Shl: {
2990 // Same as multiplies, with the difference that we don't need to check
2991 // for a non-zero multiply. Shifts always multiply by non-zero.
2992 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2993 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2994 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2995 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2996 break;
2998 if (Op1->getOperand(1) == Op2->getOperand(1))
2999 return getOperands(0);
3000 break;
3002 case Instruction::AShr:
3003 case Instruction::LShr: {
3004 auto *PEO1 = cast<PossiblyExactOperator>(Op1);
3005 auto *PEO2 = cast<PossiblyExactOperator>(Op2);
3006 if (!PEO1->isExact() || !PEO2->isExact())
3007 break;
3009 if (Op1->getOperand(1) == Op2->getOperand(1))
3010 return getOperands(0);
3011 break;
3013 case Instruction::SExt:
3014 case Instruction::ZExt:
3015 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
3016 return getOperands(0);
3017 break;
3018 case Instruction::PHI: {
3019 const PHINode *PN1 = cast<PHINode>(Op1);
3020 const PHINode *PN2 = cast<PHINode>(Op2);
3022 // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
3023 // are a single invertible function of the start values? Note that repeated
3024 // application of an invertible function is also invertible.
3025 BinaryOperator *BO1 = nullptr;
3026 Value *Start1 = nullptr, *Step1 = nullptr;
3027 BinaryOperator *BO2 = nullptr;
3028 Value *Start2 = nullptr, *Step2 = nullptr;
3029 if (PN1->getParent() != PN2->getParent() ||
3030 !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
3031 !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
3032 break;
3034 auto Values = getInvertibleOperands(cast<Operator>(BO1),
3035 cast<Operator>(BO2));
3036 if (!Values)
3037 break;
3039 // We have to be careful of mutually defined recurrences here. Ex:
3040 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
3041 // * X_i = Y_i = X_(i-1) OP Y_(i-1)
3042 // The invertibility of these is complicated, and not worth reasoning
3043 // about (yet?).
3044 if (Values->first != PN1 || Values->second != PN2)
3045 break;
3047 return std::make_pair(Start1, Start2);
3050 return std::nullopt;
3053 /// Return true if V2 == V1 + X, where X is known non-zero.
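/// e.g. (illustrative IR): %v2 = add i32 %v1, %x with %x known non-zero
/// implies %v1 != %v2, since %v1 + %x == %v1 would force %x == 0.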
3054 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
3055 const SimplifyQuery &Q) {
3056 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
3057 if (!BO || BO->getOpcode() != Instruction::Add)
3058 return false;
3059 Value *Op = nullptr;
3060 if (V2 == BO->getOperand(0))
3061 Op = BO->getOperand(1);
3062 else if (V2 == BO->getOperand(1))
3063 Op = BO->getOperand(0);
3064 else
3065 return false;
3066 return isKnownNonZero(Op, Depth + 1, Q);
3069 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
3070 /// the multiplication is nuw or nsw.
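/// e.g. (illustrative IR): %v2 = mul nsw i32 %v1, 3 with %v1 known non-zero
/// implies %v1 != %v2.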
3071 static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
3072 const SimplifyQuery &Q) {
3073 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
3074 const APInt *C;
3075 return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
3076 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3077 !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q);
3079 return false;
3082 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
3083 /// the shift is nuw or nsw.
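/// e.g. (illustrative IR): %v2 = shl nuw i32 %v1, 1 with %v1 known non-zero
/// implies %v1 != %v2.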
3084 static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
3085 const SimplifyQuery &Q) {
3086 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
3087 const APInt *C;
3088 return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
3089 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3090 !C->isZero() && isKnownNonZero(V1, Depth + 1, Q);
3092 return false;
3095 static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
3096 unsigned Depth, const SimplifyQuery &Q) {
3097 // Check that the two PHIs are in the same block.
3098 if (PN1->getParent() != PN2->getParent())
3099 return false;
3101 SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
3102 bool UsedFullRecursion = false;
3103 for (const BasicBlock *IncomBB : PN1->blocks()) {
3104 if (!VisitedBBs.insert(IncomBB).second)
3105 continue; // Don't reprocess blocks that we have dealt with already.
3106 const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
3107 const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
3108 const APInt *C1, *C2;
3109 if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
3110 continue;
3112 // Only one pair of phi operands is allowed for full recursion.
3113 if (UsedFullRecursion)
3114 return false;
3116 SimplifyQuery RecQ = Q;
3117 RecQ.CxtI = IncomBB->getTerminator();
3118 if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
3119 return false;
3120 UsedFullRecursion = true;
3122 return true;
3125 static bool isNonEqualSelect(const Value *V1, const Value *V2, unsigned Depth,
3126 const SimplifyQuery &Q) {
3127 const SelectInst *SI1 = dyn_cast<SelectInst>(V1);
3128 if (!SI1)
3129 return false;
3131 if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2)) {
3132 const Value *Cond1 = SI1->getCondition();
3133 const Value *Cond2 = SI2->getCondition();
3134 if (Cond1 == Cond2)
3135 return isKnownNonEqual(SI1->getTrueValue(), SI2->getTrueValue(),
3136 Depth + 1, Q) &&
3137 isKnownNonEqual(SI1->getFalseValue(), SI2->getFalseValue(),
3138 Depth + 1, Q);
3140 return isKnownNonEqual(SI1->getTrueValue(), V2, Depth + 1, Q) &&
3141 isKnownNonEqual(SI1->getFalseValue(), V2, Depth + 1, Q);
3144 /// Return true if it is known that V1 != V2.
3145 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
3146 const SimplifyQuery &Q) {
3147 if (V1 == V2)
3148 return false;
3149 if (V1->getType() != V2->getType())
3150 // We can't look through casts yet.
3151 return false;
3153 if (Depth >= MaxAnalysisRecursionDepth)
3154 return false;
3156 // See if we can recurse through (exactly one of) our operands. This
3157 // requires our operation be 1-to-1 and map every input value to exactly
3158 // one output value. Such an operation is invertible.
3159 auto *O1 = dyn_cast<Operator>(V1);
3160 auto *O2 = dyn_cast<Operator>(V2);
3161 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
3162 if (auto Values = getInvertibleOperands(O1, O2))
3163 return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);
3165 if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
3166 const PHINode *PN2 = cast<PHINode>(V2);
3167 // FIXME: This is missing a generalization to handle the case where one is
3168 // a PHI and another one isn't.
3169 if (isNonEqualPHIs(PN1, PN2, Depth, Q))
3170 return true;
3174 if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
3175 return true;
3177 if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
3178 return true;
3180 if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
3181 return true;
3183 if (V1->getType()->isIntOrIntVectorTy()) {
3184 // Are any known bits in V1 contradictory to known bits in V2? If V1
3185 // has a known zero where V2 has a known one, they must not be equal.
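// e.g. (illustrative IR): if %a = or i32 %x, 1 has bit 0 known one while
// %b = shl i32 %y, 1 has bit 0 known zero, %a and %b cannot be equal.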
3186 KnownBits Known1 = computeKnownBits(V1, Depth, Q);
3187 KnownBits Known2 = computeKnownBits(V2, Depth, Q);
3189 if (Known1.Zero.intersects(Known2.One) ||
3190 Known2.Zero.intersects(Known1.One))
3191 return true;
3194 if (isNonEqualSelect(V1, V2, Depth, Q) || isNonEqualSelect(V2, V1, Depth, Q))
3195 return true;
3197 return false;
3200 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
3201 /// simplify operations downstream. Mask is known to be zero for bits that V
3202 /// cannot have.
3204 /// This function is defined on values with integer type, values with pointer
3205 /// type, and vectors of integers. In the case
3206 /// where V is a vector, the mask, known zero, and known one values are the
3207 /// same width as the vector element, and the bit is set only if it is true
3208 /// for all of the elements in the vector.
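/// e.g. (illustrative IR): for %v = and i32 %x, 15, a mask of 0xFFFFFFF0 is
/// known to be zero, because the 'and' clears the upper 28 bits.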
3209 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
3210 const SimplifyQuery &Q) {
3211 KnownBits Known(Mask.getBitWidth());
3212 computeKnownBits(V, Known, Depth, Q);
3213 return Mask.isSubsetOf(Known.Zero);
3216 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
3217 // Returns the input and lower/upper bounds.
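// e.g. (illustrative): smax(smin(%x, i8 100), i8 -100) clamps %x to
// [-100, 100], giving In = %x, CLow = -100, CHigh = 100.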
3218 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
3219 const APInt *&CLow, const APInt *&CHigh) {
3220 assert(isa<Operator>(Select) &&
3221 cast<Operator>(Select)->getOpcode() == Instruction::Select &&
3222 "Input should be a Select!");
3224 const Value *LHS = nullptr, *RHS = nullptr;
3225 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
3226 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
3227 return false;
3229 if (!match(RHS, m_APInt(CLow)))
3230 return false;
3232 const Value *LHS2 = nullptr, *RHS2 = nullptr;
3233 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
3234 if (getInverseMinMaxFlavor(SPF) != SPF2)
3235 return false;
3237 if (!match(RHS2, m_APInt(CHigh)))
3238 return false;
3240 if (SPF == SPF_SMIN)
3241 std::swap(CLow, CHigh);
3243 In = LHS2;
3244 return CLow->sle(*CHigh);
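// Match the intrinsic form of the clamp pattern above, e.g. (illustrative):
// llvm.smax(llvm.smin(%x, C_high), C_low) with C_low <= C_high.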
3247 static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II,
3248 const APInt *&CLow,
3249 const APInt *&CHigh) {
3250 assert((II->getIntrinsicID() == Intrinsic::smin ||
3251 II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax");
3253 Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
3254 auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
3255 if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
3256 !match(II->getArgOperand(1), m_APInt(CLow)) ||
3257 !match(InnerII->getArgOperand(1), m_APInt(CHigh)))
3258 return false;
3260 if (II->getIntrinsicID() == Intrinsic::smin)
3261 std::swap(CLow, CHigh);
3262 return CLow->sle(*CHigh);
3265 /// For vector constants, loop over the elements and find the constant with the
3266 /// minimum number of sign bits. Return 0 if the value is not a vector constant
3267 /// or if any element was not analyzed; otherwise, return the count for the
3268 /// element with the minimum number of sign bits.
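/// e.g. (illustrative IR): <2 x i8> <i8 -1, i8 3> has elements with 8 and 6
/// sign bits respectively, so the result is 6.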
3269 static unsigned computeNumSignBitsVectorConstant(const Value *V,
3270 const APInt &DemandedElts,
3271 unsigned TyBits) {
3272 const auto *CV = dyn_cast<Constant>(V);
3273 if (!CV || !isa<FixedVectorType>(CV->getType()))
3274 return 0;
3276 unsigned MinSignBits = TyBits;
3277 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
3278 for (unsigned i = 0; i != NumElts; ++i) {
3279 if (!DemandedElts[i])
3280 continue;
3281 // If we find a non-ConstantInt, bail out.
3282 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
3283 if (!Elt)
3284 return 0;
3286 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
3289 return MinSignBits;
3292 static unsigned ComputeNumSignBitsImpl(const Value *V,
3293 const APInt &DemandedElts,
3294 unsigned Depth, const SimplifyQuery &Q);
3296 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
3297 unsigned Depth, const SimplifyQuery &Q) {
3298 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
3299 assert(Result > 0 && "At least one sign bit needs to be present!");
3300 return Result;
3303 /// Return the number of times the sign bit of the register is replicated into
3304 /// the other bits. We know that at least 1 bit is always equal to the sign bit
3305 /// (itself), but other cases can give us information. For example, immediately
3306 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
3307 /// other, so we return 3. For vectors, return the number of sign bits for the
3308 /// vector element with the minimum number of known sign bits of the demanded
3309 /// elements in the vector specified by DemandedElts.
3310 static unsigned ComputeNumSignBitsImpl(const Value *V,
3311 const APInt &DemandedElts,
3312 unsigned Depth, const SimplifyQuery &Q) {
3313 Type *Ty = V->getType();
3314 #ifndef NDEBUG
3315 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3317 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
3318 assert(
3319 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
3320 "DemandedElt width should equal the fixed vector number of elements");
3321 } else {
3322 assert(DemandedElts == APInt(1, 1) &&
3323 "DemandedElt width should be 1 for scalars");
3325 #endif
3327 // We return the minimum number of sign bits that are guaranteed to be present
3328 // in V, so for undef we have to conservatively return 1. We don't have the
3329 // same behavior for poison though -- that's a FIXME today.
3331 Type *ScalarTy = Ty->getScalarType();
3332 unsigned TyBits = ScalarTy->isPointerTy() ?
3333 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
3334 Q.DL.getTypeSizeInBits(ScalarTy);
3336 unsigned Tmp, Tmp2;
3337 unsigned FirstAnswer = 1;
3339 // Note that ConstantInt is handled by the general computeKnownBits case
3340 // below.
3342 if (Depth == MaxAnalysisRecursionDepth)
3343 return 1;
3345 if (auto *U = dyn_cast<Operator>(V)) {
3346 switch (Operator::getOpcode(V)) {
3347 default: break;
3348 case Instruction::SExt:
3349 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
3350 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
3352 case Instruction::SDiv: {
3353 const APInt *Denominator;
3354 // sdiv X, C -> adds log(C) sign bits.
3355 if (match(U->getOperand(1), m_APInt(Denominator))) {
3357 // Ignore non-positive denominator.
3358 if (!Denominator->isStrictlyPositive())
3359 break;
3361 // Calculate the incoming numerator bits.
3362 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3364 // Add floor(log(C)) bits to the numerator bits.
3365 return std::min(TyBits, NumBits + Denominator->logBase2());
3367 break;
3370 case Instruction::SRem: {
3371 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3373 const APInt *Denominator;
3374 // srem X, C -> we know that the result is within [-C+1,C) when C is a
3375 // positive constant. This lets us put a lower bound on the number of sign
3376 // bits.
3377 if (match(U->getOperand(1), m_APInt(Denominator))) {
3379 // Ignore non-positive denominator.
3380 if (Denominator->isStrictlyPositive()) {
3381 // Calculate the leading sign bit constraints by examining the
3382 // denominator. Given that the denominator is positive, there are two
3383 // cases:
3385 // 1. The numerator is positive. The result range is [0,C) and
3386 // [0,C) u< (1 << ceilLogBase2(C)).
3388 // 2. The numerator is negative. Then the result range is (-C,0] and
3389 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
3391 // Thus a lower bound on the number of sign bits is `TyBits -
3392 // ceilLogBase2(C)`.
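// e.g. (illustrative): for a 32-bit srem by 8, ceilLogBase2(8) == 3, so
// the result is known to have at least 32 - 3 == 29 sign bits.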
3394 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
3395 Tmp = std::max(Tmp, ResBits);
3398 return Tmp;
3401 case Instruction::AShr: {
3402 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3403 // ashr X, C -> adds C sign bits. Vectors too.
3404 const APInt *ShAmt;
3405 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3406 if (ShAmt->uge(TyBits))
3407 break; // Bad shift.
3408 unsigned ShAmtLimited = ShAmt->getZExtValue();
3409 Tmp += ShAmtLimited;
3410 if (Tmp > TyBits) Tmp = TyBits;
3412 return Tmp;
3414 case Instruction::Shl: {
3415 const APInt *ShAmt;
3416 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3417 // shl destroys sign bits.
3418 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3419 if (ShAmt->uge(TyBits) || // Bad shift.
3420 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
3421 Tmp2 = ShAmt->getZExtValue();
3422 return Tmp - Tmp2;
3424 break;
3426 case Instruction::And:
3427 case Instruction::Or:
3428 case Instruction::Xor: // NOT is handled here.
3429 // Logical binary ops preserve the number of sign bits at the worst.
3430 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3431 if (Tmp != 1) {
3432 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3433 FirstAnswer = std::min(Tmp, Tmp2);
3434 // We computed what we know about the sign bits as our first
3435 // answer. Now proceed to the generic code that uses
3436 // computeKnownBits, and pick whichever answer is better.
3438 break;
3440 case Instruction::Select: {
3441 // If we have a clamp pattern, we know that the number of sign bits will
3442 // be the minimum of the clamp min/max range.
3443 const Value *X;
3444 const APInt *CLow, *CHigh;
3445 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
3446 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3448 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3449 if (Tmp == 1) break;
3450 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
3451 return std::min(Tmp, Tmp2);
3454 case Instruction::Add:
3455 // Add can have at most one carry bit. Thus we know that the output
3456 // is, at worst, one more bit than the inputs.
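// e.g. (illustrative): adding two i32 values that each have at least 4
// sign bits leaves at least min(4, 4) - 1 == 3 sign bits in the sum.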
3457 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3458 if (Tmp == 1) break;
3460 // Special case decrementing a value (ADD X, -1):
3461 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3462 if (CRHS->isAllOnesValue()) {
3463 KnownBits Known(TyBits);
3464 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
3466 // If the input is known to be 0 or 1, the output is 0/-1, which is
3467 // all sign bits set.
3468 if ((Known.Zero | 1).isAllOnes())
3469 return TyBits;
3471 // If we are subtracting one from a positive number, there is no carry
3472 // out of the result.
3473 if (Known.isNonNegative())
3474 return Tmp;
3477 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3478 if (Tmp2 == 1) break;
3479 return std::min(Tmp, Tmp2) - 1;
3481 case Instruction::Sub:
3482 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3483 if (Tmp2 == 1) break;
3485 // Handle NEG.
3486 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3487 if (CLHS->isNullValue()) {
3488 KnownBits Known(TyBits);
3489 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
3490 // If the input is known to be 0 or 1, the output is 0/-1, which is
3491 // all sign bits set.
3492 if ((Known.Zero | 1).isAllOnes())
3493 return TyBits;
3495 // If the input is known to be positive (the sign bit is known clear),
3496 // the output of the NEG has the same number of sign bits as the
3497 // input.
3498 if (Known.isNonNegative())
3499 return Tmp2;
3501 // Otherwise, we treat this like a SUB.
3504 // Sub can have at most one carry bit. Thus we know that the output
3505 // is, at worst, one more bit than the inputs.
3506 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3507 if (Tmp == 1) break;
3508 return std::min(Tmp, Tmp2) - 1;
3510 case Instruction::Mul: {
3511 // The output of the Mul can be at most twice the valid bits in the
3512 // inputs.
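// e.g. (illustrative): two i32 operands with 28 sign bits each carry
// 32 - 28 + 1 == 5 valid bits, so the product needs at most 10 valid bits
// and keeps at least 32 - 10 + 1 == 23 sign bits.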
3513 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3514 if (SignBitsOp0 == 1) break;
3515 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3516 if (SignBitsOp1 == 1) break;
3517 unsigned OutValidBits =
3518 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3519 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3522 case Instruction::PHI: {
3523 const PHINode *PN = cast<PHINode>(U);
3524 unsigned NumIncomingValues = PN->getNumIncomingValues();
3525 // Don't analyze large in-degree PHIs.
3526 if (NumIncomingValues > 4) break;
3527 // Unreachable blocks may have zero-operand PHI nodes.
3528 if (NumIncomingValues == 0) break;
3530 // Take the minimum of all incoming values. This can't infinitely loop
3531 // because of our depth threshold.
3532 SimplifyQuery RecQ = Q;
3533 Tmp = TyBits;
3534 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
3535 if (Tmp == 1) return Tmp;
3536 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
3537 Tmp = std::min(
3538 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
3540 return Tmp;
3543 case Instruction::Trunc: {
3544 // If the input contained enough sign bits that some remain after the
3545 // truncation, then we can make use of that. Otherwise we don't know
3546 // anything.
3547 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3548 unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits();
3549 if (Tmp > (OperandTyBits - TyBits))
3550 return Tmp - (OperandTyBits - TyBits);
3552 return 1;
3555 case Instruction::ExtractElement:
3556 // Look through extract element. At the moment we keep this simple and
3557 // skip tracking the specific element. But at least we might find
3558 // information valid for all elements of the vector (for example if the
3559 // vector is sign extended, shifted, etc.).
3560 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3562 case Instruction::ShuffleVector: {
3563 // Collect the minimum number of sign bits that are shared by every vector
3564 // element referenced by the shuffle.
3565 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3566 if (!Shuf) {
3567 // FIXME: Add support for shufflevector constant expressions.
3568 return 1;
3570 APInt DemandedLHS, DemandedRHS;
3571 // For undef elements, we don't know anything about the common state of
3572 // the shuffle result.
3573 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3574 return 1;
3575 Tmp = std::numeric_limits<unsigned>::max();
3576 if (!!DemandedLHS) {
3577 const Value *LHS = Shuf->getOperand(0);
3578 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3580 // If we don't know anything, early out and try computeKnownBits
3581 // fall-back.
3582 if (Tmp == 1)
3583 break;
3584 if (!!DemandedRHS) {
3585 const Value *RHS = Shuf->getOperand(1);
3586 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3587 Tmp = std::min(Tmp, Tmp2);
3589 // If we don't know anything, early out and try computeKnownBits
3590 // fall-back.
3591 if (Tmp == 1)
3592 break;
3593 assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3594 return Tmp;
3596 case Instruction::Call: {
3597 if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3598 switch (II->getIntrinsicID()) {
3599 default: break;
3600 case Intrinsic::abs:
3601 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3602 if (Tmp == 1) break;
3604 // Absolute value reduces number of sign bits by at most 1.
3605 return Tmp - 1;
3606 case Intrinsic::smin:
3607 case Intrinsic::smax: {
3608 const APInt *CLow, *CHigh;
3609 if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh))
3610 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3618 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3619 // use this information.
3621 // If we can examine all elements of a vector constant successfully, we're
3622 // done (we can't do any better than that). If not, keep trying.
3623 if (unsigned VecSignBits =
3624 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3625 return VecSignBits;
3627 KnownBits Known(TyBits);
3628 computeKnownBits(V, DemandedElts, Known, Depth, Q);
3630 // If we know that the sign bit is either zero or one, determine the number of
3631 // identical bits in the top of the input value.
3632 return std::max(FirstAnswer, Known.countMinSignBits());
3635 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3636 const TargetLibraryInfo *TLI) {
3637 const Function *F = CB.getCalledFunction();
3638 if (!F)
3639 return Intrinsic::not_intrinsic;
3641 if (F->isIntrinsic())
3642 return F->getIntrinsicID();
3644 // We are going to infer semantics of a library function based on mapping it
3645 // to an LLVM intrinsic. Check that the library function is available from
3646 // this callbase and in this environment.
3647 LibFunc Func;
3648 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3649 !CB.onlyReadsMemory())
3650 return Intrinsic::not_intrinsic;
3652 switch (Func) {
3653 default:
3654 break;
3655 case LibFunc_sin:
3656 case LibFunc_sinf:
3657 case LibFunc_sinl:
3658 return Intrinsic::sin;
3659 case LibFunc_cos:
3660 case LibFunc_cosf:
3661 case LibFunc_cosl:
3662 return Intrinsic::cos;
3663 case LibFunc_exp:
3664 case LibFunc_expf:
3665 case LibFunc_expl:
3666 return Intrinsic::exp;
3667 case LibFunc_exp2:
3668 case LibFunc_exp2f:
3669 case LibFunc_exp2l:
3670 return Intrinsic::exp2;
3671 case LibFunc_log:
3672 case LibFunc_logf:
3673 case LibFunc_logl:
3674 return Intrinsic::log;
3675 case LibFunc_log10:
3676 case LibFunc_log10f:
3677 case LibFunc_log10l:
3678 return Intrinsic::log10;
3679 case LibFunc_log2:
3680 case LibFunc_log2f:
3681 case LibFunc_log2l:
3682 return Intrinsic::log2;
3683 case LibFunc_fabs:
3684 case LibFunc_fabsf:
3685 case LibFunc_fabsl:
3686 return Intrinsic::fabs;
3687 case LibFunc_fmin:
3688 case LibFunc_fminf:
3689 case LibFunc_fminl:
3690 return Intrinsic::minnum;
3691 case LibFunc_fmax:
3692 case LibFunc_fmaxf:
3693 case LibFunc_fmaxl:
3694 return Intrinsic::maxnum;
3695 case LibFunc_copysign:
3696 case LibFunc_copysignf:
3697 case LibFunc_copysignl:
3698 return Intrinsic::copysign;
3699 case LibFunc_floor:
3700 case LibFunc_floorf:
3701 case LibFunc_floorl:
3702 return Intrinsic::floor;
3703 case LibFunc_ceil:
3704 case LibFunc_ceilf:
3705 case LibFunc_ceill:
3706 return Intrinsic::ceil;
3707 case LibFunc_trunc:
3708 case LibFunc_truncf:
3709 case LibFunc_truncl:
3710 return Intrinsic::trunc;
3711 case LibFunc_rint:
3712 case LibFunc_rintf:
3713 case LibFunc_rintl:
3714 return Intrinsic::rint;
3715 case LibFunc_nearbyint:
3716 case LibFunc_nearbyintf:
3717 case LibFunc_nearbyintl:
3718 return Intrinsic::nearbyint;
3719 case LibFunc_round:
3720 case LibFunc_roundf:
3721 case LibFunc_roundl:
3722 return Intrinsic::round;
3723 case LibFunc_roundeven:
3724 case LibFunc_roundevenf:
3725 case LibFunc_roundevenl:
3726 return Intrinsic::roundeven;
3727 case LibFunc_pow:
3728 case LibFunc_powf:
3729 case LibFunc_powl:
3730 return Intrinsic::pow;
3731 case LibFunc_sqrt:
3732 case LibFunc_sqrtf:
3733 case LibFunc_sqrtl:
3734 return Intrinsic::sqrt;
3737 return Intrinsic::not_intrinsic;
3740 /// Deprecated, use computeKnownFPClass instead.
3742 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3743 /// standard ordered compare. E.g., -0.0 olt 0.0 becomes true because of the
3744 /// sign bit, despite the two values comparing equal.
3745 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3746 const DataLayout &DL,
3747 const TargetLibraryInfo *TLI,
3748 bool SignBitOnly, unsigned Depth) {
3749 // TODO: This function does not do the right thing when SignBitOnly is true
3750 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3751 // which flips the sign bits of NaNs. See
3752 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3754 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3755 return !CFP->getValueAPF().isNegative() ||
3756 (!SignBitOnly && CFP->getValueAPF().isZero());
3759 // Handle vector of constants.
3760 if (auto *CV = dyn_cast<Constant>(V)) {
3761 if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3762 unsigned NumElts = CVFVTy->getNumElements();
3763 for (unsigned i = 0; i != NumElts; ++i) {
3764 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3765 if (!CFP)
3766 return false;
3767 if (CFP->getValueAPF().isNegative() &&
3768 (SignBitOnly || !CFP->getValueAPF().isZero()))
3769 return false;
3772 // All non-negative ConstantFPs.
3773 return true;
3777 if (Depth == MaxAnalysisRecursionDepth)
3778 return false;
3780 const Operator *I = dyn_cast<Operator>(V);
3781 if (!I)
3782 return false;
3784 switch (I->getOpcode()) {
3785 default:
3786 break;
3787 // Unsigned integers are always nonnegative.
3788 case Instruction::UIToFP:
3789 return true;
3790 case Instruction::FDiv:
3791 // X / X is always exactly 1.0 or a NaN.
3792 if (I->getOperand(0) == I->getOperand(1) &&
3793 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3794 return true;
3796 // Set SignBitOnly for RHS, because X / -0.0 is -Inf (or NaN).
3797 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3798 SignBitOnly, Depth + 1) &&
3799 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI,
3800 /*SignBitOnly*/ true, Depth + 1);
3801 case Instruction::FMul:
3802 // X * X is always non-negative or a NaN.
3803 if (I->getOperand(0) == I->getOperand(1) &&
3804 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3805 return true;
3807 [[fallthrough]];
3808 case Instruction::FAdd:
3809 case Instruction::FRem:
3810 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3811 SignBitOnly, Depth + 1) &&
3812 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI,
3813 SignBitOnly, Depth + 1);
3814 case Instruction::Select:
3815 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI,
3816 SignBitOnly, Depth + 1) &&
3817 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), DL, TLI,
3818 SignBitOnly, Depth + 1);
3819 case Instruction::FPExt:
3820 case Instruction::FPTrunc:
3821 // Widening/narrowing never changes the sign.
3822 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3823 SignBitOnly, Depth + 1);
3824 case Instruction::ExtractElement:
3825 // Look through extract element. At the moment we keep this simple and skip
3826 // tracking the specific element. But at least we might find information
3827 // valid for all elements of the vector.
3828 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3829 SignBitOnly, Depth + 1);
3830 case Instruction::Call:
3831 const auto *CI = cast<CallInst>(I);
3832 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3833 switch (IID) {
3834 default:
3835 break;
3836 case Intrinsic::canonicalize:
3837 case Intrinsic::arithmetic_fence:
3838 case Intrinsic::floor:
3839 case Intrinsic::ceil:
3840 case Intrinsic::trunc:
3841 case Intrinsic::rint:
3842 case Intrinsic::nearbyint:
3843 case Intrinsic::round:
3844 case Intrinsic::roundeven:
3845 case Intrinsic::fptrunc_round:
3846 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3847 SignBitOnly, Depth + 1);
3848 case Intrinsic::maxnum: {
3849 Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3850 auto isPositiveNum = [&](Value *V) {
3851 if (SignBitOnly) {
3852 // With SignBitOnly, this is tricky because the result of
3853 // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3854 // a constant strictly greater than 0.0.
3855 const APFloat *C;
3856 return match(V, m_APFloat(C)) &&
3857 *C > APFloat::getZero(C->getSemantics());
3860 // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3861 // maxnum can't be ordered-less-than-zero.
3862 return isKnownNeverNaN(V, DL, TLI) &&
3863 cannotBeOrderedLessThanZeroImpl(V, DL, TLI, false, Depth + 1);
3866 // TODO: This could be improved. We could also check that neither operand
3867 // has its sign bit set (and at least 1 is not-NAN?).
3868 return isPositiveNum(V0) || isPositiveNum(V1);
3871 case Intrinsic::maximum:
3872 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3873 SignBitOnly, Depth + 1) ||
3874 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI,
3875 SignBitOnly, Depth + 1);
3876 case Intrinsic::minnum:
3877 case Intrinsic::minimum:
3878 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3879 SignBitOnly, Depth + 1) &&
3880 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI,
3881 SignBitOnly, Depth + 1);
3882 case Intrinsic::exp:
3883 case Intrinsic::exp2:
3884 case Intrinsic::fabs:
3885 return true;
3886 case Intrinsic::copysign:
3887 // Only the sign operand matters.
3888 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI, true,
3889 Depth + 1);
3890 case Intrinsic::sqrt:
3891 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3892 if (!SignBitOnly)
3893 return true;
3894 return CI->hasNoNaNs() &&
3895 (CI->hasNoSignedZeros() ||
3896 cannotBeNegativeZero(CI->getOperand(0), DL, TLI));
3898 case Intrinsic::powi:
3899 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3900 // powi(x,n) is non-negative if n is even.
3901 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3902 return true;
3904 // TODO: This is not correct. Given that exp is an integer, here are the
3905 // ways that pow can return a negative value:
3907 // pow(x, exp) --> negative if exp is odd and x is negative.
3908 // pow(-0, exp) --> -inf if exp is negative odd.
3909 // pow(-0, exp) --> -0 if exp is positive odd.
3910 // pow(-inf, exp) --> -0 if exp is negative odd.
3911 // pow(-inf, exp) --> -inf if exp is positive odd.
3913 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3914 // but we must return false if x == -0. Unfortunately we do not currently
3915 // have a way of expressing this constraint. See details in
3916 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3917 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI,
3918 SignBitOnly, Depth + 1);
3920 case Intrinsic::fma:
3921 case Intrinsic::fmuladd:
3922 // x*x+y is non-negative if y is non-negative.
3923 return I->getOperand(0) == I->getOperand(1) &&
3924 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3925 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), DL, TLI,
3926 SignBitOnly, Depth + 1);
3928 break;
3930 return false;
3933 bool llvm::SignBitMustBeZero(const Value *V, const DataLayout &DL,
3934 const TargetLibraryInfo *TLI) {
3935 // FIXME: Use computeKnownFPClass and pass all arguments
3936 return cannotBeOrderedLessThanZeroImpl(V, DL, TLI, true, 0);
3939 /// Return true if it's possible to assume IEEE treatment of input denormals
3940 /// in \p F for a value of type \p Ty.
3941 static bool inputDenormalIsIEEE(const Function &F, const Type *Ty) {
3942 Ty = Ty->getScalarType();
3943 return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE;
3946 static bool inputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty) {
3947 Ty = Ty->getScalarType();
3948 DenormalMode Mode = F.getDenormalMode(Ty->getFltSemantics());
3949 return Mode.Input == DenormalMode::IEEE ||
3950 Mode.Input == DenormalMode::PositiveZero;
3953 static bool outputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty) {
3954 Ty = Ty->getScalarType();
3955 DenormalMode Mode = F.getDenormalMode(Ty->getFltSemantics());
3956 return Mode.Output == DenormalMode::IEEE ||
3957 Mode.Output == DenormalMode::PositiveZero;
3960 bool KnownFPClass::isKnownNeverLogicalZero(const Function &F, Type *Ty) const {
3961 return isKnownNeverZero() &&
3962 (isKnownNeverSubnormal() || inputDenormalIsIEEE(F, Ty));
3965 bool KnownFPClass::isKnownNeverLogicalNegZero(const Function &F,
3966 Type *Ty) const {
3967 return isKnownNeverNegZero() &&
3968 (isKnownNeverNegSubnormal() || inputDenormalIsIEEEOrPosZero(F, Ty));
3971 bool KnownFPClass::isKnownNeverLogicalPosZero(const Function &F,
3972 Type *Ty) const {
3973 if (!isKnownNeverPosZero())
3974 return false;
3976 // If we know there are no denormals, nothing can be flushed to zero.
3977 if (isKnownNeverSubnormal())
3978 return true;
3980 DenormalMode Mode = F.getDenormalMode(Ty->getScalarType()->getFltSemantics());
3981 switch (Mode.Input) {
3982 case DenormalMode::IEEE:
3983 return true;
3984 case DenormalMode::PreserveSign:
3985 // Negative subnormal won't flush to +0
3986 return isKnownNeverPosSubnormal();
3987 case DenormalMode::PositiveZero:
3988 default:
3989 // Both positive and negative subnormal could flush to +0
3990 return false;
3993 llvm_unreachable("covered switch over denormal mode");
3996 void KnownFPClass::propagateDenormal(const KnownFPClass &Src, const Function &F,
3997 Type *Ty) {
3998 KnownFPClasses = Src.KnownFPClasses;
3999 // If we aren't assuming the source can't be a zero, we don't have to check if
4000 // a denormal input could be flushed.
4001 if (!Src.isKnownNeverPosZero() && !Src.isKnownNeverNegZero())
4002 return;
4004 // If we know the input can't be a denormal, it can't be flushed to 0.
4005 if (Src.isKnownNeverSubnormal())
4006 return;
4008 DenormalMode Mode = F.getDenormalMode(Ty->getScalarType()->getFltSemantics());
4010 if (!Src.isKnownNeverPosSubnormal() && Mode != DenormalMode::getIEEE())
4011 KnownFPClasses |= fcPosZero;
4013 if (!Src.isKnownNeverNegSubnormal() && Mode != DenormalMode::getIEEE()) {
4014 if (Mode != DenormalMode::getPositiveZero())
4015 KnownFPClasses |= fcNegZero;
4017 if (Mode.Input == DenormalMode::PositiveZero ||
4018 Mode.Output == DenormalMode::PositiveZero ||
4019 Mode.Input == DenormalMode::Dynamic ||
4020 Mode.Output == DenormalMode::Dynamic)
4021 KnownFPClasses |= fcPosZero;
4025 void KnownFPClass::propagateCanonicalizingSrc(const KnownFPClass &Src,
4026 const Function &F, Type *Ty) {
4027 propagateDenormal(Src, F, Ty);
4028 propagateNaN(Src, /*PreserveSign=*/true);
4031 /// Returns a pair of values which, if passed to llvm.is.fpclass, yields the
4032 /// same result as an fcmp with the given operands.
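/// e.g. (illustrative IR): fcmp olt double %x, 0.0 corresponds to
/// llvm.is.fpclass(%x, fcNegSubnormal|fcNegNormal|fcNegInf), assuming IEEE
/// handling of input denormals.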
4033 std::pair<Value *, FPClassTest> llvm::fcmpToClassTest(FCmpInst::Predicate Pred,
4034 const Function &F,
4035 Value *LHS, Value *RHS,
4036 bool LookThroughSrc) {
4037 const APFloat *ConstRHS;
4038 if (!match(RHS, m_APFloatAllowUndef(ConstRHS)))
4039 return {nullptr, fcAllFlags};
4041 return fcmpToClassTest(Pred, F, LHS, ConstRHS, LookThroughSrc);
4044 std::pair<Value *, FPClassTest>
4045 llvm::fcmpToClassTest(FCmpInst::Predicate Pred, const Function &F, Value *LHS,
4046 const APFloat *ConstRHS, bool LookThroughSrc) {
4047 // fcmp ord x, zero|normal|subnormal|inf -> ~fcNan
4048 if (Pred == FCmpInst::FCMP_ORD && !ConstRHS->isNaN())
4049 return {LHS, ~fcNan};
4051 // fcmp uno x, zero|normal|subnormal|inf -> fcNan
4052 if (Pred == FCmpInst::FCMP_UNO && !ConstRHS->isNaN())
4053 return {LHS, fcNan};
4055 if (ConstRHS->isZero()) {
4056 // Compares with fcNone are only exactly equal to fcZero if input denormals
4057 // are not flushed.
4058 // TODO: Handle DAZ by expanding masks to cover subnormal cases.
4059 if (Pred != FCmpInst::FCMP_ORD && Pred != FCmpInst::FCMP_UNO &&
4060 !inputDenormalIsIEEE(F, LHS->getType()))
4061 return {nullptr, fcAllFlags};
4063 switch (Pred) {
4064 case FCmpInst::FCMP_OEQ: // Match x == 0.0
4065 return {LHS, fcZero};
4066 case FCmpInst::FCMP_UEQ: // Match isnan(x) || (x == 0.0)
4067 return {LHS, fcZero | fcNan};
4068 case FCmpInst::FCMP_UNE: // Match (x != 0.0)
4069 return {LHS, ~fcZero};
4070 case FCmpInst::FCMP_ONE: // Match !isnan(x) && x != 0.0
4071 return {LHS, ~fcNan & ~fcZero};
4072 case FCmpInst::FCMP_ORD:
4073 // Canonical form of ord/uno is with a zero. We could also handle
4074 // other non-canonical non-NaN constants or LHS == RHS.
4075 return {LHS, ~fcNan};
4076 case FCmpInst::FCMP_UNO:
4077 return {LHS, fcNan};
4078 case FCmpInst::FCMP_OGT: // x > 0
4079 return {LHS, fcPosSubnormal | fcPosNormal | fcPosInf};
4080 case FCmpInst::FCMP_UGT: // isnan(x) || x > 0
4081 return {LHS, fcPosSubnormal | fcPosNormal | fcPosInf | fcNan};
4082 case FCmpInst::FCMP_OGE: // x >= 0
4083 return {LHS, fcPositive | fcNegZero};
4084 case FCmpInst::FCMP_UGE: // isnan(x) || x >= 0
4085 return {LHS, fcPositive | fcNegZero | fcNan};
4086 case FCmpInst::FCMP_OLT: // x < 0
4087 return {LHS, fcNegSubnormal | fcNegNormal | fcNegInf};
4088 case FCmpInst::FCMP_ULT: // isnan(x) || x < 0
4089 return {LHS, fcNegSubnormal | fcNegNormal | fcNegInf | fcNan};
4090 case FCmpInst::FCMP_OLE: // x <= 0
4091 return {LHS, fcNegative | fcPosZero};
4092 case FCmpInst::FCMP_ULE: // isnan(x) || x <= 0
4093 return {LHS, fcNegative | fcPosZero | fcNan};
4094 default:
4095 break;
4098 return {nullptr, fcAllFlags};
4101 Value *Src = LHS;
4102 const bool IsFabs = LookThroughSrc && match(LHS, m_FAbs(m_Value(Src)));
4104 // Compute the test mask that would return true for the ordered comparisons.
4105 FPClassTest Mask;
4107 if (ConstRHS->isInfinity()) {
4108 switch (Pred) {
4109 case FCmpInst::FCMP_OEQ:
4110 case FCmpInst::FCMP_UNE: {
4111 // Match __builtin_isinf patterns
4113 // fcmp oeq x, +inf -> is_fpclass x, fcPosInf
4114 // fcmp oeq fabs(x), +inf -> is_fpclass x, fcInf
4115 // fcmp oeq x, -inf -> is_fpclass x, fcNegInf
4116 // fcmp oeq fabs(x), -inf -> is_fpclass x, 0 -> false
4118 // fcmp une x, +inf -> is_fpclass x, ~fcPosInf
4119 // fcmp une fabs(x), +inf -> is_fpclass x, ~fcInf
4120 // fcmp une x, -inf -> is_fpclass x, ~fcNegInf
4121 // fcmp une fabs(x), -inf -> is_fpclass x, fcAllFlags -> true
4123 if (ConstRHS->isNegative()) {
4124 Mask = fcNegInf;
4125 if (IsFabs)
4126 Mask = fcNone;
4127 } else {
4128 Mask = fcPosInf;
4129 if (IsFabs)
4130 Mask |= fcNegInf;
4133 break;
4135 case FCmpInst::FCMP_ONE:
4136 case FCmpInst::FCMP_UEQ: {
4137 // Match __builtin_isinf patterns
4138 // fcmp one x, -inf -> is_fpclass x, ~fcNegInf & ~fcNan
4139 // fcmp one fabs(x), -inf -> is_fpclass x, ~fcNan
4140 // fcmp one x, +inf -> is_fpclass x, ~fcPosInf & ~fcNan
4141 // fcmp one fabs(x), +inf -> is_fpclass x, ~fcInf & ~fcNan
4143 // fcmp ueq x, +inf -> is_fpclass x, fcPosInf|fcNan
4144 // fcmp ueq (fabs x), +inf -> is_fpclass x, fcInf|fcNan
4145 // fcmp ueq x, -inf -> is_fpclass x, fcNegInf|fcNan
4146 // fcmp ueq fabs(x), -inf -> is_fpclass x, fcNan
4147 if (ConstRHS->isNegative()) {
4148 Mask = ~fcNegInf & ~fcNan;
4149 if (IsFabs)
4150 Mask = ~fcNan;
4151 } else {
4152 Mask = ~fcPosInf & ~fcNan;
4153 if (IsFabs)
4154 Mask &= ~fcNegInf;
4157 break;
4159 case FCmpInst::FCMP_OLT:
4160 case FCmpInst::FCMP_UGE: {
4161 if (ConstRHS->isNegative()) {
4162 // No value is ordered and less than negative infinity.
4163 // Every value is either unordered with, or at least, negative infinity.
4164 // fcmp olt x, -inf -> false
4165 // fcmp uge x, -inf -> true
4166 Mask = fcNone;
4167 break;
4170 // fcmp olt fabs(x), +inf -> fcFinite
4171 // fcmp uge fabs(x), +inf -> ~fcFinite
4172 // fcmp olt x, +inf -> fcFinite|fcNegInf
4173 // fcmp uge x, +inf -> ~(fcFinite|fcNegInf)
4174 Mask = fcFinite;
4175 if (!IsFabs)
4176 Mask |= fcNegInf;
4177 break;
4179 case FCmpInst::FCMP_OGE:
4180 case FCmpInst::FCMP_ULT: {
4181 if (ConstRHS->isNegative()) // TODO
4182 return {nullptr, fcAllFlags};
4184 // fcmp oge fabs(x), +inf -> fcInf
4185 // fcmp oge x, +inf -> fcPosInf
4186 // fcmp ult fabs(x), +inf -> ~fcInf
4187 // fcmp ult x, +inf -> ~fcPosInf
4188 Mask = fcPosInf;
4189 if (IsFabs)
4190 Mask |= fcNegInf;
4191 break;
4193 case FCmpInst::FCMP_OGT:
4194 case FCmpInst::FCMP_ULE: {
4195 if (ConstRHS->isNegative())
4196 return {nullptr, fcAllFlags};
4198 // No value is ordered and greater than infinity.
4199 Mask = fcNone;
4200 break;
4202 default:
4203 return {nullptr, fcAllFlags};
4205 } else if (ConstRHS->isSmallestNormalized() && !ConstRHS->isNegative()) {
4206 // Match the pattern that's used in __builtin_isnormal.
4207 switch (Pred) {
4208 case FCmpInst::FCMP_OLT:
4209 case FCmpInst::FCMP_UGE: {
4210 // fcmp olt x, smallest_normal -> fcNegInf|fcNegNormal|fcSubnormal|fcZero
4211 // fcmp olt fabs(x), smallest_normal -> fcSubnormal|fcZero
4212 // fcmp uge x, smallest_normal -> fcNan|fcPosNormal|fcPosInf
4213 // fcmp uge fabs(x), smallest_normal -> ~(fcSubnormal|fcZero)
4214 Mask = fcZero | fcSubnormal;
4215 if (!IsFabs)
4216 Mask |= fcNegNormal | fcNegInf;
4218 break;
4220 case FCmpInst::FCMP_OGE:
4221 case FCmpInst::FCMP_ULT: {
4222 // fcmp oge x, smallest_normal -> fcPosNormal | fcPosInf
4223 // fcmp oge fabs(x), smallest_normal -> fcInf | fcNormal
4224 // fcmp ult x, smallest_normal -> ~(fcPosNormal | fcPosInf)
4225 // fcmp ult fabs(x), smallest_normal -> ~(fcInf | fcNormal)
4226 Mask = fcPosInf | fcPosNormal;
4227 if (IsFabs)
4228 Mask |= fcNegInf | fcNegNormal;
4229 break;
4231 default:
4232 return {nullptr, fcAllFlags};
4234 } else if (ConstRHS->isNaN()) {
4235 // fcmp o__ x, nan -> false
4236 // fcmp u__ x, nan -> true
4237 Mask = fcNone;
4238 } else
4239 return {nullptr, fcAllFlags};
4241 // Invert the comparison for the unordered cases.
4242 if (FCmpInst::isUnordered(Pred))
4243 Mask = ~Mask;
4245 return {Src, Mask};
4248 static FPClassTest computeKnownFPClassFromAssumes(const Value *V,
4249 const SimplifyQuery &Q) {
4250 FPClassTest KnownFromAssume = fcAllFlags;
4252 // Try to restrict the floating-point classes based on information from
4253 // assumptions.
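// e.g. (illustrative IR): given %ord = fcmp ord double %x, 0.0 followed by
// call void @llvm.assume(i1 %ord), fcNan can be removed from the possible
// classes of %x.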
4254 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
4255 if (!AssumeVH)
4256 continue;
4257 CallInst *I = cast<CallInst>(AssumeVH);
4258 const Function *F = I->getFunction();
4260 assert(F == Q.CxtI->getParent()->getParent() &&
4261 "Got assumption for the wrong function!");
4262 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
4263 "must be an assume intrinsic");
4265 if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
4266 continue;
4268 CmpInst::Predicate Pred;
4269 Value *LHS, *RHS;
4270 uint64_t ClassVal = 0;
4271 if (match(I->getArgOperand(0), m_FCmp(Pred, m_Value(LHS), m_Value(RHS)))) {
4272 auto [TestedValue, TestedMask] =
4273 fcmpToClassTest(Pred, *F, LHS, RHS, true);
4274 // First see if we can fold fabs/fneg into the test.
4275 if (TestedValue == V)
4276 KnownFromAssume &= TestedMask;
4277 else {
4278 // Try again without the lookthrough if we found a different source
4279 // value.
4280 auto [TestedValue, TestedMask] =
4281 fcmpToClassTest(Pred, *F, LHS, RHS, false);
4282 if (TestedValue == V)
4283 KnownFromAssume &= TestedMask;
4285 } else if (match(I->getArgOperand(0),
4286 m_Intrinsic<Intrinsic::is_fpclass>(
4287 m_Value(LHS), m_ConstantInt(ClassVal)))) {
4288 KnownFromAssume &= static_cast<FPClassTest>(ClassVal);
4292 return KnownFromAssume;
4295 void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
4296 FPClassTest InterestedClasses, KnownFPClass &Known,
4297 unsigned Depth, const SimplifyQuery &Q);
4299 static void computeKnownFPClass(const Value *V, KnownFPClass &Known,
4300 FPClassTest InterestedClasses, unsigned Depth,
4301 const SimplifyQuery &Q) {
4302 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
4303 APInt DemandedElts =
4304 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
4305 computeKnownFPClass(V, DemandedElts, InterestedClasses, Known, Depth, Q);
4308 static void computeKnownFPClassForFPTrunc(const Operator *Op,
4309 const APInt &DemandedElts,
4310 FPClassTest InterestedClasses,
4311 KnownFPClass &Known, unsigned Depth,
4312 const SimplifyQuery &Q) {
4313 if ((InterestedClasses &
4314 (KnownFPClass::OrderedLessThanZeroMask | fcNan)) == fcNone)
4315 return;
4317 KnownFPClass KnownSrc;
4318 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
4319 KnownSrc, Depth + 1, Q);
4321 // Sign should be preserved
4322 // TODO: Handle cannot be ordered greater than zero
4323 if (KnownSrc.cannotBeOrderedLessThanZero())
4324 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
4326 Known.propagateNaN(KnownSrc, true);
4328 // Infinity needs a range check.
4331 // TODO: Merge implementation of cannotBeOrderedLessThanZero into here.
4332 void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
4333 FPClassTest InterestedClasses, KnownFPClass &Known,
4334 unsigned Depth, const SimplifyQuery &Q) {
4335 assert(Known.isUnknown() && "should not be called with known information");
4337 if (!DemandedElts) {
4338 // No demanded elts, better to assume we don't know anything.
4339 Known.resetAll();
4340 return;
4343 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
4345 if (auto *CFP = dyn_cast_or_null<ConstantFP>(V)) {
4346 Known.KnownFPClasses = CFP->getValueAPF().classify();
4347 Known.SignBit = CFP->isNegative();
4348 return;
4351 // Try to handle fixed width vector constants
4352 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
4353 const Constant *CV = dyn_cast<Constant>(V);
4354 if (VFVTy && CV) {
4355 Known.KnownFPClasses = fcNone;
4357 // For vectors, accumulate the union of the known FP classes of every element.
4358 unsigned NumElts = VFVTy->getNumElements();
4359 for (unsigned i = 0; i != NumElts; ++i) {
4360 Constant *Elt = CV->getAggregateElement(i);
4361 if (!Elt) {
4362 Known = KnownFPClass();
4363 return;
4365 if (isa<UndefValue>(Elt))
4366 continue;
4367 auto *CElt = dyn_cast<ConstantFP>(Elt);
4368 if (!CElt) {
4369 Known = KnownFPClass();
4370 return;
4373 KnownFPClass KnownElt{CElt->getValueAPF().classify(), CElt->isNegative()};
4374 Known |= KnownElt;
4377 return;
4380 FPClassTest KnownNotFromFlags = fcNone;
4381 if (const auto *CB = dyn_cast<CallBase>(V))
4382 KnownNotFromFlags |= CB->getRetNoFPClass();
4383 else if (const auto *Arg = dyn_cast<Argument>(V))
4384 KnownNotFromFlags |= Arg->getNoFPClass();
4386 const Operator *Op = dyn_cast<Operator>(V);
4387 if (const FPMathOperator *FPOp = dyn_cast_or_null<FPMathOperator>(Op)) {
4388 if (FPOp->hasNoNaNs())
4389 KnownNotFromFlags |= fcNan;
4390 if (FPOp->hasNoInfs())
4391 KnownNotFromFlags |= fcInf;
4394 if (Q.AC) {
4395 FPClassTest AssumedClasses = computeKnownFPClassFromAssumes(V, Q);
4396 KnownNotFromFlags |= ~AssumedClasses;
4399 // We no longer need to find out about these bits from inputs if we can
4400 // assume this from flags/attributes.
4401 InterestedClasses &= ~KnownNotFromFlags;
4403 auto ClearClassesFromFlags = make_scope_exit([=, &Known] {
4404 Known.knownNot(KnownNotFromFlags);
4407 if (!Op)
4408 return;
4410 // All recursive calls that increase depth must come after this.
4411 if (Depth == MaxAnalysisRecursionDepth)
4412 return;
4414 const unsigned Opc = Op->getOpcode();
4415 switch (Opc) {
4416 case Instruction::FNeg: {
4417 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
4418 Known, Depth + 1, Q);
4419 Known.fneg();
4420 break;
4422 case Instruction::Select: {
4423 Value *Cond = Op->getOperand(0);
4424 Value *LHS = Op->getOperand(1);
4425 Value *RHS = Op->getOperand(2);
4427 FPClassTest FilterLHS = fcAllFlags;
4428 FPClassTest FilterRHS = fcAllFlags;
4430 Value *TestedValue = nullptr;
4431 FPClassTest TestedMask = fcNone;
4432 uint64_t ClassVal = 0;
4433 const Function *F = cast<Instruction>(Op)->getFunction();
4434 CmpInst::Predicate Pred;
4435 Value *CmpLHS, *CmpRHS;
4436 if (F && match(Cond, m_FCmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS)))) {
4437 // If the select filters out a value based on the class, it no longer
4438 // participates in the class of the result
4440 // TODO: In some degenerate cases we can infer something if we try again
4441 // without looking through sign operations.
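// e.g. (illustrative IR): in select (fcmp uno double %x, 0.0), double 0.0,
// double %x, the false arm only ever observes non-NaN values of %x.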
4442 bool LookThroughFAbsFNeg = CmpLHS != LHS && CmpLHS != RHS;
4443 std::tie(TestedValue, TestedMask) =
4444 fcmpToClassTest(Pred, *F, CmpLHS, CmpRHS, LookThroughFAbsFNeg);
4445 } else if (match(Cond,
4446 m_Intrinsic<Intrinsic::is_fpclass>(
4447 m_Value(TestedValue), m_ConstantInt(ClassVal)))) {
4448 TestedMask = static_cast<FPClassTest>(ClassVal);
4451 if (TestedValue == LHS) {
4452 // match !isnan(x) ? x : y
4453 FilterLHS = TestedMask;
4454 } else if (TestedValue == RHS) {
4455 // match !isnan(x) ? y : x
4456 FilterRHS = ~TestedMask;
4459 KnownFPClass Known2;
4460 computeKnownFPClass(LHS, DemandedElts, InterestedClasses & FilterLHS, Known,
4461 Depth + 1, Q);
4462 Known.KnownFPClasses &= FilterLHS;
4464 computeKnownFPClass(RHS, DemandedElts, InterestedClasses & FilterRHS,
4465 Known2, Depth + 1, Q);
4466 Known2.KnownFPClasses &= FilterRHS;
4468 Known |= Known2;
4469 break;
4471 case Instruction::Call: {
4472 const CallInst *II = cast<CallInst>(Op);
4473 const Intrinsic::ID IID = II->getIntrinsicID();
4474 switch (IID) {
4475 case Intrinsic::fabs: {
4476 if ((InterestedClasses & (fcNan | fcPositive)) != fcNone) {
4477 // If we only care about the sign bit we don't need to inspect the
4478 // operand.
4479 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4480 InterestedClasses, Known, Depth + 1, Q);
4483 Known.fabs();
4484 break;
4486 case Intrinsic::copysign: {
4487 KnownFPClass KnownSign;
4489 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
4490 Known, Depth + 1, Q);
4491 computeKnownFPClass(II->getArgOperand(1), DemandedElts, InterestedClasses,
4492 KnownSign, Depth + 1, Q);
4493 Known.copysign(KnownSign);
4494 break;
4496 case Intrinsic::fma:
4497 case Intrinsic::fmuladd: {
4498 if ((InterestedClasses & fcNegative) == fcNone)
4499 break;
4501 if (II->getArgOperand(0) != II->getArgOperand(1))
4502 break;
4504 // The multiply cannot be -0 and therefore the add can't be -0
4505 Known.knownNot(fcNegZero);
4507 // x * x + y is non-negative if y is non-negative.
4508 KnownFPClass KnownAddend;
4509 computeKnownFPClass(II->getArgOperand(2), DemandedElts, InterestedClasses,
4510 KnownAddend, Depth + 1, Q);
4512 // TODO: Known sign bit with no nans
4513 if (KnownAddend.cannotBeOrderedLessThanZero())
4514 Known.knownNot(fcNegative);
4515 break;
4517 case Intrinsic::sqrt:
4518 case Intrinsic::experimental_constrained_sqrt: {
4519 KnownFPClass KnownSrc;
4520 FPClassTest InterestedSrcs = InterestedClasses;
4521 if (InterestedClasses & fcNan)
4522 InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
4524 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
4525 KnownSrc, Depth + 1, Q);
4527 if (KnownSrc.isKnownNeverPosInfinity())
4528 Known.knownNot(fcPosInf);
4529 if (KnownSrc.isKnownNever(fcSNan))
4530 Known.knownNot(fcSNan);
4532 // Any negative value besides -0 returns a nan.
4533 if (KnownSrc.isKnownNeverNaN() && KnownSrc.cannotBeOrderedLessThanZero())
4534 Known.knownNot(fcNan);
4536 // The only negative value that can be returned is -0 for -0 inputs.
4537 Known.knownNot(fcNegInf | fcNegSubnormal | fcNegNormal);
4539 // If the input denormal mode could be PreserveSign, a negative
4540 // subnormal input could produce a negative zero output.
4541 const Function *F = II->getFunction();
4542 if (Q.IIQ.hasNoSignedZeros(II) ||
4543 (F && KnownSrc.isKnownNeverLogicalNegZero(*F, II->getType()))) {
4544 Known.knownNot(fcNegZero);
4545 if (KnownSrc.isKnownNeverNaN())
4546 Known.SignBit = false;
4549 break;
4551 case Intrinsic::sin:
4552 case Intrinsic::cos: {
4553 // Return NaN on infinite inputs.
4554 KnownFPClass KnownSrc;
4555 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
4556 KnownSrc, Depth + 1, Q);
4557 Known.knownNot(fcInf);
4558 if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity())
4559 Known.knownNot(fcNan);
4560 break;
4562 case Intrinsic::maxnum:
4563 case Intrinsic::minnum:
4564 case Intrinsic::minimum:
4565 case Intrinsic::maximum: {
4566 KnownFPClass KnownLHS, KnownRHS;
4567 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
4568 KnownLHS, Depth + 1, Q);
4569 computeKnownFPClass(II->getArgOperand(1), DemandedElts, InterestedClasses,
4570 KnownRHS, Depth + 1, Q);
4572 bool NeverNaN = KnownLHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNaN();
4573 Known = KnownLHS | KnownRHS;
4575 // If either operand is not NaN, the result is not NaN.
4576 if (NeverNaN && (IID == Intrinsic::minnum || IID == Intrinsic::maxnum))
4577 Known.knownNot(fcNan);
4579 if (IID == Intrinsic::maxnum) {
4580 // If at least one operand is known to be positive, the result must be
4581 // positive.
4582 if ((KnownLHS.cannotBeOrderedLessThanZero() &&
4583 KnownLHS.isKnownNeverNaN()) ||
4584 (KnownRHS.cannotBeOrderedLessThanZero() &&
4585 KnownRHS.isKnownNeverNaN()))
4586 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
4587 } else if (IID == Intrinsic::maximum) {
4588 // If at least one operand is known to be positive, the result must be
4589 // positive.
4590 if (KnownLHS.cannotBeOrderedLessThanZero() ||
4591 KnownRHS.cannotBeOrderedLessThanZero())
4592 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
4593 } else if (IID == Intrinsic::minnum) {
4594 // If at least one operand is known to be negative, the result must be
4595 // negative.
4596 if ((KnownLHS.cannotBeOrderedGreaterThanZero() &&
4597 KnownLHS.isKnownNeverNaN()) ||
4598 (KnownRHS.cannotBeOrderedGreaterThanZero() &&
4599 KnownRHS.isKnownNeverNaN()))
4600 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
4601 } else {
4602 // If at least one operand is known to be negative, the result must be
4603 // negative.
4604 if (KnownLHS.cannotBeOrderedGreaterThanZero() ||
4605 KnownRHS.cannotBeOrderedGreaterThanZero())
4606 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
4609 // Fixup zero handling if denormals could be returned as a zero.
4611 // As there's no spec for denormal flushing, be conservative in the
4612 // treatment of denormals that could be flushed to zero. For older
4613 // subtargets on AMDGPU, the min/max instructions do not flush the
4614 // output and instead return the original value.
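// For example, with denormal-fp-math=preserve-sign a negative subnormal
// result may come back flushed to -0.0, so both signs of zero have to be
// admitted below even if only one was possible so far.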
4616 // TODO: This could be refined based on the sign
4617 if ((Known.KnownFPClasses & fcZero) != fcNone &&
4618 !Known.isKnownNeverSubnormal()) {
4619 const Function *Parent = II->getFunction();
4620 if (!Parent)
4621 break;
4623 DenormalMode Mode = Parent->getDenormalMode(
4624 II->getType()->getScalarType()->getFltSemantics());
4625 if (Mode != DenormalMode::getIEEE())
4626 Known.KnownFPClasses |= fcZero;
4629 break;
4631 case Intrinsic::canonicalize: {
4632 KnownFPClass KnownSrc;
4633 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
4634 KnownSrc, Depth + 1, Q);
4636 // This is essentially a stronger form of
4637 // propagateCanonicalizingSrc. Other "canonicalizing" operations don't
4638 // actually have an IR canonicalization guarantee.
4640 // Canonicalize may flush denormals to zero, so we have to consider the
4641 // denormal mode to preserve known-not-0 knowledge.
4642 Known.KnownFPClasses = KnownSrc.KnownFPClasses | fcZero | fcQNan;
4644 // Stronger version of propagateNaN
4645 // Canonicalize is guaranteed to quiet signaling nans.
4646 if (KnownSrc.isKnownNeverNaN())
4647 Known.knownNot(fcNan);
4648 else
4649 Known.knownNot(fcSNan);
4651 const Function *F = II->getFunction();
4652 if (!F)
4653 break;
4655 // If the parent function flushes denormals, the canonical output cannot
4656 // be a denormal.
4657 const fltSemantics &FPType =
4658 II->getType()->getScalarType()->getFltSemantics();
4659 DenormalMode DenormMode = F->getDenormalMode(FPType);
4660 if (DenormMode == DenormalMode::getIEEE()) {
4661 if (KnownSrc.isKnownNever(fcPosZero))
4662 Known.knownNot(fcPosZero);
4663 if (KnownSrc.isKnownNever(fcNegZero))
4664 Known.knownNot(fcNegZero);
4665 break;
4668 if (DenormMode.inputsAreZero() || DenormMode.outputsAreZero())
4669 Known.knownNot(fcSubnormal);
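// For example, with denormal-fp-math=preserve-sign a subnormal value is
// canonicalized to a sign-preserving zero, so the result cannot be
// subnormal whenever inputs or outputs are flushed.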
4671 if (DenormMode.Input == DenormalMode::PositiveZero ||
4672 (DenormMode.Output == DenormalMode::PositiveZero &&
4673 DenormMode.Input == DenormalMode::IEEE))
4674 Known.knownNot(fcNegZero);
4676 break;
4678 case Intrinsic::trunc:
4679 case Intrinsic::floor:
4680 case Intrinsic::ceil:
4681 case Intrinsic::rint:
4682 case Intrinsic::nearbyint:
4683 case Intrinsic::round:
4684 case Intrinsic::roundeven: {
4685 KnownFPClass KnownSrc;
4686 FPClassTest InterestedSrcs = InterestedClasses;
4687 if (InterestedSrcs & fcPosFinite)
4688 InterestedSrcs |= fcPosFinite;
4689 if (InterestedSrcs & fcNegFinite)
4690 InterestedSrcs |= fcNegFinite;
4691 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
4692 KnownSrc, Depth + 1, Q);
4694 // Integer results cannot be subnormal.
4695 Known.knownNot(fcSubnormal);
4697 Known.propagateNaN(KnownSrc, true);
4699 // Pass through infinities, except PPC_FP128 is a special case for
4700 // intrinsics other than trunc.
4701 if (IID == Intrinsic::trunc || !V->getType()->isMultiUnitFPType()) {
4702 if (KnownSrc.isKnownNeverPosInfinity())
4703 Known.knownNot(fcPosInf);
4704 if (KnownSrc.isKnownNeverNegInfinity())
4705 Known.knownNot(fcNegInf);
4708 // Negative values that round up to 0 produce -0.
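// For example, ceil(-0.5) == -0.0 and trunc(-0.3) == -0.0 under IEEE-754.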
4709 if (KnownSrc.isKnownNever(fcPosFinite))
4710 Known.knownNot(fcPosFinite);
4711 if (KnownSrc.isKnownNever(fcNegFinite))
4712 Known.knownNot(fcNegFinite);
4714 break;
4716 case Intrinsic::exp:
4717 case Intrinsic::exp2:
4718 case Intrinsic::exp10: {
4719 Known.knownNot(fcNegative);
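// exp(x) > 0 for any finite x, exp(-inf) == +0.0, and exp(+inf) == +inf;
// a NaN input yields a NaN, so no class in fcNegative is ever produced.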
4720 if ((InterestedClasses & fcNan) == fcNone)
4721 break;
4723 KnownFPClass KnownSrc;
4724 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
4725 KnownSrc, Depth + 1, Q);
4726 if (KnownSrc.isKnownNeverNaN()) {
4727 Known.knownNot(fcNan);
4728 Known.SignBit = false;
4731 break;
4733 case Intrinsic::fptrunc_round: {
4734 computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known,
4735 Depth, Q);
4736 break;
4738 case Intrinsic::log:
4739 case Intrinsic::log10:
4740 case Intrinsic::log2:
4741 case Intrinsic::experimental_constrained_log:
4742 case Intrinsic::experimental_constrained_log10:
4743 case Intrinsic::experimental_constrained_log2: {
4744 // log(+inf) -> +inf
4745 // log([+-]0.0) -> -inf
4746 // log(-inf) -> nan
4747 // log(-x) -> nan
4748 if ((InterestedClasses & (fcNan | fcInf)) == fcNone)
4749 break;
4751 FPClassTest InterestedSrcs = InterestedClasses;
4752 if ((InterestedClasses & fcNegInf) != fcNone)
4753 InterestedSrcs |= fcZero | fcSubnormal;
4754 if ((InterestedClasses & fcNan) != fcNone)
4755 InterestedSrcs |= fcNan | (fcNegative & ~fcNan);
4757 KnownFPClass KnownSrc;
4758 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
4759 KnownSrc, Depth + 1, Q);
4761 if (KnownSrc.isKnownNeverPosInfinity())
4762 Known.knownNot(fcPosInf);
4764 if (KnownSrc.isKnownNeverNaN() && KnownSrc.cannotBeOrderedLessThanZero())
4765 Known.knownNot(fcNan);
4767 const Function *F = II->getFunction();
4768 if (F && KnownSrc.isKnownNeverLogicalZero(*F, II->getType()))
4769 Known.knownNot(fcNegInf);
4771 break;
4773 case Intrinsic::powi: {
4774 if ((InterestedClasses & fcNegative) == fcNone)
4775 break;
4777 const Value *Exp = II->getArgOperand(1);
4778 Type *ExpTy = Exp->getType();
4779 unsigned BitWidth = ExpTy->getScalarType()->getIntegerBitWidth();
4780 KnownBits ExponentKnownBits(BitWidth);
4781 computeKnownBits(Exp, isa<VectorType>(ExpTy) ? DemandedElts : APInt(1, 1),
4782 ExponentKnownBits, Depth + 1, Q);
4784 if (ExponentKnownBits.Zero[0]) { // Is even
4785 Known.knownNot(fcNegative);
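// For example, powi(x, 2) == x * x, which is either non-negative or NaN:
// powi(-2.0, 2) == 4.0 and powi(-0.0, 2) == +0.0.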
4786 break;
4789 // Given that exp is an integer, here are the
4790 // ways that pow can return a negative value:
4792 // pow(-x, exp) --> negative if exp is odd and x is negative.
4793 // pow(-0, exp) --> -inf if exp is negative odd.
4794 // pow(-0, exp) --> -0 if exp is positive odd.
4795 // pow(-inf, exp) --> -0 if exp is negative odd.
4796 // pow(-inf, exp) --> -inf if exp is positive odd.
4797 KnownFPClass KnownSrc;
4798 computeKnownFPClass(II->getArgOperand(0), DemandedElts, fcNegative,
4799 KnownSrc, Depth + 1, Q);
4800 if (KnownSrc.isKnownNever(fcNegative))
4801 Known.knownNot(fcNegative);
4802 break;
4804 case Intrinsic::ldexp: {
4805 KnownFPClass KnownSrc;
4806 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
4807 KnownSrc, Depth + 1, Q);
4808 Known.propagateNaN(KnownSrc, /*PropagateSign=*/true);
4810 // Sign is preserved, but underflows may produce zeroes.
4811 if (KnownSrc.isKnownNever(fcNegative))
4812 Known.knownNot(fcNegative);
4813 else if (KnownSrc.cannotBeOrderedLessThanZero())
4814 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
4816 if (KnownSrc.isKnownNever(fcPositive))
4817 Known.knownNot(fcPositive);
4818 else if (KnownSrc.cannotBeOrderedGreaterThanZero())
4819 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
4821 // Can refine inf/zero handling based on the exponent operand.
4822 const FPClassTest ExpInfoMask = fcZero | fcSubnormal | fcInf;
4823 if ((InterestedClasses & ExpInfoMask) == fcNone)
4824 break;
4825 if ((KnownSrc.KnownFPClasses & ExpInfoMask) == fcNone)
4826 break;
4828 const fltSemantics &Flt =
4829 II->getType()->getScalarType()->getFltSemantics();
4830 unsigned Precision = APFloat::semanticsPrecision(Flt);
4831 const Value *ExpArg = II->getArgOperand(1);
4832 ConstantRange ExpRange = computeConstantRange(
4833 ExpArg, true, Q.IIQ.UseInstrInfo, Q.AC, Q.CxtI, Q.DT, Depth + 1);
4835 const int MantissaBits = Precision - 1;
4836 if (ExpRange.getSignedMin().sge(static_cast<int64_t>(MantissaBits)))
4837 Known.knownNot(fcSubnormal);
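// For example, for float (precision 24, so MantissaBits == 23): scaling
// the smallest subnormal 0x1p-149 by 2^23 gives the smallest normal
// 0x1p-126, so an exponent known to be >= 23 rules out subnormal results.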
4839 const Function *F = II->getFunction();
4840 const APInt *ConstVal = ExpRange.getSingleElement();
4841 if (ConstVal && ConstVal->isZero()) {
4842 // ldexp(x, 0) -> x, so propagate everything.
4843 Known.propagateCanonicalizingSrc(KnownSrc, *F, II->getType());
4844 } else if (ExpRange.isAllNegative()) {
4845 // If we know the power is <= 0, can't introduce inf
4846 if (KnownSrc.isKnownNeverPosInfinity())
4847 Known.knownNot(fcPosInf);
4848 if (KnownSrc.isKnownNeverNegInfinity())
4849 Known.knownNot(fcNegInf);
4850 } else if (ExpRange.isAllNonNegative()) {
4851 // If we know the power is >= 0, can't introduce subnormal or zero
4852 if (KnownSrc.isKnownNeverPosSubnormal())
4853 Known.knownNot(fcPosSubnormal);
4854 if (KnownSrc.isKnownNeverNegSubnormal())
4855 Known.knownNot(fcNegSubnormal);
4856 if (F && KnownSrc.isKnownNeverLogicalPosZero(*F, II->getType()))
4857 Known.knownNot(fcPosZero);
4858 if (F && KnownSrc.isKnownNeverLogicalNegZero(*F, II->getType()))
4859 Known.knownNot(fcNegZero);
4862 break;
4864 case Intrinsic::arithmetic_fence: {
4865 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
4866 Known, Depth + 1, Q);
4867 break;
4869 case Intrinsic::experimental_constrained_sitofp:
4870 case Intrinsic::experimental_constrained_uitofp:
4871 // Cannot produce nan
4872 Known.knownNot(fcNan);
4874 // sitofp and uitofp turn into +0.0 for zero.
4875 Known.knownNot(fcNegZero);
4877 // Integers cannot be subnormal
4878 Known.knownNot(fcSubnormal);
4880 if (IID == Intrinsic::experimental_constrained_uitofp)
4881 Known.signBitMustBeZero();
4883 // TODO: Copy inf handling from instructions
4884 break;
4885 default:
4886 break;
4889 break;
4891 case Instruction::FAdd:
4892 case Instruction::FSub: {
4893 KnownFPClass KnownLHS, KnownRHS;
4894 bool WantNegative =
4895 Op->getOpcode() == Instruction::FAdd &&
4896 (InterestedClasses & KnownFPClass::OrderedLessThanZeroMask) != fcNone;
4897 bool WantNaN = (InterestedClasses & fcNan) != fcNone;
4898 bool WantNegZero = (InterestedClasses & fcNegZero) != fcNone;
4900 if (!WantNaN && !WantNegative && !WantNegZero)
4901 break;
4903 FPClassTest InterestedSrcs = InterestedClasses;
4904 if (WantNegative)
4905 InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
4906 if (InterestedClasses & fcNan)
4907 InterestedSrcs |= fcInf;
4908 computeKnownFPClass(Op->getOperand(1), DemandedElts, InterestedSrcs,
4909 KnownRHS, Depth + 1, Q);
4911 if ((WantNaN && KnownRHS.isKnownNeverNaN()) ||
4912 (WantNegative && KnownRHS.cannotBeOrderedLessThanZero()) ||
4913 WantNegZero || Opc == Instruction::FSub) {
4915 // RHS is canonically cheaper to compute. Skip inspecting the LHS if
4916 // there's no point.
4917 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedSrcs,
4918 KnownLHS, Depth + 1, Q);
4919 // Adding positive and negative infinity produces NaN.
4920 // TODO: Check sign of infinities.
4921 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
4922 (KnownLHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverInfinity()))
4923 Known.knownNot(fcNan);
4925 // FIXME: Context function should always be passed in separately
4926 const Function *F = cast<Instruction>(Op)->getFunction();
4928 if (Op->getOpcode() == Instruction::FAdd) {
4929 if (KnownLHS.cannotBeOrderedLessThanZero() &&
4930 KnownRHS.cannotBeOrderedLessThanZero())
4931 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
4932 if (!F)
4933 break;
4935 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
4936 if ((KnownLHS.isKnownNeverLogicalNegZero(*F, Op->getType()) ||
4937 KnownRHS.isKnownNeverLogicalNegZero(*F, Op->getType())) &&
4938 // Make sure output negative denormal can't flush to -0
4939 outputDenormalIsIEEEOrPosZero(*F, Op->getType()))
4940 Known.knownNot(fcNegZero);
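// Under the default rounding mode an exactly-zero sum of operands with
// opposite signs is +0.0 (e.g. (-0.0) + (+0.0) == +0.0), so a -0.0 result
// requires both addends to be logically -0.0.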
4941 } else {
4942 if (!F)
4943 break;
4945 // Only fsub -0, +0 can return -0
4946 if ((KnownLHS.isKnownNeverLogicalNegZero(*F, Op->getType()) ||
4947 KnownRHS.isKnownNeverLogicalPosZero(*F, Op->getType())) &&
4948 // Make sure output negative denormal can't flush to -0
4949 outputDenormalIsIEEEOrPosZero(*F, Op->getType()))
4950 Known.knownNot(fcNegZero);
4954 break;
4956 case Instruction::FMul: {
4957 // X * X is always non-negative or a NaN.
4958 if (Op->getOperand(0) == Op->getOperand(1))
4959 Known.knownNot(fcNegative);
4961 if ((InterestedClasses & fcNan) != fcNan)
4962 break;
4964 // fcSubnormal is only needed in case of DAZ.
4965 const FPClassTest NeedForNan = fcNan | fcInf | fcZero | fcSubnormal;
4967 KnownFPClass KnownLHS, KnownRHS;
4968 computeKnownFPClass(Op->getOperand(1), DemandedElts, NeedForNan, KnownRHS,
4969 Depth + 1, Q);
4970 if (!KnownRHS.isKnownNeverNaN())
4971 break;
4973 computeKnownFPClass(Op->getOperand(0), DemandedElts, NeedForNan, KnownLHS,
4974 Depth + 1, Q);
4975 if (!KnownLHS.isKnownNeverNaN())
4976 break;
4978 // Only 0 * +/-inf produces NaN from non-NaN operands.
4979 if (KnownLHS.isKnownNeverInfinity() && KnownRHS.isKnownNeverInfinity()) {
4980 Known.knownNot(fcNan);
4981 break;
4984 const Function *F = cast<Instruction>(Op)->getFunction();
4985 if (!F)
4986 break;
4988 if ((KnownRHS.isKnownNeverInfinity() ||
4989 KnownLHS.isKnownNeverLogicalZero(*F, Op->getType())) &&
4990 (KnownLHS.isKnownNeverInfinity() ||
4991 KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())))
4992 Known.knownNot(fcNan);
4994 break;
4996 case Instruction::FDiv:
4997 case Instruction::FRem: {
4998 if (Op->getOperand(0) == Op->getOperand(1)) {
4999 // TODO: Could filter out snan if we inspect the operand
5000 if (Op->getOpcode() == Instruction::FDiv) {
5001 // X / X is always exactly 1.0 or a NaN.
5002 Known.KnownFPClasses = fcNan | fcPosNormal;
5003 } else {
5004 // X % X is always exactly [+-]0.0 or a NaN.
5005 Known.KnownFPClasses = fcNan | fcZero;
5008 break;
5011 const bool WantNan = (InterestedClasses & fcNan) != fcNone;
5012 const bool WantNegative = (InterestedClasses & fcNegative) != fcNone;
5013 const bool WantPositive =
5014 Opc == Instruction::FRem && (InterestedClasses & fcPositive) != fcNone;
5015 if (!WantNan && !WantNegative && !WantPositive)
5016 break;
5018 KnownFPClass KnownLHS, KnownRHS;
5020 computeKnownFPClass(Op->getOperand(1), DemandedElts,
5021 fcNan | fcInf | fcZero | fcNegative, KnownRHS,
5022 Depth + 1, Q);
5024 bool KnowSomethingUseful =
5025 KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcNegative);
5027 if (KnowSomethingUseful || WantPositive) {
5028 const FPClassTest InterestedLHS =
5029 WantPositive ? fcAllFlags
5030 : fcNan | fcInf | fcZero | fcSubnormal | fcNegative;
5032 computeKnownFPClass(Op->getOperand(0), DemandedElts,
5033 InterestedClasses & InterestedLHS, KnownLHS,
5034 Depth + 1, Q);
5037 const Function *F = cast<Instruction>(Op)->getFunction();
5039 if (Op->getOpcode() == Instruction::FDiv) {
5040 // Only 0/0, Inf/Inf produce NaN.
5041 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
5042 (KnownLHS.isKnownNeverInfinity() ||
5043 KnownRHS.isKnownNeverInfinity()) &&
5044 ((F && KnownLHS.isKnownNeverLogicalZero(*F, Op->getType())) ||
5045 (F && KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())))) {
5046 Known.knownNot(fcNan);
5049 // X / -0.0 is -Inf (or NaN).
5050 // +X / +X is +X
5051 if (KnownLHS.isKnownNever(fcNegative) && KnownRHS.isKnownNever(fcNegative))
5052 Known.knownNot(fcNegative);
5053 } else {
5054 // Inf REM x and x REM 0 produce NaN.
5055 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
5056 KnownLHS.isKnownNeverInfinity() && F &&
5057 KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())) {
5058 Known.knownNot(fcNan);
5061 // The sign for frem is the same as the first operand.
5062 if (KnownLHS.cannotBeOrderedLessThanZero())
5063 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
5064 if (KnownLHS.cannotBeOrderedGreaterThanZero())
5065 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
5067 // See if we can be more aggressive about the sign of 0.
5068 if (KnownLHS.isKnownNever(fcNegative))
5069 Known.knownNot(fcNegative);
5070 if (KnownLHS.isKnownNever(fcPositive))
5071 Known.knownNot(fcPositive);
5074 break;
5076 case Instruction::FPExt: {
5077 // Infinity, nan and zero propagate from source.
5078 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
5079 Known, Depth + 1, Q);
5081 const fltSemantics &DstTy =
5082 Op->getType()->getScalarType()->getFltSemantics();
5083 const fltSemantics &SrcTy =
5084 Op->getOperand(0)->getType()->getScalarType()->getFltSemantics();
5086 // All subnormal inputs should be in the normal range in the result type.
5087 if (APFloat::isRepresentableAsNormalIn(SrcTy, DstTy))
5088 Known.knownNot(fcSubnormal);
5090 // Sign bit of a nan isn't guaranteed.
5091 if (!Known.isKnownNeverNaN())
5092 Known.SignBit = std::nullopt;
5093 break;
5095 case Instruction::FPTrunc: {
5096 computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known,
5097 Depth, Q);
5098 break;
5100 case Instruction::SIToFP:
5101 case Instruction::UIToFP: {
5102 // Cannot produce nan
5103 Known.knownNot(fcNan);
5105 // Integers cannot be subnormal
5106 Known.knownNot(fcSubnormal);
5108 // sitofp and uitofp turn into +0.0 for zero.
5109 Known.knownNot(fcNegZero);
5110 if (Op->getOpcode() == Instruction::UIToFP)
5111 Known.signBitMustBeZero();
5113 if (InterestedClasses & fcInf) {
5114 // Get width of largest magnitude integer (remove a bit if signed).
5115 // This still works for a signed minimum value because the largest FP
5116 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
5117 int IntSize = Op->getOperand(0)->getType()->getScalarSizeInBits();
5118 if (Op->getOpcode() == Instruction::SIToFP)
5119 --IntSize;
5121 // If the exponent of the largest finite FP value can hold the largest
5122 // integer, the result of the cast must be finite.
5123 Type *FPTy = Op->getType()->getScalarType();
5124 if (ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize)
5125 Known.knownNot(fcInf);
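// For example, sitofp i32 -> float can never be infinite (127 >= 31),
// while uitofp i128 -> float can round up to +inf because 2^128 - 1
// exceeds FLT_MAX.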
5128 break;
5130 case Instruction::ExtractElement: {
5131 // Look through extractelement. If the index is non-constant or
5132 // out-of-range, demand all elements; otherwise just the extracted element.
5133 const Value *Vec = Op->getOperand(0);
5134 const Value *Idx = Op->getOperand(1);
5135 auto *CIdx = dyn_cast<ConstantInt>(Idx);
5137 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
5138 unsigned NumElts = VecTy->getNumElements();
5139 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
5140 if (CIdx && CIdx->getValue().ult(NumElts))
5141 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
5142 return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
5143 Depth + 1, Q);
5146 break;
5148 case Instruction::InsertElement: {
5149 if (isa<ScalableVectorType>(Op->getType()))
5150 return;
5152 const Value *Vec = Op->getOperand(0);
5153 const Value *Elt = Op->getOperand(1);
5154 auto *CIdx = dyn_cast<ConstantInt>(Op->getOperand(2));
5155 // Early out if the index is non-constant or out-of-range.
5156 unsigned NumElts = DemandedElts.getBitWidth();
5157 if (!CIdx || CIdx->getValue().uge(NumElts))
5158 return;
5160 unsigned EltIdx = CIdx->getZExtValue();
5161 // Do we demand the inserted element?
5162 if (DemandedElts[EltIdx]) {
5163 computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1, Q);
5164 // If we don't know any bits, early out.
5165 if (Known.isUnknown())
5166 break;
5167 } else {
5168 Known.KnownFPClasses = fcNone;
5171 // We don't need the base vector element that has been inserted.
5172 APInt DemandedVecElts = DemandedElts;
5173 DemandedVecElts.clearBit(EltIdx);
5174 if (!!DemandedVecElts) {
5175 KnownFPClass Known2;
5176 computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
5177 Depth + 1, Q);
5178 Known |= Known2;
5181 break;
5183 case Instruction::ShuffleVector: {
5184 // For undef elements, we don't know anything about the common state of
5185 // the shuffle result.
5186 APInt DemandedLHS, DemandedRHS;
5187 auto *Shuf = dyn_cast<ShuffleVectorInst>(Op);
5188 if (!Shuf || !getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
5189 return;
5191 if (!!DemandedLHS) {
5192 const Value *LHS = Shuf->getOperand(0);
5193 computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known,
5194 Depth + 1, Q);
5196 // If we don't know any bits, early out.
5197 if (Known.isUnknown())
5198 break;
5199 } else {
5200 Known.KnownFPClasses = fcNone;
5203 if (!!DemandedRHS) {
5204 KnownFPClass Known2;
5205 const Value *RHS = Shuf->getOperand(1);
5206 computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2,
5207 Depth + 1, Q);
5208 Known |= Known2;
5211 break;
5213 case Instruction::ExtractValue: {
5214 const ExtractValueInst *Extract = cast<ExtractValueInst>(Op);
5215 ArrayRef<unsigned> Indices = Extract->getIndices();
5216 const Value *Src = Extract->getAggregateOperand();
5217 if (isa<StructType>(Src->getType()) && Indices.size() == 1 &&
5218 Indices[0] == 0) {
5219 if (const auto *II = dyn_cast<IntrinsicInst>(Src)) {
5220 switch (II->getIntrinsicID()) {
5221 case Intrinsic::frexp: {
5222 Known.knownNot(fcSubnormal);
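// The fraction result of frexp has magnitude in [0.5, 1.0) for finite
// nonzero inputs, e.g. frexp(8.0) yields {0.5, 4}; zeroes, infinities and
// NaNs pass through unchanged, so the fraction is never subnormal.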
5224 KnownFPClass KnownSrc;
5225 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
5226 InterestedClasses, KnownSrc, Depth + 1, Q);
5228 const Function *F = cast<Instruction>(Op)->getFunction();
5230 if (KnownSrc.isKnownNever(fcNegative))
5231 Known.knownNot(fcNegative);
5232 else {
5233 if (F && KnownSrc.isKnownNeverLogicalNegZero(*F, Op->getType()))
5234 Known.knownNot(fcNegZero);
5235 if (KnownSrc.isKnownNever(fcNegInf))
5236 Known.knownNot(fcNegInf);
5239 if (KnownSrc.isKnownNever(fcPositive))
5240 Known.knownNot(fcPositive);
5241 else {
5242 if (F && KnownSrc.isKnownNeverLogicalPosZero(*F, Op->getType()))
5243 Known.knownNot(fcPosZero);
5244 if (KnownSrc.isKnownNever(fcPosInf))
5245 Known.knownNot(fcPosInf);
5248 Known.propagateNaN(KnownSrc);
5249 return;
5251 default:
5252 break;
5257 computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known, Depth + 1,
5259 break;
5261 case Instruction::PHI: {
5262 const PHINode *P = cast<PHINode>(Op);
5263 // Unreachable blocks may have zero-operand PHI nodes.
5264 if (P->getNumIncomingValues() == 0)
5265 break;
5267 // Otherwise take the unions of the known bit sets of the operands,
5268 // taking conservative care to avoid excessive recursion.
5269 const unsigned PhiRecursionLimit = MaxAnalysisRecursionDepth - 2;
5271 if (Depth < PhiRecursionLimit) {
5272 // Skip if every incoming value refers back to the PHI itself.
5273 if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
5274 break;
5276 bool First = true;
5278 for (Value *IncValue : P->incoming_values()) {
5279 // Skip direct self references.
5280 if (IncValue == P)
5281 continue;
5283 KnownFPClass KnownSrc;
5284 // Recurse, but cap the recursion to two levels, because we don't want
5285 // to waste time spinning around in loops. We need at least depth 2 to
5286 // detect known sign bits.
5287 computeKnownFPClass(IncValue, DemandedElts, InterestedClasses, KnownSrc,
5288 PhiRecursionLimit, Q);
5290 if (First) {
5291 Known = KnownSrc;
5292 First = false;
5293 } else {
5294 Known |= KnownSrc;
5297 if (Known.KnownFPClasses == fcAllFlags)
5298 break;
5302 break;
5304 default:
5305 break;
5309 KnownFPClass llvm::computeKnownFPClass(
5310 const Value *V, const APInt &DemandedElts, const DataLayout &DL,
5311 FPClassTest InterestedClasses, unsigned Depth, const TargetLibraryInfo *TLI,
5312 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
5313 bool UseInstrInfo) {
5314 KnownFPClass KnownClasses;
5315 ::computeKnownFPClass(
5316 V, DemandedElts, InterestedClasses, KnownClasses, Depth,
5317 SimplifyQuery(DL, TLI, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
5318 return KnownClasses;
5321 KnownFPClass llvm::computeKnownFPClass(
5322 const Value *V, const DataLayout &DL, FPClassTest InterestedClasses,
5323 unsigned Depth, const TargetLibraryInfo *TLI, AssumptionCache *AC,
5324 const Instruction *CxtI, const DominatorTree *DT, bool UseInstrInfo) {
5325 KnownFPClass Known;
5326 ::computeKnownFPClass(
5327 V, Known, InterestedClasses, Depth,
5328 SimplifyQuery(DL, TLI, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
5329 return Known;
5332 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
5334 // All byte-wide stores are splatable, even of arbitrary variables.
5335 if (V->getType()->isIntegerTy(8))
5336 return V;
5338 LLVMContext &Ctx = V->getContext();
5340 // Undef is a don't-care that merges with any byte value.
5341 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
5342 if (isa<UndefValue>(V))
5343 return UndefInt8;
5345 // Return Undef for zero-sized type.
5346 if (DL.getTypeStoreSize(V->getType()).isZero())
5347 return UndefInt8;
5349 Constant *C = dyn_cast<Constant>(V);
5350 if (!C) {
5351 // Conceptually, we could handle things like:
5352 // %a = zext i8 %X to i16
5353 // %b = shl i16 %a, 8
5354 // %c = or i16 %a, %b
5355 // but until there is an example that actually needs this, it doesn't seem
5356 // worth worrying about.
5357 return nullptr;
5360 // Handle 'null' ConstantArrayZero etc.
5361 if (C->isNullValue())
5362 return Constant::getNullValue(Type::getInt8Ty(Ctx));
5364 // Constant floating-point values can be handled as integer values if the
5365 // corresponding integer value is "byteable". An important case is 0.0.
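// For example, double 0.0 has the bit pattern 0x0000000000000000 and
// splats to the byte 0x00, while float 1.0 (0x3F800000) is not
// byte-splattable.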
5366 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
5367 Type *Ty = nullptr;
5368 if (CFP->getType()->isHalfTy())
5369 Ty = Type::getInt16Ty(Ctx);
5370 else if (CFP->getType()->isFloatTy())
5371 Ty = Type::getInt32Ty(Ctx);
5372 else if (CFP->getType()->isDoubleTy())
5373 Ty = Type::getInt64Ty(Ctx);
5374 // Don't handle long double formats, which have strange constraints.
5375 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
5376 : nullptr;
5379 // We can handle constant integers whose width is a multiple of 8 bits.
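// For example, i32 0xABABABAB yields the byte 0xAB, while i32 0x01020304
// has differing bytes and yields nullptr.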
5380 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
5381 if (CI->getBitWidth() % 8 == 0) {
5382 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
5383 if (!CI->getValue().isSplat(8))
5384 return nullptr;
5385 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
5389 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
5390 if (CE->getOpcode() == Instruction::IntToPtr) {
5391 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
5392 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
5393 if (Constant *Op = ConstantFoldIntegerCast(
5394 CE->getOperand(0), Type::getIntNTy(Ctx, BitWidth), false, DL))
5395 return isBytewiseValue(Op, DL);
5400 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
5401 if (LHS == RHS)
5402 return LHS;
5403 if (!LHS || !RHS)
5404 return nullptr;
5405 if (LHS == UndefInt8)
5406 return RHS;
5407 if (RHS == UndefInt8)
5408 return LHS;
5409 return nullptr;
5412 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
5413 Value *Val = UndefInt8;
5414 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
5415 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
5416 return nullptr;
5417 return Val;
5420 if (isa<ConstantAggregate>(C)) {
5421 Value *Val = UndefInt8;
5422 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
5423 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
5424 return nullptr;
5425 return Val;
5428 // Don't try to handle the handful of other constants.
5429 return nullptr;
5432 // This is the recursive version of BuildSubAggregate. It takes a few different
5433 // arguments. Idxs is the index within the nested struct From that we are
5434 // looking at now (which is of type IndexedType). IdxSkip is the number of
5435 // indices from Idxs that should be left out when inserting into the resulting
5436 // struct. To is the result struct built so far, new insertvalue instructions
5437 // build on that.
5438 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
5439 SmallVectorImpl<unsigned> &Idxs,
5440 unsigned IdxSkip,
5441 Instruction *InsertBefore) {
5442 StructType *STy = dyn_cast<StructType>(IndexedType);
5443 if (STy) {
5444 // Save the original To argument so we can modify it
5445 Value *OrigTo = To;
5446 // General case, the type indexed by Idxs is a struct
5447 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5448 // Process each struct element recursively
5449 Idxs.push_back(i);
5450 Value *PrevTo = To;
5451 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
5452 InsertBefore);
5453 Idxs.pop_back();
5454 if (!To) {
5455 // Couldn't find any inserted value for this index? Cleanup
5456 while (PrevTo != OrigTo) {
5457 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
5458 PrevTo = Del->getAggregateOperand();
5459 Del->eraseFromParent();
5461 // Stop processing elements
5462 break;
5465 // If we successfully found a value for each of our subaggregates
5466 if (To)
5467 return To;
5469 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
5470 // the struct's elements had a value that was inserted directly. In the latter
5471 // case, perhaps we can't determine each of the subelements individually, but
5472 // we might be able to find the complete struct somewhere.
5474 // Find the value that is at that particular spot
5475 Value *V = FindInsertedValue(From, Idxs);
5477 if (!V)
5478 return nullptr;
5480 // Insert the value in the new (sub) aggregate
5481 return InsertValueInst::Create(To, V, ArrayRef(Idxs).slice(IdxSkip), "tmp",
5482 InsertBefore);
5485 // This helper takes a nested struct and extracts a part of it (which is again a
5486 // struct) into a new value. For example, given the struct:
5487 // { a, { b, { c, d }, e } }
5488 // and the indices "1, 1" this returns
5489 // { c, d }.
5491 // It does this by inserting an insertvalue for each element in the resulting
5492 // struct, as opposed to just inserting a single struct. This will only work if
5493 // each of the elements of the substruct are known (ie, inserted into From by an
5494 // insertvalue instruction somewhere).
5496 // All inserted insertvalue instructions are inserted before InsertBefore
5497 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
5498 Instruction *InsertBefore) {
5499 assert(InsertBefore && "Must have someplace to insert!");
5500 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
5501 idx_range);
5502 Value *To = PoisonValue::get(IndexedType);
5503 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
5504 unsigned IdxSkip = Idxs.size();
5506 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
5509 /// Given an aggregate and a sequence of indices, see if the scalar value
5510 /// indexed is already around as a register, for example if it was inserted
5511 /// directly into the aggregate.
5513 /// If InsertBefore is not null, this function will duplicate (modified)
5514 /// insertvalues when a part of a nested struct is extracted.
5515 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
5516 Instruction *InsertBefore) {
5517 // Nothing to index? Just return V then (this is useful at the end of our
5518 // recursion).
5519 if (idx_range.empty())
5520 return V;
5521 // We have indices, so V should have an indexable type.
5522 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
5523 "Not looking at a struct or array?");
5524 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
5525 "Invalid indices for type?");
5527 if (Constant *C = dyn_cast<Constant>(V)) {
5528 C = C->getAggregateElement(idx_range[0]);
5529 if (!C) return nullptr;
5530 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
5533 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
5534 // Loop the indices for the insertvalue instruction in parallel with the
5535 // requested indices
5536 const unsigned *req_idx = idx_range.begin();
5537 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
5538 i != e; ++i, ++req_idx) {
5539 if (req_idx == idx_range.end()) {
5540 // We can't handle this without inserting insertvalues
5541 if (!InsertBefore)
5542 return nullptr;
5544 // The requested index identifies a part of a nested aggregate. Handle
5545 // this specially. For example,
5546 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
5547 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
5548 // %C = extractvalue {i32, { i32, i32 } } %B, 1
5549 // This can be changed into
5550 // %A = insertvalue {i32, i32 } undef, i32 10, 0
5551 // %C = insertvalue {i32, i32 } %A, i32 11, 1
5552 // which allows the unused 0,0 element from the nested struct to be
5553 // removed.
5554 return BuildSubAggregate(V, ArrayRef(idx_range.begin(), req_idx),
5555 InsertBefore);
5558 // This insertvalue inserts something other than what we are looking for.
5559 // In that case, see if the (aggregate) value being inserted into has the
5560 // value we are looking for.
5561 if (*req_idx != *i)
5562 return FindInsertedValue(I->getAggregateOperand(), idx_range,
5563 InsertBefore);
5565 // If we end up here, the indices of the insertvalue match with those
5566 // requested (though possibly only partially). Now we recursively look at
5567 // the inserted value, passing any remaining indices.
5568 return FindInsertedValue(I->getInsertedValueOperand(),
5569 ArrayRef(req_idx, idx_range.end()), InsertBefore);
5572 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
5573 // If we're extracting a value from an aggregate that was extracted from
5574 // something else, we can extract from that something else directly instead.
5575 // However, we will need to chain I's indices with the requested indices.
5577 // Calculate the number of indices required
5578 unsigned size = I->getNumIndices() + idx_range.size();
5579 // Allocate some space to put the new indices in
5580 SmallVector<unsigned, 5> Idxs;
5581 Idxs.reserve(size);
5582 // Add indices from the extract value instruction
5583 Idxs.append(I->idx_begin(), I->idx_end());
5585 // Add requested indices
5586 Idxs.append(idx_range.begin(), idx_range.end());
5588 assert(Idxs.size() == size
5589 && "Number of indices added not correct?");
5591 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
5593 // Otherwise, we don't know (e.g. we may be extracting from a function
5594 // return value or a load instruction).
5595 return nullptr;
5598 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
5599 unsigned CharSize) {
5600 // Make sure the GEP has exactly three arguments.
5601 if (GEP->getNumOperands() != 3)
5602 return false;
5604 // Make sure the index-ee is a pointer to an array of \p CharSize integers.
5606 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
5607 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
5608 return false;
5610 // Check to make sure that the first operand of the GEP is an integer and
5611 // has value 0 so that we are sure we're indexing into the initializer.
5612 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
5613 if (!FirstIdx || !FirstIdx->isZero())
5614 return false;
5616 return true;
5619 // If V refers to an initialized global constant, set Slice either to
5620 // its initializer if the size of its elements equals ElementSize, or,
5621 // for ElementSize == 8, to its representation as an array of unsigned
5622 // char. Return true on success.
5623 // Offset is measured in units of ElementSize-sized elements.
5624 bool llvm::getConstantDataArrayInfo(const Value *V,
5625 ConstantDataArraySlice &Slice,
5626 unsigned ElementSize, uint64_t Offset) {
5627 assert(V && "V should not be null.");
5628 assert((ElementSize % 8) == 0 &&
5629 "ElementSize expected to be a multiple of the size of a byte.");
5630 unsigned ElementSizeInBytes = ElementSize / 8;
5632 // Drill down into the pointer expression V, ignoring any intervening
5633 // casts, and determine the identity of the object it references along
5634 // with the cumulative byte offset into it.
5635 const GlobalVariable *GV =
5636 dyn_cast<GlobalVariable>(getUnderlyingObject(V));
5637 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
5638 // Fail if V is not based on a constant global object.
5639 return false;
5641 const DataLayout &DL = GV->getParent()->getDataLayout();
5642 APInt Off(DL.getIndexTypeSizeInBits(V->getType()), 0);
5644 if (GV != V->stripAndAccumulateConstantOffsets(DL, Off,
5645 /*AllowNonInbounds*/ true))
5646 // Fail if a constant offset could not be determined.
5647 return false;
5649 uint64_t StartIdx = Off.getLimitedValue();
5650 if (StartIdx == UINT64_MAX)
5651 // Fail if the constant offset is excessive.
5652 return false;
5654 // Off/StartIdx is in bytes, so we need to convert it to a number of
5655 // elements. Simply bail out if that isn't possible.
5656 if ((StartIdx % ElementSizeInBytes) != 0)
5657 return false;
5659 Offset += StartIdx / ElementSizeInBytes;
5660 ConstantDataArray *Array = nullptr;
5661 ArrayType *ArrayTy = nullptr;
5663 if (GV->getInitializer()->isNullValue()) {
5664 Type *GVTy = GV->getValueType();
5665 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedValue();
5666 uint64_t Length = SizeInBytes / ElementSizeInBytes;
5668 Slice.Array = nullptr;
5669 Slice.Offset = 0;
5670 // Return an empty Slice for undersized constants to let callers
5671 // transform even undefined library calls into simpler, well-defined
5672 // expressions. This is preferable to making the calls although it
5673 // prevents sanitizers from detecting such calls.
5674 Slice.Length = Length < Offset ? 0 : Length - Offset;
5675 return true;
5678 auto *Init = const_cast<Constant *>(GV->getInitializer());
5679 if (auto *ArrayInit = dyn_cast<ConstantDataArray>(Init)) {
5680 Type *InitElTy = ArrayInit->getElementType();
5681 if (InitElTy->isIntegerTy(ElementSize)) {
5682 // If Init is an initializer for an array of the expected type
5683 // and size, use it as is.
5684 Array = ArrayInit;
5685 ArrayTy = ArrayInit->getType();
5689 if (!Array) {
5690 if (ElementSize != 8)
5691 // TODO: Handle conversions to larger integral types.
5692 return false;
5694 // Otherwise extract the portion of the initializer starting
5695 // at Offset as an array of bytes, and reset Offset.
5696 Init = ReadByteArrayFromGlobal(GV, Offset);
5697 if (!Init)
5698 return false;
5700 Offset = 0;
5701 Array = dyn_cast<ConstantDataArray>(Init);
5702 ArrayTy = dyn_cast<ArrayType>(Init->getType());
5705 uint64_t NumElts = ArrayTy->getArrayNumElements();
5706 if (Offset > NumElts)
5707 return false;
5709 Slice.Array = Array;
5710 Slice.Offset = Offset;
5711 Slice.Length = NumElts - Offset;
5712 return true;
5715 /// Extract bytes from the initializer of the constant array V, which need
5716 /// not be a nul-terminated string. On success, store the bytes in Str and
5717 /// return true. When TrimAtNul is set, Str will contain only the bytes up
5718 /// to but not including the first nul. Return false on failure.
5719 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
5720 bool TrimAtNul) {
5721 ConstantDataArraySlice Slice;
5722 if (!getConstantDataArrayInfo(V, Slice, 8))
5723 return false;
5725 if (Slice.Array == nullptr) {
5726 if (TrimAtNul) {
5727 // Return a nul-terminated string even for an empty Slice. This is
5728 // safe because all existing SimplifyLibcalls callers require string
5729 // arguments and the behavior of the functions they fold is undefined
5730 // otherwise. Folding the calls this way is preferable to making
5731 // the undefined library calls, even though it prevents sanitizers
5732 // from reporting such calls.
5733 Str = StringRef();
5734 return true;
5736 if (Slice.Length == 1) {
5737 Str = StringRef("", 1);
5738 return true;
5740 // We cannot instantiate a StringRef as we do not have an appropriate string
5741 // of 0s at hand.
5742 return false;
5745 // Start out with the entire array in the StringRef.
5746 Str = Slice.Array->getAsString();
5747 // Skip over 'offset' bytes.
5748 Str = Str.substr(Slice.Offset);
5750 if (TrimAtNul) {
5751 // Trim off the \0 and anything after it. If the array is not nul
5752 // terminated, we just return the whole end of string. The client may know
5753 // some other way that the string is length-bound.
5754 Str = Str.substr(0, Str.find('\0'));
5756 return true;
5759 // These next two are very similar to the above, but also look through PHI
5760 // nodes.
5761 // TODO: See if we can integrate the two.
5763 /// If we can compute the length of the string pointed to by
5764 /// the specified pointer, return 'len+1'. If we can't, return 0.
5765 static uint64_t GetStringLengthH(const Value *V,
5766 SmallPtrSetImpl<const PHINode*> &PHIs,
5767 unsigned CharSize) {
5768 // Look through noop bitcast instructions.
5769 V = V->stripPointerCasts();
5771 // If this is a PHI node, there are two cases: either we have already seen it
5772 // or we haven't.
5773 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
5774 if (!PHIs.insert(PN).second)
5775 return ~0ULL; // already in the set.
5777 // If it was new, see if all the input strings are the same length.
5778 uint64_t LenSoFar = ~0ULL;
5779 for (Value *IncValue : PN->incoming_values()) {
5780 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
5781 if (Len == 0) return 0; // Unknown length -> unknown.
5783 if (Len == ~0ULL) continue;
5785 if (Len != LenSoFar && LenSoFar != ~0ULL)
5786 return 0; // Disagree -> unknown.
5787 LenSoFar = Len;
5790 // Success, all agree.
5791 return LenSoFar;
5794 // strlen(select(c,x,y)) is known only when strlen(x) == strlen(y).
5795 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
5796 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
5797 if (Len1 == 0) return 0;
5798 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
5799 if (Len2 == 0) return 0;
5800 if (Len1 == ~0ULL) return Len2;
5801 if (Len2 == ~0ULL) return Len1;
5802 if (Len1 != Len2) return 0;
5803 return Len1;
5806 // Otherwise, see if we can read the string.
5807 ConstantDataArraySlice Slice;
5808 if (!getConstantDataArrayInfo(V, Slice, CharSize))
5809 return 0;
5811 if (Slice.Array == nullptr)
5812 // Zeroinitializer (including an empty one).
5813 return 1;
5815 // Search for the first nul character. Return a conservative result even
5816 // when there is no nul: in that case the string function being folded
5817 // (such as strlen) is undefined anyway, and folding is preferable to
5818 // making the undefined library call.
5819 unsigned NullIndex = 0;
5820 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
5821 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
5822 break;
5825 return NullIndex + 1;
5828 /// If we can compute the length of the string pointed to by
5829 /// the specified pointer, return 'len+1'. If we can't, return 0.
5830 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
5831 if (!V->getType()->isPointerTy())
5832 return 0;
5834 SmallPtrSet<const PHINode*, 32> PHIs;
5835 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
5836 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
5837 // return 1, the length of an empty string.
5838 return Len == ~0ULL ? 1 : Len;
5841 const Value *
5842 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
5843 bool MustPreserveNullness) {
5844 assert(Call &&
5845 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
5846 if (const Value *RV = Call->getReturnedArgOperand())
5847 return RV;
5848 // This can be used only as an aliasing property.
5849 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
5850 Call, MustPreserveNullness))
5851 return Call->getArgOperand(0);
5852 return nullptr;
5855 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
5856 const CallBase *Call, bool MustPreserveNullness) {
5857 switch (Call->getIntrinsicID()) {
5858 case Intrinsic::launder_invariant_group:
5859 case Intrinsic::strip_invariant_group:
5860 case Intrinsic::aarch64_irg:
5861 case Intrinsic::aarch64_tagp:
5862 // The amdgcn_make_buffer_rsrc function does not alter the address of the
5863 // input pointer (and thus preserves null-ness for the purposes of escape
5864 // analysis, which is where the MustPreserveNullness flag comes into play).
5865 // However, it will not necessarily map ptr addrspace(N) null to ptr
5866 // addrspace(8) null, aka the "null descriptor", which has "all loads return
5867 // 0, all stores are dropped" semantics. Given the context of this intrinsic
5868 // list, no one should be relying on such a strict interpretation of
5869 // MustPreserveNullness (and, at time of writing, they are not), but we
5870 // document this fact out of an abundance of caution.
5871 case Intrinsic::amdgcn_make_buffer_rsrc:
5872 return true;
5873 case Intrinsic::ptrmask:
5874 return !MustPreserveNullness;
5875 default:
5876 return false;
5880 /// \p PN defines a loop-variant pointer to an object. Check if the
5881 /// previous iteration of the loop was referring to the same object as \p PN.
5882 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
5883 const LoopInfo *LI) {
5884 // Find the loop-defined value.
5885 Loop *L = LI->getLoopFor(PN->getParent());
5886 if (PN->getNumIncomingValues() != 2)
5887 return true;
5889 // Find the value from previous iteration.
5890 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
5891 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
5892 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
5893 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
5894 return true;
5896 // If a new pointer is loaded in the loop, the pointer references a different
5897 // object in every iteration. E.g.:
5898 // for (i)
5899 // int *p = a[i];
5900 // ...
5901 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
5902 if (!L->isLoopInvariant(Load->getPointerOperand()))
5903 return false;
5904 return true;
5907 const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
5908 if (!V->getType()->isPointerTy())
5909 return V;
5910 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
5911 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
5912 V = GEP->getPointerOperand();
5913 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
5914 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
5915 V = cast<Operator>(V)->getOperand(0);
5916 if (!V->getType()->isPointerTy())
5917 return V;
5918 } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
5919 if (GA->isInterposable())
5920 return V;
5921 V = GA->getAliasee();
5922 } else {
5923 if (auto *PHI = dyn_cast<PHINode>(V)) {
5924 // Look through single-arg phi nodes created by LCSSA.
5925 if (PHI->getNumIncomingValues() == 1) {
5926 V = PHI->getIncomingValue(0);
5927 continue;
5929 } else if (auto *Call = dyn_cast<CallBase>(V)) {
5930 // CaptureTracking knows about special capturing properties of some
5931 // intrinsics, like launder.invariant.group, that can't be expressed with
5932 // attributes but that return an aliasing pointer.
5933 // Because some analyses may assume that a nocapture pointer is not
5934 // returned from a special intrinsic (the function would otherwise have to
5935 // be marked with the returned attribute), it is crucial to use this
5936 // function, which is kept in sync with CaptureTracking. Not using it may
5937 // cause subtle miscompilations where two aliasing pointers are assumed
5938 // not to alias.
5939 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
5940 V = RP;
5941 continue;
5945 return V;
5947 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
5949 return V;
5952 void llvm::getUnderlyingObjects(const Value *V,
5953 SmallVectorImpl<const Value *> &Objects,
5954 LoopInfo *LI, unsigned MaxLookup) {
5955 SmallPtrSet<const Value *, 4> Visited;
5956 SmallVector<const Value *, 4> Worklist;
5957 Worklist.push_back(V);
5958 do {
5959 const Value *P = Worklist.pop_back_val();
5960 P = getUnderlyingObject(P, MaxLookup);
5962 if (!Visited.insert(P).second)
5963 continue;
5965 if (auto *SI = dyn_cast<SelectInst>(P)) {
5966 Worklist.push_back(SI->getTrueValue());
5967 Worklist.push_back(SI->getFalseValue());
5968 continue;
5971 if (auto *PN = dyn_cast<PHINode>(P)) {
5972 // If this PHI changes the underlying object in every iteration of the
5973 // loop, don't look through it. Consider:
5974 // int **A;
5975 // for (i) {
5976 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
5977 // Curr = A[i];
5978 // *Prev, *Curr;
5980 // Prev is tracking Curr one iteration behind so they refer to different
5981 // underlying objects.
5982 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
5983 isSameUnderlyingObjectInLoop(PN, LI))
5984 append_range(Worklist, PN->incoming_values());
5985 continue;
5988 Objects.push_back(P);
5989 } while (!Worklist.empty());
5992 /// This is the function that does the work of looking through basic
5993 /// ptrtoint+arithmetic+inttoptr sequences.
5994 static const Value *getUnderlyingObjectFromInt(const Value *V) {
5995 do {
5996 if (const Operator *U = dyn_cast<Operator>(V)) {
5997 // If we find a ptrtoint, we can transfer control back to the
5998 // regular getUnderlyingObjectFromInt.
5999 if (U->getOpcode() == Instruction::PtrToInt)
6000 return U->getOperand(0);
6001 // If we find an add of a constant, a multiplied value, or a phi, it's
6002 // likely that the other operand will lead us to the base
6003 // object. We don't have to worry about the case where the
6004 // object address is somehow being computed by the multiply,
6005 // because our callers only care when the result is an
6006 // identifiable object.
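// For example, for inttoptr (add (ptrtoint %obj), 16) we walk through the
// add here and return %obj at the ptrtoint on the next iteration.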
6007 if (U->getOpcode() != Instruction::Add ||
6008 (!isa<ConstantInt>(U->getOperand(1)) &&
6009 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
6010 !isa<PHINode>(U->getOperand(1))))
6011 return V;
6012 V = U->getOperand(0);
6013 } else {
6014 return V;
6016 assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
6017 } while (true);
6020 /// This is a wrapper around getUnderlyingObjects and adds support for basic
6021 /// ptrtoint+arithmetic+inttoptr sequences.
6022 /// It returns false if an unidentified object is found by getUnderlyingObjects.
6023 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
6024 SmallVectorImpl<Value *> &Objects) {
6025 SmallPtrSet<const Value *, 16> Visited;
6026 SmallVector<const Value *, 4> Working(1, V);
6027 do {
6028 V = Working.pop_back_val();
6030 SmallVector<const Value *, 4> Objs;
6031 getUnderlyingObjects(V, Objs);
6033 for (const Value *V : Objs) {
6034 if (!Visited.insert(V).second)
6035 continue;
6036 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
6037 const Value *O =
6038 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
6039 if (O->getType()->isPointerTy()) {
6040 Working.push_back(O);
6041 continue;
6044 // If getUnderlyingObjects fails to find an identifiable object,
6045 // getUnderlyingObjectsForCodeGen also fails for safety.
6046 if (!isIdentifiedObject(V)) {
6047 Objects.clear();
6048 return false;
6050 Objects.push_back(const_cast<Value *>(V));
6052 } while (!Working.empty());
6053 return true;
6056 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
6057 AllocaInst *Result = nullptr;
6058 SmallPtrSet<Value *, 4> Visited;
6059 SmallVector<Value *, 4> Worklist;
6061 auto AddWork = [&](Value *V) {
6062 if (Visited.insert(V).second)
6063 Worklist.push_back(V);
6066 AddWork(V);
6067 do {
6068 V = Worklist.pop_back_val();
6069 assert(Visited.count(V));
6071 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
6072 if (Result && Result != AI)
6073 return nullptr;
6074 Result = AI;
6075 } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
6076 AddWork(CI->getOperand(0));
6077 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
6078 for (Value *IncValue : PN->incoming_values())
6079 AddWork(IncValue);
6080 } else if (auto *SI = dyn_cast<SelectInst>(V)) {
6081 AddWork(SI->getTrueValue());
6082 AddWork(SI->getFalseValue());
6083 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
6084 if (OffsetZero && !GEP->hasAllZeroIndices())
6085 return nullptr;
6086 AddWork(GEP->getPointerOperand());
6087 } else if (CallBase *CB = dyn_cast<CallBase>(V)) {
6088 Value *Returned = CB->getReturnedArgOperand();
6089 if (Returned)
6090 AddWork(Returned);
6091 else
6092 return nullptr;
6093 } else {
6094 return nullptr;
6096 } while (!Worklist.empty());
6098 return Result;
6101 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
6102 const Value *V, bool AllowLifetime, bool AllowDroppable) {
6103 for (const User *U : V->users()) {
6104 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
6105 if (!II)
6106 return false;
6108 if (AllowLifetime && II->isLifetimeStartOrEnd())
6109 continue;
6111 if (AllowDroppable && II->isDroppable())
6112 continue;
6114 return false;
6116 return true;
6119 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
6120 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
6121 V, /* AllowLifetime */ true, /* AllowDroppable */ false);
6123 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
6124 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
6125 V, /* AllowLifetime */ true, /* AllowDroppable */ true);
6128 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
6129 if (!LI.isUnordered())
6130 return true;
6131 const Function &F = *LI.getFunction();
6132 // Speculative load may create a race that did not exist in the source.
6133 return F.hasFnAttribute(Attribute::SanitizeThread) ||
6134 // Speculative load may load data from dirty regions.
6135 F.hasFnAttribute(Attribute::SanitizeAddress) ||
6136 F.hasFnAttribute(Attribute::SanitizeHWAddress);
6139 bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst,
6140 const Instruction *CtxI,
6141 AssumptionCache *AC,
6142 const DominatorTree *DT,
6143 const TargetLibraryInfo *TLI) {
6144 return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI,
6145 AC, DT, TLI);
6148 bool llvm::isSafeToSpeculativelyExecuteWithOpcode(
6149 unsigned Opcode, const Instruction *Inst, const Instruction *CtxI,
6150 AssumptionCache *AC, const DominatorTree *DT,
6151 const TargetLibraryInfo *TLI) {
6152 #ifndef NDEBUG
6153 if (Inst->getOpcode() != Opcode) {
6154 // Check that the operands are actually compatible with the Opcode override.
6155 auto hasEqualReturnAndLeadingOperandTypes =
6156 [](const Instruction *Inst, unsigned NumLeadingOperands) {
6157 if (Inst->getNumOperands() < NumLeadingOperands)
6158 return false;
6159 const Type *ExpectedType = Inst->getType();
6160 for (unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
6161 if (Inst->getOperand(ItOp)->getType() != ExpectedType)
6162 return false;
6163 return true;
6165 assert(!Instruction::isBinaryOp(Opcode) ||
6166 hasEqualReturnAndLeadingOperandTypes(Inst, 2));
6167 assert(!Instruction::isUnaryOp(Opcode) ||
6168 hasEqualReturnAndLeadingOperandTypes(Inst, 1));
6170 #endif
6172 switch (Opcode) {
6173 default:
6174 return true;
6175 case Instruction::UDiv:
6176 case Instruction::URem: {
6177 // x / y is undefined if y == 0.
6178 const APInt *V;
6179 if (match(Inst->getOperand(1), m_APInt(V)))
6180 return *V != 0;
6181 return false;
6183 case Instruction::SDiv:
6184 case Instruction::SRem: {
6185 // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
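// For example, "sdiv i32 %x, 7" is safe to hoist, while "sdiv i32 %x, -1"
// is safe only if %x is known not to be INT_MIN.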
6186 const APInt *Numerator, *Denominator;
6187 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
6188 return false;
6189 // We cannot hoist this division if the denominator is 0.
6190 if (*Denominator == 0)
6191 return false;
6192 // It's safe to hoist if the denominator is not 0 or -1.
6193 if (!Denominator->isAllOnes())
6194 return true;
6195 // At this point we know that the denominator is -1. It is safe to hoist as
6196 // long we know that the numerator is not INT_MIN.
6197 if (match(Inst->getOperand(0), m_APInt(Numerator)))
6198 return !Numerator->isMinSignedValue();
6199 // The numerator *might* be MinSignedValue.
6200 return false;
6202 case Instruction::Load: {
6203 const LoadInst *LI = dyn_cast<LoadInst>(Inst);
6204 if (!LI)
6205 return false;
6206 if (mustSuppressSpeculation(*LI))
6207 return false;
6208 const DataLayout &DL = LI->getModule()->getDataLayout();
6209 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
6210 LI->getType(), LI->getAlign(), DL,
6211 CtxI, AC, DT, TLI);
6213 case Instruction::Call: {
6214 auto *CI = dyn_cast<const CallInst>(Inst);
6215 if (!CI)
6216 return false;
6217 const Function *Callee = CI->getCalledFunction();
6219 // The called function could have undefined behavior or side-effects, even
6220 // if marked readnone nounwind.
6221 return Callee && Callee->isSpeculatable();
6223 case Instruction::VAArg:
6224 case Instruction::Alloca:
6225 case Instruction::Invoke:
6226 case Instruction::CallBr:
6227 case Instruction::PHI:
6228 case Instruction::Store:
6229 case Instruction::Ret:
6230 case Instruction::Br:
6231 case Instruction::IndirectBr:
6232 case Instruction::Switch:
6233 case Instruction::Unreachable:
6234 case Instruction::Fence:
6235 case Instruction::AtomicRMW:
6236 case Instruction::AtomicCmpXchg:
6237 case Instruction::LandingPad:
6238 case Instruction::Resume:
6239 case Instruction::CatchSwitch:
6240 case Instruction::CatchPad:
6241 case Instruction::CatchRet:
6242 case Instruction::CleanupPad:
6243 case Instruction::CleanupRet:
6244 return false; // Misc instructions which have effects
6248 bool llvm::mayHaveNonDefUseDependency(const Instruction &I) {
6249 if (I.mayReadOrWriteMemory())
6250 // Memory dependency possible
6251 return true;
6252 if (!isSafeToSpeculativelyExecute(&I))
6253 // Can't move above a may-throw call or an infinite loop. Nor can an
6254 // inalloca alloca move above a stacksave call.
6255 return true;
6256 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
6257 // 1) Can't reorder two inf-loop calls, even if readonly
6258 // 2) Also can't reorder an inf-loop call below an instruction which isn't
6259 // safe to speculatively execute. (Inverse of above)
6260 return true;
6261 return false;
6264 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
6265 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
6266 switch (OR) {
6267 case ConstantRange::OverflowResult::MayOverflow:
6268 return OverflowResult::MayOverflow;
6269 case ConstantRange::OverflowResult::AlwaysOverflowsLow:
6270 return OverflowResult::AlwaysOverflowsLow;
6271 case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
6272 return OverflowResult::AlwaysOverflowsHigh;
6273 case ConstantRange::OverflowResult::NeverOverflows:
6274 return OverflowResult::NeverOverflows;
6276 llvm_unreachable("Unknown OverflowResult");
6279 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
6280 static ConstantRange
6281 computeConstantRangeIncludingKnownBits(const WithCache<const Value *> &V,
6282 bool ForSigned,
6283 const SimplifyQuery &SQ) {
6284 ConstantRange CR1 =
6285 ConstantRange::fromKnownBits(V.getKnownBits(SQ), ForSigned);
6286 ConstantRange CR2 = computeConstantRange(V, ForSigned, SQ.IIQ.UseInstrInfo);
6287 ConstantRange::PreferredRangeType RangeType =
6288 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
6289 return CR1.intersectWith(CR2, RangeType);
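// For illustration (an assumed i8 value): if the known bits say the top
// four bits are zero, CR1 = [0, 16); if instruction-level analysis
// independently bounds the value to [4, 100), the intersection [4, 16)
// is tighter than either range alone.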
6292 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
6293 const Value *RHS,
6294 const SimplifyQuery &SQ) {
6295 KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, SQ);
6296 KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, SQ);
6297 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
6298 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
6299 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
6302 OverflowResult llvm::computeOverflowForSignedMul(const Value *LHS,
6303 const Value *RHS,
6304 const SimplifyQuery &SQ) {
6305 // Multiplying a value with n significant bits by one with m significant
6306 // bits yields a result with at most n + m significant bits. If that total
6307 // does not exceed the result bit width minus 1, there is no overflow.
6308 // This means if we have enough leading sign bits in the operands
6309 // we can guarantee that the result does not overflow.
6310 // Ref: "Hacker's Delight" by Henry Warren
6311 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
6313 // Note that underestimating the number of sign bits gives a more
6314 // conservative answer.
6315 unsigned SignBits =
6316 ::ComputeNumSignBits(LHS, 0, SQ) + ::ComputeNumSignBits(RHS, 0, SQ);
6318 // First handle the easy case: if we have enough sign bits there's
6319 // definitely no overflow.
6320 if (SignBits > BitWidth + 1)
6321 return OverflowResult::NeverOverflows;
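// For illustration: in i16, operands with 9 sign bits each (SignBits = 18
// > 17) have at most 7 significant bits each, so the product has at most
// 14 significant bits and always fits in the 15 value bits of i16.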
6323 // There are two ambiguous cases where there can be no overflow:
6324 // SignBits == BitWidth + 1 and
6325 // SignBits == BitWidth
6326 // The second case is difficult to check, therefore we only handle the
6327 // first case.
6328 if (SignBits == BitWidth + 1) {
6329 // It overflows only when both arguments are negative and the true
6330 // product is exactly the minimum negative number.
6331 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
6332 // For simplicity we just check if at least one side is not negative.
6333 KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, SQ);
6334 KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, SQ);
6335 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
6336 return OverflowResult::NeverOverflows;
6338 return OverflowResult::MayOverflow;
6341 OverflowResult
6342 llvm::computeOverflowForUnsignedAdd(const WithCache<const Value *> &LHS,
6343 const WithCache<const Value *> &RHS,
6344 const SimplifyQuery &SQ) {
6345 ConstantRange LHSRange =
6346 computeConstantRangeIncludingKnownBits(LHS, /*ForSigned=*/false, SQ);
6347 ConstantRange RHSRange =
6348 computeConstantRangeIncludingKnownBits(RHS, /*ForSigned=*/false, SQ);
6349 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
6352 static OverflowResult
6353 computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
6354 const WithCache<const Value *> &RHS,
6355 const AddOperator *Add, const SimplifyQuery &SQ) {
6356 if (Add && Add->hasNoSignedWrap()) {
6357 return OverflowResult::NeverOverflows;
6360 // If LHS and RHS each have at least two sign bits, the addition will look
6361 // like
6363 // XX..... +
6364 // YY.....
6366 // If the carry into the most significant position is 0, X and Y can't both
6367 // be 1 and therefore the carry out of the addition is also 0.
6369 // If the carry into the most significant position is 1, X and Y can't both
6370 // be 0 and therefore the carry out of the addition is also 1.
6372 // Since the carry into the most significant position is always equal to
6373 // the carry out of the addition, there is no signed overflow.
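// For illustration, in i4: 0b1101 (-3) + 0b1110 (-2) = 0b1011 (-5); both
// operands have at least two sign bits, so the sum stays within [-8, 7].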
6374 if (::ComputeNumSignBits(LHS, 0, SQ) > 1 &&
6375 ::ComputeNumSignBits(RHS, 0, SQ) > 1)
6376 return OverflowResult::NeverOverflows;
6378 ConstantRange LHSRange =
6379 computeConstantRangeIncludingKnownBits(LHS, /*ForSigned=*/true, SQ);
6380 ConstantRange RHSRange =
6381 computeConstantRangeIncludingKnownBits(RHS, /*ForSigned=*/true, SQ);
6382 OverflowResult OR =
6383 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
6384 if (OR != OverflowResult::MayOverflow)
6385 return OR;
6387 // The remaining code needs Add to be available. Return early if it is not.
6388 if (!Add)
6389 return OverflowResult::MayOverflow;
6391 // If the sign of Add is the same as at least one of the operands, this add
6392 // CANNOT overflow. If this can be determined from the known bits of the
6393 // operands the above signedAddMayOverflow() check will have already done so.
6394 // The only other way to improve on the known bits is from an assumption, so
6395 // call computeKnownBitsFromAssume() directly.
6396 bool LHSOrRHSKnownNonNegative =
6397 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
6398 bool LHSOrRHSKnownNegative =
6399 (LHSRange.isAllNegative() || RHSRange.isAllNegative());
6400 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
6401 KnownBits AddKnown(LHSRange.getBitWidth());
6402 computeKnownBitsFromAssume(Add, AddKnown, /*Depth=*/0, SQ);
6403 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
6404 (AddKnown.isNegative() && LHSOrRHSKnownNegative))
6405 return OverflowResult::NeverOverflows;
6408 return OverflowResult::MayOverflow;
6411 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
6412 const Value *RHS,
6413 const SimplifyQuery &SQ) {
6414 // X - (X % ?)
6415 // The remainder of a value can't have greater magnitude than itself,
6416 // so the subtraction can't overflow.
6418 // X - (X -nuw ?)
6419 // In the minimal case, this would simplify to "?", so there's no subtract
6420 // at all. But if this analysis is used to peek through casts, for example,
6421 // then determining no-overflow may allow other transforms.
6423 // TODO: There are other patterns like this.
6424 // See simplifyICmpWithBinOpOnLHS() for candidates.
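// For illustration (hypothetical IR): with %r = urem i32 %x, %y, the
// value %x - %r cannot wrap below zero, since %r <= %x whenever %x is
// not undef or poison.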
6425 if (match(RHS, m_URem(m_Specific(LHS), m_Value())) ||
6426 match(RHS, m_NUWSub(m_Specific(LHS), m_Value())))
6427 if (isGuaranteedNotToBeUndefOrPoison(LHS, SQ.AC, SQ.CxtI, SQ.DT))
6428 return OverflowResult::NeverOverflows;
6430 // Checking for conditions implied by dominating conditions may be expensive.
6431 // Limit it to usub_with_overflow calls for now.
6432 if (match(SQ.CxtI,
6433 m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
6434 if (auto C = isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, SQ.CxtI,
6435 SQ.DL)) {
6436 if (*C)
6437 return OverflowResult::NeverOverflows;
6438 return OverflowResult::AlwaysOverflowsLow;
6440 ConstantRange LHSRange =
6441 computeConstantRangeIncludingKnownBits(LHS, /*ForSigned=*/false, SQ);
6442 ConstantRange RHSRange =
6443 computeConstantRangeIncludingKnownBits(RHS, /*ForSigned=*/false, SQ);
6444 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
6447 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
6448 const Value *RHS,
6449 const SimplifyQuery &SQ) {
6450 // X - (X % ?)
6451 // The remainder of a value can't have greater magnitude than itself,
6452 // so the subtraction can't overflow.
6454 // X - (X -nsw ?)
6455 // In the minimal case, this would simplify to "?", so there's no subtract
6456 // at all. But if this analysis is used to peek through casts, for example,
6457 // then determining no-overflow may allow other transforms.
6458 if (match(RHS, m_SRem(m_Specific(LHS), m_Value())) ||
6459 match(RHS, m_NSWSub(m_Specific(LHS), m_Value())))
6460 if (isGuaranteedNotToBeUndefOrPoison(LHS, SQ.AC, SQ.CxtI, SQ.DT))
6461 return OverflowResult::NeverOverflows;
6463 // If LHS and RHS each have at least two sign bits, the subtraction
6464 // cannot overflow.
6465 if (::ComputeNumSignBits(LHS, 0, SQ) > 1 &&
6466 ::ComputeNumSignBits(RHS, 0, SQ) > 1)
6467 return OverflowResult::NeverOverflows;
6469 ConstantRange LHSRange =
6470 computeConstantRangeIncludingKnownBits(LHS, /*ForSigned=*/true, SQ);
6471 ConstantRange RHSRange =
6472 computeConstantRangeIncludingKnownBits(RHS, /*ForSigned=*/true, SQ);
6473 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
6476 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
6477 const DominatorTree &DT) {
6478 SmallVector<const BranchInst *, 2> GuardingBranches;
6479 SmallVector<const ExtractValueInst *, 2> Results;
6481 for (const User *U : WO->users()) {
6482 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
6483 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
6485 if (EVI->getIndices()[0] == 0)
6486 Results.push_back(EVI);
6487 else {
6488 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
6490 for (const auto *U : EVI->users())
6491 if (const auto *B = dyn_cast<BranchInst>(U)) {
6492 assert(B->isConditional() && "How else is it using an i1?");
6493 GuardingBranches.push_back(B);
6496 } else {
6497 // We are using the aggregate directly in a way we don't want to analyze
6498 // here (storing it to a global, say).
6499 return false;
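// For illustration, the guarded pattern being recognized looks like this
// (hypothetical IR):
//   %agg = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %val = extractvalue { i32, i1 } %agg, 0
//   %ovf = extractvalue { i32, i1 } %agg, 1
//   br i1 %ovf, label %trap, label %cont
// Uses of %val dominated by the edge into %cont cannot observe a wrapped
// result.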
6503 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
6504 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
6505 if (!NoWrapEdge.isSingleEdge())
6506 return false;
6508 // Check if all users of the add are provably no-wrap.
6509 for (const auto *Result : Results) {
6510 // If the extractvalue itself is not executed on overflow, then we don't
6511 // need to check each use separately, since domination is transitive.
6512 if (DT.dominates(NoWrapEdge, Result->getParent()))
6513 continue;
6515 for (const auto &RU : Result->uses())
6516 if (!DT.dominates(NoWrapEdge, RU))
6517 return false;
6520 return true;
6523 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
6526 /// Shifts return poison if the shift amount equals or exceeds the bit width.
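/// For illustration: shl i8 %x, 8 is poison for any %x, and a vector
/// shift amount such as <i8 1, i8 9> has an out-of-range lane, so it is
/// rejected below.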
6527 static bool shiftAmountKnownInRange(const Value *ShiftAmount) {
6528 auto *C = dyn_cast<Constant>(ShiftAmount);
6529 if (!C)
6530 return false;
6532 // Shifts return poison if the shift amount equals or exceeds the bit width.
6533 SmallVector<const Constant *, 4> ShiftAmounts;
6534 if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
6535 unsigned NumElts = FVTy->getNumElements();
6536 for (unsigned i = 0; i < NumElts; ++i)
6537 ShiftAmounts.push_back(C->getAggregateElement(i));
6538 } else if (isa<ScalableVectorType>(C->getType()))
6539 return false; // Can't tell, just return false to be safe
6540 else
6541 ShiftAmounts.push_back(C);
6543 bool Safe = llvm::all_of(ShiftAmounts, [](const Constant *C) {
6544 auto *CI = dyn_cast_or_null<ConstantInt>(C);
6545 return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
6548 return Safe;
6551 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly,
6552 bool ConsiderFlagsAndMetadata) {
6554 if (ConsiderFlagsAndMetadata && Op->hasPoisonGeneratingFlagsOrMetadata())
6555 return true;
6557 unsigned Opcode = Op->getOpcode();
6559 // Check whether opcode is a poison/undef-generating operation
6560 switch (Opcode) {
6561 case Instruction::Shl:
6562 case Instruction::AShr:
6563 case Instruction::LShr:
6564 return !shiftAmountKnownInRange(Op->getOperand(1));
6565 case Instruction::FPToSI:
6566 case Instruction::FPToUI:
6567 // fptosi/ui yields poison if the resulting value does not fit in the
6568 // destination type.
6569 return true;
6570 case Instruction::Call:
6571 if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
6572 switch (II->getIntrinsicID()) {
6573 // TODO: Add more intrinsics.
6574 case Intrinsic::ctlz:
6575 case Intrinsic::cttz:
6576 case Intrinsic::abs:
6577 if (cast<ConstantInt>(II->getArgOperand(1))->isNullValue())
6578 return false;
6579 break;
6580 case Intrinsic::ctpop:
6581 case Intrinsic::bswap:
6582 case Intrinsic::bitreverse:
6583 case Intrinsic::fshl:
6584 case Intrinsic::fshr:
6585 case Intrinsic::smax:
6586 case Intrinsic::smin:
6587 case Intrinsic::umax:
6588 case Intrinsic::umin:
6589 case Intrinsic::ptrmask:
6590 case Intrinsic::fptoui_sat:
6591 case Intrinsic::fptosi_sat:
6592 case Intrinsic::sadd_with_overflow:
6593 case Intrinsic::ssub_with_overflow:
6594 case Intrinsic::smul_with_overflow:
6595 case Intrinsic::uadd_with_overflow:
6596 case Intrinsic::usub_with_overflow:
6597 case Intrinsic::umul_with_overflow:
6598 case Intrinsic::sadd_sat:
6599 case Intrinsic::uadd_sat:
6600 case Intrinsic::ssub_sat:
6601 case Intrinsic::usub_sat:
6602 return false;
6603 case Intrinsic::sshl_sat:
6604 case Intrinsic::ushl_sat:
6605 return !shiftAmountKnownInRange(II->getArgOperand(1));
6606 case Intrinsic::fma:
6607 case Intrinsic::fmuladd:
6608 case Intrinsic::sqrt:
6609 case Intrinsic::powi:
6610 case Intrinsic::sin:
6611 case Intrinsic::cos:
6612 case Intrinsic::pow:
6613 case Intrinsic::log:
6614 case Intrinsic::log10:
6615 case Intrinsic::log2:
6616 case Intrinsic::exp:
6617 case Intrinsic::exp2:
6618 case Intrinsic::exp10:
6619 case Intrinsic::fabs:
6620 case Intrinsic::copysign:
6621 case Intrinsic::floor:
6622 case Intrinsic::ceil:
6623 case Intrinsic::trunc:
6624 case Intrinsic::rint:
6625 case Intrinsic::nearbyint:
6626 case Intrinsic::round:
6627 case Intrinsic::roundeven:
6628 case Intrinsic::fptrunc_round:
6629 case Intrinsic::canonicalize:
6630 case Intrinsic::arithmetic_fence:
6631 case Intrinsic::minnum:
6632 case Intrinsic::maxnum:
6633 case Intrinsic::minimum:
6634 case Intrinsic::maximum:
6635 case Intrinsic::is_fpclass:
6636 case Intrinsic::ldexp:
6637 case Intrinsic::frexp:
6638 return false;
6639 case Intrinsic::lround:
6640 case Intrinsic::llround:
6641 case Intrinsic::lrint:
6642 case Intrinsic::llrint:
6643 // If the value doesn't fit, an unspecified value is returned (but this
6644 // is not poison).
6645 return false;
6648 [[fallthrough]];
6649 case Instruction::CallBr:
6650 case Instruction::Invoke: {
6651 const auto *CB = cast<CallBase>(Op);
6652 return !CB->hasRetAttr(Attribute::NoUndef);
6654 case Instruction::InsertElement:
6655 case Instruction::ExtractElement: {
6656 // If index exceeds the length of the vector, it returns poison
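// For illustration: extractelement <4 x i32> %v, i64 7 is poison, as is
// insertelement <4 x i32> %v, i32 0, i64 7.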
6657 auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
6658 unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
6659 auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
6660 if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
6661 return true;
6662 return false;
6664 case Instruction::ShuffleVector: {
6665 // shufflevector may return undef.
6666 if (PoisonOnly)
6667 return false;
6668 ArrayRef<int> Mask = isa<ConstantExpr>(Op)
6669 ? cast<ConstantExpr>(Op)->getShuffleMask()
6670 : cast<ShuffleVectorInst>(Op)->getShuffleMask();
6671 return is_contained(Mask, PoisonMaskElem);
6673 case Instruction::FNeg:
6674 case Instruction::PHI:
6675 case Instruction::Select:
6676 case Instruction::URem:
6677 case Instruction::SRem:
6678 case Instruction::ExtractValue:
6679 case Instruction::InsertValue:
6680 case Instruction::Freeze:
6681 case Instruction::ICmp:
6682 case Instruction::FCmp:
6683 case Instruction::FAdd:
6684 case Instruction::FSub:
6685 case Instruction::FMul:
6686 case Instruction::FDiv:
6687 case Instruction::FRem:
6688 return false;
6689 case Instruction::GetElementPtr:
6690 // inbounds is handled above
6691 // TODO: what about inrange on constexpr?
6692 return false;
6693 default: {
6694 const auto *CE = dyn_cast<ConstantExpr>(Op);
6695 if (isa<CastInst>(Op) || (CE && CE->isCast()))
6696 return false;
6697 else if (Instruction::isBinaryOp(Opcode))
6698 return false;
6699 // Be conservative and return true.
6700 return true;
6705 bool llvm::canCreateUndefOrPoison(const Operator *Op,
6706 bool ConsiderFlagsAndMetadata) {
6707 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false,
6708 ConsiderFlagsAndMetadata);
6711 bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata) {
6712 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true,
6713 ConsiderFlagsAndMetadata);
6716 static bool directlyImpliesPoison(const Value *ValAssumedPoison,
6717 const Value *V, unsigned Depth) {
6718 if (ValAssumedPoison == V)
6719 return true;
6721 const unsigned MaxDepth = 2;
6722 if (Depth >= MaxDepth)
6723 return false;
6725 if (const auto *I = dyn_cast<Instruction>(V)) {
6726 if (any_of(I->operands(), [=](const Use &Op) {
6727 return propagatesPoison(Op) &&
6728 directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
6730 return true;
6732 // V = extractvalue V0, idx
6733 // V2 = extractvalue V0, idx2
6734 // V0's elements are all poison or not. (e.g., add_with_overflow)
6735 const WithOverflowInst *II;
6736 if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
6737 (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
6738 llvm::is_contained(II->args(), ValAssumedPoison)))
6739 return true;
6741 return false;
6744 static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
6745 unsigned Depth) {
6746 if (isGuaranteedNotToBePoison(ValAssumedPoison))
6747 return true;
6749 if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
6750 return true;
6752 const unsigned MaxDepth = 2;
6753 if (Depth >= MaxDepth)
6754 return false;
6756 const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
6757 if (I && !canCreatePoison(cast<Operator>(I))) {
6758 return all_of(I->operands(), [=](const Value *Op) {
6759 return impliesPoison(Op, V, Depth + 1);
6762 return false;
6765 bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
6766 return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
6769 static bool programUndefinedIfUndefOrPoison(const Value *V,
6770 bool PoisonOnly);
6772 static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
6773 AssumptionCache *AC,
6774 const Instruction *CtxI,
6775 const DominatorTree *DT,
6776 unsigned Depth, bool PoisonOnly) {
6777 if (Depth >= MaxAnalysisRecursionDepth)
6778 return false;
6780 if (isa<MetadataAsValue>(V))
6781 return false;
6783 if (const auto *A = dyn_cast<Argument>(V)) {
6784 if (A->hasAttribute(Attribute::NoUndef) ||
6785 A->hasAttribute(Attribute::Dereferenceable) ||
6786 A->hasAttribute(Attribute::DereferenceableOrNull))
6787 return true;
6790 if (auto *C = dyn_cast<Constant>(V)) {
6791 if (isa<UndefValue>(C))
6792 return PoisonOnly && !isa<PoisonValue>(C);
6794 if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
6795 isa<ConstantPointerNull>(C) || isa<Function>(C))
6796 return true;
6798 if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
6799 return (PoisonOnly ? !C->containsPoisonElement()
6800 : !C->containsUndefOrPoisonElement()) &&
6801 !C->containsConstantExpression();
6804 // Strip cast operations from a pointer value.
6805 // Note that stripPointerCastsSameRepresentation can strip off getelementptr
6806 // inbounds with zero offset. To guarantee that the result isn't poison, the
6807 // stripped pointer is checked, as it has to point into an allocated object
6808 // or be null to ensure that `inbounds` getelementptrs with a zero offset
6809 // could not produce poison.
6810 // It can also strip off addrspacecasts that do not change the bit
6811 // representation; we consider such an addrspacecast equivalent to a no-op.
6812 auto *StrippedV = V->stripPointerCastsSameRepresentation();
6813 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
6814 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
6815 return true;
6817 auto OpCheck = [&](const Value *V) {
6818 return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
6819 PoisonOnly);
6822 if (auto *Opr = dyn_cast<Operator>(V)) {
6823 // If the value is a freeze instruction, then it can never
6824 // be undef or poison.
6825 if (isa<FreezeInst>(V))
6826 return true;
6828 if (const auto *CB = dyn_cast<CallBase>(V)) {
6829 if (CB->hasRetAttr(Attribute::NoUndef) ||
6830 CB->hasRetAttr(Attribute::Dereferenceable) ||
6831 CB->hasRetAttr(Attribute::DereferenceableOrNull))
6832 return true;
6835 if (const auto *PN = dyn_cast<PHINode>(V)) {
6836 unsigned Num = PN->getNumIncomingValues();
6837 bool IsWellDefined = true;
6838 for (unsigned i = 0; i < Num; ++i) {
6839 auto *TI = PN->getIncomingBlock(i)->getTerminator();
6840 if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
6841 DT, Depth + 1, PoisonOnly)) {
6842 IsWellDefined = false;
6843 break;
6846 if (IsWellDefined)
6847 return true;
6848 } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
6849 return true;
6852 if (auto *I = dyn_cast<LoadInst>(V))
6853 if (I->hasMetadata(LLVMContext::MD_noundef) ||
6854 I->hasMetadata(LLVMContext::MD_dereferenceable) ||
6855 I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
6856 return true;
6858 if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
6859 return true;
6861 // CtxI may be null or a cloned instruction.
6862 if (!CtxI || !CtxI->getParent() || !DT)
6863 return false;
6865 auto *DNode = DT->getNode(CtxI->getParent());
6866 if (!DNode)
6867 // Unreachable block
6868 return false;
6870 // If V is used as a branch condition before reaching CtxI, V cannot be
6871 // undef or poison.
6872 // br V, BB1, BB2
6873 // BB1:
6874 // CtxI ; V cannot be undef or poison here
6875 auto *Dominator = DNode->getIDom();
6876 while (Dominator) {
6877 auto *TI = Dominator->getBlock()->getTerminator();
6879 Value *Cond = nullptr;
6880 if (auto BI = dyn_cast_or_null<BranchInst>(TI)) {
6881 if (BI->isConditional())
6882 Cond = BI->getCondition();
6883 } else if (auto SI = dyn_cast_or_null<SwitchInst>(TI)) {
6884 Cond = SI->getCondition();
6887 if (Cond) {
6888 if (Cond == V)
6889 return true;
6890 else if (PoisonOnly && isa<Operator>(Cond)) {
6891 // For poison, we can analyze further
6892 auto *Opr = cast<Operator>(Cond);
6893 if (any_of(Opr->operands(),
6894 [V](const Use &U) { return V == U && propagatesPoison(U); }))
6895 return true;
6899 Dominator = Dominator->getIDom();
6902 if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC))
6903 return true;
6905 return false;
6908 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
6909 const Instruction *CtxI,
6910 const DominatorTree *DT,
6911 unsigned Depth) {
6912 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
6915 bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
6916 const Instruction *CtxI,
6917 const DominatorTree *DT, unsigned Depth) {
6918 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
6921 /// Return true if undefined behavior would provably be executed on the path to
6922 /// OnPathTo if Root produced a poison result. Note that this doesn't say
6923 /// anything about whether OnPathTo is actually executed or whether Root is
6924 /// actually poison. This can be used to assess whether a new use of Root can
6925 /// be added at a location which is control equivalent with OnPathTo (such as
6926 /// immediately before it) without introducing UB which didn't previously
6927 /// exist. Note that a false result conveys no information.
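/// For illustration (hypothetical IR): if Root is %d and the instruction
/// %q = udiv i32 %x, %d dominates OnPathTo, then a poison %d would be
/// immediate UB at %q, so this returns true.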
6928 bool llvm::mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
6929 Instruction *OnPathTo,
6930 DominatorTree *DT) {
6931 // Basic approach is to assume Root is poison, propagate poison forward
6932 // through all users we can easily track, and then check whether any of those
6933 // users are provably UB and must execute before our exiting block might
6934 // exit.
6936 // The set of all recursive users we've visited (which are assumed to all be
6937 // poison because of said visit)
6938 SmallSet<const Value *, 16> KnownPoison;
6939 SmallVector<const Instruction*, 16> Worklist;
6940 Worklist.push_back(Root);
6941 while (!Worklist.empty()) {
6942 const Instruction *I = Worklist.pop_back_val();
6944 // If we know this must trigger UB on a path leading to our target.
6945 if (mustTriggerUB(I, KnownPoison) && DT->dominates(I, OnPathTo))
6946 return true;
6948 // If we can't analyze propagation through this instruction, just skip it
6949 // and its transitive users. This is safe, as false is a conservative result.
6950 if (I != Root && !any_of(I->operands(), [&KnownPoison](const Use &U) {
6951 return KnownPoison.contains(U) && propagatesPoison(U);
6953 continue;
6955 if (KnownPoison.insert(I).second)
6956 for (const User *User : I->users())
6957 Worklist.push_back(cast<Instruction>(User));
6960 // Might be non-UB, or might have a path we couldn't prove must execute on
6961 // the way to the exiting bb.
6962 return false;
6965 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
6966 const SimplifyQuery &SQ) {
6967 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
6968 Add, SQ);
6971 OverflowResult
6972 llvm::computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
6973 const WithCache<const Value *> &RHS,
6974 const SimplifyQuery &SQ) {
6975 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, SQ);
6978 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
6979 // Note: An atomic operation isn't guaranteed to return in a reasonable amount
6980 // of time because it's possible for another thread to interfere with it for an
6981 // arbitrary length of time, but programs aren't allowed to rely on that.
6983 // If there is no successor, then execution can't transfer to it.
6984 if (isa<ReturnInst>(I))
6985 return false;
6986 if (isa<UnreachableInst>(I))
6987 return false;
6989 // Note: Do not add new checks here; instead, change Instruction::mayThrow or
6990 // Instruction::willReturn.
6992 // FIXME: Move this check into Instruction::willReturn.
6993 if (isa<CatchPadInst>(I)) {
6994 switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
6995 default:
6996 // A catchpad may invoke exception object constructors and such, which
6997 // in some languages can be arbitrary code, so be conservative by default.
6998 return false;
6999 case EHPersonality::CoreCLR:
7000 // For CoreCLR, it just involves a type test.
7001 return true;
7005 // An instruction that returns without throwing must transfer control flow
7006 // to a successor.
7007 return !I->mayThrow() && I->willReturn();
7010 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
7011 // TODO: This is slightly conservative for invoke instructions, since
7012 // exiting via an exception *is* normal control flow for them.
7013 for (const Instruction &I : *BB)
7014 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7015 return false;
7016 return true;
7019 bool llvm::isGuaranteedToTransferExecutionToSuccessor(
7020 BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
7021 unsigned ScanLimit) {
7022 return isGuaranteedToTransferExecutionToSuccessor(make_range(Begin, End),
7023 ScanLimit);
7026 bool llvm::isGuaranteedToTransferExecutionToSuccessor(
7027 iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) {
7028 assert(ScanLimit && "scan limit must be non-zero");
7029 for (const Instruction &I : Range) {
7030 if (isa<DbgInfoIntrinsic>(I))
7031 continue;
7032 if (--ScanLimit == 0)
7033 return false;
7034 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7035 return false;
7037 return true;
7040 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
7041 const Loop *L) {
7042 // The loop header is guaranteed to be executed for every iteration.
7044 // FIXME: Relax this constraint to cover all basic blocks that are
7045 // guaranteed to be executed at every iteration.
7046 if (I->getParent() != L->getHeader()) return false;
7048 for (const Instruction &LI : *L->getHeader()) {
7049 if (&LI == I) return true;
7050 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
7052 llvm_unreachable("Instruction not contained in its own parent basic block.");
7055 bool llvm::propagatesPoison(const Use &PoisonOp) {
7056 const Operator *I = cast<Operator>(PoisonOp.getUser());
7057 switch (I->getOpcode()) {
7058 case Instruction::Freeze:
7059 case Instruction::PHI:
7060 case Instruction::Invoke:
7061 return false;
7062 case Instruction::Select:
7063 return PoisonOp.getOperandNo() == 0;
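// For illustration: in %s = select i1 %c, i32 %a, i32 %b, a poison %c
// always makes %s poison, but a poison %a reaches %s only when %c is
// true, so only the condition operand propagates poison unconditionally.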
7064 case Instruction::Call:
7065 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
7066 switch (II->getIntrinsicID()) {
7067 // TODO: Add more intrinsics.
7068 case Intrinsic::sadd_with_overflow:
7069 case Intrinsic::ssub_with_overflow:
7070 case Intrinsic::smul_with_overflow:
7071 case Intrinsic::uadd_with_overflow:
7072 case Intrinsic::usub_with_overflow:
7073 case Intrinsic::umul_with_overflow:
7074 // If an input is a vector containing a poison element, the
7075 // corresponding lanes of the two output vectors (calculated
7076 // results, overflow bits) are poison.
7077 return true;
7078 case Intrinsic::ctpop:
7079 return true;
7082 return false;
7083 case Instruction::ICmp:
7084 case Instruction::FCmp:
7085 case Instruction::GetElementPtr:
7086 return true;
7087 default:
7088 if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
7089 return true;
7091 // Be conservative and return false.
7092 return false;
7096 void llvm::getGuaranteedWellDefinedOps(
7097 const Instruction *I, SmallVectorImpl<const Value *> &Operands) {
7098 switch (I->getOpcode()) {
7099 case Instruction::Store:
7100 Operands.push_back(cast<StoreInst>(I)->getPointerOperand());
7101 break;
7103 case Instruction::Load:
7104 Operands.push_back(cast<LoadInst>(I)->getPointerOperand());
7105 break;
7107 // Since the dereferenceable attribute implies noundef, atomic operations
7108 // also implicitly have noundef pointers.
7109 case Instruction::AtomicCmpXchg:
7110 Operands.push_back(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
7111 break;
7113 case Instruction::AtomicRMW:
7114 Operands.push_back(cast<AtomicRMWInst>(I)->getPointerOperand());
7115 break;
7117 case Instruction::Call:
7118 case Instruction::Invoke: {
7119 const CallBase *CB = cast<CallBase>(I);
7120 if (CB->isIndirectCall())
7121 Operands.push_back(CB->getCalledOperand());
7122 for (unsigned i = 0; i < CB->arg_size(); ++i) {
7123 if (CB->paramHasAttr(i, Attribute::NoUndef) ||
7124 CB->paramHasAttr(i, Attribute::Dereferenceable) ||
7125 CB->paramHasAttr(i, Attribute::DereferenceableOrNull))
7126 Operands.push_back(CB->getArgOperand(i));
7128 break;
7130 case Instruction::Ret:
7131 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef))
7132 Operands.push_back(I->getOperand(0));
7133 break;
7134 case Instruction::Switch:
7135 Operands.push_back(cast<SwitchInst>(I)->getCondition());
7136 break;
7137 case Instruction::Br: {
7138 auto *BR = cast<BranchInst>(I);
7139 if (BR->isConditional())
7140 Operands.push_back(BR->getCondition());
7141 break;
7143 default:
7144 break;
7148 void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
7149 SmallVectorImpl<const Value *> &Operands) {
7150 getGuaranteedWellDefinedOps(I, Operands);
7151 switch (I->getOpcode()) {
7152 // Divisors of these operations are allowed to be partially undef.
7153 case Instruction::UDiv:
7154 case Instruction::SDiv:
7155 case Instruction::URem:
7156 case Instruction::SRem:
7157 Operands.push_back(I->getOperand(1));
7158 break;
7159 default:
7160 break;
7164 bool llvm::mustTriggerUB(const Instruction *I,
7165 const SmallPtrSetImpl<const Value *> &KnownPoison) {
7166 SmallVector<const Value *, 4> NonPoisonOps;
7167 getGuaranteedNonPoisonOps(I, NonPoisonOps);
7169 for (const auto *V : NonPoisonOps)
7170 if (KnownPoison.count(V))
7171 return true;
7173 return false;
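// For illustration (hypothetical values): if KnownPoison contains %d,
// then udiv i32 %x, %d must trigger UB, since divisors are among the
// guaranteed-non-poison operands collected above.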
7176 static bool programUndefinedIfUndefOrPoison(const Value *V,
7177 bool PoisonOnly) {
7178 // We currently only look for uses of values within the same basic
7179 // block, as that makes it easier to guarantee that the uses will be
7180 // executed given that Inst is executed.
7182 // FIXME: Expand this to consider uses beyond the same basic block. To do
7183 // this, look out for the distinction between post-dominance and strong
7184 // post-dominance.
7185 const BasicBlock *BB = nullptr;
7186 BasicBlock::const_iterator Begin;
7187 if (const auto *Inst = dyn_cast<Instruction>(V)) {
7188 BB = Inst->getParent();
7189 Begin = Inst->getIterator();
7190 Begin++;
7191 } else if (const auto *Arg = dyn_cast<Argument>(V)) {
7192 if (Arg->getParent()->isDeclaration())
7193 return false;
7194 BB = &Arg->getParent()->getEntryBlock();
7195 Begin = BB->begin();
7196 } else {
7197 return false;
7200 // Limit number of instructions we look at, to avoid scanning through large
7201 // blocks. The current limit is chosen arbitrarily.
7202 unsigned ScanLimit = 32;
7203 BasicBlock::const_iterator End = BB->end();
7205 if (!PoisonOnly) {
7206 // Since undef does not propagate eagerly, be conservative & just check
7207 // whether a value is directly passed to an instruction that must take
7208 // well-defined operands.
7210 for (const auto &I : make_range(Begin, End)) {
7211 if (isa<DbgInfoIntrinsic>(I))
7212 continue;
7213 if (--ScanLimit == 0)
7214 break;
7216 SmallVector<const Value *, 4> WellDefinedOps;
7217 getGuaranteedWellDefinedOps(&I, WellDefinedOps);
7218 if (is_contained(WellDefinedOps, V))
7219 return true;
7221 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7222 break;
7224 return false;
7227 // Set of instructions that we have proved will yield poison if Inst
7228 // does.
7229 SmallSet<const Value *, 16> YieldsPoison;
7230 SmallSet<const BasicBlock *, 4> Visited;
7232 YieldsPoison.insert(V);
7233 Visited.insert(BB);
7235 while (true) {
7236 for (const auto &I : make_range(Begin, End)) {
7237 if (isa<DbgInfoIntrinsic>(I))
7238 continue;
7239 if (--ScanLimit == 0)
7240 return false;
7241 if (mustTriggerUB(&I, YieldsPoison))
7242 return true;
7243 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7244 return false;
7246 // If an operand is poison and propagates it, mark I as yielding poison.
7247 for (const Use &Op : I.operands()) {
7248 if (YieldsPoison.count(Op) && propagatesPoison(Op)) {
7249 YieldsPoison.insert(&I);
7250 break;
7254 // Special handling for select, which returns poison if its operand 0 is
7255 // poison (handled in the loop above) *or* if both its true/false operands
7256 // are poison (handled here).
7257 if (I.getOpcode() == Instruction::Select &&
7258 YieldsPoison.count(I.getOperand(1)) &&
7259 YieldsPoison.count(I.getOperand(2))) {
7260 YieldsPoison.insert(&I);
7264 BB = BB->getSingleSuccessor();
7265 if (!BB || !Visited.insert(BB).second)
7266 break;
7268 Begin = BB->getFirstNonPHI()->getIterator();
7269 End = BB->end();
7271 return false;
7274 bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
7275 return ::programUndefinedIfUndefOrPoison(Inst, false);
7278 bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
7279 return ::programUndefinedIfUndefOrPoison(Inst, true);
7282 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
7283 if (FMF.noNaNs())
7284 return true;
7286 if (auto *C = dyn_cast<ConstantFP>(V))
7287 return !C->isNaN();
7289 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
7290 if (!C->getElementType()->isFloatingPointTy())
7291 return false;
7292 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
7293 if (C->getElementAsAPFloat(I).isNaN())
7294 return false;
7296 return true;
7299 if (isa<ConstantAggregateZero>(V))
7300 return true;
7302 return false;
7305 static bool isKnownNonZero(const Value *V) {
7306 if (auto *C = dyn_cast<ConstantFP>(V))
7307 return !C->isZero();
7309 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
7310 if (!C->getElementType()->isFloatingPointTy())
7311 return false;
7312 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
7313 if (C->getElementAsAPFloat(I).isZero())
7314 return false;
7316 return true;
7319 return false;
7322 /// Match the clamp pattern for float types without regard to NaNs or signed
7323 /// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
7324 /// function recognizes if it can be substituted with a "canonical" min/max
7325 /// pattern.
7326 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
7327 Value *CmpLHS, Value *CmpRHS,
7328 Value *TrueVal, Value *FalseVal,
7329 Value *&LHS, Value *&RHS) {
7330 // Try to match
7331 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
7332 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
7333 // and return description of the outer Max/Min.
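// For illustration: x < 1.0 ? 1.0 : fmin(x, 2.0) clamps x to [1.0, 2.0]
// and is recognized as the max-of-min form fmax(1.0, fmin(x, 2.0)), with
// FC1 = 1.0 < FC2 = 2.0 as required below.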
7335 // First, check if select has inverse order:
7336 if (CmpRHS == FalseVal) {
7337 std::swap(TrueVal, FalseVal);
7338 Pred = CmpInst::getInversePredicate(Pred);
7341 // Assume success now. If there's no match, callers should not use these anyway.
7342 LHS = TrueVal;
7343 RHS = FalseVal;
7345 const APFloat *FC1;
7346 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
7347 return {SPF_UNKNOWN, SPNB_NA, false};
7349 const APFloat *FC2;
7350 switch (Pred) {
7351 case CmpInst::FCMP_OLT:
7352 case CmpInst::FCMP_OLE:
7353 case CmpInst::FCMP_ULT:
7354 case CmpInst::FCMP_ULE:
7355 if (match(FalseVal,
7356 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
7357 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
7358 *FC1 < *FC2)
7359 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
7360 break;
7361 case CmpInst::FCMP_OGT:
7362 case CmpInst::FCMP_OGE:
7363 case CmpInst::FCMP_UGT:
7364 case CmpInst::FCMP_UGE:
7365 if (match(FalseVal,
7366 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
7367 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
7368 *FC1 > *FC2)
7369 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
7370 break;
7371 default:
7372 break;
7375 return {SPF_UNKNOWN, SPNB_NA, false};
7378 /// Recognize variations of:
7379 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
7380 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
7381 Value *CmpLHS, Value *CmpRHS,
7382 Value *TrueVal, Value *FalseVal) {
7383 // Swap the select operands and predicate to match the patterns below.
7384 if (CmpRHS != TrueVal) {
7385 Pred = ICmpInst::getSwappedPredicate(Pred);
7386 std::swap(TrueVal, FalseVal);
7388 const APInt *C1;
7389 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
7390 const APInt *C2;
7391 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
7392 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
7393 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
7394 return {SPF_SMAX, SPNB_NA, false};
7396 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
7397 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
7398 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
7399 return {SPF_SMIN, SPNB_NA, false};
7401 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
7402 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
7403 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
7404 return {SPF_UMAX, SPNB_NA, false};
7406 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
7407 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
7408 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
7409 return {SPF_UMIN, SPNB_NA, false};
7411 return {SPF_UNKNOWN, SPNB_NA, false};
7414 /// Recognize variations of:
7415 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
7416 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
7417 Value *CmpLHS, Value *CmpRHS,
7418 Value *TVal, Value *FVal,
7419 unsigned Depth) {
7420 // TODO: Allow FP min/max with nnan/nsz.
7421 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
7423 Value *A = nullptr, *B = nullptr;
7424 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
7425 if (!SelectPatternResult::isMinOrMax(L.Flavor))
7426 return {SPF_UNKNOWN, SPNB_NA, false};
7428 Value *C = nullptr, *D = nullptr;
7429 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
7430 if (L.Flavor != R.Flavor)
7431 return {SPF_UNKNOWN, SPNB_NA, false};
7433 // We have something like: x Pred y ? min(a, b) : min(c, d).
7434 // Try to match the compare to the min/max operations of the select operands.
7435 // First, make sure we have the right compare predicate.
7436 switch (L.Flavor) {
7437 case SPF_SMIN:
7438 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
7439 Pred = ICmpInst::getSwappedPredicate(Pred);
7440 std::swap(CmpLHS, CmpRHS);
7442 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
7443 break;
7444 return {SPF_UNKNOWN, SPNB_NA, false};
7445 case SPF_SMAX:
7446 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
7447 Pred = ICmpInst::getSwappedPredicate(Pred);
7448 std::swap(CmpLHS, CmpRHS);
7450 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
7451 break;
7452 return {SPF_UNKNOWN, SPNB_NA, false};
7453 case SPF_UMIN:
7454 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
7455 Pred = ICmpInst::getSwappedPredicate(Pred);
7456 std::swap(CmpLHS, CmpRHS);
7458 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
7459 break;
7460 return {SPF_UNKNOWN, SPNB_NA, false};
7461 case SPF_UMAX:
7462 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
7463 Pred = ICmpInst::getSwappedPredicate(Pred);
7464 std::swap(CmpLHS, CmpRHS);
7466 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
7467 break;
7468 return {SPF_UNKNOWN, SPNB_NA, false};
7469 default:
7470 return {SPF_UNKNOWN, SPNB_NA, false};
7473 // If there is a common operand in the already matched min/max and the other
7474 // min/max operands match the compare operands (either directly or inverted),
7475 // then this is min/max of the same flavor.
7477 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
7478 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
7479 if (D == B) {
7480 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
7481 match(A, m_Not(m_Specific(CmpRHS)))))
7482 return {L.Flavor, SPNB_NA, false};
7484 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
7485 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
7486 if (C == B) {
7487 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
7488 match(A, m_Not(m_Specific(CmpRHS)))))
7489 return {L.Flavor, SPNB_NA, false};
7491 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
7492 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
7493 if (D == A) {
7494 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
7495 match(B, m_Not(m_Specific(CmpRHS)))))
7496 return {L.Flavor, SPNB_NA, false};
7498 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
7499 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
7500 if (C == A) {
7501 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
7502 match(B, m_Not(m_Specific(CmpRHS)))))
7503 return {L.Flavor, SPNB_NA, false};
7506 return {SPF_UNKNOWN, SPNB_NA, false};
7509 /// If the input value is the result of a 'not' op, constant integer, or vector
7510 /// splat of a constant integer, return the bitwise-not source value.
7511 /// TODO: This could be extended to handle non-splat vector integer constants.
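/// For illustration: for %n = xor i8 %x, -1 this returns %x, and for the
/// constant i8 5 (0b00000101) it returns i8 -6 (0b11111010).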
7512 static Value *getNotValue(Value *V) {
7513 Value *NotV;
7514 if (match(V, m_Not(m_Value(NotV))))
7515 return NotV;
7517 const APInt *C;
7518 if (match(V, m_APInt(C)))
7519 return ConstantInt::get(V->getType(), ~(*C));
7521 return nullptr;
7524 /// Match non-obvious integer minimum and maximum sequences.
7525 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
7526 Value *CmpLHS, Value *CmpRHS,
7527 Value *TrueVal, Value *FalseVal,
7528 Value *&LHS, Value *&RHS,
7529 unsigned Depth) {
7530 // Assume success. If there's no match, callers should not use these anyway.
7531 LHS = TrueVal;
7532 RHS = FalseVal;
7534 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
7535 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
7536 return SPR;
7538 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
7539 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
7540 return SPR;
7542 // Look through 'not' ops to find disguised min/max.
7543 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
7544 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
7545 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
7546 switch (Pred) {
7547 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
7548 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
7549 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
7550 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
7551 default: break;
7555 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
7556 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
7557 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
7558 switch (Pred) {
7559 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
7560 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
7561 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
7562 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
7563 default: break;
7567 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
7568 return {SPF_UNKNOWN, SPNB_NA, false};
7570 const APInt *C1;
7571 if (!match(CmpRHS, m_APInt(C1)))
7572 return {SPF_UNKNOWN, SPNB_NA, false};
7574 // An unsigned min/max can be written with a signed compare.
7575 const APInt *C2;
7576 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
7577 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
7578 // Is the sign bit set?
7579 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
7580 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
7581 if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue())
7582 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
7584 // Is the sign bit clear?
7585 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
7586 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
7587 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue())
7588 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
7591 return {SPF_UNKNOWN, SPNB_NA, false};
7594 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
7595 assert(X && Y && "Invalid operand");
7597 // X = sub (0, Y) || X = sub nsw (0, Y)
7598 if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
7599 (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
7600 return true;
7602 // Y = sub (0, X) || Y = sub nsw (0, X)
7603 if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
7604 (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
7605 return true;
7607 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
7608 Value *A, *B;
7609 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
7610 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
7611 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
7612 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
7615 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
7616 FastMathFlags FMF,
7617 Value *CmpLHS, Value *CmpRHS,
7618 Value *TrueVal, Value *FalseVal,
7619 Value *&LHS, Value *&RHS,
7620 unsigned Depth) {
7621 bool HasMismatchedZeros = false;
7622 if (CmpInst::isFPPredicate(Pred)) {
7623 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
7624 // 0.0 operand, set the compare's 0.0 operands to that same value for the
7625 // purpose of identifying min/max. Disregard vector constants with undefined
7626 // elements because those cannot be back-propagated for analysis.
7627 Value *OutputZeroVal = nullptr;
7628 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
7629 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
7630 OutputZeroVal = TrueVal;
7631 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
7632 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
7633 OutputZeroVal = FalseVal;
7635 if (OutputZeroVal) {
7636 if (match(CmpLHS, m_AnyZeroFP()) && CmpLHS != OutputZeroVal) {
7637 HasMismatchedZeros = true;
7638 CmpLHS = OutputZeroVal;
7640 if (match(CmpRHS, m_AnyZeroFP()) && CmpRHS != OutputZeroVal) {
7641 HasMismatchedZeros = true;
7642 CmpRHS = OutputZeroVal;
7647 LHS = CmpLHS;
7648 RHS = CmpRHS;
7650 // Handling of signed zero may be inconsistent between implementations:
7651 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
7652 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
7653 // Therefore, we behave conservatively and only proceed if at least one of the
7654 // operands is known to not be zero or if we don't care about signed zero.
7655 switch (Pred) {
7656 default: break;
7657 case CmpInst::FCMP_OGT: case CmpInst::FCMP_OLT:
7658 case CmpInst::FCMP_UGT: case CmpInst::FCMP_ULT:
7659 if (!HasMismatchedZeros)
7660 break;
7661 [[fallthrough]];
7662 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
7663 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
7664 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
7665 !isKnownNonZero(CmpRHS))
7666 return {SPF_UNKNOWN, SPNB_NA, false};
7669 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
7670 bool Ordered = false;
7672 // When given one NaN and one non-NaN input:
7673 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
7674 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
7675 // ordered comparison fails), which could be NaN or non-NaN.
7676 // So here we discover exactly what NaN behavior is required/accepted.
7677 if (CmpInst::isFPPredicate(Pred)) {
7678 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
7679 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
7681 if (LHSSafe && RHSSafe) {
7682 // Both operands are known non-NaN.
7683 NaNBehavior = SPNB_RETURNS_ANY;
7684 } else if (CmpInst::isOrdered(Pred)) {
7685 // An ordered comparison will return false when given a NaN, so it
7686 // returns the RHS.
7687 Ordered = true;
7688 if (LHSSafe)
7689 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
7690 NaNBehavior = SPNB_RETURNS_NAN;
7691 else if (RHSSafe)
7692 NaNBehavior = SPNB_RETURNS_OTHER;
7693 else
7694 // Completely unsafe.
7695 return {SPF_UNKNOWN, SPNB_NA, false};
7696 } else {
7697 Ordered = false;
7698 // An unordered comparison will return true when given a NaN, so it
7699 // returns the LHS.
7700 if (LHSSafe)
7701 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
7702 NaNBehavior = SPNB_RETURNS_OTHER;
7703 else if (RHSSafe)
7704 NaNBehavior = SPNB_RETURNS_NAN;
7705 else
7706 // Completely unsafe.
7707 return {SPF_UNKNOWN, SPNB_NA, false};
7711 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
7712 std::swap(CmpLHS, CmpRHS);
7713 Pred = CmpInst::getSwappedPredicate(Pred);
7714 if (NaNBehavior == SPNB_RETURNS_NAN)
7715 NaNBehavior = SPNB_RETURNS_OTHER;
7716 else if (NaNBehavior == SPNB_RETURNS_OTHER)
7717 NaNBehavior = SPNB_RETURNS_NAN;
7718 Ordered = !Ordered;
7721 // ([if]cmp X, Y) ? X : Y
7722 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
7723 switch (Pred) {
7724 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
7725 case ICmpInst::ICMP_UGT:
7726 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
7727 case ICmpInst::ICMP_SGT:
7728 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
7729 case ICmpInst::ICMP_ULT:
7730 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
7731 case ICmpInst::ICMP_SLT:
7732 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
7733 case FCmpInst::FCMP_UGT:
7734 case FCmpInst::FCMP_UGE:
7735 case FCmpInst::FCMP_OGT:
7736 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
7737 case FCmpInst::FCMP_ULT:
7738 case FCmpInst::FCMP_ULE:
7739 case FCmpInst::FCMP_OLT:
7740 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
7744 if (isKnownNegation(TrueVal, FalseVal)) {
7745 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
7746 // match against either LHS or sext(LHS).
7747 auto MaybeSExtCmpLHS =
7748 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
7749 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
7750 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
7751 if (match(TrueVal, MaybeSExtCmpLHS)) {
7752 // Set the return values. If the compare uses the negated value (-X >s 0),
7753 // swap the return values because the negated value is always 'RHS'.
7754 LHS = TrueVal;
7755 RHS = FalseVal;
7756 if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
7757 std::swap(LHS, RHS);
7759 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
7760 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
7761 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
7762 return {SPF_ABS, SPNB_NA, false};
7764 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
7765 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
7766 return {SPF_ABS, SPNB_NA, false};
7768 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
7769 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
7770 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
7771 return {SPF_NABS, SPNB_NA, false};
7773 else if (match(FalseVal, MaybeSExtCmpLHS)) {
7774 // Set the return values. If the compare uses the negated value (-X >s 0),
7775 // swap the return values because the negated value is always 'RHS'.
7776 LHS = FalseVal;
7777 RHS = TrueVal;
7778 if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
7779 std::swap(LHS, RHS);
7781 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
7782 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
7783 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
7784 return {SPF_NABS, SPNB_NA, false};
7786 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
7787 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
7788 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
7789 return {SPF_ABS, SPNB_NA, false};
7793 if (CmpInst::isIntPredicate(Pred))
7794 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
7796 // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
7797 // may return either -0.0 or 0.0, so the fcmp/select pair has stricter
7798 // semantics than minNum. Be conservative in such cases.
7799 if (NaNBehavior != SPNB_RETURNS_ANY ||
7800 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
7801 !isKnownNonZero(CmpRHS)))
7802 return {SPF_UNKNOWN, SPNB_NA, false};
7804 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
7807 /// Helps to match a select pattern in case of a type mismatch.
7809 /// The function handles the case where the type of the true and false values
7810 /// of a select instruction differs from the type of the cmp instruction
7811 /// operands because of a cast instruction. It checks whether it is legal to
7812 /// move the cast after the "select". If so, it returns the new second value
7813 /// of the "select" (with the assumption that the cast is moved):
7814 /// 1. As operand of cast instruction when both values of "select" are same cast
7815 /// instructions.
7816 /// 2. As restored constant (by applying reverse cast operation) when the first
7817 /// value of the "select" is a cast operation and the second value is a
7818 /// constant.
7819 /// NOTE: We return only the new second value because the first value could be
7820 /// accessed as operand of cast instruction.
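/// For example (illustrative), given
///   %cmp = icmp ult i8 %x, 42
///   %ext = zext i8 %x to i32
///   %sel = select i1 %cmp, i32 %ext, i32 7
/// lookThroughCast returns the i8 constant 7, because trunc(i32 7 to i8)
/// round-trips through zext without losing information.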
7821 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
7822 Instruction::CastOps *CastOp) {
7823 auto *Cast1 = dyn_cast<CastInst>(V1);
7824 if (!Cast1)
7825 return nullptr;
7827 *CastOp = Cast1->getOpcode();
7828 Type *SrcTy = Cast1->getSrcTy();
7829 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
7830 // If V1 and V2 are both the same cast from the same type, look through V1.
7831 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
7832 return Cast2->getOperand(0);
7833 return nullptr;
7834 }
7836 auto *C = dyn_cast<Constant>(V2);
7837 if (!C)
7838 return nullptr;
7840 const DataLayout &DL = CmpI->getModule()->getDataLayout();
7841 Constant *CastedTo = nullptr;
7842 switch (*CastOp) {
7843 case Instruction::ZExt:
7844 if (CmpI->isUnsigned())
7845 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
7846 break;
7847 case Instruction::SExt:
7848 if (CmpI->isSigned())
7849 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
7850 break;
7851 case Instruction::Trunc:
7852 Constant *CmpConst;
7853 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
7854 CmpConst->getType() == SrcTy) {
7855 // Here we have the following case:
7857 // %cond = cmp iN %x, CmpConst
7858 // %tr = trunc iN %x to iK
7859 // %narrowsel = select i1 %cond, iK %tr, iK C
7861 // We can always move trunc after select operation:
7863 // %cond = cmp iN %x, CmpConst
7864 // %widesel = select i1 %cond, iN %x, iN CmpConst
7865 // %tr = trunc iN %widesel to iK
7867 // Note that C could be extended in any way because we don't care about
7868 // upper bits after truncation. It can't be an abs pattern, because that
7869 // would look like:
7871 // select i1 %cond, x, -x.
7873 // So only a min/max pattern can be matched. Such a match requires the
7874 // widened C to equal CmpConst. That is why we set the widened C = CmpConst;
7875 // the condition trunc(CmpConst) == C is checked below.
7876 CastedTo = CmpConst;
7877 } else {
7878 unsigned ExtOp = CmpI->isSigned() ? Instruction::SExt : Instruction::ZExt;
7879 CastedTo = ConstantFoldCastOperand(ExtOp, C, SrcTy, DL);
7880 }
7881 break;
7882 case Instruction::FPTrunc:
7883 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
7884 break;
7885 case Instruction::FPExt:
7886 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
7887 break;
7888 case Instruction::FPToUI:
7889 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
7890 break;
7891 case Instruction::FPToSI:
7892 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
7893 break;
7894 case Instruction::UIToFP:
7895 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
7896 break;
7897 case Instruction::SIToFP:
7898 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
7899 break;
7900 default:
7901 break;
7902 }
7904 if (!CastedTo)
7905 return nullptr;
7907 // Make sure the cast doesn't lose any information.
7908 Constant *CastedBack =
7909 ConstantFoldCastOperand(*CastOp, CastedTo, C->getType(), DL);
7910 if (CastedBack && CastedBack != C)
7911 return nullptr;
7913 return CastedTo;
7914 }
7916 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
7917 Instruction::CastOps *CastOp,
7918 unsigned Depth) {
7919 if (Depth >= MaxAnalysisRecursionDepth)
7920 return {SPF_UNKNOWN, SPNB_NA, false};
7922 SelectInst *SI = dyn_cast<SelectInst>(V);
7923 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
7925 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
7926 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
7928 Value *TrueVal = SI->getTrueValue();
7929 Value *FalseVal = SI->getFalseValue();
7931 return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
7932 CastOp, Depth);
7933 }
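// Example: for
//   %cmp = icmp slt i32 %a, %b
//   %min = select i1 %cmp, i32 %a, i32 %b
// this returns SPF_SMIN with LHS = %a and RHS = %b.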
7935 SelectPatternResult llvm::matchDecomposedSelectPattern(
7936 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
7937 Instruction::CastOps *CastOp, unsigned Depth) {
7938 CmpInst::Predicate Pred = CmpI->getPredicate();
7939 Value *CmpLHS = CmpI->getOperand(0);
7940 Value *CmpRHS = CmpI->getOperand(1);
7941 FastMathFlags FMF;
7942 if (isa<FPMathOperator>(CmpI))
7943 FMF = CmpI->getFastMathFlags();
7945 // Bail out early.
7946 if (CmpI->isEquality())
7947 return {SPF_UNKNOWN, SPNB_NA, false};
7949 // Deal with type mismatches.
7950 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
7951 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
7952 // If this is a potential fmin/fmax with a cast to integer, then ignore
7953 // -0.0 because there is no corresponding integer value.
7954 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
7955 FMF.setNoSignedZeros();
7956 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
7957 cast<CastInst>(TrueVal)->getOperand(0), C,
7958 LHS, RHS, Depth);
7959 }
7960 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
7961 // If this is a potential fmin/fmax with a cast to integer, then ignore
7962 // -0.0 because there is no corresponding integer value.
7963 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
7964 FMF.setNoSignedZeros();
7965 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
7966 C, cast<CastInst>(FalseVal)->getOperand(0),
7967 LHS, RHS, Depth);
7968 }
7969 }
7970 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
7971 LHS, RHS, Depth);
7972 }
7974 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
7975 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
7976 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
7977 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
7978 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
7979 if (SPF == SPF_FMINNUM)
7980 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
7981 if (SPF == SPF_FMAXNUM)
7982 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
7983 llvm_unreachable("unhandled!");
7984 }
7986 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
7987 if (SPF == SPF_SMIN) return SPF_SMAX;
7988 if (SPF == SPF_UMIN) return SPF_UMAX;
7989 if (SPF == SPF_SMAX) return SPF_SMIN;
7990 if (SPF == SPF_UMAX) return SPF_UMIN;
7991 llvm_unreachable("unhandled!");
7992 }
7994 Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
7995 switch (MinMaxID) {
7996 case Intrinsic::smax: return Intrinsic::smin;
7997 case Intrinsic::smin: return Intrinsic::smax;
7998 case Intrinsic::umax: return Intrinsic::umin;
7999 case Intrinsic::umin: return Intrinsic::umax;
8000 // Note that the next four intrinsics may produce the same result for the
8001 // original and the inverted case even if X != Y, because NaN is handled specially.
8002 case Intrinsic::maximum: return Intrinsic::minimum;
8003 case Intrinsic::minimum: return Intrinsic::maximum;
8004 case Intrinsic::maxnum: return Intrinsic::minnum;
8005 case Intrinsic::minnum: return Intrinsic::maxnum;
8006 default: llvm_unreachable("Unexpected intrinsic");
8007 }
8008 }
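// Note that maxnum and minnum agree whenever one operand is NaN:
// maxnum(NaN, %y) == minnum(NaN, %y) == %y, so the inverted intrinsic is not
// a strict logical negation of the original comparison.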
8010 APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) {
8011 switch (SPF) {
8012 case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth);
8013 case SPF_SMIN: return APInt::getSignedMinValue(BitWidth);
8014 case SPF_UMAX: return APInt::getMaxValue(BitWidth);
8015 case SPF_UMIN: return APInt::getMinValue(BitWidth);
8016 default: llvm_unreachable("Unexpected flavor");
8017 }
8018 }
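// E.g. getMinMaxLimit(SPF_SMAX, 8) returns 127 and
// getMinMaxLimit(SPF_UMIN, 8) returns 0 -- the saturation point of the
// corresponding min/max operation.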
8020 std::pair<Intrinsic::ID, bool>
8021 llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
8022 // Check if VL contains select instructions that can be folded into a min/max
8023 // vector intrinsic and return the intrinsic if it is possible.
8024 // TODO: Support floating point min/max.
8025 bool AllCmpSingleUse = true;
8026 SelectPatternResult SelectPattern;
8027 SelectPattern.Flavor = SPF_UNKNOWN;
8028 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
8029 Value *LHS, *RHS;
8030 auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
8031 if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
8032 CurrentPattern.Flavor == SPF_FMINNUM ||
8033 CurrentPattern.Flavor == SPF_FMAXNUM ||
8034 !I->getType()->isIntOrIntVectorTy())
8035 return false;
8036 if (SelectPattern.Flavor != SPF_UNKNOWN &&
8037 SelectPattern.Flavor != CurrentPattern.Flavor)
8038 return false;
8039 SelectPattern = CurrentPattern;
8040 AllCmpSingleUse &=
8041 match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
8042 return true;
8043 })) {
8044 switch (SelectPattern.Flavor) {
8045 case SPF_SMIN:
8046 return {Intrinsic::smin, AllCmpSingleUse};
8047 case SPF_UMIN:
8048 return {Intrinsic::umin, AllCmpSingleUse};
8049 case SPF_SMAX:
8050 return {Intrinsic::smax, AllCmpSingleUse};
8051 case SPF_UMAX:
8052 return {Intrinsic::umax, AllCmpSingleUse};
8053 default:
8054 llvm_unreachable("unexpected select pattern flavor");
8055 }
8056 }
8057 return {Intrinsic::not_intrinsic, false};
8058 }
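// Example: if every element of VL has the shape
//   %c = icmp slt i32 %x, %y
//   %s = select i1 %c, i32 %x, i32 %y
// this returns {Intrinsic::smin, true}; the bool is true only when each
// compare is used solely by its select.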
8060 bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
8061 Value *&Start, Value *&Step) {
8062 // Handle the case of a simple two-predecessor recurrence PHI.
8063 // There's a lot more that could theoretically be done here, but
8064 // this is sufficient to catch some interesting cases.
8065 if (P->getNumIncomingValues() != 2)
8066 return false;
8068 for (unsigned i = 0; i != 2; ++i) {
8069 Value *L = P->getIncomingValue(i);
8070 Value *R = P->getIncomingValue(!i);
8071 Operator *LU = dyn_cast<Operator>(L);
8072 if (!LU)
8073 continue;
8074 unsigned Opcode = LU->getOpcode();
8076 switch (Opcode) {
8077 default:
8078 continue;
8079 // TODO: Expand list -- xor, div, gep, uaddo, etc..
8080 case Instruction::LShr:
8081 case Instruction::AShr:
8082 case Instruction::Shl:
8083 case Instruction::Add:
8084 case Instruction::Sub:
8085 case Instruction::And:
8086 case Instruction::Or:
8087 case Instruction::Mul:
8088 case Instruction::FMul: {
8089 Value *LL = LU->getOperand(0);
8090 Value *LR = LU->getOperand(1);
8091 // Find a recurrence.
8092 if (LL == P)
8093 L = LR;
8094 else if (LR == P)
8095 L = LL;
8096 else
8097 continue; // Check for recurrence with L and R flipped.
8099 break; // Match!
8100 }
8101 };
8103 // We have matched a recurrence of the form:
8104 // %iv = [R, %entry], [%iv.next, %backedge]
8105 // %iv.next = binop %iv, L
8106 // OR
8107 // %iv = [R, %entry], [%iv.next, %backedge]
8108 // %iv.next = binop L, %iv
8109 BO = cast<BinaryOperator>(LU);
8110 Start = R;
8111 Step = L;
8112 return true;
8113 }
8114 return false;
8115 }
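// Example: for the canonical induction variable
//   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add i32 %iv, 1
// this sets BO = %iv.next, Start = 0 and Step = 1.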
8117 bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
8118 Value *&Start, Value *&Step) {
8119 BinaryOperator *BO = nullptr;
8120 P = dyn_cast<PHINode>(I->getOperand(0));
8121 if (!P)
8122 P = dyn_cast<PHINode>(I->getOperand(1));
8123 return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
8124 }
8126 /// Return true if "icmp Pred LHS RHS" is always true.
8127 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
8128 const Value *RHS, const DataLayout &DL,
8129 unsigned Depth) {
8130 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
8131 return true;
8133 switch (Pred) {
8134 default:
8135 return false;
8137 case CmpInst::ICMP_SLE: {
8138 const APInt *C;
8140 // LHS s<= LHS +_{nsw} C if C >= 0
8141 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
8142 return !C->isNegative();
8143 return false;
8144 }
8146 case CmpInst::ICMP_ULE: {
8147 const APInt *C;
8149 // LHS u<= LHS +_{nuw} C for any C
8150 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
8151 return true;
8153 // RHS >> V u<= RHS for any V
8154 if (match(LHS, m_LShr(m_Specific(RHS), m_Value())))
8155 return true;
8157 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
8158 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
8159 const Value *&X,
8160 const APInt *&CA, const APInt *&CB) {
8161 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
8162 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
8163 return true;
8165 // If X & C == 0 then (X | C) == X +_{nuw} C
8166 if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
8167 match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
8168 KnownBits Known(CA->getBitWidth());
8169 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
8170 /*CxtI*/ nullptr, /*DT*/ nullptr);
8171 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
8172 return true;
8173 }
8175 return false;
8176 };
8178 const Value *X;
8179 const APInt *CLHS, *CRHS;
8180 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
8181 return CLHS->ule(*CRHS);
8183 return false;
8184 }
8185 }
8186 }
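// For instance, "%x u<= (add nuw i8 %x, 3)" is always true: a nuw add cannot
// wrap, so the sum can never be smaller than %x.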
8188 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
8189 /// ALHS ARHS" is true. Otherwise, return std::nullopt.
8190 static std::optional<bool>
8191 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
8192 const Value *ARHS, const Value *BLHS, const Value *BRHS,
8193 const DataLayout &DL, unsigned Depth) {
8194 switch (Pred) {
8195 default:
8196 return std::nullopt;
8198 case CmpInst::ICMP_SLT:
8199 case CmpInst::ICMP_SLE:
8200 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
8201 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
8202 return true;
8203 return std::nullopt;
8205 case CmpInst::ICMP_SGT:
8206 case CmpInst::ICMP_SGE:
8207 if (isTruePredicate(CmpInst::ICMP_SLE, ALHS, BLHS, DL, Depth) &&
8208 isTruePredicate(CmpInst::ICMP_SLE, BRHS, ARHS, DL, Depth))
8209 return true;
8210 return std::nullopt;
8212 case CmpInst::ICMP_ULT:
8213 case CmpInst::ICMP_ULE:
8214 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
8215 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
8216 return true;
8217 return std::nullopt;
8219 case CmpInst::ICMP_UGT:
8220 case CmpInst::ICMP_UGE:
8221 if (isTruePredicate(CmpInst::ICMP_ULE, ALHS, BLHS, DL, Depth) &&
8222 isTruePredicate(CmpInst::ICMP_ULE, BRHS, ARHS, DL, Depth))
8223 return true;
8224 return std::nullopt;
8225 }
8226 }
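// Example: "%x <s %y" implies "%x <s (add nsw i32 %y, 5)", because
// isTruePredicate shows %x s<= %x and %y s<= %y +_{nsw} 5.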
8228 /// Return true if the operands of two compares (expanded as "L0 pred L1" and
8229 /// "R0 pred R1") match. AreSwappedOps is set to true when the operands match,
8230 /// but are swapped.
8231 static bool areMatchingOperands(const Value *L0, const Value *L1, const Value *R0,
8232 const Value *R1, bool &AreSwappedOps) {
8233 bool AreMatchingOps = (L0 == R0 && L1 == R1);
8234 AreSwappedOps = (L0 == R1 && L1 == R0);
8235 return AreMatchingOps || AreSwappedOps;
8236 }
8238 /// Return true if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is true.
8239 /// Return false if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is false.
8240 /// Otherwise, return std::nullopt if we can't infer anything.
8241 static std::optional<bool>
8242 isImpliedCondMatchingOperands(CmpInst::Predicate LPred,
8243 CmpInst::Predicate RPred, bool AreSwappedOps) {
8244 // Canonicalize the predicate as if the operands were not commuted.
8245 if (AreSwappedOps)
8246 RPred = ICmpInst::getSwappedPredicate(RPred);
8248 if (CmpInst::isImpliedTrueByMatchingCmp(LPred, RPred))
8249 return true;
8250 if (CmpInst::isImpliedFalseByMatchingCmp(LPred, RPred))
8251 return false;
8253 return std::nullopt;
8254 }
8256 /// Return true if "icmp LPred X, LC" implies "icmp RPred X, RC" is true.
8257 /// Return false if "icmp LPred X, LC" implies "icmp RPred X, RC" is false.
8258 /// Otherwise, return std::nullopt if we can't infer anything.
8259 static std::optional<bool> isImpliedCondCommonOperandWithConstants(
8260 CmpInst::Predicate LPred, const APInt &LC, CmpInst::Predicate RPred,
8261 const APInt &RC) {
8262 ConstantRange DomCR = ConstantRange::makeExactICmpRegion(LPred, LC);
8263 ConstantRange CR = ConstantRange::makeExactICmpRegion(RPred, RC);
8264 ConstantRange Intersection = DomCR.intersectWith(CR);
8265 ConstantRange Difference = DomCR.difference(CR);
8266 if (Intersection.isEmptySet())
8267 return false;
8268 if (Difference.isEmptySet())
8269 return true;
8270 return std::nullopt;
8271 }
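// Example: "%x u< 4" implies "%x u< 10" (the difference [0,4) \ [0,10) is
// empty) and refutes "%x u> 20" (the regions [0,4) and [21,UINT_MAX] do not
// intersect).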
8273 /// Return true if LHS implies RHS (expanded to its components as "R0 RPred R1")
8274 /// is true. Return false if LHS implies RHS is false. Otherwise, return
8275 /// std::nullopt if we can't infer anything.
8276 static std::optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
8277 CmpInst::Predicate RPred,
8278 const Value *R0, const Value *R1,
8279 const DataLayout &DL,
8280 bool LHSIsTrue, unsigned Depth) {
8281 Value *L0 = LHS->getOperand(0);
8282 Value *L1 = LHS->getOperand(1);
8284 // The rest of the logic assumes the LHS condition is true. If that's not the
8285 // case, invert the predicate to make it so.
8286 CmpInst::Predicate LPred =
8287 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
8289 // Can we infer anything when the two compares have matching operands?
8290 bool AreSwappedOps;
8291 if (areMatchingOperands(L0, L1, R0, R1, AreSwappedOps))
8292 return isImpliedCondMatchingOperands(LPred, RPred, AreSwappedOps);
8294 // Can we infer anything when the 0-operands match and the 1-operands are
8295 // constants (not necessarily matching)?
8296 const APInt *LC, *RC;
8297 if (L0 == R0 && match(L1, m_APInt(LC)) && match(R1, m_APInt(RC)))
8298 return isImpliedCondCommonOperandWithConstants(LPred, *LC, RPred, *RC);
8300 // L0 = R0 = L1 + R1, L0 >=u L1 implies R0 >=u R1, L0 <u L1 implies R0 <u R1
8301 if (ICmpInst::isUnsigned(LPred) && ICmpInst::isUnsigned(RPred)) {
8302 if (L0 == R1) {
8303 std::swap(R0, R1);
8304 RPred = ICmpInst::getSwappedPredicate(RPred);
8305 }
8306 if (L1 == R0) {
8307 std::swap(L0, L1);
8308 LPred = ICmpInst::getSwappedPredicate(LPred);
8309 }
8310 if (L1 == R1) {
8311 std::swap(L0, L1);
8312 LPred = ICmpInst::getSwappedPredicate(LPred);
8313 std::swap(R0, R1);
8314 RPred = ICmpInst::getSwappedPredicate(RPred);
8315 }
8316 if (L0 == R0 &&
8317 (LPred == ICmpInst::ICMP_ULT || LPred == ICmpInst::ICMP_UGE) &&
8318 (RPred == ICmpInst::ICMP_ULT || RPred == ICmpInst::ICMP_UGE) &&
8319 match(L0, m_c_Add(m_Specific(L1), m_Specific(R1))))
8320 return LPred == RPred;
8321 }
8323 if (LPred == RPred)
8324 return isImpliedCondOperands(LPred, L0, L1, R0, R1, DL, Depth);
8326 return std::nullopt;
8327 }
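// Example for the add case above: with %s = add i8 %a, %b, "%s u< %a"
// implies "%s u< %b", since both conditions state exactly that the unsigned
// addition wrapped.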
8329 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
8330 /// false. Otherwise, return std::nullopt if we can't infer anything. We
8331 /// expect the RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select'
8332 /// instruction.
8333 static std::optional<bool>
8334 isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
8335 const Value *RHSOp0, const Value *RHSOp1,
8336 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
8337 // The LHS must be an 'or', 'and', or a 'select' instruction.
8338 assert((LHS->getOpcode() == Instruction::And ||
8339 LHS->getOpcode() == Instruction::Or ||
8340 LHS->getOpcode() == Instruction::Select) &&
8341 "Expected LHS to be 'and', 'or', or 'select'.");
8343 assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
8345 // If the result of an 'or' is false, then we know both legs of the 'or' are
8346 // false. Similarly, if the result of an 'and' is true, then we know both
8347 // legs of the 'and' are true.
8348 const Value *ALHS, *ARHS;
8349 if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
8350 (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
8351 // FIXME: Make this non-recursive.
8352 if (std::optional<bool> Implication = isImpliedCondition(
8353 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
8354 return Implication;
8355 if (std::optional<bool> Implication = isImpliedCondition(
8356 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
8357 return Implication;
8358 return std::nullopt;
8359 }
8360 return std::nullopt;
8361 }
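// For example, if LHS is the logical-and "select i1 %p, i1 %q, i1 false" and
// it is known true, then %p and %q are both true, and either one may be
// enough to decide the RHS icmp.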
8363 std::optional<bool>
8364 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
8365 const Value *RHSOp0, const Value *RHSOp1,
8366 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
8367 // Bail out when we hit the limit.
8368 if (Depth == MaxAnalysisRecursionDepth)
8369 return std::nullopt;
8371 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
8372 // example.
8373 if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
8374 return std::nullopt;
8376 assert(LHS->getType()->isIntOrIntVectorTy(1) &&
8377 "Expected integer type only!");
8379 // Both LHS and RHS are icmps.
8380 const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
8381 if (LHSCmp)
8382 return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
8383 Depth);
8385 /// The LHS should be an 'or', 'and', or a 'select' instruction. We expect
8386 /// the RHS to be an icmp.
8387 /// FIXME: Add support for and/or/select on the RHS.
8388 if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
8389 if ((LHSI->getOpcode() == Instruction::And ||
8390 LHSI->getOpcode() == Instruction::Or ||
8391 LHSI->getOpcode() == Instruction::Select))
8392 return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
8393 Depth);
8394 }
8395 return std::nullopt;
8396 }
8398 std::optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
8399 const DataLayout &DL,
8400 bool LHSIsTrue, unsigned Depth) {
8401 // LHS ==> RHS by definition
8402 if (LHS == RHS)
8403 return LHSIsTrue;
8405 if (const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS))
8406 return isImpliedCondition(LHS, RHSCmp->getPredicate(),
8407 RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
8408 LHSIsTrue, Depth);
8410 if (Depth == MaxAnalysisRecursionDepth)
8411 return std::nullopt;
8413 // LHS ==> (RHS1 || RHS2) if LHS ==> RHS1 or LHS ==> RHS2
8414 // LHS ==> !(RHS1 && RHS2) if LHS ==> !RHS1 or LHS ==> !RHS2
8415 const Value *RHS1, *RHS2;
8416 if (match(RHS, m_LogicalOr(m_Value(RHS1), m_Value(RHS2)))) {
8417 if (std::optional<bool> Imp =
8418 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
8419 if (*Imp == true)
8420 return true;
8421 if (std::optional<bool> Imp =
8422 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
8423 if (*Imp == true)
8424 return true;
8425 }
8426 if (match(RHS, m_LogicalAnd(m_Value(RHS1), m_Value(RHS2)))) {
8427 if (std::optional<bool> Imp =
8428 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
8429 if (*Imp == false)
8430 return false;
8431 if (std::optional<bool> Imp =
8432 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
8433 if (*Imp == false)
8434 return false;
8435 }
8437 return std::nullopt;
8438 }
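// Example: "%x u< 4" implies "or i1 (icmp ult i8 %x, 10), %other" is true,
// because the first disjunct is already implied on its own.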
8440 // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
8441 // condition dominating ContextI, or nullptr if no such condition is found.
8442 static std::pair<Value *, bool>
8443 getDomPredecessorCondition(const Instruction *ContextI) {
8444 if (!ContextI || !ContextI->getParent())
8445 return {nullptr, false};
8447 // TODO: This is a poor/cheap way to determine dominance. Should we use a
8448 // dominator tree (eg, from a SimplifyQuery) instead?
8449 const BasicBlock *ContextBB = ContextI->getParent();
8450 const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
8451 if (!PredBB)
8452 return {nullptr, false};
8454 // We need a conditional branch in the predecessor.
8455 Value *PredCond;
8456 BasicBlock *TrueBB, *FalseBB;
8457 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
8458 return {nullptr, false};
8460 // The branch should get simplified. Don't bother simplifying this condition.
8461 if (TrueBB == FalseBB)
8462 return {nullptr, false};
8464 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
8465 "Predecessor block does not point to successor?");
8467 // Is this condition implied by the predecessor condition?
8468 return {PredCond, TrueBB == ContextBB};
8469 }
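// Example CFG:
//   pred: br i1 %cond, label %ctx, label %other
//   ctx:  ... ContextI ...
// returns {%cond, true}, since the true edge is the one entering ContextI's
// block.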
8471 std::optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
8472 const Instruction *ContextI,
8473 const DataLayout &DL) {
8474 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
8475 auto PredCond = getDomPredecessorCondition(ContextI);
8476 if (PredCond.first)
8477 return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
8478 return std::nullopt;
8479 }
8481 std::optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
8482 const Value *LHS,
8483 const Value *RHS,
8484 const Instruction *ContextI,
8485 const DataLayout &DL) {
8486 auto PredCond = getDomPredecessorCondition(ContextI);
8487 if (PredCond.first)
8488 return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
8489 PredCond.second);
8490 return std::nullopt;
8491 }
8493 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
8494 APInt &Upper, const InstrInfoQuery &IIQ,
8495 bool PreferSignedRange) {
8496 unsigned Width = Lower.getBitWidth();
8497 const APInt *C;
8498 switch (BO.getOpcode()) {
8499 case Instruction::Add:
8500 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
8501 bool HasNSW = IIQ.hasNoSignedWrap(&BO);
8502 bool HasNUW = IIQ.hasNoUnsignedWrap(&BO);
8504 // If the caller expects a signed compare, then try to use a signed range.
8505 // Otherwise if both no-wraps are set, use the unsigned range because it
8506 // is never larger than the signed range. Example:
8507 // "add nuw nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125].
8508 if (PreferSignedRange && HasNSW && HasNUW)
8509 HasNUW = false;
8511 if (HasNUW) {
8512 // 'add nuw x, C' produces [C, UINT_MAX].
8513 Lower = *C;
8514 } else if (HasNSW) {
8515 if (C->isNegative()) {
8516 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
8517 Lower = APInt::getSignedMinValue(Width);
8518 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
8519 } else {
8520 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
8521 Lower = APInt::getSignedMinValue(Width) + *C;
8522 Upper = APInt::getSignedMaxValue(Width) + 1;
8523 }
8524 }
8525 }
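// E.g. "add nuw i8 %x, 3" yields [3, 255]; with PreferSignedRange set,
// "add nuw nsw i8 %x, -2" yields the signed range [-128, 125].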
8526 break;
8528 case Instruction::And:
8529 if (match(BO.getOperand(1), m_APInt(C)))
8530 // 'and x, C' produces [0, C].
8531 Upper = *C + 1;
8532 // X & -X is a power of two or zero, so we can cap the value at the largest
8533 // power of two.
8534 if (match(BO.getOperand(0), m_Neg(m_Specific(BO.getOperand(1)))) ||
8535 match(BO.getOperand(1), m_Neg(m_Specific(BO.getOperand(0)))))
8536 Upper = APInt::getSignedMinValue(Width) + 1;
8537 break;
8539 case Instruction::Or:
8540 if (match(BO.getOperand(1), m_APInt(C)))
8541 // 'or x, C' produces [C, UINT_MAX].
8542 Lower = *C;
8543 break;
8545 case Instruction::AShr:
8546 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
8547 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
8548 Lower = APInt::getSignedMinValue(Width).ashr(*C);
8549 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
8550 } else if (match(BO.getOperand(0), m_APInt(C))) {
8551 unsigned ShiftAmount = Width - 1;
8552 if (!C->isZero() && IIQ.isExact(&BO))
8553 ShiftAmount = C->countr_zero();
8554 if (C->isNegative()) {
8555 // 'ashr C, x' produces [C, C >> (Width-1)]
8556 Lower = *C;
8557 Upper = C->ashr(ShiftAmount) + 1;
8558 } else {
8559 // 'ashr C, x' produces [C >> (Width-1), C]
8560 Lower = C->ashr(ShiftAmount);
8561 Upper = *C + 1;
8562 }
8563 }
8564 break;
8566 case Instruction::LShr:
8567 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
8568 // 'lshr x, C' produces [0, UINT_MAX >> C].
8569 Upper = APInt::getAllOnes(Width).lshr(*C) + 1;
8570 } else if (match(BO.getOperand(0), m_APInt(C))) {
8571 // 'lshr C, x' produces [C >> (Width-1), C].
8572 unsigned ShiftAmount = Width - 1;
8573 if (!C->isZero() && IIQ.isExact(&BO))
8574 ShiftAmount = C->countr_zero();
8575 Lower = C->lshr(ShiftAmount);
8576 Upper = *C + 1;
8577 }
8578 break;
8580 case Instruction::Shl:
8581 if (match(BO.getOperand(0), m_APInt(C))) {
8582 if (IIQ.hasNoUnsignedWrap(&BO)) {
8583 // 'shl nuw C, x' produces [C, C << CLZ(C)]
8584 Lower = *C;
8585 Upper = Lower.shl(Lower.countl_zero()) + 1;
8586 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
8587 if (C->isNegative()) {
8588 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
8589 unsigned ShiftAmount = C->countl_one() - 1;
8590 Lower = C->shl(ShiftAmount);
8591 Upper = *C + 1;
8592 } else {
8593 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
8594 unsigned ShiftAmount = C->countl_zero() - 1;
8595 Lower = *C;
8596 Upper = C->shl(ShiftAmount) + 1;
8597 }
8598 } else {
8599 // If the low bit is set, the value can never be zero.
8600 if ((*C)[0])
8601 Lower = APInt::getOneBitSet(Width, 0);
8602 // If we are shifting a constant, the largest the result can be is when the
8603 // longest sequence of consecutive ones is shifted into the high bits
8604 // (breaking ties for which sequence is higher). At the moment we take a
8605 // liberal upper bound on this by just popcounting the constant.
8606 // TODO: There may be a bitwise trick to find the longest/highest
8607 // consecutive sequence of ones (the naive method is an O(Width) loop).
8608 Upper = APInt::getHighBitsSet(Width, C->popcount()) + 1;
8609 }
8610 } else if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
8611 Upper = APInt::getBitsSetFrom(Width, C->getZExtValue()) + 1;
8612 }
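// E.g. "shl i8 %x, 3" yields [0, 0xF8]: the result is always a multiple of
// 8, so it can never exceed 0xF8.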
8613 break;
8615 case Instruction::SDiv:
8616 if (match(BO.getOperand(1), m_APInt(C))) {
8617 APInt IntMin = APInt::getSignedMinValue(Width);
8618 APInt IntMax = APInt::getSignedMaxValue(Width);
8619 if (C->isAllOnes()) {
8620 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX], since INT_MIN sdiv -1
8621 // overflows and is therefore excluded from the range.
8622 Lower = IntMin + 1;
8623 Upper = IntMax + 1;
8624 } else if (C->countl_zero() < Width - 1) {
8625 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
8626 // where C != -1 and C != 0 and C != 1
8627 Lower = IntMin.sdiv(*C);
8628 Upper = IntMax.sdiv(*C);
8629 if (Lower.sgt(Upper))
8630 std::swap(Lower, Upper);
8631 Upper = Upper + 1;
8632 assert(Upper != Lower && "Upper part of range has wrapped!");
8633 }
8634 } else if (match(BO.getOperand(0), m_APInt(C))) {
8635 if (C->isMinSignedValue()) {
8636 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
8637 Lower = *C;
8638 Upper = Lower.lshr(1) + 1;
8639 } else {
8640 // 'sdiv C, x' produces [-|C|, |C|].
8641 Upper = C->abs() + 1;
8642 Lower = (-Upper) + 1;
8643 }
8644 }
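// E.g. "sdiv i8 %x, 3" yields [-42, 42], since INT_MIN/3 == -42 and
// INT_MAX/3 == 42.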
8645 break;
8647 case Instruction::UDiv:
8648 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
8649 // 'udiv x, C' produces [0, UINT_MAX / C].
8650 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
8651 } else if (match(BO.getOperand(0), m_APInt(C))) {
8652 // 'udiv C, x' produces [0, C].
8653 Upper = *C + 1;
8654 }
8655 break;
8657 case Instruction::SRem:
8658 if (match(BO.getOperand(1), m_APInt(C))) {
8659 // 'srem x, C' produces (-|C|, |C|).
8660 Upper = C->abs();
8661 Lower = (-Upper) + 1;
8662 }
8663 break;
8665 case Instruction::URem:
8666 if (match(BO.getOperand(1), m_APInt(C)))
8667 // 'urem x, C' produces [0, C).
8668 Upper = *C;
8669 break;
8671 default:
8672 break;
8673 }
8674 }
8676 static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II) {
8677 unsigned Width = II.getType()->getScalarSizeInBits();
8678 const APInt *C;
8679 switch (II.getIntrinsicID()) {
8680 case Intrinsic::ctpop:
8681 case Intrinsic::ctlz:
8682 case Intrinsic::cttz:
8683 // The maximum number of set/cleared bits is the bit width.
8684 return ConstantRange::getNonEmpty(APInt::getZero(Width),
8685 APInt(Width, Width + 1));
8686 case Intrinsic::uadd_sat:
8687 // uadd.sat(x, C) produces [C, UINT_MAX].
8688 if (match(II.getOperand(0), m_APInt(C)) ||
8689 match(II.getOperand(1), m_APInt(C)))
8690 return ConstantRange::getNonEmpty(*C, APInt::getZero(Width));
8691 break;
8692 case Intrinsic::sadd_sat:
8693 if (match(II.getOperand(0), m_APInt(C)) ||
8694 match(II.getOperand(1), m_APInt(C))) {
8695 if (C->isNegative())
8696 // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
8697 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
8698 APInt::getSignedMaxValue(Width) + *C +
8699 1);
8701 // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
8702 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) + *C,
8703 APInt::getSignedMaxValue(Width) + 1);
8704 }
8705 break;
8706 case Intrinsic::usub_sat:
8707 // usub.sat(C, x) produces [0, C].
8708 if (match(II.getOperand(0), m_APInt(C)))
8709 return ConstantRange::getNonEmpty(APInt::getZero(Width), *C + 1);
8711 // usub.sat(x, C) produces [0, UINT_MAX - C].
8712 if (match(II.getOperand(1), m_APInt(C)))
8713 return ConstantRange::getNonEmpty(APInt::getZero(Width),
8714 APInt::getMaxValue(Width) - *C + 1);
8715 break;
8716 case Intrinsic::ssub_sat:
8717 if (match(II.getOperand(0), m_APInt(C))) {
8718 if (C->isNegative())
8719 // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
8720 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
8721 *C - APInt::getSignedMinValue(Width) +
8722 1);
8724 // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
8725 return ConstantRange::getNonEmpty(*C - APInt::getSignedMaxValue(Width),
8726 APInt::getSignedMaxValue(Width) + 1);
8727 } else if (match(II.getOperand(1), m_APInt(C))) {
8728 if (C->isNegative())
8729 // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
8730 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) - *C,
8731 APInt::getSignedMaxValue(Width) + 1);
8733 // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
8734 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
8735 APInt::getSignedMaxValue(Width) - *C +
8736 1);
8737 }
8738 break;
8739 case Intrinsic::umin:
8740 case Intrinsic::umax:
8741 case Intrinsic::smin:
8742 case Intrinsic::smax:
8743 if (!match(II.getOperand(0), m_APInt(C)) &&
8744 !match(II.getOperand(1), m_APInt(C)))
8745 break;
8747 switch (II.getIntrinsicID()) {
8748 case Intrinsic::umin:
8749 return ConstantRange::getNonEmpty(APInt::getZero(Width), *C + 1);
8750 case Intrinsic::umax:
8751 return ConstantRange::getNonEmpty(*C, APInt::getZero(Width));
8752 case Intrinsic::smin:
8753 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
8754 *C + 1);
8755 case Intrinsic::smax:
8756 return ConstantRange::getNonEmpty(*C,
8757 APInt::getSignedMaxValue(Width) + 1);
8758 default:
8759 llvm_unreachable("Must be min/max intrinsic");
8760 }
8761 break;
8762 case Intrinsic::abs:
8763 // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
8764 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
8765 if (match(II.getOperand(1), m_One()))
8766 return ConstantRange::getNonEmpty(APInt::getZero(Width),
8767 APInt::getSignedMaxValue(Width) + 1);
8769 return ConstantRange::getNonEmpty(APInt::getZero(Width),
8770 APInt::getSignedMinValue(Width) + 1);
8771 case Intrinsic::vscale:
8772 if (!II.getParent() || !II.getFunction())
8773 break;
8774 return getVScaleRange(II.getFunction(), Width);
8775 default:
8776 break;
8777 }
8779 return ConstantRange::getFull(Width);
8780 }
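// Example: ctpop on i32 always yields a value in [0, 32], and
// uadd.sat(%x, i8 200) always yields a value in [200, 255].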
8782 static ConstantRange getRangeForSelectPattern(const SelectInst &SI,
8783 const InstrInfoQuery &IIQ) {
8784 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
8785 const Value *LHS = nullptr, *RHS = nullptr;
8786 SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
8787 if (R.Flavor == SPF_UNKNOWN)
8788 return ConstantRange::getFull(BitWidth);
8790 if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
8791 // If the negation part of the abs (in RHS) has the NSW flag,
8792 // then the result of abs(X) is [0..SIGNED_MAX],
8793 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
8794 if (match(RHS, m_Neg(m_Specific(LHS))) &&
8795 IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
8796 return ConstantRange::getNonEmpty(APInt::getZero(BitWidth),
8797 APInt::getSignedMaxValue(BitWidth) + 1);
8799 return ConstantRange::getNonEmpty(APInt::getZero(BitWidth),
8800 APInt::getSignedMinValue(BitWidth) + 1);
8801 }
8803 if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
8804 // The result of -abs(X) is <= 0.
8805 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(BitWidth),
8806 APInt(BitWidth, 1));
8807 }
8809 const APInt *C;
8810 if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
8811 return ConstantRange::getFull(BitWidth);
8813 switch (R.Flavor) {
8814 case SPF_UMIN:
8815 return ConstantRange::getNonEmpty(APInt::getZero(BitWidth), *C + 1);
8816 case SPF_UMAX:
8817 return ConstantRange::getNonEmpty(*C, APInt::getZero(BitWidth));
8818 case SPF_SMIN:
8819 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(BitWidth),
8820 *C + 1);
8821 case SPF_SMAX:
8822 return ConstantRange::getNonEmpty(*C,
8823 APInt::getSignedMaxValue(BitWidth) + 1);
8824 default:
8825 return ConstantRange::getFull(BitWidth);
8826 }
8827 }
8829 static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {
8830 // The maximum representable value of a half is 65504. For floats the maximum
8831 // value is 3.4e38 which requires roughly 129 bits.
8832 unsigned BitWidth = I->getType()->getScalarSizeInBits();
8833 if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
8834 return;
8835 if (isa<FPToSIInst>(I) && BitWidth >= 17) {
8836 Lower = APInt(BitWidth, -65504);
8837 Upper = APInt(BitWidth, 65505);
8838 }
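// E.g. "fptosi half %h to i32" therefore always yields a value in
// [-65504, 65504].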
8840 if (isa<FPToUIInst>(I) && BitWidth >= 16) {
8841 // For a fptoui the lower limit is left as 0.
8842 Upper = APInt(BitWidth, 65505);
8843 }
8844 }
8846 ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
8847 bool UseInstrInfo, AssumptionCache *AC,
8848 const Instruction *CtxI,
8849 const DominatorTree *DT,
8850 unsigned Depth) {
8851 assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
8853 if (Depth == MaxAnalysisRecursionDepth)
8854 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
8856 const APInt *C;
8857 if (match(V, m_APInt(C)))
8858 return ConstantRange(*C);
8860 InstrInfoQuery IIQ(UseInstrInfo);
8861 unsigned BitWidth = V->getType()->getScalarSizeInBits();
8862 ConstantRange CR = ConstantRange::getFull(BitWidth);
8863 if (auto *BO = dyn_cast<BinaryOperator>(V)) {
8864 APInt Lower = APInt(BitWidth, 0);
8865 APInt Upper = APInt(BitWidth, 0);
8866 // TODO: Return ConstantRange.
8867 setLimitsForBinOp(*BO, Lower, Upper, IIQ, ForSigned);
8868 CR = ConstantRange::getNonEmpty(Lower, Upper);
8869 } else if (auto *II = dyn_cast<IntrinsicInst>(V))
8870 CR = getRangeForIntrinsic(*II);
8871 else if (auto *SI = dyn_cast<SelectInst>(V)) {
8872 ConstantRange CRTrue = computeConstantRange(
8873 SI->getTrueValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
8874 ConstantRange CRFalse = computeConstantRange(
8875 SI->getFalseValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
8876 CR = CRTrue.unionWith(CRFalse);
8877 CR = CR.intersectWith(getRangeForSelectPattern(*SI, IIQ));
8878 } else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V)) {
8879 APInt Lower = APInt(BitWidth, 0);
8880 APInt Upper = APInt(BitWidth, 0);
8881 // TODO: Return ConstantRange.
8882 setLimitForFPToI(cast<Instruction>(V), Lower, Upper);
8883 CR = ConstantRange::getNonEmpty(Lower, Upper);
8884 }
8886 if (auto *I = dyn_cast<Instruction>(V))
8887 if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
8888 CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
8890 if (CtxI && AC) {
8891 // Try to restrict the range based on information from assumptions.
8892 for (auto &AssumeVH : AC->assumptionsFor(V)) {
8893 if (!AssumeVH)
8894 continue;
8895 CallInst *I = cast<CallInst>(AssumeVH);
8896 assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
8897 "Got assumption for the wrong function!");
8898 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
8899 "must be an assume intrinsic");
8901 if (!isValidAssumeForContext(I, CtxI, DT))
8902 continue;
8903 Value *Arg = I->getArgOperand(0);
8904 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
8905 // Currently we just use information from comparisons.
8906 if (!Cmp || Cmp->getOperand(0) != V)
8907 continue;
8908 // TODO: Set "ForSigned" parameter via Cmp->isSigned()?
8909 ConstantRange RHS =
8910 computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false,
8911 UseInstrInfo, AC, I, DT, Depth + 1);
8912 CR = CR.intersectWith(
8913 ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
8914 }
8915 }
8917 return CR;