//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));
/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getIndexTypeSizeInBits(Ty);
}

namespace {
// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace
// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}
static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}
bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
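  // For example (an illustrative i8 case): with M = 0x0F, the masked LHS can
  // only carry bits from 0xF0 and the masked RHS only bits from 0x0F, so no
  // bit position can be set on both sides.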
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}
static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}
bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2,
                           Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}
static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
}
static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                                Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //   a = XXXX1100 (12)
  //   b = XXXX1110 (14)
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
  //      XX11 (3)
  //      X111 (7)
  //   -------
  //      XX11
  //     XX11
  //    XX11
  //   XX11
  //   -------
  //   XXXXX01
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  //   =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
  unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
                                      TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
                      Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}
void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
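    // For example (an illustrative i8 range): for the half-open range
    // [32, 48), min = 0b00100000 and max = 0b00101111, so min ^ max =
    // 0b00001111 has four leading zeros and the top four bits (0010) are
    // common to every value in the range.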
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}
static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
          return EphValues.count(U);
        })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}
// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::dbg_label:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}
bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
              IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
           std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}
static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    CmpInst::Predicate Pred;
    uint64_t C;
    switch (Cmp->getPredicate()) {
    default:
      break;
    case ICmpInst::ICMP_EQ:
      // assume(v = a)
      if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        Known.Zero |= RHSKnown.Zero;
        Known.One |= RHSKnown.One;
      // assume(v & b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits MaskKnown(BitWidth);
        computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

        // For those bits in the mask that are known to be one, we can propagate
        // known bits from the RHS to V.
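        // For example (an illustrative i8 assume): from
        // assume((v & 0xF0) == 0x30) we learn that bits 7..4 of v are 0011,
        // while the low nibble stays unknown.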
        Known.Zero |= RHSKnown.Zero & MaskKnown.One;
        Known.One  |= RHSKnown.One  & MaskKnown.One;
      // assume(~(v & b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits MaskKnown(BitWidth);
        computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

        // For those bits in the mask that are known to be one, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One  & MaskKnown.One;
        Known.One  |= RHSKnown.Zero & MaskKnown.One;
      // assume(v | b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits BKnown(BitWidth);
        computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

        // For those bits in B that are known to be zero, we can propagate known
        // bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One  |= RHSKnown.One  & BKnown.Zero;
      // assume(~(v | b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits BKnown(BitWidth);
        computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One  & BKnown.Zero;
        Known.One  |= RHSKnown.Zero & BKnown.Zero;
      // assume(v ^ b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits BKnown(BitWidth);
        computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

        // For those bits in B that are known to be zero, we can propagate known
        // bits from the RHS to V. For those bits in B that are known to be one,
        // we can propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One  |= RHSKnown.One  & BKnown.Zero;
        Known.Zero |= RHSKnown.One  & BKnown.One;
        Known.One  |= RHSKnown.Zero & BKnown.One;
      // assume(~(v ^ b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits BKnown(BitWidth);
        computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V. For those bits in B that are
        // known to be one, we can propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.One  & BKnown.Zero;
        Known.One  |= RHSKnown.Zero & BKnown.Zero;
        Known.Zero |= RHSKnown.Zero & BKnown.One;
        Known.One  |= RHSKnown.One  & BKnown.One;
      // assume(v << c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the right by C.
        RHSKnown.Zero.lshrInPlace(C);
        Known.Zero |= RHSKnown.Zero;
        RHSKnown.One.lshrInPlace(C);
        Known.One  |= RHSKnown.One;
      // assume(~(v << c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the right by C.
        RHSKnown.One.lshrInPlace(C);
        Known.Zero |= RHSKnown.One;
        RHSKnown.Zero.lshrInPlace(C);
        Known.One  |= RHSKnown.Zero;
      // assume(v >> c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.Zero << C;
        Known.One  |= RHSKnown.One  << C;
      // assume(~(v >> c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.One  << C;
        Known.One  |= RHSKnown.Zero << C;
      }
      break;
    case ICmpInst::ICMP_SGE:
      // assume(v >=_s c) where c is non-negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));

        if (RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SGT:
      // assume(v >_s c) where c is at least -1.
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));

        if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLE:
      // assume(v <=_s c) where c is negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));

        if (RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLT:
      // assume(v <_s c) where c is non-positive
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

        if (RHSKnown.isZero() || RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_ULE:
      // assume(v <=_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

        // Whatever high bits in c are zero are known to be zero.
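        // For example (illustrative): assume(v <=_u 15) on an i8 value means
        // c has four known leading zeros, so the top four bits of v must be
        // zero as well.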
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    case ICmpInst::ICMP_ULT:
      // assume(v <_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

        // If the RHS is known zero, then this assumption must be wrong (nothing
        // is unsigned less than zero). Signal a conflict and get out of here.
        if (RHSKnown.isZero()) {
          Known.Zero.setAllBits();
          Known.One.setAllBits();
          break;
        }

        // Whatever high bits in c are zero are known to be zero (if c is a
        // power of 2, then one more).
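        // For example (illustrative): assume(v <_u 16) on an i8 value gives
        // c three known leading zeros; because 16 is a power of two, v <= 15
        // and the top four bits of v are known zero.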
        if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
        else
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}
/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functions that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2,
    unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive. TODO: Should we just carry on?
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
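    // For example (illustrative i8 case): if every bit of the shift amount
    // except bit 1 is known zero, only ShiftAmt values 0 and 2 survive the
    // two checks below, and the result combines just those two cases.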
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}
static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form and(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
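    // Why this works (a sketch): adding an odd y always flips bit 0 of x, so
    // x and x + y disagree in bit 0 and their AND has a zero there.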
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
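    // For example (illustrative i8 case): if the denominator is known to be
    // at least 16, the quotient is at most numerator/16, i.e. it gains at
    // least four more leading zeros than the numerator has.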
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    } else if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
        MaxHighZeros = 1;
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getIndexTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth, false);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth, true /* ExtendedBitsAreKnownZero */);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
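          // For example (illustrative): for i = phi [0, entry], [i + 4, loop],
          // both the start value and the step have at least two trailing
          // zeros, so the low two bits of i are known zero on every iteration.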
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value references the PHI itself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
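        // For example (illustrative i32 case): if bit 27 is known one, ctlz
        // can be at most 4, which fits in 3 bits, so all result bits from
        // bit 3 upward are known zero.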
1479 unsigned PossibleLZ = Known2.One.countLeadingZeros();
1480 // If this call is undefined for 0, the result will be less than 2^n.
1481 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1482 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1483 unsigned LowBits = Log2_32(PossibleLZ)+1;
1484 Known.Zero.setBitsFrom(LowBits);
1485 break;
1487 case Intrinsic::cttz: {
1488 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1489 // If we have a known 1, its position is our upper bound.
1490 unsigned PossibleTZ = Known2.One.countTrailingZeros();
1491 // If this call is undefined for 0, the result will be less than 2^n.
1492 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1493 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1494 unsigned LowBits = Log2_32(PossibleTZ)+1;
1495 Known.Zero.setBitsFrom(LowBits);
1496 break;
1498 case Intrinsic::ctpop: {
1499 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1500 // We can bound the space the count needs. Also, bits known to be zero
1501 // can't contribute to the population.
1502 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1503 unsigned LowBits = Log2_32(BitsPossiblySet)+1;
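// e.g. if at most 5 bits of the operand can be set, the population count is
// at most 5, which fits in Log2_32(5)+1 == 3 bits.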
1504 Known.Zero.setBitsFrom(LowBits);
1505 // TODO: we could bound Known.One using the lower bound on the number
1506 // of bits which are known to be set, i.e. Known2.countMinPopulation().
1507 break;
1509 case Intrinsic::fshr:
1510 case Intrinsic::fshl: {
1511 const APInt *SA;
1512 if (!match(I->getOperand(2), m_APInt(SA)))
1513 break;
1515 // Normalize to funnel shift left.
1516 uint64_t ShiftAmt = SA->urem(BitWidth);
1517 if (II->getIntrinsicID() == Intrinsic::fshr)
1518 ShiftAmt = BitWidth - ShiftAmt;
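// e.g. on i8, fshr(x, y, 3) equals fshl(x, y, 5): both produce
// (x << 5) | (y lshr 3).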
1520 KnownBits Known3(Known);
1521 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1522 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1524 Known.Zero =
1525 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1526 Known.One =
1527 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1528 break;
1530 case Intrinsic::uadd_sat:
1531 case Intrinsic::usub_sat: {
1532 bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1533 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1534 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1536 // Add: Leading ones of either operand are preserved.
1537 // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1538 // as leading zeros in the result.
1539 unsigned LeadingKnown;
1540 if (IsAdd)
1541 LeadingKnown = std::max(Known.countMinLeadingOnes(),
1542 Known2.countMinLeadingOnes());
1543 else
1544 LeadingKnown = std::max(Known.countMinLeadingZeros(),
1545 Known2.countMinLeadingOnes());
1547 Known = KnownBits::computeForAddSub(
1548 IsAdd, /* NSW */ false, Known, Known2);
1550 // We select between the operation result and all-ones/zero
1551 // respectively, so we can preserve known ones/zeros.
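// e.g. for uadd.sat on i8 where the top two bits of an operand are known
// one: the unsaturated sum is >= 0xC0 and saturation gives 0xFF, so the top
// two result bits stay known one either way.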
1552 if (IsAdd) {
1553 Known.One.setHighBits(LeadingKnown);
1554 Known.Zero.clearAllBits();
1555 } else {
1556 Known.Zero.setHighBits(LeadingKnown);
1557 Known.One.clearAllBits();
1559 break;
1561 case Intrinsic::x86_sse42_crc32_64_64:
1562 Known.Zero.setBitsFrom(32);
1563 break;
1566 break;
1567 case Instruction::ExtractElement:
1568 // Look through extract element. At the moment we keep this simple and skip
1569 // tracking the specific element. But at least we might find information
1570 // valid for all elements of the vector (for example, if the vector is
1571 // sign extended, shifted, etc.).
1572 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1573 break;
1574 case Instruction::ExtractValue:
1575 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1576 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1577 if (EVI->getNumIndices() != 1) break;
1578 if (EVI->getIndices()[0] == 0) {
1579 switch (II->getIntrinsicID()) {
1580 default: break;
1581 case Intrinsic::uadd_with_overflow:
1582 case Intrinsic::sadd_with_overflow:
1583 computeKnownBitsAddSub(true, II->getArgOperand(0),
1584 II->getArgOperand(1), false, Known, Known2,
1585 Depth, Q);
1586 break;
1587 case Intrinsic::usub_with_overflow:
1588 case Intrinsic::ssub_with_overflow:
1589 computeKnownBitsAddSub(false, II->getArgOperand(0),
1590 II->getArgOperand(1), false, Known, Known2,
1591 Depth, Q);
1592 break;
1593 case Intrinsic::umul_with_overflow:
1594 case Intrinsic::smul_with_overflow:
1595 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1596 Known, Known2, Depth, Q);
1597 break;
1604 /// Determine which bits of V are known to be either zero or one and return
1605 /// them.
1606 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1607 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1608 computeKnownBits(V, Known, Depth, Q);
1609 return Known;
1612 /// Determine which bits of V are known to be either zero or one and return
1613 /// them in the Known bit set.
1615 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1616 /// we cannot optimize based on the assumption that it is zero without changing
1617 /// it to be an explicit zero. If we don't change it to zero, other code could
1618 /// be optimized based on the contradictory assumption that it is non-zero.
1619 /// Because instcombine aggressively folds operations with undef args anyway,
1620 /// this won't lose us code quality.
1622 /// This function is defined on values with integer type, values with pointer
1623 /// type, and vectors of integers. In the case
1624 /// where V is a vector, the known zero and known one values are the
1625 /// same width as the vector element, and a bit is set only if it is true
1626 /// for all of the elements in the vector.
1627 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1628 const Query &Q) {
1629 assert(V && "No Value?");
1630 assert(Depth <= MaxDepth && "Limit Search Depth");
1631 unsigned BitWidth = Known.getBitWidth();
1633 assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1634 V->getType()->isPtrOrPtrVectorTy()) &&
1635 "Not integer or pointer type!");
1637 Type *ScalarTy = V->getType()->getScalarType();
1638 unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
1639 Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
1640 assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
1641 (void)BitWidth;
1642 (void)ExpectedWidth;
1644 const APInt *C;
1645 if (match(V, m_APInt(C))) {
1646 // We know all of the bits for a scalar constant or a splat vector constant!
1647 Known.One = *C;
1648 Known.Zero = ~Known.One;
1649 return;
1651 // Null and aggregate-zero are all-zeros.
1652 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1653 Known.setAllZero();
1654 return;
1656 // Handle a constant vector by taking the intersection of the known bits of
1657 // each element.
1658 if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1659 // We know that CDS must be a vector of integers. Take the intersection of
1660 // each element.
1661 Known.Zero.setAllBits(); Known.One.setAllBits();
1662 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1663 APInt Elt = CDS->getElementAsAPInt(i);
1664 Known.Zero &= ~Elt;
1665 Known.One &= Elt;
1667 return;
1670 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1671 // We know that CV must be a vector of integers. Take the intersection of
1672 // each element.
1673 Known.Zero.setAllBits(); Known.One.setAllBits();
1674 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1675 Constant *Element = CV->getAggregateElement(i);
1676 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1677 if (!ElementCI) {
1678 Known.resetAll();
1679 return;
1681 const APInt &Elt = ElementCI->getValue();
1682 Known.Zero &= ~Elt;
1683 Known.One &= Elt;
1685 return;
1688 // Start out not knowing anything.
1689 Known.resetAll();
1691 // We can't imply anything about undefs.
1692 if (isa<UndefValue>(V))
1693 return;
1695 // There's no point in looking through other users of ConstantData for
1696 // assumptions. Confirm that we've handled them all.
1697 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1699 // Limit search depth.
1700 // All recursive calls that increase depth must come after this.
1701 if (Depth == MaxDepth)
1702 return;
1704 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1705 // the bits of its aliasee.
1706 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1707 if (!GA->isInterposable())
1708 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1709 return;
1712 if (const Operator *I = dyn_cast<Operator>(V))
1713 computeKnownBitsFromOperator(I, Known, Depth, Q);
1715 // Aligned pointers have trailing zeros - refine Known.Zero set
1716 if (V->getType()->isPointerTy()) {
1717 unsigned Align = V->getPointerAlignment(Q.DL);
1718 if (Align)
1719 Known.Zero.setLowBits(countTrailingZeros(Align));
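// e.g. a pointer with 16-byte alignment has countTrailingZeros(16) == 4
// low bits known to be zero.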
1722 // computeKnownBitsFromAssume strictly refines Known.
1723 // Therefore, we run it after computeKnownBitsFromOperator.
1725 // Check whether a nearby assume intrinsic can determine some known bits.
1726 computeKnownBitsFromAssume(V, Known, Depth, Q);
1728 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1731 /// Return true if the given value is known to have exactly one
1732 /// bit set when defined. For vectors, return true if every element is known to
1733 /// be a power of two when defined. Supports values with integer or pointer
1734 /// types and vectors of integers.
1735 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1736 const Query &Q) {
1737 assert(Depth <= MaxDepth && "Limit Search Depth");
1739 // Attempt to match against constants.
1740 if (OrZero && match(V, m_Power2OrZero()))
1741 return true;
1742 if (match(V, m_Power2()))
1743 return true;
1745 // 1 << X is clearly a power of two if the one is not shifted off the end. If
1746 // it is shifted off the end then the result is undefined.
1747 if (match(V, m_Shl(m_One(), m_Value())))
1748 return true;
1750 // (signmask) >>l X is clearly a power of two if the one is not shifted off
1751 // the bottom. If it is shifted off the bottom then the result is undefined.
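// e.g. on i8, (0x80 lshr 3) == 0x10, a power of two.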
1752 if (match(V, m_LShr(m_SignMask(), m_Value())))
1753 return true;
1755 // The remaining tests are all recursive, so bail out if we hit the limit.
1756 if (Depth++ == MaxDepth)
1757 return false;
1759 Value *X = nullptr, *Y = nullptr;
1760 // A shift left or a logical shift right of a power of two is a power of two
1761 // or zero.
1762 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1763 match(V, m_LShr(m_Value(X), m_Value()))))
1764 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1766 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1767 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1769 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1770 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1771 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1773 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1774 // A power of two and'd with anything is a power of two or zero.
1775 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1776 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1777 return true;
1778 // X & (-X) is always a power of two or zero.
1779 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1780 return true;
1781 return false;
1784 // Adding a power-of-two or zero to the same power-of-two or zero yields
1785 // either the original power-of-two, a larger power-of-two or zero.
1786 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1787 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1788 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
1789 Q.IIQ.hasNoSignedWrap(VOBO)) {
1790 if (match(X, m_And(m_Specific(Y), m_Value())) ||
1791 match(X, m_And(m_Value(), m_Specific(Y))))
1792 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1793 return true;
1794 if (match(Y, m_And(m_Specific(X), m_Value())) ||
1795 match(Y, m_And(m_Value(), m_Specific(X))))
1796 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1797 return true;
1799 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1800 KnownBits LHSBits(BitWidth);
1801 computeKnownBits(X, LHSBits, Depth, Q);
1803 KnownBits RHSBits(BitWidth);
1804 computeKnownBits(Y, RHSBits, Depth, Q);
1805 // If i8 V is a power of two or zero:
1806 // ZeroBits: 1 1 1 0 1 1 1 1
1807 // ~ZeroBits: 0 0 0 1 0 0 0 0
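// Only one bit position (bit 3 above) may be set in either operand, so X
// and Y are each 0 or 2^k, and X + Y is 0, 2^k, or 2^(k+1).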
1808 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1809 // If OrZero isn't set, we cannot give back a zero result.
1810 // Make sure either the LHS or RHS has a bit set.
1811 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1812 return true;
1816 // An exact divide or right shift can only shift off zero bits, so the result
1817 // is a power of two only if the first operand is a power of two and we are
1818 // not copying a sign bit (e.g. sdiv INT_MIN, 2 would copy the sign bit).
1819 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1820 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1821 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1822 Depth, Q);
1825 return false;
1828 /// Test whether a GEP's result is known to be non-null.
1830 /// Uses properties inherent in a GEP to try to determine whether it is known
1831 /// to be non-null.
1833 /// Currently this routine does not support vector GEPs.
1834 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1835 const Query &Q) {
1836 const Function *F = nullptr;
1837 if (const Instruction *I = dyn_cast<Instruction>(GEP))
1838 F = I->getFunction();
1840 if (!GEP->isInBounds() ||
1841 NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
1842 return false;
1844 // FIXME: Support vector-GEPs.
1845 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1847 // If the base pointer is non-null, we cannot walk to a null address with an
1848 // inbounds GEP in address space zero.
1849 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1850 return true;
1852 // Walk the GEP operands and see if any operand introduces a non-zero offset.
1853 // If so, then the GEP cannot produce a null pointer, as doing so would
1854 // inherently violate the inbounds contract within address space zero.
1855 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1856 GTI != GTE; ++GTI) {
1857 // Struct types are easy -- they must always be indexed by a constant.
1858 if (StructType *STy = GTI.getStructTypeOrNull()) {
1859 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1860 unsigned ElementIdx = OpC->getZExtValue();
1861 const StructLayout *SL = Q.DL.getStructLayout(STy);
1862 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1863 if (ElementOffset > 0)
1864 return true;
1865 continue;
1868 // If we have a zero-sized type, the index doesn't matter. Keep looping.
1869 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1870 continue;
1872 // Fast path the constant operand case both for efficiency and so we don't
1873 // increment Depth when just zipping down an all-constant GEP.
1874 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1875 if (!OpC->isZero())
1876 return true;
1877 continue;
1880 // We post-increment Depth here because while isKnownNonZero increments it
1881 // as well, when we pop back up that increment won't persist. We don't want
1882 // to recurse 10k times just because we have 10k GEP operands. We don't
1883 // bail completely out because we want to handle constant GEPs regardless
1884 // of depth.
1885 if (Depth++ >= MaxDepth)
1886 continue;
1888 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1889 return true;
1892 return false;
1895 static bool isKnownNonNullFromDominatingCondition(const Value *V,
1896 const Instruction *CtxI,
1897 const DominatorTree *DT) {
1898 assert(V->getType()->isPointerTy() && "V must be pointer type");
1899 assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
1901 if (!CtxI || !DT)
1902 return false;
1904 unsigned NumUsesExplored = 0;
1905 for (auto *U : V->users()) {
1906 // Avoid massive lists
1907 if (NumUsesExplored >= DomConditionsMaxUses)
1908 break;
1909 NumUsesExplored++;
1911 // If the value is used as an argument to a call or invoke, then argument
1912 // attributes may provide an answer about null-ness.
1913 if (auto CS = ImmutableCallSite(U))
1914 if (auto *CalledFunc = CS.getCalledFunction())
1915 for (const Argument &Arg : CalledFunc->args())
1916 if (CS.getArgOperand(Arg.getArgNo()) == V &&
1917 Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1918 return true;
1920 // Consider only compare instructions uniquely controlling a branch
1921 CmpInst::Predicate Pred;
1922 if (!match(const_cast<User *>(U),
1923 m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1924 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1925 continue;
1927 SmallVector<const User *, 4> WorkList;
1928 SmallPtrSet<const User *, 4> Visited;
1929 for (auto *CmpU : U->users()) {
1930 assert(WorkList.empty() && "Should be!");
1931 if (Visited.insert(CmpU).second)
1932 WorkList.push_back(CmpU);
1934 while (!WorkList.empty()) {
1935 auto *Curr = WorkList.pop_back_val();
1937 // If a user is an AND, add all its users to the work list. We only
1938 // propagate "pred != null" condition through AND because it is only
1939 // correct to assume that all conditions of AND are met in the true branch.
1940 // TODO: Support similar logic for OR and the EQ predicate?
1941 if (Pred == ICmpInst::ICMP_NE)
1942 if (auto *BO = dyn_cast<BinaryOperator>(Curr))
1943 if (BO->getOpcode() == Instruction::And) {
1944 for (auto *BOU : BO->users())
1945 if (Visited.insert(BOU).second)
1946 WorkList.push_back(BOU);
1947 continue;
1950 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
1951 assert(BI->isConditional() && "uses a comparison!");
1953 BasicBlock *NonNullSuccessor =
1954 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1955 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1956 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1957 return true;
1958 } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
1959 DT->dominates(cast<Instruction>(Curr), CtxI)) {
1960 return true;
1966 return false;
1969 /// Does the 'Ranges' metadata (which must be a valid MD_range operand list)
1970 /// ensure that the value it's attached to is never Value? 'Value' must have
1971 /// the same bit width as the bounds of the ranges.
1972 static bool rangeMetadataExcludesValue(const MDNode *Ranges, const APInt &Value) {
1973 const unsigned NumRanges = Ranges->getNumOperands() / 2;
1974 assert(NumRanges >= 1);
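// Each pair of operands describes a half-open interval [Lo, Hi); e.g. the
// pair (1, 42) describes [1, 42), which excludes 0.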
1975 for (unsigned i = 0; i < NumRanges; ++i) {
1976 ConstantInt *Lower =
1977 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1978 ConstantInt *Upper =
1979 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1980 ConstantRange Range(Lower->getValue(), Upper->getValue());
1981 if (Range.contains(Value))
1982 return false;
1984 return true;
1987 /// Return true if the given value is known to be non-zero when defined. For
1988 /// vectors, return true if every element is known to be non-zero when
1989 /// defined. For pointers, if the context instruction and dominator tree are
1990 /// specified, perform context-sensitive analysis and return true if the
1991 /// pointer couldn't possibly be null at the specified instruction.
1992 /// Supports values with integer or pointer type and vectors of integers.
1993 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1994 if (auto *C = dyn_cast<Constant>(V)) {
1995 if (C->isNullValue())
1996 return false;
1997 if (isa<ConstantInt>(C))
1998 // Must be non-zero due to null test above.
1999 return true;
2001 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2002 // See the comment for IntToPtr/PtrToInt instructions below.
2003 if (CE->getOpcode() == Instruction::IntToPtr ||
2004 CE->getOpcode() == Instruction::PtrToInt)
2005 if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType()) <=
2006 Q.DL.getTypeSizeInBits(CE->getType()))
2007 return isKnownNonZero(CE->getOperand(0), Depth, Q);
2010 // For constant vectors, check that all elements are undefined or known
2011 // non-zero to determine that the whole vector is known non-zero.
2012 if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
2013 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2014 Constant *Elt = C->getAggregateElement(i);
2015 if (!Elt || Elt->isNullValue())
2016 return false;
2017 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2018 return false;
2020 return true;
2023 // A global variable in address space 0 is non-null unless extern weak
2024 // or an absolute symbol reference. Other address spaces may have null as a
2025 // valid address for a global, so we can't assume anything.
2026 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2027 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2028 GV->getType()->getAddressSpace() == 0)
2029 return true;
2030 } else
2031 return false;
2034 if (auto *I = dyn_cast<Instruction>(V)) {
2035 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2036 // If the possible ranges don't contain zero, then the value is
2037 // definitely non-zero.
2038 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2039 const APInt ZeroValue(Ty->getBitWidth(), 0);
2040 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2041 return true;
2046 // Some of the tests below are recursive, so bail out if we hit the limit.
2047 if (Depth++ >= MaxDepth)
2048 return false;
2050 // Check for pointer simplifications.
2051 if (V->getType()->isPointerTy()) {
2052 // Alloca never returns null; malloc might.
2053 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2054 return true;
2056 // A byval, inalloca, or nonnull argument is never null.
2057 if (const Argument *A = dyn_cast<Argument>(V))
2058 if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
2059 return true;
2061 // A Load tagged with nonnull metadata is never null.
2062 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2063 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2064 return true;
2066 if (const auto *Call = dyn_cast<CallBase>(V)) {
2067 if (Call->isReturnNonNull())
2068 return true;
2069 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call))
2070 return isKnownNonZero(RP, Depth, Q);
2075 // Check for recursive pointer simplifications.
2076 if (V->getType()->isPointerTy()) {
2077 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2078 return true;
2080 // Look through bitcast operations, GEPs, and int2ptr instructions as they
2081 // do not alter the value, or at least not the nullness property of the
2082 // value, e.g., int2ptr is allowed to zero/sign extend the value.
2084 // Note that we have to take special care to avoid looking through
2085 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2086 // as casts that can alter the value, e.g., AddrSpaceCasts.
2087 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2088 if (isGEPKnownNonNull(GEP, Depth, Q))
2089 return true;
2091 if (auto *BCO = dyn_cast<BitCastOperator>(V))
2092 return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2094 if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2095 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()) <=
2096 Q.DL.getTypeSizeInBits(I2P->getDestTy()))
2097 return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2100 // Similar to int2ptr above, we can look through ptr2int here if the cast
2101 // is a no-op or an extend and not a truncate.
2102 if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2103 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()) <=
2104 Q.DL.getTypeSizeInBits(P2I->getDestTy()))
2105 return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2107 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2109 // X | Y != 0 if X != 0 or Y != 0.
2110 Value *X = nullptr, *Y = nullptr;
2111 if (match(V, m_Or(m_Value(X), m_Value(Y))))
2112 return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
2114 // ext X != 0 if X != 0.
2115 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2116 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2118 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2119 // if the lowest bit is shifted off the end.
2120 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2121 // shl nuw can't remove any non-zero bits.
2122 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2123 if (Q.IIQ.hasNoUnsignedWrap(BO))
2124 return isKnownNonZero(X, Depth, Q);
2126 KnownBits Known(BitWidth);
2127 computeKnownBits(X, Known, Depth, Q);
2128 if (Known.One[0])
2129 return true;
2131 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2132 // defined if the sign bit is shifted off the end.
2133 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2134 // shr exact can only shift out zero bits.
2135 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2136 if (BO->isExact())
2137 return isKnownNonZero(X, Depth, Q);
2139 KnownBits Known = computeKnownBits(X, Depth, Q);
2140 if (Known.isNegative())
2141 return true;
2143 // If the shifter operand is a constant, and all of the bits shifted
2144 // out are known to be zero, and X is known non-zero then at least one
2145 // non-zero bit must remain.
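// e.g. if i8 X is known non-zero and its low two bits are known zero, then
// (lshr X, 2) discards only zero bits and must itself be non-zero.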
2146 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2147 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2148 // Is there a known one in the portion not shifted out?
2149 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2150 return true;
2151 // Are all the bits to be shifted out known zero?
2152 if (Known.countMinTrailingZeros() >= ShiftVal)
2153 return isKnownNonZero(X, Depth, Q);
2156 // div exact can only produce a zero if the dividend is zero.
2157 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2158 return isKnownNonZero(X, Depth, Q);
2160 // X + Y.
2161 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2162 KnownBits XKnown = computeKnownBits(X, Depth, Q);
2163 KnownBits YKnown = computeKnownBits(Y, Depth, Q);
2165 // If X and Y are both non-negative (as signed values) then their sum is not
2166 // zero unless both X and Y are zero.
2167 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2168 if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
2169 return true;
2171 // If X and Y are both negative (as signed values) then their sum is not
2172 // zero unless both X and Y equal INT_MIN.
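// (On i8, for instance, 0x80 + 0x80 is the only sum of two negative values
// that wraps to zero.)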
2173 if (XKnown.isNegative() && YKnown.isNegative()) {
2174 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2175 // The sign bit of X is set. If some other bit is set then X is not equal
2176 // to INT_MIN.
2177 if (XKnown.One.intersects(Mask))
2178 return true;
2179 // The sign bit of Y is set. If some other bit is set then Y is not equal
2180 // to INT_MIN.
2181 if (YKnown.One.intersects(Mask))
2182 return true;
2185 // The sum of a non-negative number and a power of two is not zero.
2186 if (XKnown.isNonNegative() &&
2187 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2188 return true;
2189 if (YKnown.isNonNegative() &&
2190 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2191 return true;
2193 // X * Y.
2194 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2195 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2196 // If X and Y are non-zero then so is X * Y as long as the multiplication
2197 // does not overflow.
2198 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2199 isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
2200 return true;
2202 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2203 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2204 if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
2205 isKnownNonZero(SI->getFalseValue(), Depth, Q))
2206 return true;
2208 // PHI
2209 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2210 // Try and detect a recurrence that monotonically increases from a
2211 // starting value, as these are common as induction variables.
2212 if (PN->getNumIncomingValues() == 2) {
2213 Value *Start = PN->getIncomingValue(0);
2214 Value *Induction = PN->getIncomingValue(1);
2215 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2216 std::swap(Start, Induction);
2217 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2218 if (!C->isZero() && !C->isNegative()) {
2219 ConstantInt *X;
2220 if (Q.IIQ.UseInstrInfo &&
2221 (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2222 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2223 !X->isNegative())
2224 return true;
2228 // Check if all incoming values are non-zero constant.
2229 bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2230 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2232 if (AllNonZeroConstants)
2233 return true;
2236 KnownBits Known(BitWidth);
2237 computeKnownBits(V, Known, Depth, Q);
2238 return Known.One != 0;
2241 /// Return true if V2 == V1 + X, where X is known non-zero.
2242 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2243 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2244 if (!BO || BO->getOpcode() != Instruction::Add)
2245 return false;
2246 Value *Op = nullptr;
2247 if (V2 == BO->getOperand(0))
2248 Op = BO->getOperand(1);
2249 else if (V2 == BO->getOperand(1))
2250 Op = BO->getOperand(0);
2251 else
2252 return false;
2253 return isKnownNonZero(Op, 0, Q);
2256 /// Return true if it is known that V1 != V2.
2257 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2258 if (V1 == V2)
2259 return false;
2260 if (V1->getType() != V2->getType())
2261 // We can't look through casts yet.
2262 return false;
2263 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2264 return true;
2266 if (V1->getType()->isIntOrIntVectorTy()) {
2267 // Are any known bits in V1 contradictory to known bits in V2? If V1
2268 // has a known zero where V2 has a known one, they must not be equal.
2269 KnownBits Known1 = computeKnownBits(V1, 0, Q);
2270 KnownBits Known2 = computeKnownBits(V2, 0, Q);
2272 if (Known1.Zero.intersects(Known2.One) ||
2273 Known2.Zero.intersects(Known1.One))
2274 return true;
2276 return false;
2279 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2280 /// simplify operations downstream. Mask is known to be zero for bits that V
2281 /// cannot have.
2283 /// This function is defined on values with integer type, values with pointer
2284 /// type, and vectors of integers. In the case
2285 /// where V is a vector, the mask, known zero, and known one values are the
2286 /// same width as the vector element, and the bit is set only if it is true
2287 /// for all of the elements in the vector.
2288 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2289 const Query &Q) {
2290 KnownBits Known(Mask.getBitWidth());
2291 computeKnownBits(V, Known, Depth, Q);
2292 return Mask.isSubsetOf(Known.Zero);
2295 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2296 // Returns the input and lower/upper bounds.
2297 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2298 const APInt *&CLow, const APInt *&CHigh) {
2299 assert(isa<Operator>(Select) &&
2300 cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2301 "Input should be a Select!");
2303 const Value *LHS, *RHS, *LHS2, *RHS2;
2304 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2305 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2306 return false;
2308 if (!match(RHS, m_APInt(CLow)))
2309 return false;
2311 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2312 if (getInverseMinMaxFlavor(SPF) != SPF2)
2313 return false;
2315 if (!match(RHS2, m_APInt(CHigh)))
2316 return false;
2318 if (SPF == SPF_SMIN)
2319 std::swap(CLow, CHigh);
2321 In = LHS2;
2322 return CLow->sle(*CHigh);
2325 /// For vector constants, loop over the elements and find the constant with the
2326 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2327 /// or if any element was not analyzed; otherwise, return the count for the
2328 /// element with the minimum number of sign bits.
2329 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2330 unsigned TyBits) {
2331 const auto *CV = dyn_cast<Constant>(V);
2332 if (!CV || !CV->getType()->isVectorTy())
2333 return 0;
2335 unsigned MinSignBits = TyBits;
2336 unsigned NumElts = CV->getType()->getVectorNumElements();
2337 for (unsigned i = 0; i != NumElts; ++i) {
2338 // If we find a non-ConstantInt, bail out.
2339 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2340 if (!Elt)
2341 return 0;
2343 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2346 return MinSignBits;
2349 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2350 const Query &Q);
2352 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2353 const Query &Q) {
2354 unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2355 assert(Result > 0 && "At least one sign bit needs to be present!");
2356 return Result;
2359 /// Return the number of times the sign bit of the register is replicated into
2360 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2361 /// (itself), but other cases can give us information. For example, immediately
2362 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2363 /// other, so we return 3. For vectors, return the number of sign bits for the
2364 /// vector element with the minimum number of known sign bits.
2365 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2366 const Query &Q) {
2367 assert(Depth <= MaxDepth && "Limit Search Depth");
2369 // We return the minimum number of sign bits that are guaranteed to be present
2370 // in V, so for undef we have to conservatively return 1. We don't have the
2371 // same behavior for poison though -- that's a FIXME today.
2373 Type *ScalarTy = V->getType()->getScalarType();
2374 unsigned TyBits = ScalarTy->isPointerTy() ?
2375 Q.DL.getIndexTypeSizeInBits(ScalarTy) :
2376 Q.DL.getTypeSizeInBits(ScalarTy);
2378 unsigned Tmp, Tmp2;
2379 unsigned FirstAnswer = 1;
2381 // Note that ConstantInt is handled by the general computeKnownBits case
2382 // below.
2384 if (Depth == MaxDepth)
2385 return 1; // Limit search depth.
2387 const Operator *U = dyn_cast<Operator>(V);
2388 switch (Operator::getOpcode(V)) {
2389 default: break;
2390 case Instruction::SExt:
2391 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2392 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2394 case Instruction::SDiv: {
2395 const APInt *Denominator;
2396 // sdiv X, C -> adds log(C) sign bits.
2397 if (match(U->getOperand(1), m_APInt(Denominator))) {
2399 // Ignore non-positive denominator.
2400 if (!Denominator->isStrictlyPositive())
2401 break;
2403 // Calculate the incoming numerator bits.
2404 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2406 // Add floor(log(C)) bits to the numerator bits.
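// e.g. (sdiv i32 X, 4) where X has 10 sign bits yields at least
// 10 + logBase2(4) == 12 sign bits.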
2407 return std::min(TyBits, NumBits + Denominator->logBase2());
2409 break;
2412 case Instruction::SRem: {
2413 const APInt *Denominator;
2414 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2415 // positive constant. This lets us put a lower bound on the number of sign
2416 // bits.
2417 if (match(U->getOperand(1), m_APInt(Denominator))) {
2419 // Ignore non-positive denominator.
2420 if (!Denominator->isStrictlyPositive())
2421 break;
2423 // Calculate the incoming numerator bits. SRem by a positive constant
2424 // can't lower the number of sign bits.
2425 unsigned NumrBits =
2426 ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2428 // Calculate the leading sign bit constraints by examining the
2429 // denominator. Given that the denominator is positive, there are two
2430 // cases:
2432 // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
2433 // (1 << ceilLogBase2(C)).
2435 // 2. the numerator is negative. Then the result range is (-C,0] and
2436 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2438 // Thus a lower bound on the number of sign bits is `TyBits -
2439 // ceilLogBase2(C)`.
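// e.g. (srem i32 X, 8) lies in (-8, 8), so the result has at least
// 32 - ceilLogBase2(8) == 29 sign bits.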
2441 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2442 return std::max(NumrBits, ResBits);
2444 break;
2447 case Instruction::AShr: {
2448 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2449 // ashr X, C -> adds C sign bits. Vectors too.
2450 const APInt *ShAmt;
2451 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2452 if (ShAmt->uge(TyBits))
2453 break; // Bad shift.
2454 unsigned ShAmtLimited = ShAmt->getZExtValue();
2455 Tmp += ShAmtLimited;
2456 if (Tmp > TyBits) Tmp = TyBits;
2458 return Tmp;
2460 case Instruction::Shl: {
2461 const APInt *ShAmt;
2462 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2463 // shl destroys sign bits.
2464 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2465 if (ShAmt->uge(TyBits) || // Bad shift.
2466 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2467 Tmp2 = ShAmt->getZExtValue();
2468 return Tmp - Tmp2;
2470 break;
2472 case Instruction::And:
2473 case Instruction::Or:
2474 case Instruction::Xor: // NOT is handled here.
2475 // Logical binary ops preserve the number of sign bits at the worst.
2476 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2477 if (Tmp != 1) {
2478 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2479 FirstAnswer = std::min(Tmp, Tmp2);
2480 // We computed what we know about the sign bits as our first
2481 // answer. Now proceed to the generic code that uses
2482 // computeKnownBits, and pick whichever answer is better.
2484 break;
2486 case Instruction::Select: {
2487 // If we have a clamp pattern, we know that the number of sign bits will be
2488 // the minimum of the clamp min/max range.
2489 const Value *X;
2490 const APInt *CLow, *CHigh;
2491 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2492 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2494 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2495 if (Tmp == 1) break;
2496 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2497 return std::min(Tmp, Tmp2);
2500 case Instruction::Add:
2501 // Add can have at most one carry bit. Thus we know that the output
2502 // is, at worst, one more bit than the inputs.
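// e.g. adding two i8 values that each have 3 sign bits (range [-32, 31])
// gives a sum in [-64, 62], which still has 2 sign bits.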
2503 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2504 if (Tmp == 1) break;
2506 // Special case decrementing a value (ADD X, -1):
2507 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2508 if (CRHS->isAllOnesValue()) {
2509 KnownBits Known(TyBits);
2510 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2512 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2513 // sign bits set.
2514 if ((Known.Zero | 1).isAllOnesValue())
2515 return TyBits;
2517 // If we are subtracting one from a positive number, there is no carry
2518 // out of the result.
2519 if (Known.isNonNegative())
2520 return Tmp;
2523 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2524 if (Tmp2 == 1) break;
2525 return std::min(Tmp, Tmp2)-1;
2527 case Instruction::Sub:
2528 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2529 if (Tmp2 == 1) break;
2531 // Handle NEG.
2532 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2533 if (CLHS->isNullValue()) {
2534 KnownBits Known(TyBits);
2535 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2536 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2537 // sign bits set.
2538 if ((Known.Zero | 1).isAllOnesValue())
2539 return TyBits;
2541 // If the input is known to be positive (the sign bit is known clear),
2542 // the output of the NEG has the same number of sign bits as the input.
2543 if (Known.isNonNegative())
2544 return Tmp2;
2546 // Otherwise, we treat this like a SUB.
2549 // Sub can have at most one carry bit. Thus we know that the output
2550 // is, at worst, one more bit than the inputs.
2551 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2552 if (Tmp == 1) break;
2553 return std::min(Tmp, Tmp2)-1;
2555 case Instruction::Mul: {
2556 // The output of the Mul can be at most twice the valid bits in the inputs.
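// e.g. two i32 operands with 20 sign bits each carry 13 valid bits apiece;
// the product needs at most 26 valid bits, leaving 32 - 26 + 1 == 7 sign bits.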
2557 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2558 if (SignBitsOp0 == 1) break;
2559 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2560 if (SignBitsOp1 == 1) break;
2561 unsigned OutValidBits =
2562 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2563 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2566 case Instruction::PHI: {
2567 const PHINode *PN = cast<PHINode>(U);
2568 unsigned NumIncomingValues = PN->getNumIncomingValues();
2569 // Don't analyze large in-degree PHIs.
2570 if (NumIncomingValues > 4) break;
2571 // Unreachable blocks may have zero-operand PHI nodes.
2572 if (NumIncomingValues == 0) break;
2574 // Take the minimum of all incoming values. This can't infinitely loop
2575 // because of our depth threshold.
2576 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2577 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2578 if (Tmp == 1) return Tmp;
2579 Tmp = std::min(
2580 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2582 return Tmp;
2585 case Instruction::Trunc:
2586 // FIXME: it's tricky to do anything useful for this, but it is an important
2587 // case for targets like X86.
2588 break;
2590 case Instruction::ExtractElement:
2591 // Look through extract element. At the moment we keep this simple and skip
2592 // tracking the specific element. But at least we might find information
2593 // valid for all elements of the vector (for example, if the vector is
2594 // sign extended, shifted, etc.).
2595 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2597 case Instruction::ShuffleVector: {
2598 // TODO: This is copied almost directly from the SelectionDAG version of
2599 // ComputeNumSignBits. It would be better if we could share common
2600 // code. If not, make sure that changes are translated to the DAG.
2602 // Collect the minimum number of sign bits that are shared by every vector
2603 // element referenced by the shuffle.
2604 auto *Shuf = cast<ShuffleVectorInst>(U);
2605 int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements();
2606 int NumMaskElts = Shuf->getMask()->getType()->getVectorNumElements();
2607 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2608 for (int i = 0; i != NumMaskElts; ++i) {
2609 int M = Shuf->getMaskValue(i);
2610 assert(M < NumElts * 2 && "Invalid shuffle mask constant");
2611 // For undef elements, we don't know anything about the common state of
2612 // the shuffle result.
2613 if (M == -1)
2614 return 1;
2615 if (M < NumElts)
2616 DemandedLHS.setBit(M % NumElts);
2617 else
2618 DemandedRHS.setBit(M % NumElts);
2620 Tmp = std::numeric_limits<unsigned>::max();
2621 if (!!DemandedLHS)
2622 Tmp = ComputeNumSignBits(Shuf->getOperand(0), Depth + 1, Q);
2623 if (!!DemandedRHS) {
2624 Tmp2 = ComputeNumSignBits(Shuf->getOperand(1), Depth + 1, Q);
2625 Tmp = std::min(Tmp, Tmp2);
2627 // If we don't know anything, early out and try computeKnownBits fall-back.
2628 if (Tmp == 1)
2629 break;
2630 assert(Tmp <= V->getType()->getScalarSizeInBits() &&
2631 "Failed to determine minimum sign bits");
2632 return Tmp;
2636 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2637 // use this information.
2639 // If we can examine all elements of a vector constant successfully, we're
2640 // done (we can't do any better than that). If not, keep trying.
2641 if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2642 return VecSignBits;
2644 KnownBits Known(TyBits);
2645 computeKnownBits(V, Known, Depth, Q);
2647 // If we know that the sign bit is either zero or one, determine the number of
2648 // identical bits in the top of the input value.
2649 return std::max(FirstAnswer, Known.countMinSignBits());
2652 /// This function computes the integer multiple of Base that equals V.
2653 /// If successful, it returns true and stores the multiple in
2654 /// Multiple. If unsuccessful, it returns false. It looks
2655 /// through SExt instructions only if LookThroughSExt is true.
2656 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2657 bool LookThroughSExt, unsigned Depth) {
2658 const unsigned MaxDepth = 6;
2660 assert(V && "No Value?");
2661 assert(Depth <= MaxDepth && "Limit Search Depth");
2662 assert(V->getType()->isIntegerTy() && "Not integer type!");
2664 Type *T = V->getType();
2666 ConstantInt *CI = dyn_cast<ConstantInt>(V);
2668 if (Base == 0)
2669 return false;
2671 if (Base == 1) {
2672 Multiple = V;
2673 return true;
2676 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2677 Constant *BaseVal = ConstantInt::get(T, Base);
2678 if (CO && CO == BaseVal) {
2679 // Multiple is 1.
2680 Multiple = ConstantInt::get(T, 1);
2681 return true;
2684 if (CI && CI->getZExtValue() % Base == 0) {
2685 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2686 return true;
2689 if (Depth == MaxDepth) return false; // Limit search depth.
2691 Operator *I = dyn_cast<Operator>(V);
2692 if (!I) return false;
2694 switch (I->getOpcode()) {
2695 default: break;
2696 case Instruction::SExt:
2697 if (!LookThroughSExt) return false;
2698 // otherwise fall through to ZExt
2699 LLVM_FALLTHROUGH;
2700 case Instruction::ZExt:
2701 return ComputeMultiple(I->getOperand(0), Base, Multiple,
2702 LookThroughSExt, Depth+1);
2703 case Instruction::Shl:
2704 case Instruction::Mul: {
2705 Value *Op0 = I->getOperand(0);
2706 Value *Op1 = I->getOperand(1);
2708 if (I->getOpcode() == Instruction::Shl) {
2709 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2710 if (!Op1CI) return false;
2711 // Turn Op0 << Op1 into Op0 * 2^Op1
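// e.g. (shl X, 3) is rewritten here as (mul X, 8).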
2712 APInt Op1Int = Op1CI->getValue();
2713 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2714 APInt API(Op1Int.getBitWidth(), 0);
2715 API.setBit(BitToSet);
2716 Op1 = ConstantInt::get(V->getContext(), API);
2719 Value *Mul0 = nullptr;
2720 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2721 if (Constant *Op1C = dyn_cast<Constant>(Op1))
2722 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2723 if (Op1C->getType()->getPrimitiveSizeInBits() <
2724 MulC->getType()->getPrimitiveSizeInBits())
2725 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2726 if (Op1C->getType()->getPrimitiveSizeInBits() >
2727 MulC->getType()->getPrimitiveSizeInBits())
2728 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2730 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2731 Multiple = ConstantExpr::getMul(MulC, Op1C);
2732 return true;
2735 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2736 if (Mul0CI->getValue() == 1) {
2737 // V == Base * Op1, so return Op1
2738 Multiple = Op1;
2739 return true;
2743 Value *Mul1 = nullptr;
2744 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2745 if (Constant *Op0C = dyn_cast<Constant>(Op0))
2746 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2747 if (Op0C->getType()->getPrimitiveSizeInBits() <
2748 MulC->getType()->getPrimitiveSizeInBits())
2749 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2750 if (Op0C->getType()->getPrimitiveSizeInBits() >
2751 MulC->getType()->getPrimitiveSizeInBits())
2752 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2754 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2755 Multiple = ConstantExpr::getMul(MulC, Op0C);
2756 return true;
2759 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2760 if (Mul1CI->getValue() == 1) {
2761 // V == Base * Op0, so return Op0
2762 Multiple = Op0;
2763 return true;
2769 // We could not determine if V is a multiple of Base.
2770 return false;
2773 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2774 const TargetLibraryInfo *TLI) {
2775 const Function *F = ICS.getCalledFunction();
2776 if (!F)
2777 return Intrinsic::not_intrinsic;
2779 if (F->isIntrinsic())
2780 return F->getIntrinsicID();
2782 if (!TLI)
2783 return Intrinsic::not_intrinsic;
2785 LibFunc Func;
2786 // We're going to make assumptions about the semantics of the functions, so
2787 // check that the target knows the function is available in this environment
2788 // and that it does not have local linkage.
2789 if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2790 return Intrinsic::not_intrinsic;
2792 if (!ICS.onlyReadsMemory())
2793 return Intrinsic::not_intrinsic;
2795 // Otherwise check if we have a call to a function that can be turned into a
2796 // vector intrinsic.
2797 switch (Func) {
2798 default:
2799 break;
2800 case LibFunc_sin:
2801 case LibFunc_sinf:
2802 case LibFunc_sinl:
2803 return Intrinsic::sin;
2804 case LibFunc_cos:
2805 case LibFunc_cosf:
2806 case LibFunc_cosl:
2807 return Intrinsic::cos;
2808 case LibFunc_exp:
2809 case LibFunc_expf:
2810 case LibFunc_expl:
2811 return Intrinsic::exp;
2812 case LibFunc_exp2:
2813 case LibFunc_exp2f:
2814 case LibFunc_exp2l:
2815 return Intrinsic::exp2;
2816 case LibFunc_log:
2817 case LibFunc_logf:
2818 case LibFunc_logl:
2819 return Intrinsic::log;
2820 case LibFunc_log10:
2821 case LibFunc_log10f:
2822 case LibFunc_log10l:
2823 return Intrinsic::log10;
2824 case LibFunc_log2:
2825 case LibFunc_log2f:
2826 case LibFunc_log2l:
2827 return Intrinsic::log2;
2828 case LibFunc_fabs:
2829 case LibFunc_fabsf:
2830 case LibFunc_fabsl:
2831 return Intrinsic::fabs;
2832 case LibFunc_fmin:
2833 case LibFunc_fminf:
2834 case LibFunc_fminl:
2835 return Intrinsic::minnum;
2836 case LibFunc_fmax:
2837 case LibFunc_fmaxf:
2838 case LibFunc_fmaxl:
2839 return Intrinsic::maxnum;
2840 case LibFunc_copysign:
2841 case LibFunc_copysignf:
2842 case LibFunc_copysignl:
2843 return Intrinsic::copysign;
2844 case LibFunc_floor:
2845 case LibFunc_floorf:
2846 case LibFunc_floorl:
2847 return Intrinsic::floor;
2848 case LibFunc_ceil:
2849 case LibFunc_ceilf:
2850 case LibFunc_ceill:
2851 return Intrinsic::ceil;
2852 case LibFunc_trunc:
2853 case LibFunc_truncf:
2854 case LibFunc_truncl:
2855 return Intrinsic::trunc;
2856 case LibFunc_rint:
2857 case LibFunc_rintf:
2858 case LibFunc_rintl:
2859 return Intrinsic::rint;
2860 case LibFunc_nearbyint:
2861 case LibFunc_nearbyintf:
2862 case LibFunc_nearbyintl:
2863 return Intrinsic::nearbyint;
2864 case LibFunc_round:
2865 case LibFunc_roundf:
2866 case LibFunc_roundl:
2867 return Intrinsic::round;
2868 case LibFunc_pow:
2869 case LibFunc_powf:
2870 case LibFunc_powl:
2871 return Intrinsic::pow;
2872 case LibFunc_sqrt:
2873 case LibFunc_sqrtf:
2874 case LibFunc_sqrtl:
2875 return Intrinsic::sqrt;
2878 return Intrinsic::not_intrinsic;
2881 /// Return true if we can prove that the specified FP value is never equal to
2882 /// -0.0.
2884 /// NOTE: this function will need to be revisited when we support non-default
2885 /// rounding modes!
2886 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2887 unsigned Depth) {
2888 if (auto *CFP = dyn_cast<ConstantFP>(V))
2889 return !CFP->getValueAPF().isNegZero();
2891 // Limit search depth.
2892 if (Depth == MaxDepth)
2893 return false;
2895 auto *Op = dyn_cast<Operator>(V);
2896 if (!Op)
2897 return false;
2899 // Check if the nsz fast-math flag is set.
2900 if (auto *FPO = dyn_cast<FPMathOperator>(Op))
2901 if (FPO->hasNoSignedZeros())
2902 return true;
2904 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
2905 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
2906 return true;
2908 // sitofp and uitofp turn into +0.0 for zero.
2909 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
2910 return true;
2912 if (auto *Call = dyn_cast<CallInst>(Op)) {
2913 Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
2914 switch (IID) {
2915 default:
2916 break;
2917 // sqrt(-0.0) = -0.0, no other negative results are possible.
2918 case Intrinsic::sqrt:
2919 case Intrinsic::canonicalize:
2920 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
2921 // fabs(x) != -0.0
2922 case Intrinsic::fabs:
2923 return true;
2927 return false;
2930 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2931 /// standard ordered compare. E.g. -0.0 is treated as olt 0.0 because of its
2932 /// sign bit, even though the two values compare equal.
2933 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2934 const TargetLibraryInfo *TLI,
2935 bool SignBitOnly,
2936 unsigned Depth) {
2937 // TODO: This function does not do the right thing when SignBitOnly is true
2938 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2939 // which flips the sign bits of NaNs. See
2940 // https://llvm.org/bugs/show_bug.cgi?id=31702.
2942 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2943 return !CFP->getValueAPF().isNegative() ||
2944 (!SignBitOnly && CFP->getValueAPF().isZero());
2947 // Handle vector of constants.
2948 if (auto *CV = dyn_cast<Constant>(V)) {
2949 if (CV->getType()->isVectorTy()) {
2950 unsigned NumElts = CV->getType()->getVectorNumElements();
2951 for (unsigned i = 0; i != NumElts; ++i) {
2952 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
2953 if (!CFP)
2954 return false;
2955 if (CFP->getValueAPF().isNegative() &&
2956 (SignBitOnly || !CFP->getValueAPF().isZero()))
2957 return false;
2960 // All non-negative ConstantFPs.
2961 return true;
2965 if (Depth == MaxDepth)
2966 return false; // Limit search depth.
2968 const Operator *I = dyn_cast<Operator>(V);
2969 if (!I)
2970 return false;
2972 switch (I->getOpcode()) {
2973 default:
2974 break;
2975 // Unsigned integers are always nonnegative.
2976 case Instruction::UIToFP:
2977 return true;
2978 case Instruction::FMul:
2979 // x*x is always non-negative or a NaN.
2980 if (I->getOperand(0) == I->getOperand(1) &&
2981 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2982 return true;
2984 LLVM_FALLTHROUGH;
2985 case Instruction::FAdd:
2986 case Instruction::FDiv:
2987 case Instruction::FRem:
2988 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2989 Depth + 1) &&
2990 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2991 Depth + 1);
2992 case Instruction::Select:
2993 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2994 Depth + 1) &&
2995 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2996 Depth + 1);
2997 case Instruction::FPExt:
2998 case Instruction::FPTrunc:
2999 // Widening/narrowing never changes the sign.
3000 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3001 Depth + 1);
3002 case Instruction::ExtractElement:
3003 // Look through extract element. At the moment we keep this simple and skip
3004 // tracking the specific element. But at least we might find information
3005 // valid for all elements of the vector.
3006 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3007 Depth + 1);
3008 case Instruction::Call:
3009 const auto *CI = cast<CallInst>(I);
3010 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
3011 switch (IID) {
3012 default:
3013 break;
3014 case Intrinsic::maxnum:
3015 return (isKnownNeverNaN(I->getOperand(0), TLI) &&
3016 cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
3017 SignBitOnly, Depth + 1)) ||
3018 (isKnownNeverNaN(I->getOperand(1), TLI) &&
3019 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
3020 SignBitOnly, Depth + 1));
3022 case Intrinsic::maximum:
3023 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3024 Depth + 1) ||
3025 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3026 Depth + 1);
3027 case Intrinsic::minnum:
3028 case Intrinsic::minimum:
3029 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3030 Depth + 1) &&
3031 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3032 Depth + 1);
3033 case Intrinsic::exp:
3034 case Intrinsic::exp2:
3035 case Intrinsic::fabs:
3036 return true;
3038 case Intrinsic::sqrt:
3039 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3040 if (!SignBitOnly)
3041 return true;
3042 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3043 CannotBeNegativeZero(CI->getOperand(0), TLI));
3045 case Intrinsic::powi:
3046 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3047 // powi(x,n) is non-negative if n is even.
3048 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3049 return true;
3051 // TODO: This is not correct. Given that exp is an integer, here are the
3052 // ways that pow can return a negative value:
3054 // pow(x, exp) --> negative if exp is odd and x is negative.
3055 // pow(-0, exp) --> -inf if exp is negative odd.
3056 // pow(-0, exp) --> -0 if exp is positive odd.
3057 // pow(-inf, exp) --> -0 if exp is negative odd.
3058 // pow(-inf, exp) --> -inf if exp is positive odd.
3060 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3061 // but we must return false if x == -0. Unfortunately we do not currently
3062 // have a way of expressing this constraint. See details in
3063 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3064 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3065 Depth + 1);
3067 case Intrinsic::fma:
3068 case Intrinsic::fmuladd:
3069 // x*x+y is non-negative if y is non-negative.
3070 return I->getOperand(0) == I->getOperand(1) &&
3071 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3072 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3073 Depth + 1);
3075 break;
3077 return false;
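// Illustrative sketch (not from the original source): for IR such as
//   %a = call float @llvm.fabs.f32(float %x)
//   %r = fadd float %a, 2.0
// the FAdd case above recurses into both operands; fabs always passes the
// check and the positive constant does too, so %r is proved to never be
// ordered-less-than-zero.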
3080 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3081 const TargetLibraryInfo *TLI) {
3082 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3085 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3086 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3089 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3090 unsigned Depth) {
3091 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3093 // If we're told that NaNs won't happen, assume they won't.
3094 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3095 if (FPMathOp->hasNoNaNs())
3096 return true;
3098 // Handle scalar constants.
3099 if (auto *CFP = dyn_cast<ConstantFP>(V))
3100 return !CFP->isNaN();
3102 if (Depth == MaxDepth)
3103 return false;
3105 if (auto *Inst = dyn_cast<Instruction>(V)) {
3106 switch (Inst->getOpcode()) {
3107 case Instruction::FAdd:
3108 case Instruction::FMul:
3109 case Instruction::FSub:
3110 case Instruction::FDiv:
3111 case Instruction::FRem: {
3112 // TODO: Need isKnownNeverInfinity
3113 return false;
3115 case Instruction::Select: {
3116 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3117 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3119 case Instruction::SIToFP:
3120 case Instruction::UIToFP:
3121 return true;
3122 case Instruction::FPTrunc:
3123 case Instruction::FPExt:
3124 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3125 default:
3126 break;
3130 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3131 switch (II->getIntrinsicID()) {
3132 case Intrinsic::canonicalize:
3133 case Intrinsic::fabs:
3134 case Intrinsic::copysign:
3135 case Intrinsic::exp:
3136 case Intrinsic::exp2:
3137 case Intrinsic::floor:
3138 case Intrinsic::ceil:
3139 case Intrinsic::trunc:
3140 case Intrinsic::rint:
3141 case Intrinsic::nearbyint:
3142 case Intrinsic::round:
3143 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3144 case Intrinsic::sqrt:
3145 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3146 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3147 case Intrinsic::minnum:
3148 case Intrinsic::maxnum:
3149 // If either operand is not NaN, the result is not NaN.
3150 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3151 isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3152 default:
3153 return false;
3157 // Bail out for constant expressions, but try to handle vector constants.
3158 if (!V->getType()->isVectorTy() || !isa<Constant>(V))
3159 return false;
3161 // For vectors, verify that each element is not NaN.
3162 unsigned NumElts = V->getType()->getVectorNumElements();
3163 for (unsigned i = 0; i != NumElts; ++i) {
3164 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3165 if (!Elt)
3166 return false;
3167 if (isa<UndefValue>(Elt))
3168 continue;
3169 auto *CElt = dyn_cast<ConstantFP>(Elt);
3170 if (!CElt || CElt->isNaN())
3171 return false;
3173 // All elements were confirmed not-NaN or undefined.
3174 return true;
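// Illustrative sketch (not from the original source): integer-to-FP casts can
// never produce a NaN, and minnum/maxnum need only one NaN-free operand, so
//   %f = uitofp i32 %i to float
//   %m = call float @llvm.minnum.f32(float %f, float %x)
// is proved NaN-free regardless of %x.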
3177 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3179 // All byte-wide stores are splatable, even of arbitrary variables.
3180 if (V->getType()->isIntegerTy(8))
3181 return V;
3183 LLVMContext &Ctx = V->getContext();
3185 // Undef values don't care; any byte value works for them.
3186 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3187 if (isa<UndefValue>(V))
3188 return UndefInt8;
3190 const uint64_t Size = DL.getTypeStoreSize(V->getType());
3191 if (!Size)
3192 return UndefInt8;
3194 Constant *C = dyn_cast<Constant>(V);
3195 if (!C) {
3196 // Conceptually, we could handle things like:
3197 // %a = zext i8 %X to i16
3198 // %b = shl i16 %a, 8
3199 // %c = or i16 %a, %b
3200 // but until there is an example that actually needs this, it doesn't seem
3201 // worth worrying about.
3202 return nullptr;
3205 // Handle 'null' ConstantAggregateZero etc.
3206 if (C->isNullValue())
3207 return Constant::getNullValue(Type::getInt8Ty(Ctx));
3209 // Constant floating-point values can be handled as integer values if the
3210 // corresponding integer value is "byteable". An important case is 0.0.
3211 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3212 Type *Ty = nullptr;
3213 if (CFP->getType()->isHalfTy())
3214 Ty = Type::getInt16Ty(Ctx);
3215 else if (CFP->getType()->isFloatTy())
3216 Ty = Type::getInt32Ty(Ctx);
3217 else if (CFP->getType()->isDoubleTy())
3218 Ty = Type::getInt64Ty(Ctx);
3219 // Don't handle long double formats, which have strange constraints.
3220 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3221 : nullptr;
3224 // We can handle constant integers that are a multiple of 8 bits wide.
3225 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3226 if (CI->getBitWidth() % 8 == 0) {
3227 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3228 if (!CI->getValue().isSplat(8))
3229 return nullptr;
3230 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3234 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3235 if (CE->getOpcode() == Instruction::IntToPtr) {
3236 auto PS = DL.getPointerSizeInBits(
3237 cast<PointerType>(CE->getType())->getAddressSpace());
3238 return isBytewiseValue(
3239 ConstantExpr::getIntegerCast(CE->getOperand(0),
3240 Type::getIntNTy(Ctx, PS), false),
3241 DL);
3245 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3246 if (LHS == RHS)
3247 return LHS;
3248 if (!LHS || !RHS)
3249 return nullptr;
3250 if (LHS == UndefInt8)
3251 return RHS;
3252 if (RHS == UndefInt8)
3253 return LHS;
3254 return nullptr;
3257 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3258 Value *Val = UndefInt8;
3259 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3260 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3261 return nullptr;
3262 return Val;
3265 if (isa<ConstantAggregate>(C)) {
3266 Value *Val = UndefInt8;
3267 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3268 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3269 return nullptr;
3270 return Val;
3273 // Don't try to handle the handful of other constants.
3274 return nullptr;
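// Illustrative sketch (not from the original source): isBytewiseValue is what
// allows splatted stores to become memset calls. An i32 constant 0xAAAAAAAA
// yields the i8 byte 0xAA, a double 0.0 bitcasts to i64 0 and yields the i8
// byte 0x00, and a non-splat constant such as 0x12345678 yields nullptr.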
3277 // This is the recursive version of BuildSubAggregate. It takes a few different
3278 // arguments. Idxs is the index within the nested struct From that we are
3279 // looking at now (which is of type IndexedType). IdxSkip is the number of
3280 // indices from Idxs that should be left out when inserting into the resulting
3281 // struct. To is the result struct built so far; new insertvalue instructions
3282 // build on that.
3283 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3284 SmallVectorImpl<unsigned> &Idxs,
3285 unsigned IdxSkip,
3286 Instruction *InsertBefore) {
3287 StructType *STy = dyn_cast<StructType>(IndexedType);
3288 if (STy) {
3289 // Save the original To argument so we can modify it
3290 Value *OrigTo = To;
3291 // General case, the type indexed by Idxs is a struct
3292 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3293 // Process each struct element recursively
3294 Idxs.push_back(i);
3295 Value *PrevTo = To;
3296 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3297 InsertBefore);
3298 Idxs.pop_back();
3299 if (!To) {
3300 // Couldn't find any inserted value for this index? Cleanup
3301 while (PrevTo != OrigTo) {
3302 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3303 PrevTo = Del->getAggregateOperand();
3304 Del->eraseFromParent();
3306 // Stop processing elements
3307 break;
3310 // If we successfully found a value for each of our subaggregates
3311 if (To)
3312 return To;
3314 // Base case: the type indexed by Idxs is not a struct, or not all of
3315 // the struct's elements had a value that was inserted directly. In the latter
3316 // case, perhaps we can't determine each of the subelements individually, but
3317 // we might be able to find the complete struct somewhere.
3319 // Find the value that is at that particular spot
3320 Value *V = FindInsertedValue(From, Idxs);
3322 if (!V)
3323 return nullptr;
3325 // Insert the value in the new (sub) aggregate
3326 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3327 "tmp", InsertBefore);
3330 // This helper takes a nested struct and extracts a part of it (which is again a
3331 // struct) into a new value. For example, given the struct:
3332 // { a, { b, { c, d }, e } }
3333 // and the indices "1, 1" this returns
3334 // { c, d }.
3336 // It does this by inserting an insertvalue for each element in the resulting
3337 // struct, as opposed to just inserting a single struct. This will only work if
3338 // each of the elements of the substruct is known (i.e., inserted into From by an
3339 // insertvalue instruction somewhere).
3341 // All inserted insertvalue instructions are inserted before InsertBefore
3342 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3343 Instruction *InsertBefore) {
3344 assert(InsertBefore && "Must have someplace to insert!");
3345 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3346 idx_range);
3347 Value *To = UndefValue::get(IndexedType);
3348 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3349 unsigned IdxSkip = Idxs.size();
3351 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3354 /// Given an aggregate and a sequence of indices, see if the scalar value
3355 /// indexed is already around as a register, for example if it was inserted
3356 /// directly into the aggregate.
3358 /// If InsertBefore is not null, this function will duplicate (modified)
3359 /// insertvalues when a part of a nested struct is extracted.
3360 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3361 Instruction *InsertBefore) {
3362 // Nothing to index? Just return V then (this is useful at the end of our
3363 // recursion).
3364 if (idx_range.empty())
3365 return V;
3366 // We have indices, so V should have an indexable type.
3367 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3368 "Not looking at a struct or array?");
3369 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3370 "Invalid indices for type?");
3372 if (Constant *C = dyn_cast<Constant>(V)) {
3373 C = C->getAggregateElement(idx_range[0]);
3374 if (!C) return nullptr;
3375 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3378 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3379 // Loop the indices for the insertvalue instruction in parallel with the
3380 // requested indices
3381 const unsigned *req_idx = idx_range.begin();
3382 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3383 i != e; ++i, ++req_idx) {
3384 if (req_idx == idx_range.end()) {
3385 // We can't handle this without inserting insertvalues
3386 if (!InsertBefore)
3387 return nullptr;
3389 // The requested index identifies a part of a nested aggregate. Handle
3390 // this specially. For example,
3391 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3392 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3393 // %C = extractvalue {i32, { i32, i32 } } %B, 1
3394 // This can be changed into
3395 // %A = insertvalue {i32, i32 } undef, i32 10, 0
3396 // %C = insertvalue {i32, i32 } %A, i32 11, 1
3397 // which allows the unused 0,0 element from the nested struct to be
3398 // removed.
3399 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3400 InsertBefore);
3403 // This insertvalue inserts something other than what we are looking for.
3404 // See if the (aggregate) value inserted into has the value we are
3405 // looking for, then.
3406 if (*req_idx != *i)
3407 return FindInsertedValue(I->getAggregateOperand(), idx_range,
3408 InsertBefore);
3410 // If we end up here, the indices of the insertvalue match with those
3411 // requested (though possibly only partially). Now we recursively look at
3412 // the inserted value, passing any remaining indices.
3413 return FindInsertedValue(I->getInsertedValueOperand(),
3414 makeArrayRef(req_idx, idx_range.end()),
3415 InsertBefore);
3418 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3419 // If we're extracting a value from an aggregate that was extracted from
3420 // something else, we can extract from that something else directly instead.
3421 // However, we will need to chain I's indices with the requested indices.
3423 // Calculate the number of indices required
3424 unsigned size = I->getNumIndices() + idx_range.size();
3425 // Allocate some space to put the new indices in
3426 SmallVector<unsigned, 5> Idxs;
3427 Idxs.reserve(size);
3428 // Add indices from the extract value instruction
3429 Idxs.append(I->idx_begin(), I->idx_end());
3431 // Add requested indices
3432 Idxs.append(idx_range.begin(), idx_range.end());
3434 assert(Idxs.size() == size
3435 && "Number of indices added not correct?");
3437 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3439 // Otherwise, we don't know (e.g., extracting from a function return value
3440 // or a load instruction).
3441 return nullptr;
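// Illustrative sketch (not from the original source): given
//   %a = insertvalue { i32, i32 } undef, i32 %v, 1
// FindInsertedValue(%a, {1}) walks the insertvalue and returns %v directly,
// while FindInsertedValue(%a, {0}) recurses into the undef aggregate operand
// and returns an undef of the element type.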
3444 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3445 unsigned CharSize) {
3446 // Make sure the GEP has exactly three arguments.
3447 if (GEP->getNumOperands() != 3)
3448 return false;
3450 // Make sure the index-ee is a pointer to an array of \p CharSize integers.
3452 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3453 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3454 return false;
3456 // Check to make sure that the first operand of the GEP is an integer and
3457 // has value 0 so that we are sure we're indexing into the initializer.
3458 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3459 if (!FirstIdx || !FirstIdx->isZero())
3460 return false;
3462 return true;
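// Illustrative sketch (not from the original source): this accepts the
// canonical constant-string access pattern
//   getelementptr [13 x i8], [13 x i8]* @str, i64 0, i64 %idx
// (three operands, i8 elements, leading zero index) and rejects GEPs whose
// first index is non-zero or whose element type is not CharSize bits wide.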
3465 bool llvm::getConstantDataArrayInfo(const Value *V,
3466 ConstantDataArraySlice &Slice,
3467 unsigned ElementSize, uint64_t Offset) {
3468 assert(V);
3470 // Look through bitcast instructions and geps.
3471 V = V->stripPointerCasts();
3473 // If the value is a GEP instruction or constant expression, treat it as an
3474 // offset.
3475 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3476 // The GEP operator should be based on a pointer to string constant, and is
3477 // indexing into the string constant.
3478 if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3479 return false;
3481 // If the second index isn't a ConstantInt, then this is a variable index
3482 // into the array. If this occurs, we can't say anything meaningful about
3483 // the string.
3484 uint64_t StartIdx = 0;
3485 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3486 StartIdx = CI->getZExtValue();
3487 else
3488 return false;
3489 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3490 StartIdx + Offset);
3493 // The GEP, whether a constant expression or an instruction, must reference a global
3494 // variable that is a constant and is initialized. The referenced constant
3495 // initializer is the array that we'll use for optimization.
3496 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3497 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3498 return false;
3500 const ConstantDataArray *Array;
3501 ArrayType *ArrayTy;
3502 if (GV->getInitializer()->isNullValue()) {
3503 Type *GVTy = GV->getValueType();
3504 if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3505 // A zeroinitializer for the array; there is no ConstantDataArray.
3506 Array = nullptr;
3507 } else {
3508 const DataLayout &DL = GV->getParent()->getDataLayout();
3509 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3510 uint64_t Length = SizeInBytes / (ElementSize / 8);
3511 if (Length <= Offset)
3512 return false;
3514 Slice.Array = nullptr;
3515 Slice.Offset = 0;
3516 Slice.Length = Length - Offset;
3517 return true;
3519 } else {
3520 // This must be a ConstantDataArray.
3521 Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3522 if (!Array)
3523 return false;
3524 ArrayTy = Array->getType();
3526 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3527 return false;
3529 uint64_t NumElts = ArrayTy->getArrayNumElements();
3530 if (Offset > NumElts)
3531 return false;
3533 Slice.Array = Array;
3534 Slice.Offset = Offset;
3535 Slice.Length = NumElts - Offset;
3536 return true;
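// Illustrative sketch (not from the original source): for
//   @str = private constant [6 x i8] c"hello\00"
// a query with Offset 2 fills Slice with the initializer as Array, Offset 2
// and Length 4, i.e. the tail "llo\00". A zeroinitializer global instead
// yields a null Array with only the remaining Length recorded.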
3539 /// This function computes the length of a null-terminated C string pointed to
3540 /// by V. If successful, it returns true and returns the string in Str.
3541 /// If unsuccessful, it returns false.
3542 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3543 uint64_t Offset, bool TrimAtNul) {
3544 ConstantDataArraySlice Slice;
3545 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3546 return false;
3548 if (Slice.Array == nullptr) {
3549 if (TrimAtNul) {
3550 Str = StringRef();
3551 return true;
3553 if (Slice.Length == 1) {
3554 Str = StringRef("", 1);
3555 return true;
3557 // We cannot instantiate a StringRef as we do not have an appropriate string
3558 // of 0s at hand.
3559 return false;
3562 // Start out with the entire array in the StringRef.
3563 Str = Slice.Array->getAsString();
3564 // Skip over 'offset' bytes.
3565 Str = Str.substr(Slice.Offset);
3567 if (TrimAtNul) {
3568 // Trim off the \0 and anything after it. If the array is not nul
3569 // terminated, we just return the remainder of the string. The client may know
3570 // some other way that the string is length-bound.
3571 Str = Str.substr(0, Str.find('\0'));
3573 return true;
3576 // These next two are very similar to the above, but also look through PHI
3577 // nodes.
3578 // TODO: See if we can integrate these two together.
3580 /// If we can compute the length of the string pointed to by
3581 /// the specified pointer, return 'len+1'. If we can't, return 0.
3582 static uint64_t GetStringLengthH(const Value *V,
3583 SmallPtrSetImpl<const PHINode*> &PHIs,
3584 unsigned CharSize) {
3585 // Look through noop bitcast instructions.
3586 V = V->stripPointerCasts();
3588 // If this is a PHI node, there are two cases: either we have already seen it
3589 // or we haven't.
3590 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3591 if (!PHIs.insert(PN).second)
3592 return ~0ULL; // already in the set.
3594 // If it was new, see if all the input strings are the same length.
3595 uint64_t LenSoFar = ~0ULL;
3596 for (Value *IncValue : PN->incoming_values()) {
3597 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3598 if (Len == 0) return 0; // Unknown length -> unknown.
3600 if (Len == ~0ULL) continue;
3602 if (Len != LenSoFar && LenSoFar != ~0ULL)
3603 return 0; // Disagree -> unknown.
3604 LenSoFar = Len;
3607 // Success, all agree.
3608 return LenSoFar;
3611 // strlen(select(c,x,y)) -> strlen(x) if strlen(x) == strlen(y), else unknown
3612 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3613 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3614 if (Len1 == 0) return 0;
3615 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3616 if (Len2 == 0) return 0;
3617 if (Len1 == ~0ULL) return Len2;
3618 if (Len2 == ~0ULL) return Len1;
3619 if (Len1 != Len2) return 0;
3620 return Len1;
3623 // Otherwise, see if we can read the string.
3624 ConstantDataArraySlice Slice;
3625 if (!getConstantDataArrayInfo(V, Slice, CharSize))
3626 return 0;
3628 if (Slice.Array == nullptr)
3629 return 1;
3631 // Search for nul characters
3632 unsigned NullIndex = 0;
3633 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3634 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3635 break;
3638 return NullIndex + 1;
3641 /// If we can compute the length of the string pointed to by
3642 /// the specified pointer, return 'len+1'. If we can't, return 0.
3643 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3644 if (!V->getType()->isPointerTy())
3645 return 0;
3647 SmallPtrSet<const PHINode*, 32> PHIs;
3648 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3649 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so just
3650 // return the length of an empty string, i.e. 1.
3651 return Len == ~0ULL ? 1 : Len;
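// Illustrative sketch (not from the original source): for a pointer to
//   @str = private constant [6 x i8] c"hello\00"
// GetStringLength returns 6, i.e. strlen + 1. A select or phi merging two
// strings of equal length keeps that length; differing lengths yield 0.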
3654 const Value *llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call) {
3655 assert(Call &&
3656 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
3657 if (const Value *RV = Call->getReturnedArgOperand())
3658 return RV;
3659 // This can be used only as an aliasing property.
3660 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call))
3661 return Call->getArgOperand(0);
3662 return nullptr;
3665 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3666 const CallBase *Call) {
3667 return Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
3668 Call->getIntrinsicID() == Intrinsic::strip_invariant_group ||
3669 Call->getIntrinsicID() == Intrinsic::aarch64_irg ||
3670 Call->getIntrinsicID() == Intrinsic::aarch64_tagp;
3673 /// \p PN defines a loop-variant pointer to an object. Check if the
3674 /// previous iteration of the loop was referring to the same object as \p PN.
3675 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3676 const LoopInfo *LI) {
3677 // Find the loop-defined value.
3678 Loop *L = LI->getLoopFor(PN->getParent());
3679 if (PN->getNumIncomingValues() != 2)
3680 return true;
3682 // Find the value from previous iteration.
3683 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3684 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3685 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3686 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3687 return true;
3689 // If a new pointer is loaded in the loop, the pointer references a different
3690 // object in every iteration. E.g.:
3691 // for (i)
3692 // int *p = a[i];
3693 // ...
3694 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3695 if (!L->isLoopInvariant(Load->getPointerOperand()))
3696 return false;
3697 return true;
3700 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3701 unsigned MaxLookup) {
3702 if (!V->getType()->isPointerTy())
3703 return V;
3704 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3705 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3706 V = GEP->getPointerOperand();
3707 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3708 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3709 V = cast<Operator>(V)->getOperand(0);
3710 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3711 if (GA->isInterposable())
3712 return V;
3713 V = GA->getAliasee();
3714 } else if (isa<AllocaInst>(V)) {
3715 // An alloca can't be further simplified.
3716 return V;
3717 } else {
3718 if (auto *Call = dyn_cast<CallBase>(V)) {
3719 // CaptureTracking can know about special capturing properties of some
3720 // intrinsics like launder.invariant.group, that can't be expressed with
3721 // the attributes, but have properties like returning aliasing pointer.
3722 // Because some analyses may assume that a nocapture pointer is not
3723 // returned from some special intrinsic (the function would otherwise have
3724 // to be marked with the 'returned' attribute), it is crucial to use this
3725 // function, which is kept in sync with CaptureTracking. Not using it may
3726 // cause weird miscompilations where two aliasing pointers are assumed not
3727 // to alias.
3728 if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) {
3729 V = RP;
3730 continue;
3734 // See if InstructionSimplify knows any relevant tricks.
3735 if (Instruction *I = dyn_cast<Instruction>(V))
3736 // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3737 if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3738 V = Simplified;
3739 continue;
3742 return V;
3744 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3746 return V;
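// Illustrative sketch (not from the original source): a chain such as
//   %a = alloca [16 x i8]
//   %g = getelementptr [16 x i8], [16 x i8]* %a, i64 0, i64 4
//   %b = bitcast i8* %g to i32*
// is peeled cast by cast back to %a, where the walk stops because an alloca
// cannot be simplified any further.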
3749 void llvm::GetUnderlyingObjects(const Value *V,
3750 SmallVectorImpl<const Value *> &Objects,
3751 const DataLayout &DL, LoopInfo *LI,
3752 unsigned MaxLookup) {
3753 SmallPtrSet<const Value *, 4> Visited;
3754 SmallVector<const Value *, 4> Worklist;
3755 Worklist.push_back(V);
3756 do {
3757 const Value *P = Worklist.pop_back_val();
3758 P = GetUnderlyingObject(P, DL, MaxLookup);
3760 if (!Visited.insert(P).second)
3761 continue;
3763 if (auto *SI = dyn_cast<SelectInst>(P)) {
3764 Worklist.push_back(SI->getTrueValue());
3765 Worklist.push_back(SI->getFalseValue());
3766 continue;
3769 if (auto *PN = dyn_cast<PHINode>(P)) {
3770 // If this PHI changes the underlying object in every iteration of the
3771 // loop, don't look through it. Consider:
3772 // int **A;
3773 // for (i) {
3774 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
3775 // Curr = A[i];
3776 // *Prev, *Curr;
3778 // Prev is tracking Curr one iteration behind so they refer to different
3779 // underlying objects.
3780 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3781 isSameUnderlyingObjectInLoop(PN, LI))
3782 for (Value *IncValue : PN->incoming_values())
3783 Worklist.push_back(IncValue);
3784 continue;
3787 Objects.push_back(P);
3788 } while (!Worklist.empty());
3791 /// This is the function that does the work of looking through basic
3792 /// ptrtoint+arithmetic+inttoptr sequences.
3793 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3794 do {
3795 if (const Operator *U = dyn_cast<Operator>(V)) {
3796 // If we find a ptrtoint, we can transfer control back to the
3797 // regular pointer-based GetUnderlyingObjects walk.
3798 if (U->getOpcode() == Instruction::PtrToInt)
3799 return U->getOperand(0);
3800 // If we find an add of a constant, a multiplied value, or a phi, it's
3801 // likely that the other operand will lead us to the base
3802 // object. We don't have to worry about the case where the
3803 // object address is somehow being computed by the multiply,
3804 // because our callers only care when the result is an
3805 // identifiable object.
3806 if (U->getOpcode() != Instruction::Add ||
3807 (!isa<ConstantInt>(U->getOperand(1)) &&
3808 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3809 !isa<PHINode>(U->getOperand(1))))
3810 return V;
3811 V = U->getOperand(0);
3812 } else {
3813 return V;
3815 assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3816 } while (true);
3819 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
3820 /// ptrtoint+arithmetic+inttoptr sequences.
3821 /// It returns false if an unidentified object is found by GetUnderlyingObjects.
3822 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3823 SmallVectorImpl<Value *> &Objects,
3824 const DataLayout &DL) {
3825 SmallPtrSet<const Value *, 16> Visited;
3826 SmallVector<const Value *, 4> Working(1, V);
3827 do {
3828 V = Working.pop_back_val();
3830 SmallVector<const Value *, 4> Objs;
3831 GetUnderlyingObjects(V, Objs, DL);
3833 for (const Value *V : Objs) {
3834 if (!Visited.insert(V).second)
3835 continue;
3836 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3837 const Value *O =
3838 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3839 if (O->getType()->isPointerTy()) {
3840 Working.push_back(O);
3841 continue;
3844 // If GetUnderlyingObjects fails to find an identifiable object,
3845 // getUnderlyingObjectsForCodeGen also fails for safety.
3846 if (!isIdentifiedObject(V)) {
3847 Objects.clear();
3848 return false;
3850 Objects.push_back(const_cast<Value *>(V));
3852 } while (!Working.empty());
3853 return true;
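// Illustrative sketch (not from the original source): this lets codegen look
// through integer round-trips such as
//   %i = ptrtoint i8* %p to i64
//   %j = add i64 %i, 16
//   %q = inttoptr i64 %j to i8*
// where getUnderlyingObjectFromInt walks the add back to the ptrtoint and the
// ordinary pointer walk then resumes at %p.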
3856 /// Return true if the only users of this pointer are lifetime markers.
3857 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3858 for (const User *U : V->users()) {
3859 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3860 if (!II) return false;
3862 if (!II->isLifetimeStartOrEnd())
3863 return false;
3865 return true;
3868 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3869 const Instruction *CtxI,
3870 const DominatorTree *DT) {
3871 const Operator *Inst = dyn_cast<Operator>(V);
3872 if (!Inst)
3873 return false;
3875 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3876 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3877 if (C->canTrap())
3878 return false;
3880 switch (Inst->getOpcode()) {
3881 default:
3882 return true;
3883 case Instruction::UDiv:
3884 case Instruction::URem: {
3885 // x / y is undefined if y == 0.
3886 const APInt *V;
3887 if (match(Inst->getOperand(1), m_APInt(V)))
3888 return *V != 0;
3889 return false;
3891 case Instruction::SDiv:
3892 case Instruction::SRem: {
3893 // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
3894 const APInt *Numerator, *Denominator;
3895 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3896 return false;
3897 // We cannot hoist this division if the denominator is 0.
3898 if (*Denominator == 0)
3899 return false;
3900 // It's safe to hoist if the denominator is neither 0 nor -1.
3901 if (*Denominator != -1)
3902 return true;
3903 // At this point we know that the denominator is -1. It is safe to hoist as
3904 // long as we know that the numerator is not INT_MIN.
3905 if (match(Inst->getOperand(0), m_APInt(Numerator)))
3906 return !Numerator->isMinSignedValue();
3907 // The numerator *might* be MinSignedValue.
3908 return false;
3910 case Instruction::Load: {
3911 const LoadInst *LI = cast<LoadInst>(Inst);
3912 if (!LI->isUnordered() ||
3913 // Speculative load may create a race that did not exist in the source.
3914 LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3915 // Speculative load may load data from dirty regions.
3916 LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3917 LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3918 return false;
3919 const DataLayout &DL = LI->getModule()->getDataLayout();
3920 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3921 LI->getType(), LI->getAlignment(),
3922 DL, CtxI, DT);
3924 case Instruction::Call: {
3925 auto *CI = cast<const CallInst>(Inst);
3926 const Function *Callee = CI->getCalledFunction();
3928 // The called function could have undefined behavior or side-effects, even
3929 // if marked readnone nounwind.
3930 return Callee && Callee->isSpeculatable();
3932 case Instruction::VAArg:
3933 case Instruction::Alloca:
3934 case Instruction::Invoke:
3935 case Instruction::CallBr:
3936 case Instruction::PHI:
3937 case Instruction::Store:
3938 case Instruction::Ret:
3939 case Instruction::Br:
3940 case Instruction::IndirectBr:
3941 case Instruction::Switch:
3942 case Instruction::Unreachable:
3943 case Instruction::Fence:
3944 case Instruction::AtomicRMW:
3945 case Instruction::AtomicCmpXchg:
3946 case Instruction::LandingPad:
3947 case Instruction::Resume:
3948 case Instruction::CatchSwitch:
3949 case Instruction::CatchPad:
3950 case Instruction::CatchRet:
3951 case Instruction::CleanupPad:
3952 case Instruction::CleanupRet:
3953 return false; // Misc instructions which have effects
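// Illustrative sketch (not from the original source): under these rules
//   %q = udiv i32 %x, 7
// may be hoisted out of a conditional block because the divisor is a known
// non-zero constant, whereas 'udiv i32 %x, %y' may not, since %y might be 0.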
3957 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3958 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3961 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
3962 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
3963 switch (OR) {
3964 case ConstantRange::OverflowResult::MayOverflow:
3965 return OverflowResult::MayOverflow;
3966 case ConstantRange::OverflowResult::AlwaysOverflowsLow:
3967 return OverflowResult::AlwaysOverflowsLow;
3968 case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
3969 return OverflowResult::AlwaysOverflowsHigh;
3970 case ConstantRange::OverflowResult::NeverOverflows:
3971 return OverflowResult::NeverOverflows;
3973 llvm_unreachable("Unknown OverflowResult");
3976 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
3977 static ConstantRange computeConstantRangeIncludingKnownBits(
3978 const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
3979 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
3980 OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
3981 KnownBits Known = computeKnownBits(
3982 V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
3983 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
3984 ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
3985 ConstantRange::PreferredRangeType RangeType =
3986 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
3987 return CR1.intersectWith(CR2, RangeType);
3990 OverflowResult llvm::computeOverflowForUnsignedMul(
3991 const Value *LHS, const Value *RHS, const DataLayout &DL,
3992 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
3993 bool UseInstrInfo) {
3994 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
3995 nullptr, UseInstrInfo);
3996 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
3997 nullptr, UseInstrInfo);
3998 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
3999 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4000 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4003 OverflowResult
4004 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4005 const DataLayout &DL, AssumptionCache *AC,
4006 const Instruction *CxtI,
4007 const DominatorTree *DT, bool UseInstrInfo) {
4008 // Multiplying n * m significant bits yields a result of n + m significant
4009 // bits. If the total number of significant bits does not exceed the
4010 // result bit width (minus 1), there is no overflow.
4011 // This means if we have enough leading sign bits in the operands
4012 // we can guarantee that the result does not overflow.
4013 // Ref: "Hacker's Delight" by Henry Warren
4014 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4016 // Note that underestimating the number of sign bits gives a more
4017 // conservative answer.
4018 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4019 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4021 // First handle the easy case: if we have enough sign bits there's
4022 // definitely no overflow.
4023 if (SignBits > BitWidth + 1)
4024 return OverflowResult::NeverOverflows;
4026 // There are two ambiguous cases where there can be no overflow:
4027 // SignBits == BitWidth + 1 and
4028 // SignBits == BitWidth
4029 // The second case is difficult to check, therefore we only handle the
4030 // first case.
4031 if (SignBits == BitWidth + 1) {
4032 // It overflows only when both arguments are negative and the true
4033 // product is exactly the minimum negative number.
4034 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4035 // For simplicity we just check if at least one side is not negative.
4036 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4037 nullptr, UseInstrInfo);
4038 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4039 nullptr, UseInstrInfo);
4040 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4041 return OverflowResult::NeverOverflows;
4043 return OverflowResult::MayOverflow;
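// Illustrative sketch (not from the original source): on i8 operands each
// known to lie in [-8, 7] (at least 5 sign bits apiece, so SignBits >= 10 >
// BitWidth + 1 = 9), the true product lies in [-56, 64] and the multiply is
// reported as NeverOverflows.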
4046 OverflowResult llvm::computeOverflowForUnsignedAdd(
4047 const Value *LHS, const Value *RHS, const DataLayout &DL,
4048 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4049 bool UseInstrInfo) {
4050 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4051 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4052 nullptr, UseInstrInfo);
4053 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4054 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4055 nullptr, UseInstrInfo);
4056 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4059 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4060 const Value *RHS,
4061 const AddOperator *Add,
4062 const DataLayout &DL,
4063 AssumptionCache *AC,
4064 const Instruction *CxtI,
4065 const DominatorTree *DT) {
4066 if (Add && Add->hasNoSignedWrap()) {
4067 return OverflowResult::NeverOverflows;
4070 // If LHS and RHS each have at least two sign bits, the addition will look
4071 // like
4073 // XX..... +
4074 // YY.....
4076 // If the carry into the most significant position is 0, X and Y can't both
4077 // be 1 and therefore the carry out of the addition is also 0.
4079 // If the carry into the most significant position is 1, X and Y can't both
4080 // be 0 and therefore the carry out of the addition is also 1.
4082 // Since the carry into the most significant position is always equal to
4083 // the carry out of the addition, there is no signed overflow.
4084 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4085 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4086 return OverflowResult::NeverOverflows;
4088 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4089 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4090 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4091 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4092 OverflowResult OR =
4093 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4094 if (OR != OverflowResult::MayOverflow)
4095 return OR;
4097 // The remaining code needs Add to be available. Return early if it is not.
4098 if (!Add)
4099 return OverflowResult::MayOverflow;
4101 // If the sign of Add is the same as at least one of the operands, this add
4102 // CANNOT overflow. If this can be determined from the known bits of the
4103 // operands the above signedAddMayOverflow() check will have already done so.
4104 // The only other way to improve on the known bits is from an assumption, so
4105 // call computeKnownBitsFromAssume() directly.
4106 bool LHSOrRHSKnownNonNegative =
4107 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4108 bool LHSOrRHSKnownNegative =
4109 (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4110 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4111 KnownBits AddKnown(LHSRange.getBitWidth());
4112 computeKnownBitsFromAssume(
4113 Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4114 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4115 (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4116 return OverflowResult::NeverOverflows;
4119 return OverflowResult::MayOverflow;
4122 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4123 const Value *RHS,
4124 const DataLayout &DL,
4125 AssumptionCache *AC,
4126 const Instruction *CxtI,
4127 const DominatorTree *DT) {
4128 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4129 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4130 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4131 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4132 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4135 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4136 const Value *RHS,
4137 const DataLayout &DL,
4138 AssumptionCache *AC,
4139 const Instruction *CxtI,
4140 const DominatorTree *DT) {
4141 // If LHS and RHS each have at least two sign bits, the subtraction
4142 // cannot overflow.
4143 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4144 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4145 return OverflowResult::NeverOverflows;
4147 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4148 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4149 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4150 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4151 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4154 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4155 const DominatorTree &DT) {
4156 SmallVector<const BranchInst *, 2> GuardingBranches;
4157 SmallVector<const ExtractValueInst *, 2> Results;
4159 for (const User *U : WO->users()) {
4160 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4161 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4163 if (EVI->getIndices()[0] == 0)
4164 Results.push_back(EVI);
4165 else {
4166 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4168 for (const auto *U : EVI->users())
4169 if (const auto *B = dyn_cast<BranchInst>(U)) {
4170 assert(B->isConditional() && "How else is it using an i1?");
4171 GuardingBranches.push_back(B);
4174 } else {
4175 // We are using the aggregate directly in a way we don't want to analyze
4176 // here (storing it to a global, say).
4177 return false;
4181 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4182 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4183 if (!NoWrapEdge.isSingleEdge())
4184 return false;
4186 // Check if all users of the add are provably no-wrap.
4187 for (const auto *Result : Results) {
4188 // If the extractvalue itself is not executed on overflow, then we don't
4189 // need to check each use separately, since domination is transitive.
4190 if (DT.dominates(NoWrapEdge, Result->getParent()))
4191 continue;
4193 for (auto &RU : Result->uses())
4194 if (!DT.dominates(NoWrapEdge, RU))
4195 return false;
4198 return true;
4201 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
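// Illustrative sketch (not from the original source): the guarded pattern
// recognized here looks like
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %ov = extractvalue { i32, i1 } %s, 1
//   br i1 %ov, label %trap, label %cont
// where every use of the value 'extractvalue %s, 0' is dominated by the
// no-overflow edge into %cont, so the add cannot wrap where it is used.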
4205 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4206 const DataLayout &DL,
4207 AssumptionCache *AC,
4208 const Instruction *CxtI,
4209 const DominatorTree *DT) {
4210 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4211 Add, DL, AC, CxtI, DT);
4214 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4215 const Value *RHS,
4216 const DataLayout &DL,
4217 AssumptionCache *AC,
4218 const Instruction *CxtI,
4219 const DominatorTree *DT) {
4220 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4223 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4224 // A memory operation returns normally if it isn't volatile. A volatile
4225 // operation is allowed to trap.
4227 // An atomic operation isn't guaranteed to return in a reasonable amount of
4228 // time because it's possible for another thread to interfere with it for an
4229 // arbitrary length of time, but programs aren't allowed to rely on that.
4230 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
4231 return !LI->isVolatile();
4232 if (const StoreInst *SI = dyn_cast<StoreInst>(I))
4233 return !SI->isVolatile();
4234 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
4235 return !CXI->isVolatile();
4236 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
4237 return !RMWI->isVolatile();
4238 if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
4239 return !MII->isVolatile();
4241 // If there is no successor, then execution can't transfer to it.
4242 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4243 return !CRI->unwindsToCaller();
4244 if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4245 return !CatchSwitch->unwindsToCaller();
4246 if (isa<ResumeInst>(I))
4247 return false;
4248 if (isa<ReturnInst>(I))
4249 return false;
4250 if (isa<UnreachableInst>(I))
4251 return false;
4253 // Calls can throw, or contain an infinite loop, or kill the process.
4254 if (auto CS = ImmutableCallSite(I)) {
4255 // Call sites that throw have implicit non-local control flow.
4256 if (!CS.doesNotThrow())
4257 return false;
4259 // A function which doesn't throw and has the "willreturn" attribute will
4260 // always return.
4261 if (CS.hasFnAttr(Attribute::WillReturn))
4262 return true;
4264 // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4265 // etc. and thus not return. However, LLVM already assumes that
4267 // - Thread exiting actions are modeled as writes to memory invisible to
4268 // the program.
4270 // - Loops that don't have side effects (side effects are volatile/atomic
4271 // stores and IO) always terminate (see http://llvm.org/PR965).
4272 // Furthermore, IO itself is also modeled as writes to memory invisible to
4273 // the program.
4275 // We rely on those assumptions here, and use the memory effects of the call
4276 // target as a proxy for checking that it always returns.
4278 // FIXME: This isn't aggressive enough; a call which only writes to a global
4279 // is guaranteed to return.
4280 return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
4281 match(I, m_Intrinsic<Intrinsic::assume>()) ||
4282 match(I, m_Intrinsic<Intrinsic::sideeffect>()) ||
4283 match(I, m_Intrinsic<Intrinsic::experimental_widenable_condition>());
4286 // Other instructions return normally.
4287 return true;
4290 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
4291 // TODO: This is slightly conservative for invoke instruction since exiting
4292 // via an exception *is* normal control for them.
4293 for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
4294 if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
4295 return false;
4296 return true;
4299 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
4300 const Loop *L) {
4301 // The loop header is guaranteed to be executed for every iteration.
4303 // FIXME: Relax this constraint to cover all basic blocks that are
4304 // guaranteed to be executed at every iteration.
4305 if (I->getParent() != L->getHeader()) return false;
4307 for (const Instruction &LI : *L->getHeader()) {
4308 if (&LI == I) return true;
4309 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
4311 llvm_unreachable("Instruction not contained in its own parent basic block.");
4314 bool llvm::propagatesFullPoison(const Instruction *I) {
4315 // TODO: This should include all instructions apart from phis, selects and
4316 // call-like instructions.
4317 switch (I->getOpcode()) {
4318 case Instruction::Add:
4319 case Instruction::Sub:
4320 case Instruction::Xor:
4321 case Instruction::Trunc:
4322 case Instruction::BitCast:
4323 case Instruction::AddrSpaceCast:
4324 case Instruction::Mul:
4325 case Instruction::Shl:
4326 case Instruction::GetElementPtr:
4327 // These operations all propagate poison unconditionally. Note that poison
4328 // is not any particular value, so xor or subtraction of poison with
4329 // itself still yields poison, not zero.
4330 return true;
4332 case Instruction::AShr:
4333 case Instruction::SExt:
4334 // For these operations, one bit of the input is replicated across
4335 // multiple output bits. A replicated poison bit is still poison.
4336 return true;
4338 case Instruction::ICmp:
4339 // Comparing poison with any value yields poison. This is why, for
4340 // instance, x s< (x +nsw 1) can be folded to true.
4341 return true;
4343 default:
4344 return false;
4348 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
4349 switch (I->getOpcode()) {
4350 case Instruction::Store:
4351 return cast<StoreInst>(I)->getPointerOperand();
4353 case Instruction::Load:
4354 return cast<LoadInst>(I)->getPointerOperand();
4356 case Instruction::AtomicCmpXchg:
4357 return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
4359 case Instruction::AtomicRMW:
4360 return cast<AtomicRMWInst>(I)->getPointerOperand();
4362 case Instruction::UDiv:
4363 case Instruction::SDiv:
4364 case Instruction::URem:
4365 case Instruction::SRem:
4366 return I->getOperand(1);
4368 default:
4369 // Note: It's really tempting to think that a conditional branch or
4370 // switch should be listed here, but that's incorrect. It's not
4371 // branching off of poison which is UB; it is executing a side-effecting
4372 // instruction which follows the branch.
4373 return nullptr;
4377 bool llvm::mustTriggerUB(const Instruction *I,
4378 const SmallSet<const Value *, 16>& KnownPoison) {
4379 auto *NotPoison = getGuaranteedNonFullPoisonOp(I);
4380 return (NotPoison && KnownPoison.count(NotPoison));
4384 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4385 // We currently only look for uses of poison values within the same basic
4386 // block, as that makes it easier to guarantee that the uses will be
4387 // executed given that PoisonI is executed.
4389 // FIXME: Expand this to consider uses beyond the same basic block. To do
4390 // this, look out for the distinction between post-dominance and strong
4391 // post-dominance.
4392 const BasicBlock *BB = PoisonI->getParent();
4394 // Set of instructions that we have proved will yield poison if PoisonI
4395 // does.
4396 SmallSet<const Value *, 16> YieldsPoison;
4397 SmallSet<const BasicBlock *, 4> Visited;
4398 YieldsPoison.insert(PoisonI);
4399 Visited.insert(PoisonI->getParent());
4401 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4403 unsigned Iter = 0;
4404 while (Iter++ < MaxDepth) {
4405 for (auto &I : make_range(Begin, End)) {
4406 if (&I != PoisonI) {
4407 if (mustTriggerUB(&I, YieldsPoison))
4408 return true;
4409 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4410 return false;
4413 // Mark poison that propagates from I through uses of I.
4414 if (YieldsPoison.count(&I)) {
4415 for (const User *User : I.users()) {
4416 const Instruction *UserI = cast<Instruction>(User);
4417 if (propagatesFullPoison(UserI))
4418 YieldsPoison.insert(User);
4423 if (auto *NextBB = BB->getSingleSuccessor()) {
4424 if (Visited.insert(NextBB).second) {
4425 BB = NextBB;
4426 Begin = BB->getFirstNonPHI()->getIterator();
4427 End = BB->end();
4428 continue;
4432 break;
4434 return false;
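// Illustrative sketch (not from the original source): if %i is poison in
//   %g = getelementptr inbounds i32, i32* %p, i64 %i
//   store i32 0, i32* %g
// then the GEP propagates the poison, the store's pointer operand is a
// guaranteed-non-poison operand, and mustTriggerUB fires, so this function
// reports that the program is undefined whenever %i is poison.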
4437 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4438 if (FMF.noNaNs())
4439 return true;
4441 if (auto *C = dyn_cast<ConstantFP>(V))
4442 return !C->isNaN();
4444 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4445 if (!C->getElementType()->isFloatingPointTy())
4446 return false;
4447 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4448 if (C->getElementAsAPFloat(I).isNaN())
4449 return false;
4451 return true;
4454 return false;
4457 static bool isKnownNonZero(const Value *V) {
4458 if (auto *C = dyn_cast<ConstantFP>(V))
4459 return !C->isZero();
4461 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4462 if (!C->getElementType()->isFloatingPointTy())
4463 return false;
4464 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4465 if (C->getElementAsAPFloat(I).isZero())
4466 return false;
4468 return true;
4471 return false;
4474 /// Match the clamp pattern for float types, without caring about NaNs or
4475 /// signed zeros. Given the non-min/max outer cmp/select from the clamp
4476 /// pattern, this function recognizes whether it can be substituted by a
4477 /// "canonical" min/max pattern.
4478 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4479 Value *CmpLHS, Value *CmpRHS,
4480 Value *TrueVal, Value *FalseVal,
4481 Value *&LHS, Value *&RHS) {
4482 // Try to match
4483 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4484 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4485 // and return description of the outer Max/Min.
4487 // First, check if select has inverse order:
4488 if (CmpRHS == FalseVal) {
4489 std::swap(TrueVal, FalseVal);
4490 Pred = CmpInst::getInversePredicate(Pred);
4493 // Assume success now. If there's no match, callers should not use these anyway.
4494 LHS = TrueVal;
4495 RHS = FalseVal;
4497 const APFloat *FC1;
4498 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4499 return {SPF_UNKNOWN, SPNB_NA, false};
4501 const APFloat *FC2;
4502 switch (Pred) {
4503 case CmpInst::FCMP_OLT:
4504 case CmpInst::FCMP_OLE:
4505 case CmpInst::FCMP_ULT:
4506 case CmpInst::FCMP_ULE:
4507 if (match(FalseVal,
4508 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4509 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4510 FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4511 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4512 break;
4513 case CmpInst::FCMP_OGT:
4514 case CmpInst::FCMP_OGE:
4515 case CmpInst::FCMP_UGT:
4516 case CmpInst::FCMP_UGE:
4517 if (match(FalseVal,
4518 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4519 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4520 FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4521 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4522 break;
4523 default:
4524 break;
4527 return {SPF_UNKNOWN, SPNB_NA, false};
4530 /// Recognize variations of:
4531 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4532 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4533 Value *CmpLHS, Value *CmpRHS,
4534 Value *TrueVal, Value *FalseVal) {
4535 // Swap the select operands and predicate to match the patterns below.
4536 if (CmpRHS != TrueVal) {
4537 Pred = ICmpInst::getSwappedPredicate(Pred);
4538 std::swap(TrueVal, FalseVal);
4540 const APInt *C1;
4541 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4542 const APInt *C2;
4543 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4544 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4545 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4546 return {SPF_SMAX, SPNB_NA, false};
4548 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4549 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4550 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4551 return {SPF_SMIN, SPNB_NA, false};
4553 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4554 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4555 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4556 return {SPF_UMAX, SPNB_NA, false};
4558 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4559 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4560 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4561 return {SPF_UMIN, SPNB_NA, false};
4563 return {SPF_UNKNOWN, SPNB_NA, false};
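// Illustrative sketch (not from the original source): clamping to [0, 255],
//   x <s 0 ? 0 : SMIN(x, 255)
// matches the first pattern above with C1 = 0 and C2 = 255 (C1 <s C2), so it
// is reported as SPF_SMAX, i.e. SMAX(SMIN(x, 255), 0).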
4566 /// Recognize variations of:
4567 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TVal, Value *FVal,
                                               unsigned Depth) {
  // TODO: Allow FP min/max with nnan/nsz.
  assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");

  Value *A, *B;
  SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
  if (!SelectPatternResult::isMinOrMax(L.Flavor))
    return {SPF_UNKNOWN, SPNB_NA, false};

  Value *C, *D;
  SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
  if (L.Flavor != R.Flavor)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // We have something like: x Pred y ? min(a, b) : min(c, d).
  // Try to match the compare to the min/max operations of the select operands.
  // First, make sure we have the right compare predicate.
  switch (L.Flavor) {
  case SPF_SMIN:
    if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_SMAX:
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_UMIN:
    if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_UMAX:
    if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  default:
    return {SPF_UNKNOWN, SPNB_NA, false};
  }

  // If there is a common operand in the already matched min/max and the other
  // min/max operands match the compare operands (either directly or inverted),
  // then this is min/max of the same flavor.

  // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
  // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
  if (D == B) {
    if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
                                         match(A, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
  // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
  if (C == B) {
    if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
                                         match(A, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
  // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
  if (D == A) {
    if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
                                         match(B, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
  // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
  if (C == A) {
    if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
                                         match(B, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}
/// Match non-obvious integer minimum and maximum sequences.
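///
/// For example, a negation-based smin hides behind this sequence
/// (illustrative IR, names hypothetical):
///   %z   = sub nsw i32 %x, %y
///   %cmp = icmp sgt i32 %x, %y
///   %r   = select i1 %cmp, i32 0, i32 %z   ; matched below as smin(%z, 0)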
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
                                       Value *CmpLHS, Value *CmpRHS,
                                       Value *TrueVal, Value *FalseVal,
                                       Value *&LHS, Value *&RHS,
                                       unsigned Depth) {
  // Assume success. If there's no match, callers should not use these anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
    return SPR;

  SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
    return SPR;

  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
  // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
  if (match(TrueVal, m_Zero()) &&
      match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
  // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
  if (match(FalseVal, m_Zero()) &&
      match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  const APInt *C1;
  if (!match(CmpRHS, m_APInt(C1)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  // An unsigned min/max can be written with a signed compare.
  const APInt *C2;
  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
      (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
    // Is the sign bit set?
    // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
    // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
    if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
        C2->isMaxSignedValue())
      return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};

    // Is the sign bit clear?
    // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
    // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
    if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
        C2->isMinSignedValue())
      return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
  }

  // Look through 'not' ops to find disguised signed min/max.
  // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
  // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
  if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
      match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
  // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
  if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
      match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  return {SPF_UNKNOWN, SPNB_NA, false};
}
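/// Return true if X and Y are known to be each other's negation. Illustrative
/// pairs (names hypothetical): "%x = sub nsw i32 0, %y" together with %y, or
/// "%x = sub i32 %a, %b" together with "%y = sub i32 %b, %a".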
bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
  assert(X && Y && "Invalid operand");

  // X = sub (0, Y) || X = sub nsw (0, Y)
  if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
      (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
    return true;

  // Y = sub (0, X) || Y = sub nsw (0, X)
  if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
      (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
    return true;

  // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
  Value *A, *B;
  return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
                       match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
         (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
                      match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
}
static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
                                              FastMathFlags FMF,
                                              Value *CmpLHS, Value *CmpRHS,
                                              Value *TrueVal, Value *FalseVal,
                                              Value *&LHS, Value *&RHS,
                                              unsigned Depth) {
  if (CmpInst::isFPPredicate(Pred)) {
    // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has
    // one 0.0 operand, set the compare's 0.0 operands to that same value for
    // the purpose of identifying min/max. Disregard vector constants with
    // undefined elements because those cannot be back-propagated for analysis.
    Value *OutputZeroVal = nullptr;
    if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
        !cast<Constant>(TrueVal)->containsUndefElement())
      OutputZeroVal = TrueVal;
    else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
             !cast<Constant>(FalseVal)->containsUndefElement())
      OutputZeroVal = FalseVal;

    if (OutputZeroVal) {
      if (match(CmpLHS, m_AnyZeroFP()))
        CmpLHS = OutputZeroVal;
      if (match(CmpRHS, m_AnyZeroFP()))
        CmpRHS = OutputZeroVal;
    }
  }

  LHS = CmpLHS;
  RHS = CmpRHS;

  // Signed zero may return inconsistent results between implementations.
  //  (0.0 <= -0.0) ? 0.0 : -0.0  // Returns 0.0
  //  minNum(0.0, -0.0)           // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
  // Therefore, we behave conservatively and only proceed if at least one of
  // the operands is known not to be zero or if we don't care about signed
  // zero.
  switch (Pred) {
  default: break;
  // FIXME: Include OGT/OLT/UGT/ULT.
  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
    if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
        !isKnownNonZero(CmpRHS))
      return {SPF_UNKNOWN, SPNB_NA, false};
  }

  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
  bool Ordered = false;

  // When given one NaN and one non-NaN input:
  //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
  //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
  //     ordered comparison fails), which could be NaN or non-NaN.
  // So here we discover exactly what NaN behavior is required/accepted.
  if (CmpInst::isFPPredicate(Pred)) {
    bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
    bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);

    if (LHSSafe && RHSSafe) {
      // Both operands are known non-NaN.
      NaNBehavior = SPNB_RETURNS_ANY;
    } else if (CmpInst::isOrdered(Pred)) {
      // An ordered comparison will return false when given a NaN, so it
      // returns the RHS.
      Ordered = true;
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
        NaNBehavior = SPNB_RETURNS_NAN;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_OTHER;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    } else {
      Ordered = false;
      // An unordered comparison will return true when given a NaN, so it
      // returns the LHS.
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
        NaNBehavior = SPNB_RETURNS_OTHER;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_NAN;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    }
  }

  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
    std::swap(CmpLHS, CmpRHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
    if (NaNBehavior == SPNB_RETURNS_NAN)
      NaNBehavior = SPNB_RETURNS_OTHER;
    else if (NaNBehavior == SPNB_RETURNS_OTHER)
      NaNBehavior = SPNB_RETURNS_NAN;
    Ordered = !Ordered;
  }

  // ([if]cmp X, Y) ? X : Y
  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
    switch (Pred) {
    default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
    case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
    case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
    case ICmpInst::ICMP_ULT:
    case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
    case ICmpInst::ICMP_SLT:
    case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
    case FCmpInst::FCMP_UGT:
    case FCmpInst::FCMP_UGE:
    case FCmpInst::FCMP_OGT:
    case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
    case FCmpInst::FCMP_ULT:
    case FCmpInst::FCMP_ULE:
    case FCmpInst::FCMP_OLT:
    case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
    }
  }

  if (isKnownNegation(TrueVal, FalseVal)) {
    // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
    // match against either LHS or sext(LHS).
    auto MaybeSExtCmpLHS =
        m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
    auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
    auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
    if (match(TrueVal, MaybeSExtCmpLHS)) {
      // Set the return values. If the compare uses the negated value
      // (-X >s 0), swap the return values because the negated value is
      // always 'RHS'.
      LHS = TrueVal;
      RHS = FalseVal;
      if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
        std::swap(LHS, RHS);

      // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
      // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
        return {SPF_ABS, SPNB_NA, false};

      // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
        return {SPF_ABS, SPNB_NA, false};

      // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
      // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
      if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
        return {SPF_NABS, SPNB_NA, false};
    } else if (match(FalseVal, MaybeSExtCmpLHS)) {
      // Set the return values. If the compare uses the negated value
      // (-X >s 0), swap the return values because the negated value is
      // always 'RHS'.
      LHS = FalseVal;
      RHS = TrueVal;
      if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
        std::swap(LHS, RHS);

      // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
      // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
      if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
        return {SPF_NABS, SPNB_NA, false};

      // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
      // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
        return {SPF_ABS, SPNB_NA, false};
    }
  }

  if (CmpInst::isIntPredicate(Pred))
    return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);

  // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
  // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
  // semantics than minNum. Be conservative in such a case.
  if (NaNBehavior != SPNB_RETURNS_ANY ||
      (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
       !isKnownNonZero(CmpRHS)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
}
/// Helps to match a select pattern in case of a type mismatch.
///
/// The function handles the case when the types of the true and false values
/// of a select instruction differ from the type of the cmp instruction's
/// operands because of a cast instruction. The function checks if it is legal
/// to move the cast operation after the "select". If yes, it returns the new
/// second value of the "select" (with the assumption that the cast is moved):
/// 1. As the operand of the cast instruction when both values of the "select"
///    are the same cast instruction.
/// 2. As the restored constant (by applying the reverse cast operation) when
///    the first value of the "select" is a cast operation and the second
///    value is a constant.
/// NOTE: We return only the new second value because the first value can be
/// accessed as the operand of the cast instruction.
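///
/// For example (illustrative IR, names hypothetical):
///   %cmp = icmp ult i32 %x, 7
///   %w   = zext i32 %x to i64
///   %sel = select i1 %cmp, i64 %w, i64 42
/// Here case 2 applies: the function returns the i32 constant 42 (the
/// original constant truncated back to the source type), after verifying
/// that zero-extending it back recovers the original i64 42.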
static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {
  auto *Cast1 = dyn_cast<CastInst>(V1);
  if (!Cast1)
    return nullptr;

  *CastOp = Cast1->getOpcode();
  Type *SrcTy = Cast1->getSrcTy();
  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, look through V1.
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;

  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    Constant *CmpConst;
    if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
        CmpConst->getType() == SrcTy) {
      // Here we have the following case:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %tr = trunc iN %x to iK
      //   %narrowsel = select i1 %cond, iK %tr, iK C
      //
      // We can always move the trunc after the select operation:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %widesel = select i1 %cond, iN %x, iN CmpConst
      //   %tr = trunc iN %widesel to iK
      //
      // Note that C could be extended in any way because we don't care about
      // the upper bits after truncation. It can't be an abs pattern, because
      // that would look like:
      //
      //   select i1 %cond, x, -x.
      //
      // So only the min/max pattern can be matched. Such a match requires
      // the widened C to equal CmpConst, so we set the widened C to CmpConst;
      // the condition trunc(CmpConst) == C is checked below.
      CastedTo = CmpConst;
    } else {
      CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    }
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
  Constant *CastedBack =
      ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
  if (CastedBack != C)
    return nullptr;

  return CastedTo;
}
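// Illustrative use of the entry point below (caller code is hypothetical,
// relying on the CastOp/Depth defaults declared in ValueTracking.h):
//
//   Value *LHS, *RHS;
//   SelectPatternResult SPR = matchSelectPattern(Sel, LHS, RHS);
//   if (SPR.Flavor == SPF_SMAX)
//     ...; // Sel computes smax(LHS, RHS).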
SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp,
                                             unsigned Depth) {
  if (Depth >= MaxDepth)
    return {SPF_UNKNOWN, SPNB_NA, false};

  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};

  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();

  return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
                                            CastOp, Depth);
}
SelectPatternResult llvm::matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp, unsigned Depth) {
  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS, Depth);
    }
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS, Depth);
    }
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS, Depth);
}
CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
  if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
  if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
  if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
  if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
  if (SPF == SPF_FMINNUM)
    return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
  if (SPF == SPF_FMAXNUM)
    return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
  llvm_unreachable("unhandled!");
}

SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
  if (SPF == SPF_SMIN) return SPF_SMAX;
  if (SPF == SPF_UMIN) return SPF_UMAX;
  if (SPF == SPF_SMAX) return SPF_SMIN;
  if (SPF == SPF_UMAX) return SPF_UMIN;
  llvm_unreachable("unhandled!");
}

CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
  return getMinMaxPred(getInverseMinMaxFlavor(SPF));
}
/// Return true if "icmp Pred LHS RHS" is always true.
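///
/// For example (illustrative, names hypothetical): given
/// "%a = add nuw i32 %x, 5", the query "icmp ule %x, %a" is always true,
/// because a no-unsigned-wrap add cannot make the result smaller than %x.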
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                            const Value *RHS, const DataLayout &DL,
                            unsigned Depth) {
  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C   if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C   for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        KnownBits Known(CA->getBitWidth());
        computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
                         /*CxtI*/ nullptr, /*DT*/ nullptr);
        if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}
/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return None.
static Optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS, const Value *BRHS,
                      const DataLayout &DL, unsigned Depth) {
  switch (Pred) {
  default:
    return None;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
      return true;
    return None;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
      return true;
    return None;
  }
}
/// Return true if the operands of the two compares match. IsSwappedOps is
/// true when the operands match, but are swapped.
static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
                          const Value *BLHS, const Value *BRHS,
                          bool &IsSwappedOps) {
  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
  return IsMatchingOps || IsSwappedOps;
}
/// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
/// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
/// Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
                                                    CmpInst::Predicate BPred,
                                                    bool AreSwappedOps) {
  // Canonicalize the predicate as if the operands were not commuted.
  if (AreSwappedOps)
    BPred = ICmpInst::getSwappedPredicate(BPred);

  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
    return false;

  return None;
}
/// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
/// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
/// Otherwise, return None if we can't infer anything.
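///
/// For example (illustrative): "icmp ugt %x, 10" implies "icmp ugt %x, 5"
/// is true, because the exact region [11, UINT_MAX] lies inside the allowed
/// region of the second compare; it implies "icmp ult %x, 5" is false,
/// because the two regions do not intersect.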
static Optional<bool>
isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
                                 const ConstantInt *C1,
                                 CmpInst::Predicate BPred,
                                 const ConstantInt *C2) {
  ConstantRange DomCR =
      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
  ConstantRange CR =
      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;

  return None;
}
/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  Value *ALHS = LHS->getOperand(0);
  Value *ARHS = LHS->getOperand(1);

  // The rest of the logic assumes the LHS condition is true. If that's not
  // the case, invert the predicate to make it so.
  ICmpInst::Predicate APred =
      LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();

  Value *BLHS = RHS->getOperand(0);
  Value *BRHS = RHS->getOperand(1);
  ICmpInst::Predicate BPred = RHS->getPredicate();

  // Can we infer anything when the two compares have matching operands?
  bool AreSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, BPred, AreSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands
  // are constants (not necessarily matching)?
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);

  return None;
}
/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything. We expect the
/// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
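///
/// For example (illustrative): if LHS is "%and = and i1 %c1, %c2" and it is
/// known true, then %c1 and %c2 are each true, so an implication established
/// from either leg alone carries over to RHS.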
static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  // The LHS must be an 'or' or an 'and' instruction.
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or) &&
         "Expected LHS to be 'and' or 'or'.");

  assert(Depth <= MaxDepth && "Hit recursion limit");

  // If the result of an 'or' is false, then we know both legs of the 'or' are
  // false. Similarly, if the result of an 'and' is true, then we know both
  // legs of the 'and' are true.
  Value *ALHS, *ARHS;
  if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
      (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
    if (Optional<bool> Implication =
            isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    if (Optional<bool> Implication =
            isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    return None;
  }
  return None;
}
Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool LHSIsTrue,
                                        unsigned Depth) {
  // Bail out when we hit the limit.
  if (Depth == MaxDepth)
    return None;

  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (LHS->getType() != RHS->getType())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");

  // LHS ==> RHS by definition
  if (LHS == RHS)
    return LHSIsTrue;

  // FIXME: Extend the code below to handle vectors.
  if (OpTy->isVectorTy())
    return None;

  assert(OpTy->isIntegerTy(1) && "implied by above");

  // Both LHS and RHS are icmps.
  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
  const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
  if (LHSCmp && RHSCmp)
    return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);

  // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to
  // be an icmp. FIXME: Add support for and/or on the RHS.
  const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
  if (LHSBO && RHSCmp) {
    if ((LHSBO->getOpcode() == Instruction::And ||
         LHSBO->getOpcode() == Instruction::Or))
      return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
  }
  return None;
}
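/// A dominating condition reaches the context instruction through a CFG
/// shaped like this (illustrative, names hypothetical):
///   pred:
///     %c = icmp ult i32 %x, 10
///     br i1 %c, label %ctx, label %other
///   ctx:                              ; ContextI lives here
///     ...                             ; %c is known true on this edge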
Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
                                             const Instruction *ContextI,
                                             const DataLayout &DL) {
  assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
  if (!ContextI || !ContextI->getParent())
    return None;

  // TODO: This is a poor/cheap way to determine dominance. Should we use a
  // dominator tree (eg, from a SimplifyQuery) instead?
  const BasicBlock *ContextBB = ContextI->getParent();
  const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
  if (!PredBB)
    return None;

  // We need a conditional branch in the predecessor.
  Value *PredCond;
  BasicBlock *TrueBB, *FalseBB;
  if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
    return None;

  // The branch should get simplified. Don't bother simplifying this condition.
  if (TrueBB == FalseBB)
    return None;

  assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
         "Predecessor block does not point to successor?");

  // Is this condition implied by the predecessor condition?
  bool CondIsTrue = TrueBB == ContextBB;
  return isImpliedCondition(PredCond, Cond, DL, CondIsTrue);
}
static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
                              APInt &Upper, const InstrInfoQuery &IIQ) {
  unsigned Width = Lower.getBitWidth();
  const APInt *C;
  switch (BO.getOpcode()) {
  case Instruction::Add:
    if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
      // FIXME: If we have both nuw and nsw, we should reduce the range further.
      if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
        // 'add nuw x, C' produces [C, UINT_MAX].
        Lower = *C;
      } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
        if (C->isNegative()) {
          // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
          Lower = APInt::getSignedMinValue(Width);
          Upper = APInt::getSignedMaxValue(Width) + *C + 1;
        } else {
          // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
          Lower = APInt::getSignedMinValue(Width) + *C;
          Upper = APInt::getSignedMaxValue(Width) + 1;
        }
      }
    }
    break;

  case Instruction::And:
    if (match(BO.getOperand(1), m_APInt(C)))
      // 'and x, C' produces [0, C].
      Upper = *C + 1;
    break;

  case Instruction::Or:
    if (match(BO.getOperand(1), m_APInt(C)))
      // 'or x, C' produces [C, UINT_MAX].
      Lower = *C;
    break;

  case Instruction::AShr:
    if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
      // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
      Lower = APInt::getSignedMinValue(Width).ashr(*C);
      Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      unsigned ShiftAmount = Width - 1;
      if (!C->isNullValue() && IIQ.isExact(&BO))
        ShiftAmount = C->countTrailingZeros();
      if (C->isNegative()) {
        // 'ashr C, x' produces [C, C >> (Width-1)]
        Lower = *C;
        Upper = C->ashr(ShiftAmount) + 1;
      } else {
        // 'ashr C, x' produces [C >> (Width-1), C]
        Lower = C->ashr(ShiftAmount);
        Upper = *C + 1;
      }
    }
    break;

  case Instruction::LShr:
    if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
      // 'lshr x, C' produces [0, UINT_MAX >> C].
      Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      // 'lshr C, x' produces [C >> (Width-1), C].
      unsigned ShiftAmount = Width - 1;
      if (!C->isNullValue() && IIQ.isExact(&BO))
        ShiftAmount = C->countTrailingZeros();
      Lower = C->lshr(ShiftAmount);
      Upper = *C + 1;
    }
    break;

  case Instruction::Shl:
    if (match(BO.getOperand(0), m_APInt(C))) {
      if (IIQ.hasNoUnsignedWrap(&BO)) {
        // 'shl nuw C, x' produces [C, C << CLZ(C)]
        Lower = *C;
        Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
      } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
        if (C->isNegative()) {
          // 'shl nsw C, x' produces [C << CLO(C)-1, C]
          unsigned ShiftAmount = C->countLeadingOnes() - 1;
          Lower = C->shl(ShiftAmount);
          Upper = *C + 1;
        } else {
          // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
          unsigned ShiftAmount = C->countLeadingZeros() - 1;
          Lower = *C;
          Upper = C->shl(ShiftAmount) + 1;
        }
      }
    }
    break;

  case Instruction::SDiv:
    if (match(BO.getOperand(1), m_APInt(C))) {
      APInt IntMin = APInt::getSignedMinValue(Width);
      APInt IntMax = APInt::getSignedMaxValue(Width);
      if (C->isAllOnesValue()) {
        // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
        Lower = IntMin + 1;
        Upper = IntMax + 1;
      } else if (C->countLeadingZeros() < Width - 1) {
        // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
        //    where C != -1 and C != 0 and C != 1
        Lower = IntMin.sdiv(*C);
        Upper = IntMax.sdiv(*C);
        if (Lower.sgt(Upper))
          std::swap(Lower, Upper);
        Upper = Upper + 1;
        assert(Upper != Lower && "Upper part of range has wrapped!");
      }
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      if (C->isMinSignedValue()) {
        // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
        Lower = *C;
        Upper = Lower.lshr(1) + 1;
      } else {
        // 'sdiv C, x' produces [-|C|, |C|].
        Upper = C->abs() + 1;
        Lower = (-Upper) + 1;
      }
    }
    break;

  case Instruction::UDiv:
    if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
      // 'udiv x, C' produces [0, UINT_MAX / C].
      Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      // 'udiv C, x' produces [0, C].
      Upper = *C + 1;
    }
    break;

  case Instruction::SRem:
    if (match(BO.getOperand(1), m_APInt(C))) {
      // 'srem x, C' produces (-|C|, |C|).
      Upper = C->abs();
      Lower = (-Upper) + 1;
    }
    break;

  case Instruction::URem:
    if (match(BO.getOperand(1), m_APInt(C)))
      // 'urem x, C' produces [0, C).
      Upper = *C;
    break;

  default:
    break;
  }
}
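// For example (illustrative): for "@llvm.uadd.sat.i8(i8 %x, i8 16)" the
// uadd.sat case below sets Lower to 16, giving the range [16, 255].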
static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
                                  APInt &Upper) {
  unsigned Width = Lower.getBitWidth();
  const APInt *C;
  switch (II.getIntrinsicID()) {
  case Intrinsic::uadd_sat:
    // uadd.sat(x, C) produces [C, UINT_MAX].
    if (match(II.getOperand(0), m_APInt(C)) ||
        match(II.getOperand(1), m_APInt(C)))
      Lower = *C;
    break;
  case Intrinsic::sadd_sat:
    if (match(II.getOperand(0), m_APInt(C)) ||
        match(II.getOperand(1), m_APInt(C))) {
      if (C->isNegative()) {
        // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
        Lower = APInt::getSignedMinValue(Width);
        Upper = APInt::getSignedMaxValue(Width) + *C + 1;
      } else {
        // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
        Lower = APInt::getSignedMinValue(Width) + *C;
        Upper = APInt::getSignedMaxValue(Width) + 1;
      }
    }
    break;
  case Intrinsic::usub_sat:
    // usub.sat(C, x) produces [0, C].
    if (match(II.getOperand(0), m_APInt(C)))
      Upper = *C + 1;
    // usub.sat(x, C) produces [0, UINT_MAX - C].
    else if (match(II.getOperand(1), m_APInt(C)))
      Upper = APInt::getMaxValue(Width) - *C + 1;
    break;
  case Intrinsic::ssub_sat:
    if (match(II.getOperand(0), m_APInt(C))) {
      if (C->isNegative()) {
        // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
        Lower = APInt::getSignedMinValue(Width);
        Upper = *C - APInt::getSignedMinValue(Width) + 1;
      } else {
        // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
        Lower = *C - APInt::getSignedMaxValue(Width);
        Upper = APInt::getSignedMaxValue(Width) + 1;
      }
    } else if (match(II.getOperand(1), m_APInt(C))) {
      if (C->isNegative()) {
        // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX].
        Lower = APInt::getSignedMinValue(Width) - *C;
        Upper = APInt::getSignedMaxValue(Width) + 1;
      } else {
        // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
        Lower = APInt::getSignedMinValue(Width);
        Upper = APInt::getSignedMaxValue(Width) - *C + 1;
      }
    }
    break;
  default:
    break;
  }
}
static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
                                      APInt &Upper) {
  const Value *LHS, *RHS;
  SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
  if (R.Flavor == SPF_UNKNOWN)
    return;

  unsigned BitWidth = SI.getType()->getScalarSizeInBits();

  if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
    // If the negation part of the abs (in RHS) has the NSW flag,
    // then the result of abs(X) is [0..SIGNED_MAX],
    // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
    Lower = APInt::getNullValue(BitWidth);
    if (cast<Instruction>(RHS)->hasNoSignedWrap())
      Upper = APInt::getSignedMaxValue(BitWidth) + 1;
    else
      Upper = APInt::getSignedMinValue(BitWidth) + 1;
    return;
  }

  if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
    // The result of -abs(X) is <= 0.
    Lower = APInt::getSignedMinValue(BitWidth);
    Upper = APInt(BitWidth, 1);
    return;
  }

  const APInt *C;
  if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
    return;

  switch (R.Flavor) {
  case SPF_UMIN:
    Upper = *C + 1;
    break;
  case SPF_UMAX:
    Lower = *C;
    break;
  case SPF_SMIN:
    Lower = APInt::getSignedMinValue(BitWidth);
    Upper = *C + 1;
    break;
  case SPF_SMAX:
    Lower = *C;
    Upper = APInt::getSignedMaxValue(BitWidth) + 1;
    break;
  default:
    break;
  }
}
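// Illustrative results from the function below (value names hypothetical):
// "%v = urem i32 %x, 8" yields the range [0, 8), and "%v = and i32 %x, 15"
// yields [0, 16).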
ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo) {
  assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");

  const APInt *C;
  if (match(V, m_APInt(C)))
    return ConstantRange(*C);

  InstrInfoQuery IIQ(UseInstrInfo);
  unsigned BitWidth = V->getType()->getScalarSizeInBits();
  APInt Lower = APInt(BitWidth, 0);
  APInt Upper = APInt(BitWidth, 0);
  if (auto *BO = dyn_cast<BinaryOperator>(V))
    setLimitsForBinOp(*BO, Lower, Upper, IIQ);
  else if (auto *II = dyn_cast<IntrinsicInst>(V))
    setLimitsForIntrinsic(*II, Lower, Upper);
  else if (auto *SI = dyn_cast<SelectInst>(V))
    setLimitsForSelectPattern(*SI, Lower, Upper);

  ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);

  if (auto *I = dyn_cast<Instruction>(V))
    if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
      CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));

  return CR;
}