//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getIndexTypeSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
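  // For example, with i4 and M = 0b1100, LHS draws its bits from 0b0011 and
  // RHS from 0b1100; the two masks are disjoint, so no bit position can be
  // set in both values at once.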
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2,
                           Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                            Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //  %mul = mul i8 %a, %b
  // where the known low bits of %a are 1100 (12) and the known low bits of
  // %b are 1110 (14), with the higher bits of both unknown.
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
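  //    0b11 * 0b111 = 0b10101 (3 * 7 = 21), and for any values of the
  //    unknown high bits only the two low bits of the trimmed product
  //    stay fixed.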
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  //   =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
  unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
                                      TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
                      Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
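    // For example, for the i8 range [5, 7) the max is 6 (0b110) and the min
    // is 5 (0b101); their xor is 0b011, so the top six bits are shared by
    // every value in the range.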
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
                       return EphValues.count(U);
                     })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::dbg_label:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
         IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
         std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));
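    // m_V also matches V through a ptrtoint or bitcast, so e.g. an
    // assume on ptrtoint(%p) can inform the known bits of %p itself.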

    CmpInst::Predicate Pred;
    uint64_t C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      Known.Zero |= RHSKnown.Zero;
      Known.One  |= RHSKnown.One;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One  |= RHSKnown.One  & MaskKnown.One;
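      // For example, assume((v & 0b1100) == 0b0100) pins bit 2 of v to one
      // and bit 3 to zero, while the bits outside the mask stay unknown.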
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One  & MaskKnown.One;
      Known.One  |= RHSKnown.Zero & MaskKnown.One;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One  |= RHSKnown.One  & BKnown.Zero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One  & BKnown.Zero;
      Known.One  |= RHSKnown.Zero & BKnown.Zero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One  |= RHSKnown.One  & BKnown.Zero;
      Known.Zero |= RHSKnown.One  & BKnown.One;
      Known.One  |= RHSKnown.Zero & BKnown.One;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One  & BKnown.Zero;
      Known.One  |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One  |= RHSKnown.One  & BKnown.One;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
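      // For example, assume((v << 2) == 0b00001100) on an i8 value makes the
      // low six bits of v known to be 000011; only the two bits shifted out
      // remain unknown.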
      RHSKnown.Zero.lshrInPlace(C);
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C);
      Known.One  |= RHSKnown.One;
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One  |= RHSKnown.Zero;
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      Known.Zero |= RHSKnown.Zero << C;
      Known.One  |= RHSKnown.One  << C;
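      // For example, assume((v >> 4) == 1) on an i8 value makes the high
      // nibble of v known to be 0001; the low four bits stay unknown.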
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      Known.Zero |= RHSKnown.One  << C;
      Known.One  |= RHSKnown.Zero << C;
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
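      // For example, assume(v <=_u 15) on an i32 value proves the top 28
      // bits of v are zero, since 15 has 28 leading zero bits.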
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // If the RHS is known zero, then this assumption must be wrong (nothing
      // is unsigned less than zero). Signal a conflict and get out of here.
      if (RHSKnown.isZero()) {
        Known.Zero.setAllBits();
        Known.One.setAllBits();
        break;
      }

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
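      // For example, assume(v <_u 16) on an i32 value: 16 has 27 leading
      // zero bits, and because 16 is a power of two the bound tightens by
      // one more bit, so v <=_u 15 and the top 28 bits of v are zero.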
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functions that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2,
    unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One  = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive. TODO: Should we just carry on?
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One  &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}

static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
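    // For example, on i8 with a numerator of at most 0x3F (LeadZ == 2) and a
    // denominator known to be at least 16 (RHSMaxLeadingZeros == 3), the
    // quotient is at most 63/16 == 3, i.e. LeadZ becomes 2 + 8 - 3 - 1 == 6.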

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    } else if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
        MaxHighZeros = 1;
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getIndexTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;
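        // For example, x srem 8 has LowBits == 7, so whatever is known about
        // the low three bits of x carries over to the result unchanged.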

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }

  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value references to ourself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.One.countLeadingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleLZ)+1;
        Known.Zero.setBitsFrom(LowBits);
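        // For example, if bit 20 of an i32 operand is known to be one, ctlz
        // can return at most 11, so the result fits in Log2_32(11)+1 == 4
        // bits and the higher bits are known zero.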
1487 case Intrinsic::cttz
: {
1488 computeKnownBits(I
->getOperand(0), Known2
, Depth
+ 1, Q
);
1489 // If we have a known 1, its position is our upper bound.
1490 unsigned PossibleTZ
= Known2
.One
.countTrailingZeros();
1491 // If this call is undefined for 0, the result will be less than 2^n.
1492 if (II
->getArgOperand(1) == ConstantInt::getTrue(II
->getContext()))
1493 PossibleTZ
= std::min(PossibleTZ
, BitWidth
- 1);
1494 unsigned LowBits
= Log2_32(PossibleTZ
)+1;
1495 Known
.Zero
.setBitsFrom(LowBits
);
1498 case Intrinsic::ctpop
: {
1499 computeKnownBits(I
->getOperand(0), Known2
, Depth
+ 1, Q
);
1500 // We can bound the space the count needs. Also, bits known to be zero
1501 // can't contribute to the population.
1502 unsigned BitsPossiblySet
= Known2
.countMaxPopulation();
1503 unsigned LowBits
= Log2_32(BitsPossiblySet
)+1;
1504 Known
.Zero
.setBitsFrom(LowBits
);
1505 // TODO: we could bound KnownOne using the lower bound on the number
1506 // of bits which might be set provided by popcnt KnownOne2.
1509 case Intrinsic::x86_sse42_crc32_64_64
:
1510 Known
.Zero
.setBitsFrom(32);
1515 case Instruction::ExtractElement
:
1516 // Look through extract element. At the moment we keep this simple and skip
1517 // tracking the specific element. But at least we might find information
1518 // valid for all elements of the vector (for example if vector is sign
1519 // extended, shifted, etc).
1520 computeKnownBits(I
->getOperand(0), Known
, Depth
+ 1, Q
);
1522 case Instruction::ExtractValue
:
1523 if (IntrinsicInst
*II
= dyn_cast
<IntrinsicInst
>(I
->getOperand(0))) {
1524 const ExtractValueInst
*EVI
= cast
<ExtractValueInst
>(I
);
1525 if (EVI
->getNumIndices() != 1) break;
1526 if (EVI
->getIndices()[0] == 0) {
1527 switch (II
->getIntrinsicID()) {
1529 case Intrinsic::uadd_with_overflow
:
1530 case Intrinsic::sadd_with_overflow
:
1531 computeKnownBitsAddSub(true, II
->getArgOperand(0),
1532 II
->getArgOperand(1), false, Known
, Known2
,
1535 case Intrinsic::usub_with_overflow
:
1536 case Intrinsic::ssub_with_overflow
:
1537 computeKnownBitsAddSub(false, II
->getArgOperand(0),
1538 II
->getArgOperand(1), false, Known
, Known2
,
1541 case Intrinsic::umul_with_overflow
:
1542 case Intrinsic::smul_with_overflow
:
1543 computeKnownBitsMul(II
->getArgOperand(0), II
->getArgOperand(1), false,
1544 Known
, Known2
, Depth
, Q
);
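// Worked example of the population-count bound above (illustrative only): if
// Known2 for an i32 operand allows at most 5 set bits, then ctpop yields at
// most 5, which fits in Log2_32(5) + 1 == 3 bits, so setBitsFrom(3) marks
// result bits [3, 32) as known zero. The ctlz/cttz cases apply the same
// Log2_32(N) + 1 bound to their respective counts.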
/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, Known, Depth, Q);
  return Known;
}
/// Determine which bits of V are known to be either zero or one and return
/// them in the Known bit set.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero.  If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers.  In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                      const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = Known.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
          V->getType()->isPtrOrPtrVectorTy()) &&
         "Not integer or pointer type!");

  Type *ScalarTy = V->getType()->getScalarType();
  unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
    Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
  assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
  (void)BitWidth;
  (void)ExpectedWidth;

  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector constant!
    Known.One = *C;
    Known.Zero = ~Known.One;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
    Known.setAllZero();
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
  if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
    // We know that CDS must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      APInt Elt = CDS->getElementAsAPInt(i);
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  if (const auto *CV = dyn_cast<ConstantVector>(V)) {
    // We know that CV must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Element = CV->getAggregateElement(i);
      auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
      if (!ElementCI) {
        Known.resetAll();
        return;
      }
      const APInt &Elt = ElementCI->getValue();
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  // Start out not knowing anything.
  Known.resetAll();

  // We can't imply anything about undefs.
  if (isa<UndefValue>(V))
    return;

  // There's no point in looking through other users of ConstantData for
  // assumptions.  Confirm that we've handled them all.
  assert(!isa<ConstantData>(V) && "Unhandled constant data!");

  // Limit search depth.
  // All recursive calls that increase depth must come after this.
  if (Depth == MaxDepth)
    return;

  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
  // the bits of its aliasee.
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (!GA->isInterposable())
      computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
    return;
  }

  if (const Operator *I = dyn_cast<Operator>(V))
    computeKnownBitsFromOperator(I, Known, Depth, Q);

  // Aligned pointers have trailing zeros - refine Known.Zero set.
  if (V->getType()->isPointerTy()) {
    unsigned Align = V->getPointerAlignment(Q.DL);
    if (Align)
      Known.Zero.setLowBits(countTrailingZeros(Align));
  }

  // computeKnownBitsFromAssume strictly refines Known.
  // Therefore, we run them after computeKnownBitsFromOperator.

  // Check whether a nearby assume intrinsic can determine some known bits.
  computeKnownBitsFromAssume(V, Known, Depth, Q);

  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
}
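// Illustrative example for the constant paths above (hypothetical IR): a
// splat like <2 x i32> <i32 5, i32 5> is matched by m_APInt, giving
// Known.One == 0b101 and Known.Zero == ~0b101, while a mixed vector such as
// <2 x i32> <i32 5, i32 7> only keeps the bits common to every element
// (here Known.One == 0b101 and Known.Zero == ~0b111).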
/// Return true if the given value is known to have exactly one
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                            const Query &Q) {
  assert(Depth <= MaxDepth && "Limit Search Depth");

  // Attempt to match against constants.
  if (OrZero && match(V, m_Power2OrZero()))
    return true;
  if (match(V, m_Power2()))
    return true;

  // 1 << X is clearly a power of two if the one is not shifted off the end.  If
  // it is shifted off the end then the result is undefined.
  if (match(V, m_Shl(m_One(), m_Value())))
    return true;

  // (signmask) >>l X is clearly a power of two if the one is not shifted off
  // the bottom.  If it is shifted off the bottom then the result is undefined.
  if (match(V, m_LShr(m_SignMask(), m_Value())))
    return true;

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return false;

  Value *X = nullptr, *Y = nullptr;
  // A shift left or a logical shift right of a power of two is a power of two
  // or zero.
  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
                 match(V, m_LShr(m_Value(X), m_Value()))))
    return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);

  if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
    return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);

  if (const SelectInst *SI = dyn_cast<SelectInst>(V))
    return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
           isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);

  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
    // A power of two and'd with anything is a power of two or zero.
    if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
      return true;
    // X & (-X) is always a power of two or zero.
    if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
      return true;
    return false;
  }

  // Adding a power-of-two or zero to the same power-of-two or zero yields
  // either the original power-of-two, a larger power-of-two or zero.
  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
    if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
        Q.IIQ.hasNoSignedWrap(VOBO)) {
      if (match(X, m_And(m_Specific(Y), m_Value())) ||
          match(X, m_And(m_Value(), m_Specific(Y))))
        if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
          return true;
      if (match(Y, m_And(m_Specific(X), m_Value())) ||
          match(Y, m_And(m_Value(), m_Specific(X))))
        if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
          return true;

      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      KnownBits LHSBits(BitWidth);
      computeKnownBits(X, LHSBits, Depth, Q);

      KnownBits RHSBits(BitWidth);
      computeKnownBits(Y, RHSBits, Depth, Q);
      // If i8 V is a power of two or zero:
      //  ZeroBits: 1 1 1 0 1 1 1 1
      // ~ZeroBits: 0 0 0 1 0 0 0 0
      if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
        // If OrZero isn't set, we cannot give back a zero result.
        // Make sure either the LHS or RHS has a bit set.
        if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
          return true;
    }
  }

  // An exact divide or right shift can only shift off zero bits, so the result
  // is a power of two only if the first operand is a power of two and not
  // copying a sign bit (sdiv int_min, 2).
  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
      match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
    return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
                                  Depth, Q);
  }

  return false;
}
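// Example of the add reasoning above (hypothetical values): if Y is a power
// of two and X == Y & Z for some Z, then X is either 0 or Y, so X + Y is
// either Y or 2*Y -- a power of two in both cases, provided the nuw/nsw (or
// OrZero) precondition rules out wrapping past the top bit.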
/// Test whether a GEP's result is known to be non-null.
///
/// Uses properties inherent in a GEP to try to determine whether it is known
/// to be non-null.
///
/// Currently this routine does not support vector GEPs.
static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
                              const Query &Q) {
  const Function *F = nullptr;
  if (const Instruction *I = dyn_cast<Instruction>(GEP))
    F = I->getFunction();

  if (!GEP->isInBounds() ||
      NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
    return false;

  // FIXME: Support vector-GEPs.
  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");

  // If the base pointer is non-null, we cannot walk to a null address with an
  // inbounds GEP in address space zero.
  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
    return true;

  // Walk the GEP operands and see if any operand introduces a non-zero offset.
  // If so, then the GEP cannot produce a null pointer, as doing so would
  // inherently violate the inbounds contract within address space zero.
  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    // Struct types are easy -- they must always be indexed by a constant.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = Q.DL.getStructLayout(STy);
      uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
      if (ElementOffset > 0)
        return true;
      continue;
    }

    // If we have a zero-sized type, the index doesn't matter. Keep looping.
    if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
      continue;

    // Fast path the constant operand case both for efficiency and so we don't
    // increment Depth when just zipping down an all-constant GEP.
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
      if (!OpC->isZero())
        return true;
      continue;
    }

    // We post-increment Depth here because while isKnownNonZero increments it
    // as well, when we pop back up that increment won't persist. We don't want
    // to recurse 10k times just because we have 10k GEP operands. We don't
    // bail completely out because we want to handle constant GEPs regardless
    // of depth.
    if (Depth++ >= MaxDepth)
      continue;

    if (isKnownNonZero(GTI.getOperand(), Depth, Q))
      return true;
  }

  return false;
}
static bool isKnownNonNullFromDominatingCondition(const Value *V,
                                                  const Instruction *CtxI,
                                                  const DominatorTree *DT) {
  assert(V->getType()->isPointerTy() && "V must be pointer type");
  assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");

  if (!CtxI || !DT)
    return false;

  unsigned NumUsesExplored = 0;
  for (auto *U : V->users()) {
    // Avoid massive lists
    if (NumUsesExplored >= DomConditionsMaxUses)
      break;
    NumUsesExplored++;

    // If the value is used as an argument to a call or invoke, then argument
    // attributes may provide an answer about null-ness.
    if (auto CS = ImmutableCallSite(U))
      if (auto *CalledFunc = CS.getCalledFunction())
        for (const Argument &Arg : CalledFunc->args())
          if (CS.getArgOperand(Arg.getArgNo()) == V &&
              Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
            return true;

    // Consider only compare instructions uniquely controlling a branch
    CmpInst::Predicate Pred;
    if (!match(const_cast<User *>(U),
               m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
        (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
      continue;

    SmallVector<const User *, 4> WorkList;
    SmallPtrSet<const User *, 4> Visited;
    for (auto *CmpU : U->users()) {
      assert(WorkList.empty() && "Should be!");
      if (Visited.insert(CmpU).second)
        WorkList.push_back(CmpU);

      while (!WorkList.empty()) {
        auto *Curr = WorkList.pop_back_val();

        // If a user is an AND, add all its users to the work list. We only
        // propagate "pred != null" condition through AND because it is only
        // correct to assume that all conditions of AND are met in true branch.
        // TODO: Support similar logic of OR and EQ predicate?
        if (Pred == ICmpInst::ICMP_NE)
          if (auto *BO = dyn_cast<BinaryOperator>(Curr))
            if (BO->getOpcode() == Instruction::And) {
              for (auto *BOU : BO->users())
                if (Visited.insert(BOU).second)
                  WorkList.push_back(BOU);
              continue;
            }

        if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
          assert(BI->isConditional() && "uses a comparison!");

          BasicBlock *NonNullSuccessor =
              BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
          BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
          if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
            return true;
        } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
                   DT->dominates(cast<Instruction>(Curr), CtxI)) {
          return true;
        }
      }
    }
  }

  return false;
}
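// Typical pattern recognized here (hypothetical IR):
//   %c = icmp ne i8* %p, null
//   br i1 %c, label %taken, label %not_taken
// Any context instruction dominated by the %taken edge may treat %p as
// non-null; with ICMP_EQ the roles of the two successors are swapped.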
/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never \p Value?
static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
  const unsigned NumRanges = Ranges->getNumOperands() / 2;
  assert(NumRanges >= 1);
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.contains(Value))
      return false;
  }

  return true;
}
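// For instance, a load tagged with !range !{i32 1, i32 256} can only produce
// values in [1, 256), so a query against zero returns true here and the
// loaded value is known non-zero.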
/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
  if (auto *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return false;
    if (isa<ConstantInt>(C))
      // Must be non-zero due to null test above.
      return true;

    // For constant vectors, check that all elements are undefined or known
    // non-zero to determine that the whole vector is known non-zero.
    if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
      for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
        Constant *Elt = C->getAggregateElement(i);
        if (!Elt || Elt->isNullValue())
          return false;
        if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
          return false;
      }
      return true;
    }

    // A global variable in address space 0 is non null unless extern weak
    // or an absolute symbol reference. Other address spaces may have null as a
    // valid address for a global, so we can't assume anything.
    if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
      if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
          GV->getType()->getAddressSpace() == 0)
        return true;
    } else
      return false;
  }

  if (auto *I = dyn_cast<Instruction>(V)) {
    if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
      // If the possible ranges don't contain zero, then the value is
      // definitely non-zero.
      if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
        const APInt ZeroValue(Ty->getBitWidth(), 0);
        if (rangeMetadataExcludesValue(Ranges, ZeroValue))
          return true;
      }
    }
  }

  // Some of the tests below are recursive, so bail out if we hit the limit.
  if (Depth++ >= MaxDepth)
    return false;

  // Check for pointer simplifications.
  if (V->getType()->isPointerTy()) {
    // Alloca never returns null, malloc might.
    if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
      return true;

    // A byval, inalloca, or nonnull argument is never null.
    if (const Argument *A = dyn_cast<Argument>(V))
      if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
        return true;

    // A Load tagged with nonnull metadata is never null.
    if (const LoadInst *LI = dyn_cast<LoadInst>(V))
      if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
        return true;

    if (auto CS = ImmutableCallSite(V)) {
      if (CS.isReturnNonNull())
        return true;
      if (const auto *RP = getArgumentAliasingToReturnedPointer(CS))
        return isKnownNonZero(RP, Depth, Q);
    }
  }

  // Check for recursive pointer simplifications.
  if (V->getType()->isPointerTy()) {
    if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
      return true;

    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      if (isGEPKnownNonNull(GEP, Depth, Q))
        return true;
  }

  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);

  // X | Y != 0 if X != 0 or Y != 0.
  Value *X = nullptr, *Y = nullptr;
  if (match(V, m_Or(m_Value(X), m_Value(Y))))
    return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);

  // ext X != 0 if X != 0.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
    return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);

  // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
  // if the lowest bit is shifted off the end.
  if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
    // shl nuw can't remove any non-zero bits.
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    if (Q.IIQ.hasNoUnsignedWrap(BO))
      return isKnownNonZero(X, Depth, Q);

    KnownBits Known(BitWidth);
    computeKnownBits(X, Known, Depth, Q);
    if (Known.One[0])
      return true;
  }
  // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
  // defined if the sign bit is shifted off the end.
  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
    // shr exact can only shift out zero bits.
    const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
    if (BO->isExact())
      return isKnownNonZero(X, Depth, Q);

    KnownBits Known = computeKnownBits(X, Depth, Q);
    if (Known.isNegative())
      return true;

    // If the shifter operand is a constant, and all of the bits shifted
    // out are known to be zero, and X is known non-zero then at least one
    // non-zero bit must remain.
    if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
      auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
      // Is there a known one in the portion not shifted out?
      if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
        return true;
      // Are all the bits to be shifted out known zero?
      if (Known.countMinTrailingZeros() >= ShiftVal)
        return isKnownNonZero(X, Depth, Q);
    }
  }
  // div exact can only produce a zero if the dividend is zero.
  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
    return isKnownNonZero(X, Depth, Q);
  }
  // X + Y.
  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    KnownBits XKnown = computeKnownBits(X, Depth, Q);
    KnownBits YKnown = computeKnownBits(Y, Depth, Q);

    // If X and Y are both non-negative (as signed values) then their sum is not
    // zero unless both X and Y are zero.
    if (XKnown.isNonNegative() && YKnown.isNonNegative())
      if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
        return true;

    // If X and Y are both negative (as signed values) then their sum is not
    // zero unless both X and Y equal INT_MIN.
    if (XKnown.isNegative() && YKnown.isNegative()) {
      APInt Mask = APInt::getSignedMaxValue(BitWidth);
      // The sign bit of X is set.  If some other bit is set then X is not equal
      // to INT_MIN.
      if (XKnown.One.intersects(Mask))
        return true;
      // The sign bit of Y is set.  If some other bit is set then Y is not equal
      // to INT_MIN.
      if (YKnown.One.intersects(Mask))
        return true;
    }

    // The sum of a non-negative number and a power of two is not zero.
    if (XKnown.isNonNegative() &&
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
      return true;
    if (YKnown.isNonNegative() &&
        isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
      return true;
  }
  // X * Y.
  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    // If X and Y are non-zero then so is X * Y as long as the multiplication
    // does not overflow.
    if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
        isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
      return true;
  }
  // (C ? X : Y) != 0 if X != 0 and Y != 0.
  else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
    if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
        isKnownNonZero(SI->getFalseValue(), Depth, Q))
      return true;
  }
  // PHI
  else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    // Try and detect a recurrence that monotonically increases from a
    // starting value, as these are common as induction variables.
    if (PN->getNumIncomingValues() == 2) {
      Value *Start = PN->getIncomingValue(0);
      Value *Induction = PN->getIncomingValue(1);
      if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
        std::swap(Start, Induction);
      if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
        if (!C->isZero() && !C->isNegative()) {
          ConstantInt *X;
          if (Q.IIQ.UseInstrInfo &&
              (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
               match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
              !X->isNegative())
            return true;
        }
      }
    }
    // Check if all incoming values are non-zero constant.
    bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
      return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
    });
    if (AllNonZeroConstants)
      return true;
  }

  KnownBits Known(BitWidth);
  computeKnownBits(V, Known, Depth, Q);
  return Known.One != 0;
}
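// Examples of the cases above (hypothetical IR): (or i32 %x, 1) is non-zero
// because its constant operand is; (shl nuw i32 %x, %s) is non-zero whenever
// %x is, since nuw forbids shifting set bits out; and a PHI recurrence that
// starts at a positive constant and steps by a non-negative constant with
// nuw/nsw can never return to zero.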
/// Return true if V2 == V1 + X, where X is known non-zero.
static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
  const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
  if (!BO || BO->getOpcode() != Instruction::Add)
    return false;
  Value *Op = nullptr;
  if (V2 == BO->getOperand(0))
    Op = BO->getOperand(1);
  else if (V2 == BO->getOperand(1))
    Op = BO->getOperand(0);
  else
    return false;

  return isKnownNonZero(Op, 0, Q);
}
/// Return true if it is known that V1 != V2.
static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
  if (V1 == V2)
    return false;
  if (V1->getType() != V2->getType())
    // We can't look through casts yet.
    return false;
  if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
    return true;

  if (V1->getType()->isIntOrIntVectorTy()) {
    // Are any known bits in V1 contradictory to known bits in V2? If V1
    // has a known zero where V2 has a known one, they must not be equal.
    KnownBits Known1 = computeKnownBits(V1, 0, Q);
    KnownBits Known2 = computeKnownBits(V2, 0, Q);

    if (Known1.Zero.intersects(Known2.One) ||
        Known2.Zero.intersects(Known1.One))
      return true;
  }
  return false;
}
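// For example, (add i32 %x, 1) is never equal to %x itself: x + c == x would
// force c == 0 (mod 2^n), and c here is known non-zero. The known-bits
// comparison catches values that disagree in a bit one side has fixed.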
/// Return true if 'V & Mask' is known to be zero.  We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers.  In the case
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                       const Query &Q) {
  KnownBits Known(Mask.getBitWidth());
  computeKnownBits(V, Known, Depth, Q);
  return Mask.isSubsetOf(Known.Zero);
}
// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
// Returns the input and lower/upper bounds.
static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
                                const APInt *&CLow, const APInt *&CHigh) {
  assert(isa<Operator>(Select) &&
         cast<Operator>(Select)->getOpcode() == Instruction::Select &&
         "Input should be a Select!");

  const Value *LHS, *RHS, *LHS2, *RHS2;
  SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
  if (SPF != SPF_SMAX && SPF != SPF_SMIN)
    return false;

  if (!match(RHS, m_APInt(CLow)))
    return false;

  SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
  if (getInverseMinMaxFlavor(SPF) != SPF2)
    return false;

  if (!match(RHS2, m_APInt(CHigh)))
    return false;

  if (SPF == SPF_SMIN)
    std::swap(CLow, CHigh);

  In = LHS2;
  return CLow->sle(*CHigh);
}
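// A clamp this matches (hypothetical IR for smax(smin(%x, 255), 0)):
//   %lt = icmp slt i32 %x, 255
//   %min = select i1 %lt, i32 %x, i32 255
//   %gt = icmp sgt i32 %min, 0
//   %clamp = select i1 %gt, i32 %min, i32 0
// On %clamp this yields In == %x, *CLow == 0, and *CHigh == 255.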
/// For vector constants, loop over the elements and find the constant with the
/// minimum number of sign bits. Return 0 if the value is not a vector constant
/// or if any element was not analyzed; otherwise, return the count for the
/// element with the minimum number of sign bits.
static unsigned computeNumSignBitsVectorConstant(const Value *V,
                                                 unsigned TyBits) {
  const auto *CV = dyn_cast<Constant>(V);
  if (!CV || !CV->getType()->isVectorTy())
    return 0;

  unsigned MinSignBits = TyBits;
  unsigned NumElts = CV->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    // If we find a non-ConstantInt, bail out.
    auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
    if (!Elt)
      return 0;

    MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
  }

  return MinSignBits;
}
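// e.g. for <2 x i8> <i8 -1, i8 3>, the elements have 8 and 6 sign bits
// respectively (3 is 0b00000011), so the conservative answer returned for
// the whole vector is 6.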
static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
                                       const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
  assert(Result > 0 && "At least one sign bit needs to be present!");
  return Result;
}

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign bit
/// (itself), but other cases can give us information. For example, immediately
/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
/// other, so we return 3. For vectors, return the number of sign bits for the
/// vector element with the minimum number of known sign bits.
static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
                                       const Query &Q) {
  assert(Depth <= MaxDepth && "Limit Search Depth");

  // We return the minimum number of sign bits that are guaranteed to be present
  // in V, so for undef we have to conservatively return 1.  We don't have the
  // same behavior for poison though -- that's a FIXME today.

  Type *ScalarTy = V->getType()->getScalarType();
  unsigned TyBits = ScalarTy->isPointerTy() ?
    Q.DL.getIndexTypeSizeInBits(ScalarTy) :
    Q.DL.getTypeSizeInBits(ScalarTy);

  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general computeKnownBits case
  // below.

  if (Depth == MaxDepth)
    return 1;  // Limit search depth.

  const Operator *U = dyn_cast<Operator>(V);
  switch (Operator::getOpcode(V)) {
  default: break;
  case Instruction::SExt:
    Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;

  case Instruction::SDiv: {
    const APInt *Denominator;
    // sdiv X, C -> adds log(C) sign bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {

      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits.
      unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Add floor(log(C)) bits to the numerator bits.
      return std::min(TyBits, NumBits + Denominator->logBase2());
    }
    break;
  }

  case Instruction::SRem: {
    const APInt *Denominator;
    // srem X, C -> we know that the result is within [-C+1,C) when C is a
    // positive constant.  This lets us put a lower bound on the number of sign
    // bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {

      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits. SRem by a positive constant
      // can't lower the number of sign bits.
      unsigned NumrBits =
          ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Calculate the leading sign bit constraints by examining the
      // denominator.  Given that the denominator is positive, there are two
      // cases:
      //
      //  1. the numerator is positive. The result range is [0,C) and [0,C) u<
      //     (1 << ceilLogBase2(C)).
      //
      //  2. the numerator is negative. Then the result range is (-C,0] and
      //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
      //
      // Thus a lower bound on the number of sign bits is `TyBits -
      // ceilLogBase2(C)`.

      unsigned ResBits = TyBits - Denominator->ceilLogBase2();
      return std::max(NumrBits, ResBits);
    }
    break;
  }

  case Instruction::AShr: {
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    // ashr X, C   -> adds C sign bits.  Vectors too.
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      if (ShAmt->uge(TyBits))
        break;  // Bad shift.
      unsigned ShAmtLimited = ShAmt->getZExtValue();
      Tmp += ShAmtLimited;
      if (Tmp > TyBits) Tmp = TyBits;
    }
    return Tmp;
  }

  case Instruction::Shl: {
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
      if (ShAmt->uge(TyBits) ||      // Bad shift.
          ShAmt->uge(Tmp)) break;    // Shifted all sign bits out.
      Tmp2 = ShAmt->getZExtValue();
      return Tmp - Tmp2;
    }
    break;
  }

  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:    // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // computeKnownBits, and pick whichever answer is better.
    }
    break;

  case Instruction::Select: {
    // If we have a clamp pattern, we know that the number of sign bits will be
    // the minimum of the clamp min/max range.
    const Value *X;
    const APInt *CLow, *CHigh;
    if (isSignedMinMaxClamp(U, X, CLow, CHigh))
      return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());

    Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp == 1) break;
    Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
    return std::min(Tmp, Tmp2);
  }

  case Instruction::Add:
    // Add can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp == 1) break;

    // Special case decrementing a value (ADD X, -1):
    if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        KnownBits Known(TyBits);
        computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((Known.Zero | 1).isAllOnesValue())
          return TyBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (Known.isNonNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp2 == 1) break;
    return std::min(Tmp, Tmp2)-1;

  case Instruction::Sub:
    Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp2 == 1) break;

    // Handle NEG.
    if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
      if (CLHS->isNullValue()) {
        KnownBits Known(TyBits);
        computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((Known.Zero | 1).isAllOnesValue())
          return TyBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (Known.isNonNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp == 1) break;
    return std::min(Tmp, Tmp2)-1;

  case Instruction::Mul: {
    // The output of the Mul can be at most twice the valid bits in the inputs.
    unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (SignBitsOp0 == 1) break;
    unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (SignBitsOp1 == 1) break;
    unsigned OutValidBits =
        (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
    return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
  }

  case Instruction::PHI: {
    const PHINode *PN = cast<PHINode>(U);
    unsigned NumIncomingValues = PN->getNumIncomingValues();
    // Don't analyze large in-degree PHIs.
    if (NumIncomingValues > 4) break;
    // Unreachable blocks may have zero-operand PHI nodes.
    if (NumIncomingValues == 0) break;

    // Take the minimum of all incoming values.  This can't infinitely loop
    // because of our depth threshold.
    Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
    for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
      if (Tmp == 1) return Tmp;
      Tmp = std::min(
          Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
    }
    return Tmp;
  }

  case Instruction::Trunc:
    // FIXME: it's tricky to do anything useful for this, but it is an important
    // case for targets like X86.
    break;

  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if vector is sign
    // extended, shifted, etc).
    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.

  // If we can examine all elements of a vector constant successfully, we're
  // done (we can't do any better than that). If not, keep trying.
  if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
    return VecSignBits;

  KnownBits Known(TyBits);
  computeKnownBits(V, Known, Depth, Q);

  // If we know that the sign bit is either zero or one, determine the number of
  // identical bits in the top of the input value.
  return std::max(FirstAnswer, Known.countMinSignBits());
}
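// Worked example (hypothetical IR): for (ashr i32 %x, 24) the operand
// contributes at least 1 sign bit and the shift replicates it into 24 more
// positions, so the AShr case above reports at least 25 sign bits.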
/// This function computes the integer multiple of Base that equals V.
/// If successful, it returns true and returns the multiple in
/// Multiple. If unsuccessful, it returns false. It looks
/// through SExt instructions only if LookThroughSExt is true.
bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
                           bool LookThroughSExt, unsigned Depth) {
  const unsigned MaxDepth = 6;

  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");

  Type *T = V->getType();

  ConstantInt *CI = dyn_cast<ConstantInt>(V);

  if (Base == 0)
    return false;

  if (Base == 1) {
    Multiple = V;
    return true;
  }

  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
  Constant *BaseVal = ConstantInt::get(T, Base);
  if (CO && CO == BaseVal) {
    Multiple = ConstantInt::get(T, 1);
    return true;
  }

  if (CI && CI->getZExtValue() % Base == 0) {
    Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
    return true;
  }

  if (Depth == MaxDepth) return false;  // Limit search depth.

  Operator *I = dyn_cast<Operator>(V);
  if (!I) return false;

  switch (I->getOpcode()) {
  default: break;
  case Instruction::SExt:
    if (!LookThroughSExt) return false;
    // otherwise fall through to ZExt
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
    return ComputeMultiple(I->getOperand(0), Base, Multiple,
                           LookThroughSExt, Depth+1);
  case Instruction::Shl:
  case Instruction::Mul: {
    Value *Op0 = I->getOperand(0);
    Value *Op1 = I->getOperand(1);

    if (I->getOpcode() == Instruction::Shl) {
      ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
      if (!Op1CI) return false;
      // Turn Op0 << Op1 into Op0 * 2^Op1
      APInt Op1Int = Op1CI->getValue();
      uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
      APInt API(Op1Int.getBitWidth(), 0);
      API.setBit(BitToSet);
      Op1 = ConstantInt::get(V->getContext(), API);
    }

    Value *Mul0 = nullptr;
    if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
      if (Constant *Op1C = dyn_cast<Constant>(Op1))
        if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
          if (Op1C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
          if (Op1C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op1C->getType());

          // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
          Multiple = ConstantExpr::getMul(MulC, Op1C);
          return true;
        }

      if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
        if (Mul0CI->getValue() == 1) {
          // V == Base * Op1, so return Op1
          Multiple = Op1;
          return true;
        }
    }

    Value *Mul1 = nullptr;
    if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
      if (Constant *Op0C = dyn_cast<Constant>(Op0))
        if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
          if (Op0C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
          if (Op0C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op0C->getType());

          // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
          Multiple = ConstantExpr::getMul(MulC, Op0C);
          return true;
        }

      if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
        if (Mul1CI->getValue() == 1) {
          // V == Base * Op0, so return Op0
          Multiple = Op0;
          return true;
        }
    }
    break;
  }
  }

  // We could not determine if V is a multiple of Base.
  return false;
}
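// Usage sketch (hypothetical values): for V = (shl i32 %x, 3) and Base == 8,
// the shift is first rewritten as %x * 8; the recursive call then finds that
// the constant operand is exactly one multiple of Base, so Multiple becomes
// %x, i.e. V == 8 * %x.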
Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
                                            const TargetLibraryInfo *TLI) {
  const Function *F = ICS.getCalledFunction();
  if (!F)
    return Intrinsic::not_intrinsic;

  if (F->isIntrinsic())
    return F->getIntrinsicID();

  if (!TLI)
    return Intrinsic::not_intrinsic;

  LibFunc Func;
  // We're going to make assumptions on the semantics of the functions, check
  // that the target knows that it's available in this environment and it does
  // not have local linkage.
  if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
    return Intrinsic::not_intrinsic;

  if (!ICS.onlyReadsMemory())
    return Intrinsic::not_intrinsic;

  // Otherwise check if we have a call to a function that can be turned into a
  // vector intrinsic.
  switch (Func) {
  default:
    break;
  case LibFunc_sin:
  case LibFunc_sinf:
  case LibFunc_sinl:
    return Intrinsic::sin;
  case LibFunc_cos:
  case LibFunc_cosf:
  case LibFunc_cosl:
    return Intrinsic::cos;
  case LibFunc_exp:
  case LibFunc_expf:
  case LibFunc_expl:
    return Intrinsic::exp;
  case LibFunc_exp2:
  case LibFunc_exp2f:
  case LibFunc_exp2l:
    return Intrinsic::exp2;
  case LibFunc_log:
  case LibFunc_logf:
  case LibFunc_logl:
    return Intrinsic::log;
  case LibFunc_log10:
  case LibFunc_log10f:
  case LibFunc_log10l:
    return Intrinsic::log10;
  case LibFunc_log2:
  case LibFunc_log2f:
  case LibFunc_log2l:
    return Intrinsic::log2;
  case LibFunc_fabs:
  case LibFunc_fabsf:
  case LibFunc_fabsl:
    return Intrinsic::fabs;
  case LibFunc_fmin:
  case LibFunc_fminf:
  case LibFunc_fminl:
    return Intrinsic::minnum;
  case LibFunc_fmax:
  case LibFunc_fmaxf:
  case LibFunc_fmaxl:
    return Intrinsic::maxnum;
  case LibFunc_copysign:
  case LibFunc_copysignf:
  case LibFunc_copysignl:
    return Intrinsic::copysign;
  case LibFunc_floor:
  case LibFunc_floorf:
  case LibFunc_floorl:
    return Intrinsic::floor;
  case LibFunc_ceil:
  case LibFunc_ceilf:
  case LibFunc_ceill:
    return Intrinsic::ceil;
  case LibFunc_trunc:
  case LibFunc_truncf:
  case LibFunc_truncl:
    return Intrinsic::trunc;
  case LibFunc_rint:
  case LibFunc_rintf:
  case LibFunc_rintl:
    return Intrinsic::rint;
  case LibFunc_nearbyint:
  case LibFunc_nearbyintf:
  case LibFunc_nearbyintl:
    return Intrinsic::nearbyint;
  case LibFunc_round:
  case LibFunc_roundf:
  case LibFunc_roundl:
    return Intrinsic::round;
  case LibFunc_pow:
  case LibFunc_powf:
  case LibFunc_powl:
    return Intrinsic::pow;
  case LibFunc_sqrt:
  case LibFunc_sqrtf:
  case LibFunc_sqrtl:
    return Intrinsic::sqrt;
  }

  return Intrinsic::not_intrinsic;
}
/// Return true if we can prove that the specified FP value is never equal to
/// -0.0.
///
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
                                unsigned Depth) {
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegZero();

  // Limit search depth.
  if (Depth == MaxDepth)
    return false;

  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return false;

  // Check if the nsz fast-math flag is set.
  if (auto *FPO = dyn_cast<FPMathOperator>(Op))
    if (FPO->hasNoSignedZeros())
      return true;

  // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
  if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
    return true;

  // sitofp and uitofp turn into +0.0 for zero.
  if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
    return true;

  if (auto *Call = dyn_cast<CallInst>(Op)) {
    Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
    switch (IID) {
    default:
      break;
    // sqrt(-0.0) = -0.0, no other negative results are possible.
    case Intrinsic::sqrt:
    case Intrinsic::canonicalize:
      return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
    // fabs(x) != -0.0
    case Intrinsic::fabs:
      return true;
    }
  }

  return false;
}
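// Note the asymmetry the fadd check above encodes: (fadd %x, 0.0) can produce
// +0.0 but never -0.0 (even for %x == -0.0, the default rounding mode gives
// +0.0), whereas (fadd %x, -0.0) is just %x and proves nothing.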
/// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
/// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
/// bit despite comparing equal.
static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
                                            const TargetLibraryInfo *TLI,
                                            bool SignBitOnly,
                                            unsigned Depth) {
  // TODO: This function does not do the right thing when SignBitOnly is true
  // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
  // which flips the sign bits of NaNs.  See
  // https://llvm.org/bugs/show_bug.cgi?id=31702.

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    return !CFP->getValueAPF().isNegative() ||
           (!SignBitOnly && CFP->getValueAPF().isZero());
  }

  // Handle vector of constants.
  if (auto *CV = dyn_cast<Constant>(V)) {
    if (CV->getType()->isVectorTy()) {
      unsigned NumElts = CV->getType()->getVectorNumElements();
      for (unsigned i = 0; i != NumElts; ++i) {
        auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
        if (!CFP)
          return false;
        if (CFP->getValueAPF().isNegative() &&
            (SignBitOnly || !CFP->getValueAPF().isZero()))
          return false;
      }

      // All non-negative ConstantFPs.
      return true;
    }
  }

  if (Depth == MaxDepth)
    return false; // Limit search depth.

  const Operator *I = dyn_cast<Operator>(V);
  if (!I)
    return false;

  switch (I->getOpcode()) {
  default:
    break;
  // Unsigned integers are always nonnegative.
  case Instruction::UIToFP:
    return true;
  case Instruction::FMul:
    // x*x is always non-negative or a NaN.
    if (I->getOperand(0) == I->getOperand(1) &&
        (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
      return true;

    LLVM_FALLTHROUGH;
  case Instruction::FAdd:
  case Instruction::FDiv:
  case Instruction::FRem:
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1) &&
           cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::Select:
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                           Depth + 1) &&
           cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::FPExt:
  case Instruction::FPTrunc:
    // Widening/narrowing never change sign.
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector.
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::Call:
    const auto *CI = cast<CallInst>(I);
    Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
    switch (IID) {
    default:
      break;
    case Intrinsic::maxnum:
      return (isKnownNeverNaN(I->getOperand(0), TLI) &&
              cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
                                              SignBitOnly, Depth + 1)) ||
             (isKnownNeverNaN(I->getOperand(1), TLI) &&
              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
                                              SignBitOnly, Depth + 1));

    case Intrinsic::minnum:
      return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                             Depth + 1) &&
             cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                             Depth + 1);
    case Intrinsic::exp:
    case Intrinsic::exp2:
    case Intrinsic::fabs:
      return true;

    case Intrinsic::sqrt:
      // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
      if (!SignBitOnly)
        return true;
      return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
                                 CannotBeNegativeZero(CI->getOperand(0), TLI));

    case Intrinsic::powi:
      if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // powi(x,n) is non-negative if n is even.
        if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
          return true;
      }
      // TODO: This is not correct.  Given that exp is an integer, here are the
      // ways that pow can return a negative value:
      //
      //   pow(x, exp)    --> negative if exp is odd and x is negative.
      //   pow(-0, exp)   --> -inf if exp is negative odd.
      //   pow(-0, exp)   --> -0 if exp is positive odd.
      //   pow(-inf, exp) --> -0 if exp is negative odd.
      //   pow(-inf, exp) --> -inf if exp is positive odd.
      //
      // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
      // but we must return false if x == -0.  Unfortunately we do not currently
      // have a way of expressing this constraint.  See details in
      // https://llvm.org/bugs/show_bug.cgi?id=31702.
      return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                             Depth + 1);

    case Intrinsic::fma:
    case Intrinsic::fmuladd:
      // x*x+y is non-negative if y is non-negative.
      return I->getOperand(0) == I->getOperand(1) &&
             (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
             cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
                                             Depth + 1);
    }
    break;
  }
  return false;
}
bool llvm::CannotBeOrderedLessThanZero(const Value *V,
                                       const TargetLibraryInfo *TLI) {
  return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
}

bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
  return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
}
bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
                           unsigned Depth) {
  assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");

  // If we're told that NaNs won't happen, assume they won't.
  if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
    if (FPMathOp->hasNoNaNs())
      return true;

  // Handle scalar constants.
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->isNaN();

  if (Depth == MaxDepth)
    return false;

  if (auto *Inst = dyn_cast<Instruction>(V)) {
    switch (Inst->getOpcode()) {
    case Instruction::FAdd:
    case Instruction::FMul:
    case Instruction::FSub:
    case Instruction::FDiv:
    case Instruction::FRem: {
      // TODO: Need isKnownNeverInfinity
      return false;
    }
    case Instruction::Select: {
      return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
             isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
    }
    case Instruction::SIToFP:
    case Instruction::UIToFP:
      return true;
    case Instruction::FPTrunc:
    case Instruction::FPExt:
      return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
    default:
      break;
    }
  }

  if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::canonicalize:
    case Intrinsic::fabs:
    case Intrinsic::copysign:
    case Intrinsic::exp:
    case Intrinsic::exp2:
    case Intrinsic::floor:
    case Intrinsic::ceil:
    case Intrinsic::trunc:
    case Intrinsic::rint:
    case Intrinsic::nearbyint:
    case Intrinsic::round:
      return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
    case Intrinsic::sqrt:
      return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
             CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
    default:
      return false;
    }
  }

  // Bail out for constant expressions, but try to handle vector constants.
  if (!V->getType()->isVectorTy() || !isa<Constant>(V))
    return false;

  // For vectors, verify that each element is not NaN.
  unsigned NumElts = V->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
    if (!Elt)
      return false;
    if (isa<UndefValue>(Elt))
      continue;
    auto *CElt = dyn_cast<ConstantFP>(Elt);
    if (!CElt || CElt->isNaN())
      return false;
  }
  // All elements were confirmed not-NaN or undefined.
  return true;
}
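// e.g. (uitofp i32 %n to float) can never be NaN (integer conversions yield
// finite values), while (fdiv float %a, %b) can produce NaN from 0.0/0.0, so
// the plain arithmetic cases above conservatively return false.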
Value *llvm::isBytewiseValue(Value *V) {

  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8))
    return V;

  LLVMContext &Ctx = V->getContext();

  // Undef don't care.
  auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
  if (isa<UndefValue>(V))
    return UndefInt8;

  Constant *C = dyn_cast<Constant>(V);
  if (!C) {
    // Conceptually, we could handle things like:
    //   %a = zext i8 %X to i16
    //   %b = shl i16 %a, 8
    //   %c = or i16 %a, %b
    // but until there is an example that actually needs this, it doesn't seem
    // worth worrying about.
    return nullptr;
  }

  // Handle 'null' ConstantArrayZero etc.
  if (C->isNullValue())
    return Constant::getNullValue(Type::getInt8Ty(Ctx));

  // Constant floating-point values can be handled as integer values if the
  // corresponding integer value is "byteable".  An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    Type *Ty = nullptr;
    if (CFP->getType()->isHalfTy())
      Ty = Type::getInt16Ty(Ctx);
    else if (CFP->getType()->isFloatTy())
      Ty = Type::getInt32Ty(Ctx);
    else if (CFP->getType()->isDoubleTy())
      Ty = Type::getInt64Ty(Ctx);
    // Don't handle long double formats, which have strange constraints.
    return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty)) : nullptr;
  }

  // We can handle constant integers that are multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() % 8 == 0) {
      assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
      if (!CI->getValue().isSplat(8))
        return nullptr;
      return ConstantInt::get(Ctx, CI->getValue().trunc(8));
    }
  }

  auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
    if (LHS == RHS)
      return LHS;
    if (!LHS || !RHS)
      return nullptr;
    if (LHS == UndefInt8)
      return RHS;
    if (RHS == UndefInt8)
      return LHS;
    return nullptr;
  };

  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
    Value *Val = UndefInt8;
    for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
      if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I)))))
        return nullptr;
    return Val;
  }

  if (isa<ConstantVector>(C)) {
    Constant *Splat = cast<ConstantVector>(C)->getSplatValue();
    return Splat ? isBytewiseValue(Splat) : nullptr;
  }

  if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
    Value *Val = UndefInt8;
    for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
      if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I)))))
        return nullptr;
    return Val;
  }

  // Don't try to handle the handful of other constants.
  return nullptr;
}
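// Examples of what the function accepts (hypothetical constants): i32
// 0xAAAAAAAA splats to i8 0xAA; float 0.0 is all-zero bytes and yields i8 0
// via the null-value path; i32 0x01020304 has no single repeated byte and
// returns nullptr.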
// This is the recursive version of BuildSubAggregate. It takes a few different
// arguments. Idxs is the index within the nested struct From that we are
// looking at now (which is of type IndexedType). IdxSkip is the number of
// indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far, new insertvalue instructions
// build on that.
static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
                                SmallVectorImpl<unsigned> &Idxs,
                                unsigned IdxSkip,
                                Instruction *InsertBefore) {
  StructType *STy = dyn_cast<StructType>(IndexedType);
  if (STy) {
    // Save the original To argument so we can modify it
    Value *OrigTo = To;
    // General case, the type indexed by Idxs is a struct
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      // Process each struct element recursively
      Idxs.push_back(i);
      Value *PrevTo = To;
      To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
                             InsertBefore);
      Idxs.pop_back();
      if (!To) {
        // Couldn't find any inserted value for this index? Cleanup
        while (PrevTo != OrigTo) {
          InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
          PrevTo = Del->getAggregateOperand();
          Del->eraseFromParent();
        }
        // Stop processing elements
        break;
      }
    }
    // If we successfully found a value for each of our subaggregates
    if (To)
      return To;
  }
  // Base case, the type indexed by SourceIdxs is not a struct, or not all of
  // the struct's elements had a value that was inserted directly. In the latter
  // case, perhaps we can't determine each of the subelements individually, but
  // we might be able to find the complete struct somewhere.

  // Find the value that is at that particular spot
  Value *V = FindInsertedValue(From, Idxs);

  if (!V)
    return nullptr;

  // Insert the value in the new (sub) aggregate
  return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
                                 "tmp", InsertBefore);
}

// This helper takes a nested struct and extracts a part of it (which is again a
// struct) into a new value. For example, given the struct:
// { a, { b, { c, d }, e } }
// and the indices "1, 1" this returns
// { c, d }.
//
// It does this by inserting an insertvalue for each element in the resulting
// struct, as opposed to just inserting a single struct. This will only work if
// each of the elements of the substruct are known (ie, inserted into From by an
// insertvalue instruction somewhere).
//
// All inserted insertvalue instructions are inserted before InsertBefore
static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
                                Instruction *InsertBefore) {
  assert(InsertBefore && "Must have someplace to insert!");
  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
                                                       idx_range);
  Value *To = UndefValue::get(IndexedType);
  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
  unsigned IdxSkip = Idxs.size();

  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
}
/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it was inserted
/// directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                               Instruction *InsertBefore) {
  // Nothing to index? Just return V then (this is useful at the end of our
  // recursion).
  if (idx_range.empty())
    return V;
  // We have indices, so V should have an indexable type.
  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
         "Not looking at a struct or array?");
  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
         "Invalid indices for type?");

  if (Constant *C = dyn_cast<Constant>(V)) {
    C = C->getAggregateElement(idx_range[0]);
    if (!C) return nullptr;
    return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
  }

  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
    // Loop over the indices of the insertvalue instruction in parallel with
    // the requested indices.
    const unsigned *req_idx = idx_range.begin();
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i, ++req_idx) {
      if (req_idx == idx_range.end()) {
        // We can't handle this without inserting insertvalues.
        if (!InsertBefore)
          return nullptr;

        // The requested index identifies a part of a nested aggregate. Handle
        // this specially. For example,
        // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
        // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
        // %C = extractvalue {i32, { i32, i32 } } %B, 1
        // This can be changed into
        // %A = insertvalue {i32, i32 } undef, i32 10, 0
        // %C = insertvalue {i32, i32 } %A, i32 11, 1
        // which allows the unused 0,0 element from the nested struct to be
        // removed.
        return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
                                 InsertBefore);
      }

      // This insertvalue inserts something other than what we are looking
      // for. See if the (aggregate) value inserted into has the value we are
      // looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_range,
                                 InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match with those
    // requested (though possibly only partially). Now we recursively look at
    // the inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(),
                             makeArrayRef(req_idx, idx_range.end()),
                             InsertBefore);
  }

  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
    // something else, we can extract from that something else directly
    // instead. However, we will need to chain I's indices with the requested
    // indices.

    // Calculate the number of indices required.
    unsigned size = I->getNumIndices() + idx_range.size();
    // Allocate some space to put the new indices in.
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);
    // Add indices from the extract value instruction.
    Idxs.append(I->idx_begin(), I->idx_end());

    // Add requested indices.
    Idxs.append(idx_range.begin(), idx_range.end());

    assert(Idxs.size() == size
           && "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
  }
  // Otherwise, we don't know (such as, extracting from a function return value
  // or load instruction).
  return nullptr;
}
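// Example (illustrative; the value names are invented for this sketch): for
//   %agg = insertvalue { i32, i32 } undef, i32 7, 1
// a query FindInsertedValue(%agg, {1}) walks the matching index and returns
// the scalar i32 7 directly, inserting no new instructions; InsertBefore only
// matters for the nested-aggregate case handled by BuildSubAggregate above.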
/// Analyze the specified pointer to see if it can be expressed as a base
/// pointer plus a constant offset. Return the base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                              const DataLayout &DL) {
  unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
  APInt ByteOffset(BitWidth, 0);

  // We walk up the defs but use a visited set to handle unreachable code. In
  // that case, we stop after accumulating the cycle once (not that it
  // matters).
  SmallPtrSet<Value *, 16> Visited;
  while (Visited.insert(Ptr).second) {
    if (Ptr->getType()->isVectorTy())
      break;

    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
      // If one of the values we have visited is an addrspacecast, then
      // the pointer type of this GEP may be different from the type
      // of the Ptr parameter which was passed to this function. This
      // means when we construct GEPOffset, we need to use the size
      // of GEP's pointer type rather than the size of the original
      // Ptr parameter.
      APInt GEPOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset))
        break;

      ByteOffset += GEPOffset.getSExtValue();

      Ptr = GEP->getPointerOperand();
    } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
               Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
      Ptr = cast<Operator>(Ptr)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
      if (GA->isInterposable())
        break;
      Ptr = GA->getAliasee();
    } else {
      break;
    }
  }
  Offset = ByteOffset.getSExtValue();
  return Ptr;
}
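// Example (illustrative; %buf is an invented name): for
//   %p = getelementptr inbounds i8, i8* %buf, i64 12
// GetPointerBaseWithConstantOffset(%p, Offset, DL) returns %buf and sets
// Offset to 12. A GEP with a non-constant index makes
// accumulateConstantOffset fail, which stops the walk and returns the
// partially-stripped pointer instead.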
bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
                                       unsigned CharSize) {
  // Make sure the GEP has exactly three arguments.
  if (GEP->getNumOperands() != 3)
    return false;

  // Make sure the index-ee is a pointer to array of \p CharSize integers.
  ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
  if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
    return false;

  // Check to make sure that the first operand of the GEP is an integer and
  // has value 0 so that we are sure we're indexing into the initializer.
  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!FirstIdx || !FirstIdx->isZero())
    return false;

  return true;
}
bool llvm::getConstantDataArrayInfo(const Value *V,
                                    ConstantDataArraySlice &Slice,
                                    unsigned ElementSize, uint64_t Offset) {
  assert(V);

  // Look through bitcast instructions and geps.
  V = V->stripPointerCasts();

  // If the value is a GEP instruction or constant expression, treat it as an
  // offset.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // The GEP operator should be based on a pointer to string constant, and is
    // indexing into the string constant.
    if (!isGEPBasedOnPointerToString(GEP, ElementSize))
      return false;

    // If the second index isn't a ConstantInt, then this is a variable index
    // into the array. If this occurs, we can't say anything meaningful about
    // the string.
    uint64_t StartIdx = 0;
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
      StartIdx = CI->getZExtValue();
    else
      return false;
    return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
                                    StartIdx + Offset);
  }

  // The GEP instruction, constant or instruction, must reference a global
  // variable that is a constant and is initialized. The referenced constant
  // initializer is the array that we'll use for optimization.
  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return false;

  const ConstantDataArray *Array;
  ArrayType *ArrayTy;
  if (GV->getInitializer()->isNullValue()) {
    Type *GVTy = GV->getValueType();
    if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
      // A zeroinitializer for the array; there is no ConstantDataArray.
      Array = nullptr;
    } else {
      const DataLayout &DL = GV->getParent()->getDataLayout();
      uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
      uint64_t Length = SizeInBytes / (ElementSize / 8);
      if (Length <= Offset)
        return false;

      Slice.Array = nullptr;
      Slice.Offset = 0;
      Slice.Length = Length - Offset;
      return true;
    }
  } else {
    // This must be a ConstantDataArray.
    Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
    if (!Array)
      return false;
    ArrayTy = Array->getType();
  }
  if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
    return false;

  uint64_t NumElts = ArrayTy->getArrayNumElements();
  if (Offset > NumElts)
    return false;

  Slice.Array = Array;
  Slice.Offset = Offset;
  Slice.Length = NumElts - Offset;
  return true;
}
/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and returns the string in Str.
/// If unsuccessful, it returns false.
bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
                                 uint64_t Offset, bool TrimAtNul) {
  ConstantDataArraySlice Slice;
  if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
    return false;

  if (Slice.Array == nullptr) {
    if (TrimAtNul) {
      Str = StringRef();
      return true;
    }
    if (Slice.Length == 1) {
      Str = StringRef("", 1);
      return true;
    }
    // We cannot instantiate a StringRef as we do not have an appropriate string
    // of 0s at hand.
    return false;
  }

  // Start out with the entire array in the StringRef.
  Str = Slice.Array->getAsString();
  // Skip over 'offset' bytes.
  Str = Str.substr(Slice.Offset);

  if (TrimAtNul) {
    // Trim off the \0 and anything after it. If the array is not nul
    // terminated, we just return the whole end of string. The client may know
    // some other way that the string is length-bound.
    Str = Str.substr(0, Str.find('\0'));
  }
  return true;
}
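// Example (illustrative; @str is an invented name): given
//   @str = private constant [6 x i8] c"hello\00"
// getConstantStringInfo(@str, Str) yields Str == "hello", with the trailing
// \0 trimmed; passing TrimAtNul == false instead returns the full 6-byte
// slice, including the terminator.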
// These next two are very similar to the above, but also look through PHI
// nodes.
// TODO: See if we can integrate these two together.

/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
static uint64_t GetStringLengthH(const Value *V,
                                 SmallPtrSetImpl<const PHINode*> &PHIs,
                                 unsigned CharSize) {
  // Look through noop bitcast instructions.
  V = V->stripPointerCasts();

  // If this is a PHI node, there are two cases: either we have already seen it
  // or we haven't.
  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    if (!PHIs.insert(PN).second)
      return ~0ULL;  // already in the set.

    // If it was new, see if all the input strings are the same length.
    uint64_t LenSoFar = ~0ULL;
    for (Value *IncValue : PN->incoming_values()) {
      uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
      if (Len == 0) return 0; // Unknown length -> unknown.

      if (Len == ~0ULL) continue;

      if (Len != LenSoFar && LenSoFar != ~0ULL)
        return 0;    // Disagree -> unknown.
      LenSoFar = Len;
    }

    // Success, all agree.
    return LenSoFar;
  }

  // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
    uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
    if (Len1 == 0) return 0;
    uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
    if (Len2 == 0) return 0;
    if (Len1 == ~0ULL) return Len2;
    if (Len2 == ~0ULL) return Len1;
    if (Len1 != Len2) return 0;
    return Len1;
  }

  // Otherwise, see if we can read the string.
  ConstantDataArraySlice Slice;
  if (!getConstantDataArrayInfo(V, Slice, CharSize))
    return 0;

  if (Slice.Array == nullptr)
    return 1;

  // Search for nul characters.
  unsigned NullIndex = 0;
  for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
    if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
      break;
  }

  return NullIndex + 1;
}

/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
  if (!V->getType()->isPointerTy())
    return 0;

  SmallPtrSet<const PHINode*, 32> PHIs;
  uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
  // an empty string as a length.
  return Len == ~0ULL ? 1 : Len;
}
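// Example (illustrative): for the @str global sketched above, GetStringLength
// returns 6, i.e. strlen("hello") + 1. A select between two constants "ab\00"
// and "xy\00" also resolves, to 3, via the select case in GetStringLengthH,
// since both arms agree on the length.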
const Value *llvm::getArgumentAliasingToReturnedPointer(ImmutableCallSite CS) {
  assert(CS &&
         "getArgumentAliasingToReturnedPointer only works on nonnull CallSite");
  if (const Value *RV = CS.getReturnedArgOperand())
    return RV;
  // This can be used only as an aliasing property.
  if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CS))
    return CS.getArgOperand(0);
  return nullptr;
}

bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    ImmutableCallSite CS) {
  return CS.getIntrinsicID() == Intrinsic::launder_invariant_group ||
         CS.getIntrinsicID() == Intrinsic::strip_invariant_group;
}
/// \p PN defines a loop-variant pointer to an object. Check if the
/// previous iteration of the loop was referring to the same object as \p PN.
static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
                                         const LoopInfo *LI) {
  // Find the loop-defined value.
  Loop *L = LI->getLoopFor(PN->getParent());
  if (PN->getNumIncomingValues() != 2)
    return true;

  // Find the value from previous iteration.
  auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    return true;

  // If a new pointer is loaded in the loop, the pointer references a different
  // object in every iteration. E.g.:
  //    for (i)
  //       int *p = a[i];
  //       ...
  if (auto *Load = dyn_cast<LoadInst>(PrevValue))
    if (!L->isLoopInvariant(Load->getPointerOperand()))
      return false;
  return true;
}
Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
                                 unsigned MaxLookup) {
  if (!V->getType()->isPointerTy())
    return V;
  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        return V;
      V = GA->getAliasee();
    } else if (isa<AllocaInst>(V)) {
      // An alloca can't be further simplified.
      return V;
    } else {
      if (auto CS = CallSite(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed with
        // the attributes, but have properties like returning aliasing pointer.
        // Because some analysis may assume that a nocapture pointer is not
        // returned from some special intrinsic (because the function would have
        // to be marked with the returns attribute), it is crucial to use this
        // function because it should stay in sync with CaptureTracking. Not
        // using it may cause weird miscompilations where two aliasing pointers
        // are assumed not to alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
          V = RP;
          continue;
        }
      }

      // See if InstructionSimplify knows any relevant tricks.
      if (Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Acquire a DominatorTree and AssumptionCache and use them.
        if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
          V = Simplified;
          continue;
        }

      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  }
  return V;
}
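// Example (illustrative; names invented): for
//   %a = alloca [16 x i8]
//   %p = getelementptr [16 x i8], [16 x i8]* %a, i64 0, i64 %i
//   %q = bitcast i8* %p to i32*
// GetUnderlyingObject(%q, DL) strips the bitcast and the GEP and returns the
// alloca %a, at which point the walk stops because an alloca can't be
// simplified further.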
void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
                                const DataLayout &DL, LoopInfo *LI,
                                unsigned MaxLookup) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObject(P, DL, MaxLookup);

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      // If this PHI changes the underlying object in every iteration of the
      // loop, don't look through it. Consider:
      //   int **A;
      //   for (i) {
      //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
      //     Curr = A[i];
      //     *Prev, *Curr;
      //   }
      //
      // Prev is tracking Curr one iteration behind so they refer to different
      // underlying objects.
      if (!LI || !LI->isLoopHeader(PN->getParent()) ||
          isSameUnderlyingObjectInLoop(PN, LI))
        for (Value *IncValue : PN->incoming_values())
          Worklist.push_back(IncValue);
      continue;
    }

    Objects.push_back(P);
  } while (!Worklist.empty());
}
/// This is the function that does the work of looking through basic
/// ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant, a multiplied value, or a phi, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
           !isa<PHINode>(U->getOperand(1))))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (true);
}
/// This is a wrapper around GetUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
/// It returns false if an unidentified object is found in GetUnderlyingObjects.
bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
                                          SmallVectorImpl<Value *> &Objects,
                                          const DataLayout &DL) {
  SmallPtrSet<const Value *, 16> Visited;
  SmallVector<const Value *, 4> Working(1, V);
  do {
    V = Working.pop_back_val();

    SmallVector<Value *, 4> Objs;
    GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);

    for (Value *V : Objs) {
      if (!Visited.insert(V).second)
        continue;
      if (Operator::getOpcode(V) == Instruction::IntToPtr) {
        const Value *O =
          getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
        if (O->getType()->isPointerTy()) {
          Working.push_back(O);
          continue;
        }
      }
      // If GetUnderlyingObjects fails to find an identifiable object,
      // getUnderlyingObjectsForCodeGen also fails for safety.
      if (!isIdentifiedObject(V)) {
        Objects.clear();
        return false;
      }
      Objects.push_back(const_cast<Value *>(V));
    }
  } while (!Working.empty());
  return true;
}
/// Return true if the only users of this pointer are lifetime markers.
bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
  for (const User *U : V->users()) {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II) return false;

    if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
        II->getIntrinsicID() != Intrinsic::lifetime_end)
      return false;
  }
  return true;
}
bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                        const Instruction *CtxI,
                                        const DominatorTree *DT) {
  const Operator *Inst = dyn_cast<Operator>(V);
  if (!Inst)
    return false;

  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
    if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
      if (C->canTrap())
        return false;

  switch (Inst->getOpcode()) {
  default:
    return true;
  case Instruction::UDiv:
  case Instruction::URem: {
    // x / y is undefined if y == 0.
    const APInt *V;
    if (match(Inst->getOperand(1), m_APInt(V)))
      return *V != 0;
    return false;
  }
  case Instruction::SDiv:
  case Instruction::SRem: {
    // x / y is undefined if y == 0 or x == INT_MIN and y == -1.
    const APInt *Numerator, *Denominator;
    if (!match(Inst->getOperand(1), m_APInt(Denominator)))
      return false;
    // We cannot hoist this division if the denominator is 0.
    if (*Denominator == 0)
      return false;
    // It's safe to hoist if the denominator is not 0 or -1.
    if (*Denominator != -1)
      return true;
    // At this point we know that the denominator is -1. It is safe to hoist as
    // long as we know that the numerator is not INT_MIN.
    if (match(Inst->getOperand(0), m_APInt(Numerator)))
      return !Numerator->isMinSignedValue();
    // The numerator *might* be MinSignedValue.
    return false;
  }
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(Inst);
    if (!LI->isUnordered() ||
        // Speculative load may create a race that did not exist in the source.
        LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
        // Speculative load may load data from dirty regions.
        LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
        LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
      return false;
    const DataLayout &DL = LI->getModule()->getDataLayout();
    return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
                                              LI->getAlignment(), DL, CtxI, DT);
  }
  case Instruction::Call: {
    auto *CI = cast<const CallInst>(Inst);
    const Function *Callee = CI->getCalledFunction();

    // The called function could have undefined behavior or side-effects, even
    // if marked readnone nounwind.
    return Callee && Callee->isSpeculatable();
  }
  case Instruction::VAArg:
  case Instruction::Alloca:
  case Instruction::Invoke:
  case Instruction::PHI:
  case Instruction::Store:
  case Instruction::Ret:
  case Instruction::Br:
  case Instruction::IndirectBr:
  case Instruction::Switch:
  case Instruction::Unreachable:
  case Instruction::Fence:
  case Instruction::AtomicRMW:
  case Instruction::AtomicCmpXchg:
  case Instruction::LandingPad:
  case Instruction::Resume:
  case Instruction::CatchSwitch:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
  case Instruction::CleanupPad:
  case Instruction::CleanupRet:
    return false; // Misc instructions which have effects
  }
}
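// Example (illustrative): "udiv i32 %x, 7" is safe to hoist out of a guarded
// block, because the constant denominator is known non-zero, whereas
// "udiv i32 %x, %y" is not: %y might be 0 at the speculated location and the
// division would trap.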
bool llvm::mayBeMemoryDependent(const Instruction &I) {
  return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
}
OverflowResult llvm::computeOverflowForUnsignedMul(
    const Value *LHS, const Value *RHS, const DataLayout &DL,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    bool UseInstrInfo) {
  // Multiplying n * m significant bits yields a result of n + m significant
  // bits. If the total number of significant bits does not exceed the
  // result bit width (minus 1), there is no overflow.
  // This means if we have enough leading zero bits in the operands
  // we can guarantee that the result does not overflow.
  // Ref: "Hacker's Delight" by Henry Warren
  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
  KnownBits LHSKnown(BitWidth);
  KnownBits RHSKnown(BitWidth);
  computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr,
                   UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr,
                   UseInstrInfo);
  // Note that underestimating the number of zero bits gives a more
  // conservative answer.
  unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
                      RHSKnown.countMinLeadingZeros();
  // First handle the easy case: if we have enough zero bits there's
  // definitely no overflow.
  if (ZeroBits >= BitWidth)
    return OverflowResult::NeverOverflows;

  // Get the largest possible values for each operand.
  APInt LHSMax = ~LHSKnown.Zero;
  APInt RHSMax = ~RHSKnown.Zero;

  // We know the multiply operation doesn't overflow if the maximum values for
  // each operand will not overflow after we multiply them together.
  bool MaxOverflow;
  (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
  if (!MaxOverflow)
    return OverflowResult::NeverOverflows;

  // We know it always overflows if multiplying the smallest possible values
  // for the operands also results in overflow.
  bool MinOverflow;
  (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
  if (MinOverflow)
    return OverflowResult::AlwaysOverflows;

  return OverflowResult::MayOverflow;
}
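// Worked example (illustrative): for i8 operands, if LHSKnown guarantees 5
// leading zeros (LHS <= 7) and RHSKnown guarantees 4 (RHS <= 15), then
// ZeroBits = 9 >= 8 and the unsigned product (at most 105) can never
// overflow. Conversely, if both One masks include bit 4 (each operand is at
// least 16), the product of the minimum values is already 256 > 255, so the
// multiply always overflows.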
OverflowResult
llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                  const DataLayout &DL, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  // Multiplying n * m significant bits yields a result of n + m significant
  // bits. If the total number of significant bits does not exceed the
  // result bit width (minus 1), there is no overflow.
  // This means if we have enough leading sign bits in the operands
  // we can guarantee that the result does not overflow.
  // Ref: "Hacker's Delight" by Henry Warren
  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();

  // Note that underestimating the number of sign bits gives a more
  // conservative answer.
  unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
                      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);

  // First handle the easy case: if we have enough sign bits there's
  // definitely no overflow.
  if (SignBits > BitWidth + 1)
    return OverflowResult::NeverOverflows;

  // There are two ambiguous cases where there can be no overflow:
  //   SignBits == BitWidth + 1    and
  //   SignBits == BitWidth
  // The second case is difficult to check, therefore we only handle the
  // first case.
  if (SignBits == BitWidth + 1) {
    // It overflows only when both arguments are negative and the true
    // product is exactly the minimum negative number.
    // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
    // For simplicity we just check if at least one side is not negative.
    KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                          nullptr, UseInstrInfo);
    KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                          nullptr, UseInstrInfo);
    if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
      return OverflowResult::NeverOverflows;
  }
  return OverflowResult::MayOverflow;
}
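// Worked example (illustrative): two i32 operands that are each sign
// extensions from i8 carry at least 25 sign bits apiece, so SignBits >= 50 >
// 33 == BitWidth + 1 and the signed product can never overflow: it fits in
// 16 significant bits plus sign.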
OverflowResult llvm::computeOverflowForUnsignedAdd(
    const Value *LHS, const Value *RHS, const DataLayout &DL,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    bool UseInstrInfo) {
  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                        nullptr, UseInstrInfo);
  if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
    KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                          nullptr, UseInstrInfo);

    if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
      // The sign bit is set in both cases: this MUST overflow.
      return OverflowResult::AlwaysOverflows;
    }

    if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
      // The sign bit is clear in both cases: this CANNOT overflow.
      return OverflowResult::NeverOverflows;
    }
  }

  return OverflowResult::MayOverflow;
}
/// Return true if we can prove that adding the two values of the
/// knownbits will not overflow.
/// Otherwise return false.
static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
                                    const KnownBits &RHSKnown) {
  // Addition of two 2's complement numbers having opposite signs will never
  // overflow.
  if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
      (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
    return true;

  // If either of the values is known to be non-negative, adding them can only
  // overflow if the second is also non-negative, so we can assume that.
  // Two non-negative numbers will only overflow if there is a carry to the
  // sign bit, so we can check if even when the values are as big as possible
  // there is no overflow to the sign bit.
  if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
    APInt MaxLHS = ~LHSKnown.Zero;
    MaxLHS.clearSignBit();
    APInt MaxRHS = ~RHSKnown.Zero;
    MaxRHS.clearSignBit();
    APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
    return Result.isSignBitClear();
  }

  // If either of the values is known to be negative, adding them can only
  // overflow if the second is also negative, so we can assume that.
  // Two negative numbers will only overflow if there is no carry to the sign
  // bit, so we can check if even when the values are as small as possible
  // there is overflow to the sign bit.
  if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
    APInt MinLHS = LHSKnown.One;
    MinLHS.clearSignBit();
    APInt MinRHS = RHSKnown.One;
    MinRHS.clearSignBit();
    APInt Result = std::move(MinLHS) + std::move(MinRHS);
    return Result.isSignBitSet();
  }

  // If we reached here it means that we know nothing about the sign bits.
  // In this case we can't know if there will be an overflow, since by
  // changing the sign bits any two values can be made to overflow.
  return false;
}
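// Worked example (illustrative): for i8 values that are both known
// non-negative with MaxLHS == MaxRHS == 0x3f, the largest possible sum is
// 0x7e, whose sign bit is clear, so no carry can ripple into the sign
// position and the signed add provably cannot overflow.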
static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                                  const Value *RHS,
                                                  const AddOperator *Add,
                                                  const DataLayout &DL,
                                                  AssumptionCache *AC,
                                                  const Instruction *CxtI,
                                                  const DominatorTree *DT) {
  if (Add && Add->hasNoSignedWrap()) {
    return OverflowResult::NeverOverflows;
  }

  // If LHS and RHS each have at least two sign bits, the addition will look
  // like
  //
  //   XX..... +
  //   YY.....
  //
  // If the carry into the most significant position is 0, X and Y can't both
  // be 1 and therefore the carry out of the addition is also 0.
  //
  // If the carry into the most significant position is 1, X and Y can't both
  // be 0 and therefore the carry out of the addition is also 1.
  //
  // Since the carry into the most significant position is always equal to
  // the carry out of the addition, there is no signed overflow.
  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
    return OverflowResult::NeverOverflows;

  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);

  if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
    return OverflowResult::NeverOverflows;

  // The remaining code needs Add to be available. Early return if not so.
  if (!Add)
    return OverflowResult::MayOverflow;

  // If the sign of Add is the same as at least one of the operands, this add
  // CANNOT overflow. This is particularly useful when the sum is
  // @llvm.assume'ed non-negative rather than proved so from analyzing its
  // operands.
  bool LHSOrRHSKnownNonNegative =
      (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
  bool LHSOrRHSKnownNegative =
      (LHSKnown.isNegative() || RHSKnown.isNegative());
  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
    KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
    if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
        (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
      return OverflowResult::NeverOverflows;
    }
  }

  return OverflowResult::MayOverflow;
}
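// Example (illustrative): if %a is known non-negative and the sum
// %s = add i32 %a, %b is @llvm.assume'ed non-negative as well, the final
// check above concludes NeverOverflows even though neither operand alone
// settles the question: a non-negative result with a non-negative operand
// rules out signed wraparound.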
OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
                                                   const Value *RHS,
                                                   const DataLayout &DL,
                                                   AssumptionCache *AC,
                                                   const Instruction *CxtI,
                                                   const DominatorTree *DT) {
  // If the LHS is negative and the RHS is non-negative, no unsigned wrap.
  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
  if (LHSKnown.isNegative() && RHSKnown.isNonNegative())
    return OverflowResult::NeverOverflows;

  return OverflowResult::MayOverflow;
}

OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  // If LHS and RHS each have at least two sign bits, the subtraction
  // cannot overflow.
  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
    return OverflowResult::NeverOverflows;

  KnownBits LHSKnown = computeKnownBits(LHS, DL, 0, AC, CxtI, DT);
  KnownBits RHSKnown = computeKnownBits(RHS, DL, 0, AC, CxtI, DT);

  // Subtraction of two 2's complement numbers having identical signs will
  // never overflow.
  if ((LHSKnown.isNegative() && RHSKnown.isNegative()) ||
      (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()))
    return OverflowResult::NeverOverflows;

  // TODO: implement logic similar to checkRippleForAdd
  return OverflowResult::MayOverflow;
}
bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
                                     const DominatorTree &DT) {
#ifndef NDEBUG
  auto IID = II->getIntrinsicID();
  assert((IID == Intrinsic::sadd_with_overflow ||
          IID == Intrinsic::uadd_with_overflow ||
          IID == Intrinsic::ssub_with_overflow ||
          IID == Intrinsic::usub_with_overflow ||
          IID == Intrinsic::smul_with_overflow ||
          IID == Intrinsic::umul_with_overflow) &&
         "Not an overflow intrinsic!");
#endif

  SmallVector<const BranchInst *, 2> GuardingBranches;
  SmallVector<const ExtractValueInst *, 2> Results;

  for (const User *U : II->users()) {
    if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
      assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");

      if (EVI->getIndices()[0] == 0)
        Results.push_back(EVI);
      else {
        assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");

        for (const auto *U : EVI->users())
          if (const auto *B = dyn_cast<BranchInst>(U)) {
            assert(B->isConditional() && "How else is it using an i1?");
            GuardingBranches.push_back(B);
          }
      }
    } else {
      // We are using the aggregate directly in a way we don't want to analyze
      // here (storing it to a global, say).
      return false;
    }
  }

  auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
    BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
    if (!NoWrapEdge.isSingleEdge())
      return false;

    // Check if all users of the add are provably no-wrap.
    for (const auto *Result : Results) {
      // If the extractvalue itself is not executed on overflow, then we don't
      // need to check each use separately, since domination is transitive.
      if (DT.dominates(NoWrapEdge, Result->getParent()))
        continue;

      for (auto &RU : Result->uses())
        if (!DT.dominates(NoWrapEdge, RU))
          return false;
    }

    return true;
  };

  return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
}
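// Example (illustrative; names invented): for the usual guard pattern
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
//   %v = extractvalue { i32, i1 } %s, 0
//   %o = extractvalue { i32, i1 } %s, 1
//   br i1 %o, label %trap, label %cont
// every use of %v inside %cont is dominated by the no-overflow edge to
// %cont, so the lambda above succeeds and the add may be treated as nsw.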
OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
                                       Add, DL, AC, CxtI, DT);
}

OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
}
bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
  // A memory operation returns normally if it isn't volatile. A volatile
  // operation is allowed to trap.
  //
  // An atomic operation isn't guaranteed to return in a reasonable amount of
  // time because it's possible for another thread to interfere with it for an
  // arbitrary length of time, but programs aren't allowed to rely on that.
  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
    return !LI->isVolatile();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I))
    return !SI->isVolatile();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
    return !CXI->isVolatile();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
    return !RMWI->isVolatile();
  if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
    return !MII->isVolatile();

  // If there is no successor, then execution can't transfer to it.
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
    return !CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
    return !CatchSwitch->unwindsToCaller();
  if (isa<ResumeInst>(I))
    return false;
  if (isa<ReturnInst>(I))
    return false;
  if (isa<UnreachableInst>(I))
    return false;

  // Calls can throw, or contain an infinite loop, or kill the process.
  if (auto CS = ImmutableCallSite(I)) {
    // Call sites that throw have implicit non-local control flow.
    if (!CS.doesNotThrow())
      return false;

    // Non-throwing call sites can loop infinitely, call exit/pthread_exit
    // etc. and thus not return. However, LLVM already assumes that
    //
    //  - Thread exiting actions are modeled as writes to memory invisible to
    //    the program.
    //
    //  - Loops that don't have side effects (side effects are volatile/atomic
    //    stores and IO) always terminate (see http://llvm.org/PR965).
    //    Furthermore IO itself is also modeled as writes to memory invisible
    //    to the program.
    //
    // We rely on those assumptions here, and use the memory effects of the
    // call target as a proxy for checking that it always returns.
    //
    // FIXME: This isn't aggressive enough; a call which only writes to a
    // global is guaranteed to return.
    return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
           match(I, m_Intrinsic<Intrinsic::assume>()) ||
           match(I, m_Intrinsic<Intrinsic::sideeffect>());
  }

  // Other instructions return normally.
  return true;
}
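// Example (illustrative): a call marked "nounwind readnone" is treated as
// always transferring execution to its successor, while a bare
// "call void @f()" with no attributes is not, since @f could throw, loop
// forever, or terminate the process.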
bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
  // TODO: This is slightly conservative for invoke instructions since exiting
  // via an exception *is* normal control flow for them.
  for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
    if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
      return false;
  return true;
}
bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                                  const Loop *L) {
  // The loop header is guaranteed to be executed for every iteration.
  //
  // FIXME: Relax this constraint to cover all basic blocks that are
  // guaranteed to be executed at every iteration.
  if (I->getParent() != L->getHeader()) return false;

  for (const Instruction &LI : *L->getHeader()) {
    if (&LI == I) return true;
    if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
  }
  llvm_unreachable("Instruction not contained in its own parent basic block.");
}
bool llvm::propagatesFullPoison(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Xor:
  case Instruction::Trunc:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::GetElementPtr:
    // These operations all propagate poison unconditionally. Note that poison
    // is not any particular value, so xor or subtraction of poison with
    // itself still yields poison, not zero.
    return true;

  case Instruction::AShr:
  case Instruction::SExt:
    // For these operations, one bit of the input is replicated across
    // multiple output bits. A replicated poison bit is still poison.
    return true;

  case Instruction::ICmp:
    // Comparing poison with any value yields poison. This is why, for
    // instance, x s< (x +nsw 1) can be folded to true.
    return true;

  default:
    return false;
  }
}
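// Example (illustrative): if %p is full poison, then "%q = add i64 %p, 1"
// and "%r = icmp slt i64 %q, %n" are poison as well, because add and icmp
// are in the list above; a select such as "select i1 %c, i64 %p, i64 0" is
// deliberately absent, since a select may block poison from its unchosen
// operand.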
const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Store:
      return cast<StoreInst>(I)->getPointerOperand();

    case Instruction::Load:
      return cast<LoadInst>(I)->getPointerOperand();

    case Instruction::AtomicCmpXchg:
      return cast<AtomicCmpXchgInst>(I)->getPointerOperand();

    case Instruction::AtomicRMW:
      return cast<AtomicRMWInst>(I)->getPointerOperand();

    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      return I->getOperand(1);

    default:
      return nullptr;
  }
}
bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
  // We currently only look for uses of poison values within the same basic
  // block, as that makes it easier to guarantee that the uses will be
  // executed given that PoisonI is executed.
  //
  // FIXME: Expand this to consider uses beyond the same basic block. To do
  // this, look out for the distinction between post-dominance and strong
  // post-dominance.
  const BasicBlock *BB = PoisonI->getParent();

  // Set of instructions that we have proved will yield poison if PoisonI
  // does.
  SmallSet<const Value *, 16> YieldsPoison;
  SmallSet<const BasicBlock *, 4> Visited;
  YieldsPoison.insert(PoisonI);
  Visited.insert(PoisonI->getParent());

  BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();

  unsigned Iter = 0;
  while (Iter++ < MaxDepth) {
    for (auto &I : make_range(Begin, End)) {
      if (&I != PoisonI) {
        const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
        if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
          return true;
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          return false;
      }

      // Mark poison that propagates from I through uses of I.
      if (YieldsPoison.count(&I)) {
        for (const User *User : I.users()) {
          const Instruction *UserI = cast<Instruction>(User);
          if (propagatesFullPoison(UserI))
            YieldsPoison.insert(User);
        }
      }
    }

    if (auto *NextBB = BB->getSingleSuccessor()) {
      if (Visited.insert(NextBB).second) {
        BB = NextBB;
        Begin = BB->getFirstNonPHI()->getIterator();
        End = BB->end();
        continue;
      }
    }

    break;
  }
  return false;
}
static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
  if (FMF.noNaNs())
    return true;

  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isNaN();

  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
    if (!C->getElementType()->isFloatingPointTy())
      return false;
    for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
      if (C->getElementAsAPFloat(I).isNaN())
        return false;
    }
    return true;
  }

  return false;
}
static bool isKnownNonZero(const Value *V) {
  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isZero();

  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
    if (!C->getElementType()->isFloatingPointTy())
      return false;
    for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
      if (C->getElementAsAPFloat(I).isZero())
        return false;
    }
    return true;
  }

  return false;
}
/// Match clamp pattern for float types without care about NaNs or signed
/// zeros. Given non-min/max outer cmp/select from the clamp pattern this
/// function recognizes if it can be substituted by a "canonical" min/max
/// pattern.
static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TrueVal, Value *FalseVal,
                                               Value *&LHS, Value *&RHS) {
  // Try to match
  //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
  //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
  // and return description of the outer Max/Min.

  // First, check if select has inverse order:
  if (CmpRHS == FalseVal) {
    std::swap(TrueVal, FalseVal);
    Pred = CmpInst::getInversePredicate(Pred);
  }

  // Assume success now. If there's no match, callers should not use these
  // anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  const APFloat *FC1;
  if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
    return {SPF_UNKNOWN, SPNB_NA, false};

  const APFloat *FC2;
  switch (Pred) {
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_ULT:
  case CmpInst::FCMP_ULE:
    if (match(FalseVal,
              m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
                          m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
        FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
      return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
    break;
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_OGE:
  case CmpInst::FCMP_UGT:
  case CmpInst::FCMP_UGE:
    if (match(FalseVal,
              m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
                          m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
        FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
      return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
    break;
  default:
    break;
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}
/// Recognize variations of:
///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
                                      Value *CmpLHS, Value *CmpRHS,
                                      Value *TrueVal, Value *FalseVal) {
  // Swap the select operands and predicate to match the patterns below.
  if (CmpRHS != TrueVal) {
    Pred = ICmpInst::getSwappedPredicate(Pred);
    std::swap(TrueVal, FalseVal);
  }
  const APInt *C1;
  if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
    const APInt *C2;

    // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
    if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
      return {SPF_SMAX, SPNB_NA, false};

    // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
    if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
      return {SPF_SMIN, SPNB_NA, false};

    // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
    if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
      return {SPF_UMAX, SPNB_NA, false};

    // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
    if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
      return {SPF_UMIN, SPNB_NA, false};
  }
  return {SPF_UNKNOWN, SPNB_NA, false};
}
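// Example (illustrative; %x is an invented name): IR like
//   %min = select i1 (icmp slt i32 %x, 100), i32 %x, i32 100
//   %r   = select i1 (icmp slt i32 %x, 10), i32 10, i32 %min
// is the clamp CLAMP(%x, 10, 100). For the outer select, C1 == 10, C2 == 100
// and Pred == ICMP_SLT, so the first pattern above reports it as SPF_SMAX
// over the inner SMIN.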
/// Recognize variations of:
///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TVal, Value *FVal,
                                               unsigned Depth) {
  // TODO: Allow FP min/max with nnan/nsz.
  assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");

  Value *A, *B;
  SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
  if (!SelectPatternResult::isMinOrMax(L.Flavor))
    return {SPF_UNKNOWN, SPNB_NA, false};

  Value *C, *D;
  SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
  if (L.Flavor != R.Flavor)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // We have something like: x Pred y ? min(a, b) : min(c, d).
  // Try to match the compare to the min/max operations of the select operands.
  // First, make sure we have the right compare predicate.
  switch (L.Flavor) {
  case SPF_SMIN:
    if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_SMAX:
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_UMIN:
    if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_UMAX:
    if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  default:
    return {SPF_UNKNOWN, SPNB_NA, false};
  }

  // If there is a common operand in the already matched min/max and the other
  // min/max operands match the compare operands (either directly or inverted),
  // then this is min/max of the same flavor.

  // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
  // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
  if (D == B) {
    if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
                                         match(A, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
  // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
  if (C == B) {
    if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
                                         match(A, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
  // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
  if (D == A) {
    if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
                                         match(B, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
  // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
  if (C == A) {
    if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
                                         match(B, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}
/// Match non-obvious integer minimum and maximum sequences.
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
                                       Value *CmpLHS, Value *CmpRHS,
                                       Value *TrueVal, Value *FalseVal,
                                       Value *&LHS, Value *&RHS,
                                       unsigned Depth) {
  // Assume success. If there's no match, callers should not use these anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
    return SPR;

  SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
    return SPR;

  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
  // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
  if (match(TrueVal, m_Zero()) &&
      match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
  // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
  if (match(FalseVal, m_Zero()) &&
      match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  const APInt *C1;
  if (!match(CmpRHS, m_APInt(C1)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  // An unsigned min/max can be written with a signed compare.
  const APInt *C2;
  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
      (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
    // Is the sign bit set?
    // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
    // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
    if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
        C2->isMaxSignedValue())
      return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};

    // Is the sign bit clear?
    // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
    // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
    if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
        C2->isMinSignedValue())
      return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
  }

  // Look through 'not' ops to find disguised signed min/max.
  // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
  // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
  if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
      match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
  // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
  if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
      match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  return {SPF_UNKNOWN, SPNB_NA, false};
}
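// Example (illustrative): "select (icmp slt i32 %x, 0), i32 %x, i32
// 2147483647" has C1 == 0 and C2 == INT_MAX, so the sign-bit case above
// classifies it as UMAX(%x, INT_MAX) even though it was written with a
// signed compare.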
bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
  assert(X && Y && "Invalid operand");

  // X = sub (0, Y) || X = sub nsw (0, Y)
  if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
      (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
    return true;

  // Y = sub (0, X) || Y = sub nsw (0, X)
  if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
      (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
    return true;

  // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
  Value *A, *B;
  return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
                       match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
         (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
                      match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
}
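// Example (illustrative): "%x = sub i32 0, %y" makes isKnownNegation(%x, %y)
// succeed. With NeedNSW the sub must carry the nsw flag, since negating
// INT_MIN without nsw wraps back to INT_MIN rather than producing a true
// negation.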
static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
                                              FastMathFlags FMF,
                                              Value *CmpLHS, Value *CmpRHS,
                                              Value *TrueVal, Value *FalseVal,
                                              Value *&LHS, Value *&RHS,
                                              unsigned Depth) {
  LHS = CmpLHS;
  RHS = CmpRHS;

  // Signed zero may return inconsistent results between implementations.
  //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
  //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
  // Therefore, we behave conservatively and only proceed if at least one of the
  // operands is known to not be zero or if we don't care about signed zero.
  switch (Pred) {
  default: break;
  // FIXME: Include OGT/OLT/UGT/ULT.
  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
    if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
        !isKnownNonZero(CmpRHS))
      return {SPF_UNKNOWN, SPNB_NA, false};
  }

  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
  bool Ordered = false;

  // When given one NaN and one non-NaN input:
  //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
  //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
  //     ordered comparison fails), which could be NaN or non-NaN.
  // so here we discover exactly what NaN behavior is required/accepted.
  if (CmpInst::isFPPredicate(Pred)) {
    bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
    bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);

    if (LHSSafe && RHSSafe) {
      // Both operands are known non-NaN.
      NaNBehavior = SPNB_RETURNS_ANY;
    } else if (CmpInst::isOrdered(Pred)) {
      // An ordered comparison will return false when given a NaN, so it
      // returns the RHS.
      Ordered = true;
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
        NaNBehavior = SPNB_RETURNS_NAN;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_OTHER;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    } else {
      Ordered = false;
      // An unordered comparison will return true when given a NaN, so it
      // returns the LHS.
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
        NaNBehavior = SPNB_RETURNS_OTHER;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_NAN;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    }
  }

  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
    std::swap(CmpLHS, CmpRHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
    if (NaNBehavior == SPNB_RETURNS_NAN)
      NaNBehavior = SPNB_RETURNS_OTHER;
    else if (NaNBehavior == SPNB_RETURNS_OTHER)
      NaNBehavior = SPNB_RETURNS_NAN;
    Ordered = !Ordered;
  }

  // ([if]cmp X, Y) ? X : Y
  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
    switch (Pred) {
    default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
    case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
    case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
    case ICmpInst::ICMP_ULT:
    case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
    case ICmpInst::ICMP_SLT:
    case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
    case FCmpInst::FCMP_UGT:
    case FCmpInst::FCMP_UGE:
    case FCmpInst::FCMP_OGT:
    case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
    case FCmpInst::FCMP_ULT:
    case FCmpInst::FCMP_ULE:
    case FCmpInst::FCMP_OLT:
    case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
    }
  }

  if (isKnownNegation(TrueVal, FalseVal)) {
    // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
    // match against either LHS or sext(LHS).
    auto MaybeSExtCmpLHS =
        m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
    auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
    auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
    if (match(TrueVal, MaybeSExtCmpLHS)) {
      // Set the return values. If the compare uses the negated value (-X >s 0),
      // swap the return values because the negated value is always 'RHS'.
      LHS = TrueVal;
      RHS = FalseVal;
      if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
        std::swap(LHS, RHS);

      // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
      // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
        return {SPF_ABS, SPNB_NA, false};

      // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
      // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
      if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
        return {SPF_NABS, SPNB_NA, false};
    }
    else if (match(FalseVal, MaybeSExtCmpLHS)) {
      // Set the return values. If the compare uses the negated value (-X >s 0),
      // swap the return values because the negated value is always 'RHS'.
      LHS = FalseVal;
      RHS = TrueVal;
      if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
        std::swap(LHS, RHS);

      // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
      // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
      if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
        return {SPF_NABS, SPNB_NA, false};

      // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
      // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
        return {SPF_ABS, SPNB_NA, false};
    }
  }

  if (CmpInst::isIntPredicate(Pred))
    return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);

  // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
  // may return either -0.0 or 0.0, so the fcmp/select pair has stricter
  // semantics than minNum. Be conservative in such case.
  if (NaNBehavior != SPNB_RETURNS_ANY ||
      (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
       !isKnownNonZero(CmpRHS)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
}
4871 /// Helps to match a select pattern in case of a type mismatch.
4873 /// The function processes the case when type of true and false values of a
4874 /// select instruction differs from type of the cmp instruction operands because
4875 /// of a cast instruction. The function checks if it is legal to move the cast
4876 /// operation after "select". If yes, it returns the new second value of
4877 /// "select" (with the assumption that cast is moved):
4878 /// 1. As operand of cast instruction when both values of "select" are same cast
4880 /// 2. As restored constant (by applying reverse cast operation) when the first
4881 /// value of the "select" is a cast operation and the second value is a
4883 /// NOTE: We return only the new second value because the first value could be
4884 /// accessed as operand of cast instruction.
static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {
  auto *Cast1 = dyn_cast<CastInst>(V1);
  if (!Cast1)
    return nullptr;

  *CastOp = Cast1->getOpcode();
  Type *SrcTy = Cast1->getSrcTy();
  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, look through V1.
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;

  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    Constant *CmpConst;
    if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
        CmpConst->getType() == SrcTy) {
      // Here we have the following case:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %tr = trunc iN %x to iK
      //   %narrowsel = select i1 %cond, iK %tr, iK C
      //
      // We can always move trunc after select operation:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %widesel = select i1 %cond, iN %x, iN CmpConst
      //   %tr = trunc iN %widesel to iK
      //
      // Note that C could be extended in any way because we don't care about
      // upper bits after truncation. It cannot be an abs pattern, because that
      // would look like:
      //
      //   select i1 %cond, x, -x.
      //
      // So only a min/max pattern can be matched. Such a match requires the
      // widened C to equal CmpConst. That is why we set the widened C to
      // CmpConst here; the condition trunc(CmpConst) == C is checked below.
      CastedTo = CmpConst;
    } else {
      CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    }
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
  Constant *CastedBack =
      ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
  if (CastedBack != C)
    return nullptr;

  return CastedTo;
}
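// Example of the information-loss check in lookThroughCast (illustrative):
// for V1 = zext i8 %x to i32 with an unsigned compare and C = i32 300,
// truncating 300 to i8 yields 44, and zext'ing 44 back to i32 yields
// 44 != 300, so nullptr is returned instead of a miscompiling fold.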
SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp,
                                             unsigned Depth) {
  if (Depth >= MaxDepth)
    return {SPF_UNKNOWN, SPNB_NA, false};

  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS, Depth);
    }
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS, Depth);
    }
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS, Depth);
}
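// Typical result of matchSelectPattern (illustrative IR):
//   %cmp = icmp sgt i32 %a, %b
//   %sel = select i1 %cmp, i32 %a, i32 %b
// yields {SPF_SMAX, SPNB_NA, false} with LHS = %a and RHS = %b.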
CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
  if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
  if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
  if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
  if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
  if (SPF == SPF_FMINNUM)
    return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
  if (SPF == SPF_FMAXNUM)
    return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
  llvm_unreachable("unhandled!");
}

SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
  if (SPF == SPF_SMIN) return SPF_SMAX;
  if (SPF == SPF_UMIN) return SPF_UMAX;
  if (SPF == SPF_SMAX) return SPF_SMIN;
  if (SPF == SPF_UMAX) return SPF_UMIN;
  llvm_unreachable("unhandled!");
}

CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
  return getMinMaxPred(getInverseMinMaxFlavor(SPF));
}
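// For example, getInverseMinMaxPred(SPF_SMIN) returns ICmpInst::ICMP_SGT:
// the inverse flavor of a signed min is a signed max, whose canonical
// predicate is 'sgt'.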
/// Return true if "icmp Pred LHS RHS" is always true.
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                            const Value *RHS, const DataLayout &DL,
                            unsigned Depth) {
  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        KnownBits Known(CA->getBitWidth());
        computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
                         /*CxtI*/ nullptr, /*DT*/ nullptr);
        if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}
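// Illustrative isTruePredicate facts: "x u<= (x +_{nuw} C)" holds for any C,
// and with A = X +_{nuw} 3 and B = X +_{nuw} 7, "A u<= B" holds because
// 3 u<= 7 and neither add can wrap.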
/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return None.
static Optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS, const Value *BRHS,
                      const DataLayout &DL, unsigned Depth) {
  switch (Pred) {
  default:
    return None;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
      return true;
    return None;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
      return true;
    return None;
  }
}
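// For instance, isImpliedCondOperands can conclude that "x u< y" implies
// "x u< (y +_{nuw} 1)": x u<= x holds trivially, and y u<= y +_{nuw} 1 holds
// by the NUW-add rule in isTruePredicate.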
/// Return true if the operands of the two compares match. IsSwappedOps is true
/// when the operands match, but are swapped.
static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
                          const Value *BLHS, const Value *BRHS,
                          bool &IsSwappedOps) {
  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
  return IsMatchingOps || IsSwappedOps;
}
/// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
/// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
/// BRHS" is false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
                                                    const Value *ALHS,
                                                    const Value *ARHS,
                                                    CmpInst::Predicate BPred,
                                                    const Value *BLHS,
                                                    const Value *BRHS,
                                                    bool IsSwappedOps) {
  // Canonicalize the operands so they're matching.
  if (IsSwappedOps) {
    std::swap(BLHS, BRHS);
    BPred = ICmpInst::getSwappedPredicate(BPred);
  }
  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
    return false;

  return None;
}
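// For example, with matching operands, "x s< y" implies "x s<= y" is true and
// implies "x s> y" is false; with swapped operands, the second compare is
// first canonicalized (operands and predicate swapped) before the predicate
// tables are consulted.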
/// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
/// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
/// C2" is false. Otherwise, return None if we can't infer anything.
static Optional<bool>
isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
                                 const ConstantInt *C1,
                                 CmpInst::Predicate BPred,
                                 const Value *BLHS, const ConstantInt *C2) {
  assert(ALHS == BLHS && "LHS operands must match.");
  ConstantRange DomCR =
      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
  ConstantRange CR =
      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;

  return None;
}
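// ConstantRange example for the checks above: "x s< 5" gives the exact region
// [INT_MIN, 5). Against "x s< 10" (allowed region [INT_MIN, 10)) the
// difference is empty, so the implication is true; against "x s> 20" the
// intersection is empty, so the implication is false.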
/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  Value *ALHS = LHS->getOperand(0);
  Value *ARHS = LHS->getOperand(1);
  // The rest of the logic assumes the LHS condition is true. If that's not the
  // case, invert the predicate to make it so.
  ICmpInst::Predicate APred =
      LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();

  Value *BLHS = RHS->getOperand(0);
  Value *BRHS = RHS->getOperand(1);
  ICmpInst::Predicate BPred = RHS->getPredicate();

  // Can we infer anything when the two compares have matching operands?
  bool IsSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // don't recurse into the operands.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands are
  // constants (not necessarily matching)?
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
            cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // don't recurse into the operands.
    return None;
  }

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);

  return None;
}
/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything. We expect the
/// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  // The LHS must be an 'or' or an 'and' instruction.
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or) &&
         "Expected LHS to be 'and' or 'or'.");

  assert(Depth <= MaxDepth && "Hit recursion limit");

  // If the result of an 'or' is false, then we know both legs of the 'or' are
  // false. Similarly, if the result of an 'and' is true, then we know both
  // legs of the 'and' are true.
  Value *ALHS, *ARHS;
  if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
      (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
    if (Optional<bool> Implication =
            isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    if (Optional<bool> Implication =
            isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    return None;
  }
  return None;
}
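// For example, if "(x s< 3) & (y u> 7)" is known true, both legs are true,
// so the left leg alone already implies "x s< 5"; dually, if an 'or' is
// known false, both of its legs are known false and either may supply the
// implication.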
Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool LHSIsTrue,
                                        unsigned Depth) {
  // Bail out when we hit the limit.
  if (Depth == MaxDepth)
    return None;

  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (LHS->getType() != RHS->getType())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");

  // LHS ==> RHS by definition
  if (LHS == RHS)
    return LHSIsTrue;

  // FIXME: Extend the code below to handle vectors.
  if (OpTy->isVectorTy())
    return None;

  assert(OpTy->isIntegerTy(1) && "implied by above");

  // Both LHS and RHS are icmps.
  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
  const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
  if (LHSCmp && RHSCmp)
    return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);

  // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to be
  // an icmp. FIXME: Add support for and/or on the RHS.
  const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
  if (LHSBO && RHSCmp) {
    if ((LHSBO->getOpcode() == Instruction::And ||
         LHSBO->getOpcode() == Instruction::Or))
      return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);