1 //===- InstCombineCompares.cpp --------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the visitICmp and visitFCmp functions.
12 //===----------------------------------------------------------------------===//
14 #include "InstCombine.h"
15 #include "llvm/IntrinsicInst.h"
16 #include "llvm/Analysis/InstructionSimplify.h"
17 #include "llvm/Analysis/MemoryBuiltins.h"
18 #include "llvm/Target/TargetData.h"
19 #include "llvm/Support/ConstantRange.h"
20 #include "llvm/Support/GetElementPtrTypeIterator.h"
21 #include "llvm/Support/PatternMatch.h"
23 using namespace PatternMatch
;
25 static ConstantInt
*getOne(Constant
*C
) {
26 return ConstantInt::get(cast
<IntegerType
>(C
->getType()), 1);
29 /// AddOne - Add one to a ConstantInt
30 static Constant
*AddOne(Constant
*C
) {
31 return ConstantExpr::getAdd(C
, ConstantInt::get(C
->getType(), 1));
33 /// SubOne - Subtract one from a ConstantInt
34 static Constant
*SubOne(Constant
*C
) {
35 return ConstantExpr::getSub(C
, ConstantInt::get(C
->getType(), 1));
38 static ConstantInt
*ExtractElement(Constant
*V
, Constant
*Idx
) {
39 return cast
<ConstantInt
>(ConstantExpr::getExtractElement(V
, Idx
));
42 static bool HasAddOverflow(ConstantInt
*Result
,
43 ConstantInt
*In1
, ConstantInt
*In2
,
46 if (In2
->getValue().isNegative())
47 return Result
->getValue().sgt(In1
->getValue());
49 return Result
->getValue().slt(In1
->getValue());
51 return Result
->getValue().ult(In1
->getValue());
54 /// AddWithOverflow - Compute Result = In1+In2, returning true if the result
55 /// overflowed for this type.
56 static bool AddWithOverflow(Constant
*&Result
, Constant
*In1
,
57 Constant
*In2
, bool IsSigned
= false) {
58 Result
= ConstantExpr::getAdd(In1
, In2
);
60 if (const VectorType
*VTy
= dyn_cast
<VectorType
>(In1
->getType())) {
61 for (unsigned i
= 0, e
= VTy
->getNumElements(); i
!= e
; ++i
) {
62 Constant
*Idx
= ConstantInt::get(Type::getInt32Ty(In1
->getContext()), i
);
63 if (HasAddOverflow(ExtractElement(Result
, Idx
),
64 ExtractElement(In1
, Idx
),
65 ExtractElement(In2
, Idx
),
72 return HasAddOverflow(cast
<ConstantInt
>(Result
),
73 cast
<ConstantInt
>(In1
), cast
<ConstantInt
>(In2
),
77 static bool HasSubOverflow(ConstantInt
*Result
,
78 ConstantInt
*In1
, ConstantInt
*In2
,
81 if (In2
->getValue().isNegative())
82 return Result
->getValue().slt(In1
->getValue());
84 return Result
->getValue().sgt(In1
->getValue());
86 return Result
->getValue().ugt(In1
->getValue());
89 /// SubWithOverflow - Compute Result = In1-In2, returning true if the result
90 /// overflowed for this type.
91 static bool SubWithOverflow(Constant
*&Result
, Constant
*In1
,
92 Constant
*In2
, bool IsSigned
= false) {
93 Result
= ConstantExpr::getSub(In1
, In2
);
95 if (const VectorType
*VTy
= dyn_cast
<VectorType
>(In1
->getType())) {
96 for (unsigned i
= 0, e
= VTy
->getNumElements(); i
!= e
; ++i
) {
97 Constant
*Idx
= ConstantInt::get(Type::getInt32Ty(In1
->getContext()), i
);
98 if (HasSubOverflow(ExtractElement(Result
, Idx
),
99 ExtractElement(In1
, Idx
),
100 ExtractElement(In2
, Idx
),
107 return HasSubOverflow(cast
<ConstantInt
>(Result
),
108 cast
<ConstantInt
>(In1
), cast
<ConstantInt
>(In2
),
112 /// isSignBitCheck - Given an exploded icmp instruction, return true if the
113 /// comparison only checks the sign bit. If it only checks the sign bit, set
114 /// TrueIfSigned if the result of the comparison is true when the input value is
116 static bool isSignBitCheck(ICmpInst::Predicate pred
, ConstantInt
*RHS
,
117 bool &TrueIfSigned
) {
119 case ICmpInst::ICMP_SLT
: // True if LHS s< 0
121 return RHS
->isZero();
122 case ICmpInst::ICMP_SLE
: // True if LHS s<= RHS and RHS == -1
124 return RHS
->isAllOnesValue();
125 case ICmpInst::ICMP_SGT
: // True if LHS s> -1
126 TrueIfSigned
= false;
127 return RHS
->isAllOnesValue();
128 case ICmpInst::ICMP_UGT
:
129 // True if LHS u> RHS and RHS == high-bit-mask - 1
131 return RHS
->getValue() ==
132 APInt::getSignedMaxValue(RHS
->getType()->getPrimitiveSizeInBits());
133 case ICmpInst::ICMP_UGE
:
134 // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
136 return RHS
->getValue().isSignBit();
142 // isHighOnes - Return true if the constant is of the form 1+0+.
143 // This is the same as lowones(~X).
144 static bool isHighOnes(const ConstantInt
*CI
) {
145 return (~CI
->getValue() + 1).isPowerOf2();
148 /// ComputeSignedMinMaxValuesFromKnownBits - Given a signed integer type and a
149 /// set of known zero and one bits, compute the maximum and minimum values that
150 /// could have the specified known zero and known one bits, returning them in
152 static void ComputeSignedMinMaxValuesFromKnownBits(const APInt
& KnownZero
,
153 const APInt
& KnownOne
,
154 APInt
& Min
, APInt
& Max
) {
155 assert(KnownZero
.getBitWidth() == KnownOne
.getBitWidth() &&
156 KnownZero
.getBitWidth() == Min
.getBitWidth() &&
157 KnownZero
.getBitWidth() == Max
.getBitWidth() &&
158 "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
159 APInt UnknownBits
= ~(KnownZero
|KnownOne
);
161 // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
162 // bit if it is unknown.
164 Max
= KnownOne
|UnknownBits
;
166 if (UnknownBits
.isNegative()) { // Sign bit is unknown
167 Min
.setBit(Min
.getBitWidth()-1);
168 Max
.clearBit(Max
.getBitWidth()-1);
172 // ComputeUnsignedMinMaxValuesFromKnownBits - Given an unsigned integer type and
173 // a set of known zero and one bits, compute the maximum and minimum values that
174 // could have the specified known zero and known one bits, returning them in
176 static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt
&KnownZero
,
177 const APInt
&KnownOne
,
178 APInt
&Min
, APInt
&Max
) {
179 assert(KnownZero
.getBitWidth() == KnownOne
.getBitWidth() &&
180 KnownZero
.getBitWidth() == Min
.getBitWidth() &&
181 KnownZero
.getBitWidth() == Max
.getBitWidth() &&
182 "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
183 APInt UnknownBits
= ~(KnownZero
|KnownOne
);
185 // The minimum value is when the unknown bits are all zeros.
187 // The maximum value is when the unknown bits are all ones.
188 Max
= KnownOne
|UnknownBits
;
193 /// FoldCmpLoadFromIndexedGlobal - Called we see this pattern:
194 /// cmp pred (load (gep GV, ...)), cmpcst
195 /// where GV is a global variable with a constant initializer. Try to simplify
196 /// this into some simple computation that does not need the load. For example
197 /// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
199 /// If AndCst is non-null, then the loaded value is masked with that constant
200 /// before doing the comparison. This handles cases like "A[i]&4 == 0".
201 Instruction
*InstCombiner::
202 FoldCmpLoadFromIndexedGlobal(GetElementPtrInst
*GEP
, GlobalVariable
*GV
,
203 CmpInst
&ICI
, ConstantInt
*AndCst
) {
204 // We need TD information to know the pointer size unless this is inbounds.
205 if (!GEP
->isInBounds() && TD
== 0) return 0;
207 ConstantArray
*Init
= dyn_cast
<ConstantArray
>(GV
->getInitializer());
208 if (Init
== 0 || Init
->getNumOperands() > 1024) return 0;
210 // There are many forms of this optimization we can handle, for now, just do
211 // the simple index into a single-dimensional array.
213 // Require: GEP GV, 0, i {{, constant indices}}
214 if (GEP
->getNumOperands() < 3 ||
215 !isa
<ConstantInt
>(GEP
->getOperand(1)) ||
216 !cast
<ConstantInt
>(GEP
->getOperand(1))->isZero() ||
217 isa
<Constant
>(GEP
->getOperand(2)))
220 // Check that indices after the variable are constants and in-range for the
221 // type they index. Collect the indices. This is typically for arrays of
223 SmallVector
<unsigned, 4> LaterIndices
;
225 const Type
*EltTy
= cast
<ArrayType
>(Init
->getType())->getElementType();
226 for (unsigned i
= 3, e
= GEP
->getNumOperands(); i
!= e
; ++i
) {
227 ConstantInt
*Idx
= dyn_cast
<ConstantInt
>(GEP
->getOperand(i
));
228 if (Idx
== 0) return 0; // Variable index.
230 uint64_t IdxVal
= Idx
->getZExtValue();
231 if ((unsigned)IdxVal
!= IdxVal
) return 0; // Too large array index.
233 if (const StructType
*STy
= dyn_cast
<StructType
>(EltTy
))
234 EltTy
= STy
->getElementType(IdxVal
);
235 else if (const ArrayType
*ATy
= dyn_cast
<ArrayType
>(EltTy
)) {
236 if (IdxVal
>= ATy
->getNumElements()) return 0;
237 EltTy
= ATy
->getElementType();
239 return 0; // Unknown type.
242 LaterIndices
.push_back(IdxVal
);
245 enum { Overdefined
= -3, Undefined
= -2 };
247 // Variables for our state machines.
249 // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
250 // "i == 47 | i == 87", where 47 is the first index the condition is true for,
251 // and 87 is the second (and last) index. FirstTrueElement is -2 when
252 // undefined, otherwise set to the first true element. SecondTrueElement is
253 // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
254 int FirstTrueElement
= Undefined
, SecondTrueElement
= Undefined
;
256 // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
257 // form "i != 47 & i != 87". Same state transitions as for true elements.
258 int FirstFalseElement
= Undefined
, SecondFalseElement
= Undefined
;
260 /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
261 /// define a state machine that triggers for ranges of values that the index
262 /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
263 /// This is -2 when undefined, -3 when overdefined, and otherwise the last
264 /// index in the range (inclusive). We use -2 for undefined here because we
265 /// use relative comparisons and don't want 0-1 to match -1.
266 int TrueRangeEnd
= Undefined
, FalseRangeEnd
= Undefined
;
268 // MagicBitvector - This is a magic bitvector where we set a bit if the
269 // comparison is true for element 'i'. If there are 64 elements or less in
270 // the array, this will fully represent all the comparison results.
271 uint64_t MagicBitvector
= 0;
274 // Scan the array and see if one of our patterns matches.
275 Constant
*CompareRHS
= cast
<Constant
>(ICI
.getOperand(1));
276 for (unsigned i
= 0, e
= Init
->getNumOperands(); i
!= e
; ++i
) {
277 Constant
*Elt
= Init
->getOperand(i
);
279 // If this is indexing an array of structures, get the structure element.
280 if (!LaterIndices
.empty())
281 Elt
= ConstantExpr::getExtractValue(Elt
, LaterIndices
.data(),
282 LaterIndices
.size());
284 // If the element is masked, handle it.
285 if (AndCst
) Elt
= ConstantExpr::getAnd(Elt
, AndCst
);
287 // Find out if the comparison would be true or false for the i'th element.
288 Constant
*C
= ConstantFoldCompareInstOperands(ICI
.getPredicate(), Elt
,
290 // If the result is undef for this element, ignore it.
291 if (isa
<UndefValue
>(C
)) {
292 // Extend range state machines to cover this element in case there is an
293 // undef in the middle of the range.
294 if (TrueRangeEnd
== (int)i
-1)
296 if (FalseRangeEnd
== (int)i
-1)
301 // If we can't compute the result for any of the elements, we have to give
302 // up evaluating the entire conditional.
303 if (!isa
<ConstantInt
>(C
)) return 0;
305 // Otherwise, we know if the comparison is true or false for this element,
306 // update our state machines.
307 bool IsTrueForElt
= !cast
<ConstantInt
>(C
)->isZero();
309 // State machine for single/double/range index comparison.
311 // Update the TrueElement state machine.
312 if (FirstTrueElement
== Undefined
)
313 FirstTrueElement
= TrueRangeEnd
= i
; // First true element.
315 // Update double-compare state machine.
316 if (SecondTrueElement
== Undefined
)
317 SecondTrueElement
= i
;
319 SecondTrueElement
= Overdefined
;
321 // Update range state machine.
322 if (TrueRangeEnd
== (int)i
-1)
325 TrueRangeEnd
= Overdefined
;
328 // Update the FalseElement state machine.
329 if (FirstFalseElement
== Undefined
)
330 FirstFalseElement
= FalseRangeEnd
= i
; // First false element.
332 // Update double-compare state machine.
333 if (SecondFalseElement
== Undefined
)
334 SecondFalseElement
= i
;
336 SecondFalseElement
= Overdefined
;
338 // Update range state machine.
339 if (FalseRangeEnd
== (int)i
-1)
342 FalseRangeEnd
= Overdefined
;
347 // If this element is in range, update our magic bitvector.
348 if (i
< 64 && IsTrueForElt
)
349 MagicBitvector
|= 1ULL << i
;
351 // If all of our states become overdefined, bail out early. Since the
352 // predicate is expensive, only check it every 8 elements. This is only
353 // really useful for really huge arrays.
354 if ((i
& 8) == 0 && i
>= 64 && SecondTrueElement
== Overdefined
&&
355 SecondFalseElement
== Overdefined
&& TrueRangeEnd
== Overdefined
&&
356 FalseRangeEnd
== Overdefined
)
360 // Now that we've scanned the entire array, emit our new comparison(s). We
361 // order the state machines in complexity of the generated code.
362 Value
*Idx
= GEP
->getOperand(2);
364 // If the index is larger than the pointer size of the target, truncate the
365 // index down like the GEP would do implicitly. We don't have to do this for
366 // an inbounds GEP because the index can't be out of range.
367 if (!GEP
->isInBounds() &&
368 Idx
->getType()->getPrimitiveSizeInBits() > TD
->getPointerSizeInBits())
369 Idx
= Builder
->CreateTrunc(Idx
, TD
->getIntPtrType(Idx
->getContext()));
371 // If the comparison is only true for one or two elements, emit direct
373 if (SecondTrueElement
!= Overdefined
) {
374 // None true -> false.
375 if (FirstTrueElement
== Undefined
)
376 return ReplaceInstUsesWith(ICI
, ConstantInt::getFalse(GEP
->getContext()));
378 Value
*FirstTrueIdx
= ConstantInt::get(Idx
->getType(), FirstTrueElement
);
380 // True for one element -> 'i == 47'.
381 if (SecondTrueElement
== Undefined
)
382 return new ICmpInst(ICmpInst::ICMP_EQ
, Idx
, FirstTrueIdx
);
384 // True for two elements -> 'i == 47 | i == 72'.
385 Value
*C1
= Builder
->CreateICmpEQ(Idx
, FirstTrueIdx
);
386 Value
*SecondTrueIdx
= ConstantInt::get(Idx
->getType(), SecondTrueElement
);
387 Value
*C2
= Builder
->CreateICmpEQ(Idx
, SecondTrueIdx
);
388 return BinaryOperator::CreateOr(C1
, C2
);
391 // If the comparison is only false for one or two elements, emit direct
393 if (SecondFalseElement
!= Overdefined
) {
394 // None false -> true.
395 if (FirstFalseElement
== Undefined
)
396 return ReplaceInstUsesWith(ICI
, ConstantInt::getTrue(GEP
->getContext()));
398 Value
*FirstFalseIdx
= ConstantInt::get(Idx
->getType(), FirstFalseElement
);
400 // False for one element -> 'i != 47'.
401 if (SecondFalseElement
== Undefined
)
402 return new ICmpInst(ICmpInst::ICMP_NE
, Idx
, FirstFalseIdx
);
404 // False for two elements -> 'i != 47 & i != 72'.
405 Value
*C1
= Builder
->CreateICmpNE(Idx
, FirstFalseIdx
);
406 Value
*SecondFalseIdx
= ConstantInt::get(Idx
->getType(),SecondFalseElement
);
407 Value
*C2
= Builder
->CreateICmpNE(Idx
, SecondFalseIdx
);
408 return BinaryOperator::CreateAnd(C1
, C2
);
411 // If the comparison can be replaced with a range comparison for the elements
412 // where it is true, emit the range check.
413 if (TrueRangeEnd
!= Overdefined
) {
414 assert(TrueRangeEnd
!= FirstTrueElement
&& "Should emit single compare");
416 // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
417 if (FirstTrueElement
) {
418 Value
*Offs
= ConstantInt::get(Idx
->getType(), -FirstTrueElement
);
419 Idx
= Builder
->CreateAdd(Idx
, Offs
);
422 Value
*End
= ConstantInt::get(Idx
->getType(),
423 TrueRangeEnd
-FirstTrueElement
+1);
424 return new ICmpInst(ICmpInst::ICMP_ULT
, Idx
, End
);
427 // False range check.
428 if (FalseRangeEnd
!= Overdefined
) {
429 assert(FalseRangeEnd
!= FirstFalseElement
&& "Should emit single compare");
430 // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
431 if (FirstFalseElement
) {
432 Value
*Offs
= ConstantInt::get(Idx
->getType(), -FirstFalseElement
);
433 Idx
= Builder
->CreateAdd(Idx
, Offs
);
436 Value
*End
= ConstantInt::get(Idx
->getType(),
437 FalseRangeEnd
-FirstFalseElement
);
438 return new ICmpInst(ICmpInst::ICMP_UGT
, Idx
, End
);
442 // If a 32-bit or 64-bit magic bitvector captures the entire comparison state
443 // of this load, replace it with computation that does:
444 // ((magic_cst >> i) & 1) != 0
445 if (Init
->getNumOperands() <= 32 ||
446 (TD
&& Init
->getNumOperands() <= 64 && TD
->isLegalInteger(64))) {
448 if (Init
->getNumOperands() <= 32)
449 Ty
= Type::getInt32Ty(Init
->getContext());
451 Ty
= Type::getInt64Ty(Init
->getContext());
452 Value
*V
= Builder
->CreateIntCast(Idx
, Ty
, false);
453 V
= Builder
->CreateLShr(ConstantInt::get(Ty
, MagicBitvector
), V
);
454 V
= Builder
->CreateAnd(ConstantInt::get(Ty
, 1), V
);
455 return new ICmpInst(ICmpInst::ICMP_NE
, V
, ConstantInt::get(Ty
, 0));
462 /// EvaluateGEPOffsetExpression - Return a value that can be used to compare
463 /// the *offset* implied by a GEP to zero. For example, if we have &A[i], we
464 /// want to return 'i' for "icmp ne i, 0". Note that, in general, indices can
465 /// be complex, and scales are involved. The above expression would also be
466 /// legal to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32).
467 /// This later form is less amenable to optimization though, and we are allowed
468 /// to generate the first by knowing that pointer arithmetic doesn't overflow.
470 /// If we can't emit an optimized form for this expression, this returns null.
472 static Value
*EvaluateGEPOffsetExpression(User
*GEP
, Instruction
&I
,
474 TargetData
&TD
= *IC
.getTargetData();
475 gep_type_iterator GTI
= gep_type_begin(GEP
);
477 // Check to see if this gep only has a single variable index. If so, and if
478 // any constant indices are a multiple of its scale, then we can compute this
479 // in terms of the scale of the variable index. For example, if the GEP
480 // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
481 // because the expression will cross zero at the same point.
482 unsigned i
, e
= GEP
->getNumOperands();
484 for (i
= 1; i
!= e
; ++i
, ++GTI
) {
485 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(GEP
->getOperand(i
))) {
486 // Compute the aggregate offset of constant indices.
487 if (CI
->isZero()) continue;
489 // Handle a struct index, which adds its field offset to the pointer.
490 if (const StructType
*STy
= dyn_cast
<StructType
>(*GTI
)) {
491 Offset
+= TD
.getStructLayout(STy
)->getElementOffset(CI
->getZExtValue());
493 uint64_t Size
= TD
.getTypeAllocSize(GTI
.getIndexedType());
494 Offset
+= Size
*CI
->getSExtValue();
497 // Found our variable index.
502 // If there are no variable indices, we must have a constant offset, just
503 // evaluate it the general way.
504 if (i
== e
) return 0;
506 Value
*VariableIdx
= GEP
->getOperand(i
);
507 // Determine the scale factor of the variable element. For example, this is
508 // 4 if the variable index is into an array of i32.
509 uint64_t VariableScale
= TD
.getTypeAllocSize(GTI
.getIndexedType());
511 // Verify that there are no other variable indices. If so, emit the hard way.
512 for (++i
, ++GTI
; i
!= e
; ++i
, ++GTI
) {
513 ConstantInt
*CI
= dyn_cast
<ConstantInt
>(GEP
->getOperand(i
));
516 // Compute the aggregate offset of constant indices.
517 if (CI
->isZero()) continue;
519 // Handle a struct index, which adds its field offset to the pointer.
520 if (const StructType
*STy
= dyn_cast
<StructType
>(*GTI
)) {
521 Offset
+= TD
.getStructLayout(STy
)->getElementOffset(CI
->getZExtValue());
523 uint64_t Size
= TD
.getTypeAllocSize(GTI
.getIndexedType());
524 Offset
+= Size
*CI
->getSExtValue();
528 // Okay, we know we have a single variable index, which must be a
529 // pointer/array/vector index. If there is no offset, life is simple, return
531 unsigned IntPtrWidth
= TD
.getPointerSizeInBits();
533 // Cast to intptrty in case a truncation occurs. If an extension is needed,
534 // we don't need to bother extending: the extension won't affect where the
535 // computation crosses zero.
536 if (VariableIdx
->getType()->getPrimitiveSizeInBits() > IntPtrWidth
)
537 VariableIdx
= new TruncInst(VariableIdx
,
538 TD
.getIntPtrType(VariableIdx
->getContext()),
539 VariableIdx
->getName(), &I
);
543 // Otherwise, there is an index. The computation we will do will be modulo
544 // the pointer size, so get it.
545 uint64_t PtrSizeMask
= ~0ULL >> (64-IntPtrWidth
);
547 Offset
&= PtrSizeMask
;
548 VariableScale
&= PtrSizeMask
;
550 // To do this transformation, any constant index must be a multiple of the
551 // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
552 // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
553 // multiple of the variable scale.
554 int64_t NewOffs
= Offset
/ (int64_t)VariableScale
;
555 if (Offset
!= NewOffs
*(int64_t)VariableScale
)
558 // Okay, we can do this evaluation. Start by converting the index to intptr.
559 const Type
*IntPtrTy
= TD
.getIntPtrType(VariableIdx
->getContext());
560 if (VariableIdx
->getType() != IntPtrTy
)
561 VariableIdx
= CastInst::CreateIntegerCast(VariableIdx
, IntPtrTy
,
563 VariableIdx
->getName(), &I
);
564 Constant
*OffsetVal
= ConstantInt::get(IntPtrTy
, NewOffs
);
565 return BinaryOperator::CreateAdd(VariableIdx
, OffsetVal
, "offset", &I
);
568 /// FoldGEPICmp - Fold comparisons between a GEP instruction and something
569 /// else. At this point we know that the GEP is on the LHS of the comparison.
570 Instruction
*InstCombiner::FoldGEPICmp(GEPOperator
*GEPLHS
, Value
*RHS
,
571 ICmpInst::Predicate Cond
,
573 // Look through bitcasts.
574 if (BitCastInst
*BCI
= dyn_cast
<BitCastInst
>(RHS
))
575 RHS
= BCI
->getOperand(0);
577 Value
*PtrBase
= GEPLHS
->getOperand(0);
578 if (TD
&& PtrBase
== RHS
&& GEPLHS
->isInBounds()) {
579 // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
580 // This transformation (ignoring the base and scales) is valid because we
581 // know pointers can't overflow since the gep is inbounds. See if we can
582 // output an optimized form.
583 Value
*Offset
= EvaluateGEPOffsetExpression(GEPLHS
, I
, *this);
585 // If not, synthesize the offset the hard way.
587 Offset
= EmitGEPOffset(GEPLHS
);
588 return new ICmpInst(ICmpInst::getSignedPredicate(Cond
), Offset
,
589 Constant::getNullValue(Offset
->getType()));
590 } else if (GEPOperator
*GEPRHS
= dyn_cast
<GEPOperator
>(RHS
)) {
591 // If the base pointers are different, but the indices are the same, just
592 // compare the base pointer.
593 if (PtrBase
!= GEPRHS
->getOperand(0)) {
594 bool IndicesTheSame
= GEPLHS
->getNumOperands()==GEPRHS
->getNumOperands();
595 IndicesTheSame
&= GEPLHS
->getOperand(0)->getType() ==
596 GEPRHS
->getOperand(0)->getType();
598 for (unsigned i
= 1, e
= GEPLHS
->getNumOperands(); i
!= e
; ++i
)
599 if (GEPLHS
->getOperand(i
) != GEPRHS
->getOperand(i
)) {
600 IndicesTheSame
= false;
604 // If all indices are the same, just compare the base pointers.
606 return new ICmpInst(ICmpInst::getSignedPredicate(Cond
),
607 GEPLHS
->getOperand(0), GEPRHS
->getOperand(0));
609 // Otherwise, the base pointers are different and the indices are
610 // different, bail out.
614 // If one of the GEPs has all zero indices, recurse.
615 bool AllZeros
= true;
616 for (unsigned i
= 1, e
= GEPLHS
->getNumOperands(); i
!= e
; ++i
)
617 if (!isa
<Constant
>(GEPLHS
->getOperand(i
)) ||
618 !cast
<Constant
>(GEPLHS
->getOperand(i
))->isNullValue()) {
623 return FoldGEPICmp(GEPRHS
, GEPLHS
->getOperand(0),
624 ICmpInst::getSwappedPredicate(Cond
), I
);
626 // If the other GEP has all zero indices, recurse.
628 for (unsigned i
= 1, e
= GEPRHS
->getNumOperands(); i
!= e
; ++i
)
629 if (!isa
<Constant
>(GEPRHS
->getOperand(i
)) ||
630 !cast
<Constant
>(GEPRHS
->getOperand(i
))->isNullValue()) {
635 return FoldGEPICmp(GEPLHS
, GEPRHS
->getOperand(0), Cond
, I
);
637 if (GEPLHS
->getNumOperands() == GEPRHS
->getNumOperands()) {
638 // If the GEPs only differ by one index, compare it.
639 unsigned NumDifferences
= 0; // Keep track of # differences.
640 unsigned DiffOperand
= 0; // The operand that differs.
641 for (unsigned i
= 1, e
= GEPRHS
->getNumOperands(); i
!= e
; ++i
)
642 if (GEPLHS
->getOperand(i
) != GEPRHS
->getOperand(i
)) {
643 if (GEPLHS
->getOperand(i
)->getType()->getPrimitiveSizeInBits() !=
644 GEPRHS
->getOperand(i
)->getType()->getPrimitiveSizeInBits()) {
645 // Irreconcilable differences.
649 if (NumDifferences
++) break;
654 if (NumDifferences
== 0) // SAME GEP?
655 return ReplaceInstUsesWith(I
, // No comparison is needed here.
656 ConstantInt::get(Type::getInt1Ty(I
.getContext()),
657 ICmpInst::isTrueWhenEqual(Cond
)));
659 else if (NumDifferences
== 1) {
660 Value
*LHSV
= GEPLHS
->getOperand(DiffOperand
);
661 Value
*RHSV
= GEPRHS
->getOperand(DiffOperand
);
662 // Make sure we do a signed comparison here.
663 return new ICmpInst(ICmpInst::getSignedPredicate(Cond
), LHSV
, RHSV
);
667 // Only lower this if the icmp is the only user of the GEP or if we expect
668 // the result to fold to a constant!
670 (isa
<ConstantExpr
>(GEPLHS
) || GEPLHS
->hasOneUse()) &&
671 (isa
<ConstantExpr
>(GEPRHS
) || GEPRHS
->hasOneUse())) {
672 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) ---> (OFFSET1 cmp OFFSET2)
673 Value
*L
= EmitGEPOffset(GEPLHS
);
674 Value
*R
= EmitGEPOffset(GEPRHS
);
675 return new ICmpInst(ICmpInst::getSignedPredicate(Cond
), L
, R
);
681 /// FoldICmpAddOpCst - Fold "icmp pred (X+CI), X".
682 Instruction
*InstCombiner::FoldICmpAddOpCst(ICmpInst
&ICI
,
683 Value
*X
, ConstantInt
*CI
,
684 ICmpInst::Predicate Pred
,
686 // If we have X+0, exit early (simplifying logic below) and let it get folded
687 // elsewhere. icmp X+0, X -> icmp X, X
689 bool isTrue
= ICmpInst::isTrueWhenEqual(Pred
);
690 return ReplaceInstUsesWith(ICI
, ConstantInt::get(ICI
.getType(), isTrue
));
693 // (X+4) == X -> false.
694 if (Pred
== ICmpInst::ICMP_EQ
)
695 return ReplaceInstUsesWith(ICI
, ConstantInt::getFalse(X
->getContext()));
697 // (X+4) != X -> true.
698 if (Pred
== ICmpInst::ICMP_NE
)
699 return ReplaceInstUsesWith(ICI
, ConstantInt::getTrue(X
->getContext()));
701 // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
702 // so the values can never be equal. Similiarly for all other "or equals"
705 // (X+1) <u X --> X >u (MAXUINT-1) --> X == 255
706 // (X+2) <u X --> X >u (MAXUINT-2) --> X > 253
707 // (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0
708 if (Pred
== ICmpInst::ICMP_ULT
|| Pred
== ICmpInst::ICMP_ULE
) {
710 ConstantExpr::getSub(ConstantInt::getAllOnesValue(CI
->getType()), CI
);
711 return new ICmpInst(ICmpInst::ICMP_UGT
, X
, R
);
714 // (X+1) >u X --> X <u (0-1) --> X != 255
715 // (X+2) >u X --> X <u (0-2) --> X <u 254
716 // (X+MAXUINT) >u X --> X <u (0-MAXUINT) --> X <u 1 --> X == 0
717 if (Pred
== ICmpInst::ICMP_UGT
|| Pred
== ICmpInst::ICMP_UGE
)
718 return new ICmpInst(ICmpInst::ICMP_ULT
, X
, ConstantExpr::getNeg(CI
));
720 unsigned BitWidth
= CI
->getType()->getPrimitiveSizeInBits();
721 ConstantInt
*SMax
= ConstantInt::get(X
->getContext(),
722 APInt::getSignedMaxValue(BitWidth
));
724 // (X+ 1) <s X --> X >s (MAXSINT-1) --> X == 127
725 // (X+ 2) <s X --> X >s (MAXSINT-2) --> X >s 125
726 // (X+MAXSINT) <s X --> X >s (MAXSINT-MAXSINT) --> X >s 0
727 // (X+MINSINT) <s X --> X >s (MAXSINT-MINSINT) --> X >s -1
728 // (X+ -2) <s X --> X >s (MAXSINT- -2) --> X >s 126
729 // (X+ -1) <s X --> X >s (MAXSINT- -1) --> X != 127
730 if (Pred
== ICmpInst::ICMP_SLT
|| Pred
== ICmpInst::ICMP_SLE
)
731 return new ICmpInst(ICmpInst::ICMP_SGT
, X
, ConstantExpr::getSub(SMax
, CI
));
733 // (X+ 1) >s X --> X <s (MAXSINT-(1-1)) --> X != 127
734 // (X+ 2) >s X --> X <s (MAXSINT-(2-1)) --> X <s 126
735 // (X+MAXSINT) >s X --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
736 // (X+MINSINT) >s X --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
737 // (X+ -2) >s X --> X <s (MAXSINT-(-2-1)) --> X <s -126
738 // (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128
740 assert(Pred
== ICmpInst::ICMP_SGT
|| Pred
== ICmpInst::ICMP_SGE
);
741 Constant
*C
= ConstantInt::get(X
->getContext(), CI
->getValue()-1);
742 return new ICmpInst(ICmpInst::ICMP_SLT
, X
, ConstantExpr::getSub(SMax
, C
));
745 /// FoldICmpDivCst - Fold "icmp pred, ([su]div X, DivRHS), CmpRHS" where DivRHS
746 /// and CmpRHS are both known to be integer constants.
747 Instruction
*InstCombiner::FoldICmpDivCst(ICmpInst
&ICI
, BinaryOperator
*DivI
,
748 ConstantInt
*DivRHS
) {
749 ConstantInt
*CmpRHS
= cast
<ConstantInt
>(ICI
.getOperand(1));
750 const APInt
&CmpRHSV
= CmpRHS
->getValue();
752 // FIXME: If the operand types don't match the type of the divide
753 // then don't attempt this transform. The code below doesn't have the
754 // logic to deal with a signed divide and an unsigned compare (and
755 // vice versa). This is because (x /s C1) <s C2 produces different
756 // results than (x /s C1) <u C2 or (x /u C1) <s C2 or even
757 // (x /u C1) <u C2. Simply casting the operands and result won't
758 // work. :( The if statement below tests that condition and bails
760 bool DivIsSigned
= DivI
->getOpcode() == Instruction::SDiv
;
761 if (!ICI
.isEquality() && DivIsSigned
!= ICI
.isSigned())
763 if (DivRHS
->isZero())
764 return 0; // The ProdOV computation fails on divide by zero.
765 if (DivIsSigned
&& DivRHS
->isAllOnesValue())
766 return 0; // The overflow computation also screws up here
767 if (DivRHS
->isOne()) {
768 // This eliminates some funny cases with INT_MIN.
769 ICI
.setOperand(0, DivI
->getOperand(0)); // X/1 == X.
773 // Compute Prod = CI * DivRHS. We are essentially solving an equation
774 // of form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and
775 // C2 (CI). By solving for X we can turn this into a range check
776 // instead of computing a divide.
777 Constant
*Prod
= ConstantExpr::getMul(CmpRHS
, DivRHS
);
779 // Determine if the product overflows by seeing if the product is
780 // not equal to the divide. Make sure we do the same kind of divide
781 // as in the LHS instruction that we're folding.
782 bool ProdOV
= (DivIsSigned
? ConstantExpr::getSDiv(Prod
, DivRHS
) :
783 ConstantExpr::getUDiv(Prod
, DivRHS
)) != CmpRHS
;
785 // Get the ICmp opcode
786 ICmpInst::Predicate Pred
= ICI
.getPredicate();
788 /// If the division is known to be exact, then there is no remainder from the
789 /// divide, so the covered range size is unit, otherwise it is the divisor.
790 ConstantInt
*RangeSize
= DivI
->isExact() ? getOne(Prod
) : DivRHS
;
792 // Figure out the interval that is being checked. For example, a comparison
793 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
794 // Compute this interval based on the constants involved and the signedness of
795 // the compare/divide. This computes a half-open interval, keeping track of
796 // whether either value in the interval overflows. After analysis each
797 // overflow variable is set to 0 if it's corresponding bound variable is valid
798 // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
799 int LoOverflow
= 0, HiOverflow
= 0;
800 Constant
*LoBound
= 0, *HiBound
= 0;
802 if (!DivIsSigned
) { // udiv
803 // e.g. X/5 op 3 --> [15, 20)
805 HiOverflow
= LoOverflow
= ProdOV
;
807 // If this is not an exact divide, then many values in the range collapse
808 // to the same result value.
809 HiOverflow
= AddWithOverflow(HiBound
, LoBound
, RangeSize
, false);
812 } else if (DivRHS
->getValue().isStrictlyPositive()) { // Divisor is > 0.
813 if (CmpRHSV
== 0) { // (X / pos) op 0
814 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
815 LoBound
= ConstantExpr::getNeg(SubOne(RangeSize
));
817 } else if (CmpRHSV
.isStrictlyPositive()) { // (X / pos) op pos
818 LoBound
= Prod
; // e.g. X/5 op 3 --> [15, 20)
819 HiOverflow
= LoOverflow
= ProdOV
;
821 HiOverflow
= AddWithOverflow(HiBound
, Prod
, RangeSize
, true);
822 } else { // (X / pos) op neg
823 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
824 HiBound
= AddOne(Prod
);
825 LoOverflow
= HiOverflow
= ProdOV
? -1 : 0;
827 ConstantInt
*DivNeg
=cast
<ConstantInt
>(ConstantExpr::getNeg(RangeSize
));
828 LoOverflow
= AddWithOverflow(LoBound
, HiBound
, DivNeg
, true) ? -1 : 0;
831 } else if (DivRHS
->getValue().isNegative()) { // Divisor is < 0.
833 RangeSize
= cast
<ConstantInt
>(ConstantExpr::getNeg(RangeSize
));
834 if (CmpRHSV
== 0) { // (X / neg) op 0
835 // e.g. X/-5 op 0 --> [-4, 5)
836 LoBound
= AddOne(RangeSize
);
837 HiBound
= cast
<ConstantInt
>(ConstantExpr::getNeg(RangeSize
));
838 if (HiBound
== DivRHS
) { // -INTMIN = INTMIN
839 HiOverflow
= 1; // [INTMIN+1, overflow)
840 HiBound
= 0; // e.g. X/INTMIN = 0 --> X > INTMIN
842 } else if (CmpRHSV
.isStrictlyPositive()) { // (X / neg) op pos
843 // e.g. X/-5 op 3 --> [-19, -14)
844 HiBound
= AddOne(Prod
);
845 HiOverflow
= LoOverflow
= ProdOV
? -1 : 0;
847 LoOverflow
= AddWithOverflow(LoBound
, HiBound
, RangeSize
, true) ? -1:0;
848 } else { // (X / neg) op neg
849 LoBound
= Prod
; // e.g. X/-5 op -3 --> [15, 20)
850 LoOverflow
= HiOverflow
= ProdOV
;
852 HiOverflow
= SubWithOverflow(HiBound
, Prod
, RangeSize
, true);
855 // Dividing by a negative swaps the condition. LT <-> GT
856 Pred
= ICmpInst::getSwappedPredicate(Pred
);
859 Value
*X
= DivI
->getOperand(0);
861 default: llvm_unreachable("Unhandled icmp opcode!");
862 case ICmpInst::ICMP_EQ
:
863 if (LoOverflow
&& HiOverflow
)
864 return ReplaceInstUsesWith(ICI
, ConstantInt::getFalse(ICI
.getContext()));
866 return new ICmpInst(DivIsSigned
? ICmpInst::ICMP_SGE
:
867 ICmpInst::ICMP_UGE
, X
, LoBound
);
869 return new ICmpInst(DivIsSigned
? ICmpInst::ICMP_SLT
:
870 ICmpInst::ICMP_ULT
, X
, HiBound
);
871 return ReplaceInstUsesWith(ICI
, InsertRangeTest(X
, LoBound
, HiBound
,
873 case ICmpInst::ICMP_NE
:
874 if (LoOverflow
&& HiOverflow
)
875 return ReplaceInstUsesWith(ICI
, ConstantInt::getTrue(ICI
.getContext()));
877 return new ICmpInst(DivIsSigned
? ICmpInst::ICMP_SLT
:
878 ICmpInst::ICMP_ULT
, X
, LoBound
);
880 return new ICmpInst(DivIsSigned
? ICmpInst::ICMP_SGE
:
881 ICmpInst::ICMP_UGE
, X
, HiBound
);
882 return ReplaceInstUsesWith(ICI
, InsertRangeTest(X
, LoBound
, HiBound
,
883 DivIsSigned
, false));
884 case ICmpInst::ICMP_ULT
:
885 case ICmpInst::ICMP_SLT
:
886 if (LoOverflow
== +1) // Low bound is greater than input range.
887 return ReplaceInstUsesWith(ICI
, ConstantInt::getTrue(ICI
.getContext()));
888 if (LoOverflow
== -1) // Low bound is less than input range.
889 return ReplaceInstUsesWith(ICI
, ConstantInt::getFalse(ICI
.getContext()));
890 return new ICmpInst(Pred
, X
, LoBound
);
891 case ICmpInst::ICMP_UGT
:
892 case ICmpInst::ICMP_SGT
:
893 if (HiOverflow
== +1) // High bound greater than input range.
894 return ReplaceInstUsesWith(ICI
, ConstantInt::getFalse(ICI
.getContext()));
895 if (HiOverflow
== -1) // High bound less than input range.
896 return ReplaceInstUsesWith(ICI
, ConstantInt::getTrue(ICI
.getContext()));
897 if (Pred
== ICmpInst::ICMP_UGT
)
898 return new ICmpInst(ICmpInst::ICMP_UGE
, X
, HiBound
);
899 return new ICmpInst(ICmpInst::ICMP_SGE
, X
, HiBound
);
903 /// FoldICmpShrCst - Handle "icmp(([al]shr X, cst1), cst2)".
904 Instruction
*InstCombiner::FoldICmpShrCst(ICmpInst
&ICI
, BinaryOperator
*Shr
,
905 ConstantInt
*ShAmt
) {
906 const APInt
&CmpRHSV
= cast
<ConstantInt
>(ICI
.getOperand(1))->getValue();
908 // Check that the shift amount is in range. If not, don't perform
909 // undefined shifts. When the shift is visited it will be
911 uint32_t TypeBits
= CmpRHSV
.getBitWidth();
912 uint32_t ShAmtVal
= (uint32_t)ShAmt
->getLimitedValue(TypeBits
);
913 if (ShAmtVal
>= TypeBits
|| ShAmtVal
== 0)
916 if (!ICI
.isEquality()) {
917 // If we have an unsigned comparison and an ashr, we can't simplify this.
918 // Similarly for signed comparisons with lshr.
919 if (ICI
.isSigned() != (Shr
->getOpcode() == Instruction::AShr
))
922 // Otherwise, all lshr and all exact ashr's are equivalent to a udiv/sdiv by
923 // a power of 2. Since we already have logic to simplify these, transform
924 // to div and then simplify the resultant comparison.
925 if (Shr
->getOpcode() == Instruction::AShr
&&
929 // Revisit the shift (to delete it).
933 ConstantInt::get(Shr
->getType(), APInt::getOneBitSet(TypeBits
, ShAmtVal
));
936 Shr
->getOpcode() == Instruction::AShr
?
937 Builder
->CreateSDiv(Shr
->getOperand(0), DivCst
, "", Shr
->isExact()) :
938 Builder
->CreateUDiv(Shr
->getOperand(0), DivCst
, "", Shr
->isExact());
940 ICI
.setOperand(0, Tmp
);
942 // If the builder folded the binop, just return it.
943 BinaryOperator
*TheDiv
= dyn_cast
<BinaryOperator
>(Tmp
);
947 // Otherwise, fold this div/compare.
948 assert(TheDiv
->getOpcode() == Instruction::SDiv
||
949 TheDiv
->getOpcode() == Instruction::UDiv
);
951 Instruction
*Res
= FoldICmpDivCst(ICI
, TheDiv
, cast
<ConstantInt
>(DivCst
));
952 assert(Res
&& "This div/cst should have folded!");
957 // If we are comparing against bits always shifted out, the
958 // comparison cannot succeed.
959 APInt Comp
= CmpRHSV
<< ShAmtVal
;
960 ConstantInt
*ShiftedCmpRHS
= ConstantInt::get(ICI
.getContext(), Comp
);
961 if (Shr
->getOpcode() == Instruction::LShr
)
962 Comp
= Comp
.lshr(ShAmtVal
);
964 Comp
= Comp
.ashr(ShAmtVal
);
966 if (Comp
!= CmpRHSV
) { // Comparing against a bit that we know is zero.
967 bool IsICMP_NE
= ICI
.getPredicate() == ICmpInst::ICMP_NE
;
968 Constant
*Cst
= ConstantInt::get(Type::getInt1Ty(ICI
.getContext()),
970 return ReplaceInstUsesWith(ICI
, Cst
);
973 // Otherwise, check to see if the bits shifted out are known to be zero.
974 // If so, we can compare against the unshifted value:
975 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
976 if (Shr
->hasOneUse() && Shr
->isExact())
977 return new ICmpInst(ICI
.getPredicate(), Shr
->getOperand(0), ShiftedCmpRHS
);
979 if (Shr
->hasOneUse()) {
980 // Otherwise strength reduce the shift into an and.
981 APInt
Val(APInt::getHighBitsSet(TypeBits
, TypeBits
- ShAmtVal
));
982 Constant
*Mask
= ConstantInt::get(ICI
.getContext(), Val
);
984 Value
*And
= Builder
->CreateAnd(Shr
->getOperand(0),
985 Mask
, Shr
->getName()+".mask");
986 return new ICmpInst(ICI
.getPredicate(), And
, ShiftedCmpRHS
);
992 /// visitICmpInstWithInstAndIntCst - Handle "icmp (instr, intcst)".
994 Instruction
*InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst
&ICI
,
997 const APInt
&RHSV
= RHS
->getValue();
999 switch (LHSI
->getOpcode()) {
1000 case Instruction::Trunc
:
1001 if (ICI
.isEquality() && LHSI
->hasOneUse()) {
1002 // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
1003 // of the high bits truncated out of x are known.
1004 unsigned DstBits
= LHSI
->getType()->getPrimitiveSizeInBits(),
1005 SrcBits
= LHSI
->getOperand(0)->getType()->getPrimitiveSizeInBits();
1006 APInt
Mask(APInt::getHighBitsSet(SrcBits
, SrcBits
-DstBits
));
1007 APInt
KnownZero(SrcBits
, 0), KnownOne(SrcBits
, 0);
1008 ComputeMaskedBits(LHSI
->getOperand(0), Mask
, KnownZero
, KnownOne
);
1010 // If all the high bits are known, we can do this xform.
1011 if ((KnownZero
|KnownOne
).countLeadingOnes() >= SrcBits
-DstBits
) {
1012 // Pull in the high bits from known-ones set.
1013 APInt NewRHS
= RHS
->getValue().zext(SrcBits
);
1015 return new ICmpInst(ICI
.getPredicate(), LHSI
->getOperand(0),
1016 ConstantInt::get(ICI
.getContext(), NewRHS
));
1021 case Instruction::Xor
: // (icmp pred (xor X, XorCST), CI)
1022 if (ConstantInt
*XorCST
= dyn_cast
<ConstantInt
>(LHSI
->getOperand(1))) {
1023 // If this is a comparison that tests the signbit (X < 0) or (x > -1),
1025 if ((ICI
.getPredicate() == ICmpInst::ICMP_SLT
&& RHSV
== 0) ||
1026 (ICI
.getPredicate() == ICmpInst::ICMP_SGT
&& RHSV
.isAllOnesValue())) {
1027 Value
*CompareVal
= LHSI
->getOperand(0);
1029 // If the sign bit of the XorCST is not set, there is no change to
1030 // the operation, just stop using the Xor.
1031 if (!XorCST
->getValue().isNegative()) {
1032 ICI
.setOperand(0, CompareVal
);
1037 // Was the old condition true if the operand is positive?
1038 bool isTrueIfPositive
= ICI
.getPredicate() == ICmpInst::ICMP_SGT
;
1040 // If so, the new one isn't.
1041 isTrueIfPositive
^= true;
1043 if (isTrueIfPositive
)
1044 return new ICmpInst(ICmpInst::ICMP_SGT
, CompareVal
,
1047 return new ICmpInst(ICmpInst::ICMP_SLT
, CompareVal
,
1051 if (LHSI
->hasOneUse()) {
1052 // (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
1053 if (!ICI
.isEquality() && XorCST
->getValue().isSignBit()) {
1054 const APInt
&SignBit
= XorCST
->getValue();
1055 ICmpInst::Predicate Pred
= ICI
.isSigned()
1056 ? ICI
.getUnsignedPredicate()
1057 : ICI
.getSignedPredicate();
1058 return new ICmpInst(Pred
, LHSI
->getOperand(0),
1059 ConstantInt::get(ICI
.getContext(),
1063 // (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
1064 if (!ICI
.isEquality() && XorCST
->getValue().isMaxSignedValue()) {
1065 const APInt
&NotSignBit
= XorCST
->getValue();
1066 ICmpInst::Predicate Pred
= ICI
.isSigned()
1067 ? ICI
.getUnsignedPredicate()
1068 : ICI
.getSignedPredicate();
1069 Pred
= ICI
.getSwappedPredicate(Pred
);
1070 return new ICmpInst(Pred
, LHSI
->getOperand(0),
1071 ConstantInt::get(ICI
.getContext(),
1072 RHSV
^ NotSignBit
));
1077 case Instruction::And
: // (icmp pred (and X, AndCST), RHS)
1078 if (LHSI
->hasOneUse() && isa
<ConstantInt
>(LHSI
->getOperand(1)) &&
1079 LHSI
->getOperand(0)->hasOneUse()) {
1080 ConstantInt
*AndCST
= cast
<ConstantInt
>(LHSI
->getOperand(1));
1082 // If the LHS is an AND of a truncating cast, we can widen the
1083 // and/compare to be the input width without changing the value
1084 // produced, eliminating a cast.
1085 if (TruncInst
*Cast
= dyn_cast
<TruncInst
>(LHSI
->getOperand(0))) {
1086 // We can do this transformation if either the AND constant does not
1087 // have its sign bit set or if it is an equality comparison.
1088 // Extending a relational comparison when we're checking the sign
1089 // bit would not work.
1090 if (Cast
->hasOneUse() &&
1091 (ICI
.isEquality() ||
1092 (AndCST
->getValue().isNonNegative() && RHSV
.isNonNegative()))) {
1094 cast
<IntegerType
>(Cast
->getOperand(0)->getType())->getBitWidth();
1095 APInt NewCST
= AndCST
->getValue().zext(BitWidth
);
1096 APInt NewCI
= RHSV
.zext(BitWidth
);
1098 Builder
->CreateAnd(Cast
->getOperand(0),
1099 ConstantInt::get(ICI
.getContext(), NewCST
),
1101 return new ICmpInst(ICI
.getPredicate(), NewAnd
,
1102 ConstantInt::get(ICI
.getContext(), NewCI
));
1106 // If this is: (X >> C1) & C2 != C3 (where any shift and any compare
1107 // could exist), turn it into (X & (C2 << C1)) != (C3 << C1). This
1108 // happens a LOT in code produced by the C front-end, for bitfield
1110 BinaryOperator
*Shift
= dyn_cast
<BinaryOperator
>(LHSI
->getOperand(0));
1111 if (Shift
&& !Shift
->isShift())
1115 ShAmt
= Shift
? dyn_cast
<ConstantInt
>(Shift
->getOperand(1)) : 0;
1116 const Type
*Ty
= Shift
? Shift
->getType() : 0; // Type of the shift.
1117 const Type
*AndTy
= AndCST
->getType(); // Type of the and.
1119 // We can fold this as long as we can't shift unknown bits
1120 // into the mask. This can only happen with signed shift
1121 // rights, as they sign-extend.
1123 bool CanFold
= Shift
->isLogicalShift();
1125 // To test for the bad case of the signed shr, see if any
1126 // of the bits shifted in could be tested after the mask.
1127 uint32_t TyBits
= Ty
->getPrimitiveSizeInBits();
1128 int ShAmtVal
= TyBits
- ShAmt
->getLimitedValue(TyBits
);
1130 uint32_t BitWidth
= AndTy
->getPrimitiveSizeInBits();
1131 if ((APInt::getHighBitsSet(BitWidth
, BitWidth
-ShAmtVal
) &
1132 AndCST
->getValue()) == 0)
1138 if (Shift
->getOpcode() == Instruction::Shl
)
1139 NewCst
= ConstantExpr::getLShr(RHS
, ShAmt
);
1141 NewCst
= ConstantExpr::getShl(RHS
, ShAmt
);
1143 // Check to see if we are shifting out any of the bits being
1145 if (ConstantExpr::get(Shift
->getOpcode(),
1146 NewCst
, ShAmt
) != RHS
) {
1147 // If we shifted bits out, the fold is not going to work out.
1148 // As a special case, check to see if this means that the
1149 // result is always true or false now.
1150 if (ICI
.getPredicate() == ICmpInst::ICMP_EQ
)
1151 return ReplaceInstUsesWith(ICI
,
1152 ConstantInt::getFalse(ICI
.getContext()));
1153 if (ICI
.getPredicate() == ICmpInst::ICMP_NE
)
1154 return ReplaceInstUsesWith(ICI
,
1155 ConstantInt::getTrue(ICI
.getContext()));
1157 ICI
.setOperand(1, NewCst
);
1158 Constant
*NewAndCST
;
1159 if (Shift
->getOpcode() == Instruction::Shl
)
1160 NewAndCST
= ConstantExpr::getLShr(AndCST
, ShAmt
);
1162 NewAndCST
= ConstantExpr::getShl(AndCST
, ShAmt
);
1163 LHSI
->setOperand(1, NewAndCST
);
1164 LHSI
->setOperand(0, Shift
->getOperand(0));
1165 Worklist
.Add(Shift
); // Shift is dead.
1171 // Turn ((X >> Y) & C) == 0 into (X & (C << Y)) == 0. The later is
1172 // preferable because it allows the C<<Y expression to be hoisted out
1173 // of a loop if Y is invariant and X is not.
1174 if (Shift
&& Shift
->hasOneUse() && RHSV
== 0 &&
1175 ICI
.isEquality() && !Shift
->isArithmeticShift() &&
1176 !isa
<Constant
>(Shift
->getOperand(0))) {
1179 if (Shift
->getOpcode() == Instruction::LShr
) {
1180 NS
= Builder
->CreateShl(AndCST
, Shift
->getOperand(1), "tmp");
1182 // Insert a logical shift.
1183 NS
= Builder
->CreateLShr(AndCST
, Shift
->getOperand(1), "tmp");
1186 // Compute X & (C << Y).
1188 Builder
->CreateAnd(Shift
->getOperand(0), NS
, LHSI
->getName());
1190 ICI
.setOperand(0, NewAnd
);
1195 // Try to optimize things like "A[i]&42 == 0" to index computations.
1196 if (LoadInst
*LI
= dyn_cast
<LoadInst
>(LHSI
->getOperand(0))) {
1197 if (GetElementPtrInst
*GEP
=
1198 dyn_cast
<GetElementPtrInst
>(LI
->getOperand(0)))
1199 if (GlobalVariable
*GV
= dyn_cast
<GlobalVariable
>(GEP
->getOperand(0)))
1200 if (GV
->isConstant() && GV
->hasDefinitiveInitializer() &&
1201 !LI
->isVolatile() && isa
<ConstantInt
>(LHSI
->getOperand(1))) {
1202 ConstantInt
*C
= cast
<ConstantInt
>(LHSI
->getOperand(1));
1203 if (Instruction
*Res
= FoldCmpLoadFromIndexedGlobal(GEP
, GV
,ICI
, C
))
1209 case Instruction::Or
: {
1210 if (!ICI
.isEquality() || !RHS
->isNullValue() || !LHSI
->hasOneUse())
1213 if (match(LHSI
, m_Or(m_PtrToInt(m_Value(P
)), m_PtrToInt(m_Value(Q
))))) {
1214 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
1215 // -> and (icmp eq P, null), (icmp eq Q, null).
1216 Value
*ICIP
= Builder
->CreateICmp(ICI
.getPredicate(), P
,
1217 Constant::getNullValue(P
->getType()));
1218 Value
*ICIQ
= Builder
->CreateICmp(ICI
.getPredicate(), Q
,
1219 Constant::getNullValue(Q
->getType()));
1221 if (ICI
.getPredicate() == ICmpInst::ICMP_EQ
)
1222 Op
= BinaryOperator::CreateAnd(ICIP
, ICIQ
);
1224 Op
= BinaryOperator::CreateOr(ICIP
, ICIQ
);
1230 case Instruction::Shl
: { // (icmp pred (shl X, ShAmt), CI)
1231 ConstantInt
*ShAmt
= dyn_cast
<ConstantInt
>(LHSI
->getOperand(1));
1234 uint32_t TypeBits
= RHSV
.getBitWidth();
1236 // Check that the shift amount is in range. If not, don't perform
1237 // undefined shifts. When the shift is visited it will be
1239 if (ShAmt
->uge(TypeBits
))
1242 if (ICI
.isEquality()) {
1243 // If we are comparing against bits always shifted out, the
1244 // comparison cannot succeed.
1246 ConstantExpr::getShl(ConstantExpr::getLShr(RHS
, ShAmt
),
1248 if (Comp
!= RHS
) {// Comparing against a bit that we know is zero.
1249 bool IsICMP_NE
= ICI
.getPredicate() == ICmpInst::ICMP_NE
;
1251 ConstantInt::get(Type::getInt1Ty(ICI
.getContext()), IsICMP_NE
);
1252 return ReplaceInstUsesWith(ICI
, Cst
);
1255 // If the shift is NUW, then it is just shifting out zeros, no need for an
1257 if (cast
<BinaryOperator
>(LHSI
)->hasNoUnsignedWrap())
1258 return new ICmpInst(ICI
.getPredicate(), LHSI
->getOperand(0),
1259 ConstantExpr::getLShr(RHS
, ShAmt
));
1261 if (LHSI
->hasOneUse()) {
1262 // Otherwise strength reduce the shift into an and.
1263 uint32_t ShAmtVal
= (uint32_t)ShAmt
->getLimitedValue(TypeBits
);
1265 ConstantInt::get(ICI
.getContext(), APInt::getLowBitsSet(TypeBits
,
1266 TypeBits
-ShAmtVal
));
1269 Builder
->CreateAnd(LHSI
->getOperand(0),Mask
, LHSI
->getName()+".mask");
1270 return new ICmpInst(ICI
.getPredicate(), And
,
1271 ConstantExpr::getLShr(RHS
, ShAmt
));
1275 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
1276 bool TrueIfSigned
= false;
1277 if (LHSI
->hasOneUse() &&
1278 isSignBitCheck(ICI
.getPredicate(), RHS
, TrueIfSigned
)) {
1279 // (X << 31) <s 0 --> (X&1) != 0
1280 Constant
*Mask
= ConstantInt::get(LHSI
->getOperand(0)->getType(),
1281 APInt::getOneBitSet(TypeBits
,
1282 TypeBits
-ShAmt
->getZExtValue()-1));
1284 Builder
->CreateAnd(LHSI
->getOperand(0), Mask
, LHSI
->getName()+".mask");
1285 return new ICmpInst(TrueIfSigned
? ICmpInst::ICMP_NE
: ICmpInst::ICMP_EQ
,
1286 And
, Constant::getNullValue(And
->getType()));
1291 case Instruction::LShr
: // (icmp pred (shr X, ShAmt), CI)
1292 case Instruction::AShr
: {
1293 // Handle equality comparisons of shift-by-constant.
1294 BinaryOperator
*BO
= cast
<BinaryOperator
>(LHSI
);
1295 if (ConstantInt
*ShAmt
= dyn_cast
<ConstantInt
>(LHSI
->getOperand(1))) {
1296 if (Instruction
*Res
= FoldICmpShrCst(ICI
, BO
, ShAmt
))
1300 // Handle exact shr's.
1301 if (ICI
.isEquality() && BO
->isExact() && BO
->hasOneUse()) {
1302 if (RHSV
.isMinValue())
1303 return new ICmpInst(ICI
.getPredicate(), BO
->getOperand(0), RHS
);
1308 case Instruction::SDiv
:
1309 case Instruction::UDiv
:
1310 // Fold: icmp pred ([us]div X, C1), C2 -> range test
1311 // Fold this div into the comparison, producing a range check.
1312 // Determine, based on the divide type, what the range is being
1313 // checked. If there is an overflow on the low or high side, remember
1314 // it, otherwise compute the range [low, hi) bounding the new value.
1315 // See: InsertRangeTest above for the kinds of replacements possible.
1316 if (ConstantInt
*DivRHS
= dyn_cast
<ConstantInt
>(LHSI
->getOperand(1)))
1317 if (Instruction
*R
= FoldICmpDivCst(ICI
, cast
<BinaryOperator
>(LHSI
),
1322 case Instruction::Add
:
1323 // Fold: icmp pred (add X, C1), C2
1324 if (!ICI
.isEquality()) {
1325 ConstantInt
*LHSC
= dyn_cast
<ConstantInt
>(LHSI
->getOperand(1));
1327 const APInt
&LHSV
= LHSC
->getValue();
1329 ConstantRange CR
= ICI
.makeConstantRange(ICI
.getPredicate(), RHSV
)
1332 if (ICI
.isSigned()) {
1333 if (CR
.getLower().isSignBit()) {
1334 return new ICmpInst(ICmpInst::ICMP_SLT
, LHSI
->getOperand(0),
1335 ConstantInt::get(ICI
.getContext(),CR
.getUpper()));
1336 } else if (CR
.getUpper().isSignBit()) {
1337 return new ICmpInst(ICmpInst::ICMP_SGE
, LHSI
->getOperand(0),
1338 ConstantInt::get(ICI
.getContext(),CR
.getLower()));
1341 if (CR
.getLower().isMinValue()) {
1342 return new ICmpInst(ICmpInst::ICMP_ULT
, LHSI
->getOperand(0),
1343 ConstantInt::get(ICI
.getContext(),CR
.getUpper()));
1344 } else if (CR
.getUpper().isMinValue()) {
1345 return new ICmpInst(ICmpInst::ICMP_UGE
, LHSI
->getOperand(0),
1346 ConstantInt::get(ICI
.getContext(),CR
.getLower()));
1353 // Simplify icmp_eq and icmp_ne instructions with integer constant RHS.
1354 if (ICI
.isEquality()) {
1355 bool isICMP_NE
= ICI
.getPredicate() == ICmpInst::ICMP_NE
;
1357 // If the first operand is (add|sub|and|or|xor|rem) with a constant, and
1358 // the second operand is a constant, simplify a bit.
1359 if (BinaryOperator
*BO
= dyn_cast
<BinaryOperator
>(LHSI
)) {
1360 switch (BO
->getOpcode()) {
1361 case Instruction::SRem
:
1362 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
1363 if (RHSV
== 0 && isa
<ConstantInt
>(BO
->getOperand(1)) &&BO
->hasOneUse()){
1364 const APInt
&V
= cast
<ConstantInt
>(BO
->getOperand(1))->getValue();
1365 if (V
.sgt(1) && V
.isPowerOf2()) {
1367 Builder
->CreateURem(BO
->getOperand(0), BO
->getOperand(1),
1369 return new ICmpInst(ICI
.getPredicate(), NewRem
,
1370 Constant::getNullValue(BO
->getType()));
1374 case Instruction::Add
:
1375 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
1376 if (ConstantInt
*BOp1C
= dyn_cast
<ConstantInt
>(BO
->getOperand(1))) {
1377 if (BO
->hasOneUse())
1378 return new ICmpInst(ICI
.getPredicate(), BO
->getOperand(0),
1379 ConstantExpr::getSub(RHS
, BOp1C
));
1380 } else if (RHSV
== 0) {
1381 // Replace ((add A, B) != 0) with (A != -B) if A or B is
1382 // efficiently invertible, or if the add has just this one use.
1383 Value
*BOp0
= BO
->getOperand(0), *BOp1
= BO
->getOperand(1);
1385 if (Value
*NegVal
= dyn_castNegVal(BOp1
))
1386 return new ICmpInst(ICI
.getPredicate(), BOp0
, NegVal
);
1387 else if (Value
*NegVal
= dyn_castNegVal(BOp0
))
1388 return new ICmpInst(ICI
.getPredicate(), NegVal
, BOp1
);
1389 else if (BO
->hasOneUse()) {
1390 Value
*Neg
= Builder
->CreateNeg(BOp1
);
1392 return new ICmpInst(ICI
.getPredicate(), BOp0
, Neg
);
1396 case Instruction::Xor
:
1397 // For the xor case, we can xor two constants together, eliminating
1398 // the explicit xor.
1399 if (Constant
*BOC
= dyn_cast
<Constant
>(BO
->getOperand(1)))
1400 return new ICmpInst(ICI
.getPredicate(), BO
->getOperand(0),
1401 ConstantExpr::getXor(RHS
, BOC
));
1404 case Instruction::Sub
:
1405 // Replace (([sub|xor] A, B) != 0) with (A != B)
1407 return new ICmpInst(ICI
.getPredicate(), BO
->getOperand(0),
1411 case Instruction::Or
:
1412 // If bits are being or'd in that are not present in the constant we
1413 // are comparing against, then the comparison could never succeed!
1414 if (ConstantInt
*BOC
= dyn_cast
<ConstantInt
>(BO
->getOperand(1))) {
1415 Constant
*NotCI
= ConstantExpr::getNot(RHS
);
1416 if (!ConstantExpr::getAnd(BOC
, NotCI
)->isNullValue())
1417 return ReplaceInstUsesWith(ICI
,
1418 ConstantInt::get(Type::getInt1Ty(ICI
.getContext()),
1423 case Instruction::And
:
1424 if (ConstantInt
*BOC
= dyn_cast
<ConstantInt
>(BO
->getOperand(1))) {
1425 // If bits are being compared against that are and'd out, then the
1426 // comparison can never succeed!
1427 if ((RHSV
& ~BOC
->getValue()) != 0)
1428 return ReplaceInstUsesWith(ICI
,
1429 ConstantInt::get(Type::getInt1Ty(ICI
.getContext()),
1432 // If we have ((X & C) == C), turn it into ((X & C) != 0).
1433 if (RHS
== BOC
&& RHSV
.isPowerOf2())
1434 return new ICmpInst(isICMP_NE
? ICmpInst::ICMP_EQ
:
1435 ICmpInst::ICMP_NE
, LHSI
,
1436 Constant::getNullValue(RHS
->getType()));
1438 // Replace (and X, (1 << size(X)-1) != 0) with x s< 0
1439 if (BOC
->getValue().isSignBit()) {
1440 Value
*X
= BO
->getOperand(0);
1441 Constant
*Zero
= Constant::getNullValue(X
->getType());
1442 ICmpInst::Predicate pred
= isICMP_NE
?
1443 ICmpInst::ICMP_SLT
: ICmpInst::ICMP_SGE
;
1444 return new ICmpInst(pred
, X
, Zero
);
1447 // ((X & ~7) == 0) --> X < 8
1448 if (RHSV
== 0 && isHighOnes(BOC
)) {
1449 Value
*X
= BO
->getOperand(0);
1450 Constant
*NegX
= ConstantExpr::getNeg(BOC
);
1451 ICmpInst::Predicate pred
= isICMP_NE
?
1452 ICmpInst::ICMP_UGE
: ICmpInst::ICMP_ULT
;
1453 return new ICmpInst(pred
, X
, NegX
);
1458 } else if (IntrinsicInst
*II
= dyn_cast
<IntrinsicInst
>(LHSI
)) {
1459 // Handle icmp {eq|ne} <intrinsic>, intcst.
1460 switch (II
->getIntrinsicID()) {
1461 case Intrinsic::bswap
:
1463 ICI
.setOperand(0, II
->getArgOperand(0));
1464 ICI
.setOperand(1, ConstantInt::get(II
->getContext(), RHSV
.byteSwap()));
1466 case Intrinsic::ctlz
:
1467 case Intrinsic::cttz
:
1468 // ctz(A) == bitwidth(a) -> A == 0 and likewise for !=
1469 if (RHSV
== RHS
->getType()->getBitWidth()) {
1471 ICI
.setOperand(0, II
->getArgOperand(0));
1472 ICI
.setOperand(1, ConstantInt::get(RHS
->getType(), 0));
1476 case Intrinsic::ctpop
:
1477 // popcount(A) == 0 -> A == 0 and likewise for !=
1478 if (RHS
->isZero()) {
1480 ICI
.setOperand(0, II
->getArgOperand(0));
1481 ICI
.setOperand(1, RHS
);
/// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst).
/// We only handle extending casts so far.
///
Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
  const CastInst *LHSCI = cast<CastInst>(ICI.getOperand(0));
  Value *LHSCIOp        = LHSCI->getOperand(0);
  const Type *SrcTy     = LHSCIOp->getType();
  const Type *DestTy    = LHSCI->getType();
  Value *RHSCIOp;

  // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
  // integer type is the same size as the pointer type.
  if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
      TD->getPointerSizeInBits() ==
         cast<IntegerType>(DestTy)->getBitWidth()) {
    Value *RHSOp = 0;
    if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
      RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
    } else if (PtrToIntInst *RHSC = dyn_cast<PtrToIntInst>(ICI.getOperand(1))) {
      RHSOp = RHSC->getOperand(0);
      // If the pointer types don't match, insert a bitcast.
      if (LHSCIOp->getType() != RHSOp->getType())
        RHSOp = Builder->CreateBitCast(RHSOp, LHSCIOp->getType());
    }

    if (RHSOp)
      return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSOp);
  }

  // The code below only handles extension cast instructions, so far.
  // Enforce this.
  if (LHSCI->getOpcode() != Instruction::ZExt &&
      LHSCI->getOpcode() != Instruction::SExt)
    return 0;

  bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
  bool isSignedCmp = ICI.isSigned();

  if (CastInst *CI = dyn_cast<CastInst>(ICI.getOperand(1))) {
    // Not an extension from the same type?
    RHSCIOp = CI->getOperand(0);
    if (RHSCIOp->getType() != LHSCIOp->getType())
      return 0;

    // If the signedness of the two casts doesn't agree (i.e. one is a sext
    // and the other is a zext), then we can't handle this.
    if (CI->getOpcode() != LHSCI->getOpcode())
      return 0;

    // Deal with equality cases early.
    if (ICI.isEquality())
      return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);

    // A signed comparison of sign extended values simplifies into a
    // signed comparison.
    if (isSignedCmp && isSignedExt)
      return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);

    // The other three cases all fold into an unsigned comparison.
    return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
  }

  // If we aren't dealing with a constant on the RHS, exit early.
  ConstantInt *CI = dyn_cast<ConstantInt>(ICI.getOperand(1));
  if (!CI) return 0;

  // Compute the constant that would happen if we truncated to SrcTy then
  // reextended to DestTy.
  Constant *Res1 = ConstantExpr::getTrunc(CI, SrcTy);
  Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(), Res1, DestTy);

  // If the re-extended constant didn't change...
  if (Res2 == CI) {
    // Deal with equality cases early.
    if (ICI.isEquality())
      return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);

    // A signed comparison of sign extended values simplifies into a
    // signed comparison.
    if (isSignedExt && isSignedCmp)
      return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);

    // The other three cases all fold into an unsigned comparison.
    return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, Res1);
  }

  // The re-extended constant changed so the constant cannot be represented
  // in the shorter type.  Consequently, we cannot emit a simple comparison.
  // All the cases that fold to true or false will have already been handled
  // by SimplifyICmpInst, so only deal with the tricky case.

  if (isSignedCmp || !isSignedExt)
    return 0;

  // Evaluate the comparison for LT (we invert for GT below).  LE and GE cases
  // should have been folded away previously and not enter in here.

  // We're performing an unsigned comp with a sign extended value.
  // This is true if the input is >= 0. [aka >s -1]
  Constant *NegOne = Constant::getAllOnesValue(SrcTy);
  Value *Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICI.getName());

  // Finally, return the value computed.
  if (ICI.getPredicate() == ICmpInst::ICMP_ULT)
    return ReplaceInstUsesWith(ICI, Result);

  assert(ICI.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
  return BinaryOperator::CreateNot(Result);
}
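
// Sketch of the common case handled above (operand names are illustrative):
//   %a32 = zext i8 %a to i32
//   %b32 = zext i8 %b to i32
//   %cmp = icmp ult i32 %a32, %b32   -->   %cmp = icmp ult i8 %a, %b
// Two sexts compared with a signed predicate fold the same way; the other
// combinations fall back to the unsigned predicate, as coded above.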
/// ProcessUGT_ADDCST_ADD - The caller has matched a pattern of the form:
///   I = icmp ugt (add (add A, B), CI2), CI1
/// If this is of the form:
///   sum = a + b
///   if (sum+128 >u 255)
/// Then replace it with llvm.sadd.with.overflow.i8.
///
static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
                                          ConstantInt *CI2, ConstantInt *CI1,
                                          InstCombiner &IC) {
  // The transformation we're trying to do here is to transform this into an
  // llvm.sadd.with.overflow.  To do this, we have to replace the original add
  // with a narrower add, and discard the add-with-constant that is part of the
  // range check (if we can't eliminate it, this isn't profitable).

  // In order to eliminate the add-with-constant, the compare can be its only
  // use.
  Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
  if (!AddWithCst->hasOneUse()) return 0;

  // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
  if (!CI2->getValue().isPowerOf2()) return 0;
  unsigned NewWidth = CI2->getValue().countTrailingZeros();
  if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31) return 0;

  // The width of the new add formed is 1 more than the bias.
  ++NewWidth;

  // Check to see that CI1 is an all-ones value with NewWidth bits.
  if (CI1->getBitWidth() == NewWidth ||
      CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
    return 0;

  // In order to replace the original add with a narrower
  // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
  // and truncates that discard the high bits of the add.  Verify that this is
  // the case.
  Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
  for (Value::use_iterator UI = OrigAdd->use_begin(), E = OrigAdd->use_end();
       UI != E; ++UI) {
    if (*UI == AddWithCst) continue;

    // Only accept truncates for now.  We would really like a nice recursive
    // predicate like SimplifyDemandedBits, but which goes downwards the use-def
    // chain to see which bits of a value are actually demanded.  If the
    // original add had another add which was then immediately truncated, we
    // could still do the transformation.
    TruncInst *TI = dyn_cast<TruncInst>(*UI);
    if (TI == 0 ||
        TI->getType()->getPrimitiveSizeInBits() > NewWidth) return 0;
  }

  // If the pattern matches, truncate the inputs to the narrower type and
  // use the sadd_with_overflow intrinsic to efficiently compute both the
  // result and the overflow bit.
  Module *M = I.getParent()->getParent()->getParent();

  const Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
  Value *F = Intrinsic::getDeclaration(M, Intrinsic::sadd_with_overflow,
                                       &NewType, 1);

  InstCombiner::BuilderTy *Builder = IC.Builder;

  // Put the new code above the original add, in case there are any uses of the
  // add between the add and the compare.
  Builder->SetInsertPoint(OrigAdd);

  Value *TruncA = Builder->CreateTrunc(A, NewType, A->getName()+".trunc");
  Value *TruncB = Builder->CreateTrunc(B, NewType, B->getName()+".trunc");
  CallInst *Call = Builder->CreateCall2(F, TruncA, TruncB, "sadd");
  Value *Add = Builder->CreateExtractValue(Call, 0, "sadd.result");
  Value *ZExt = Builder->CreateZExt(Add, OrigAdd->getType());

  // The inner add was the result of the narrow add, zero extended to the
  // wider type.  Replace it with the result computed by the intrinsic.
  IC.ReplaceInstUsesWith(*OrigAdd, ZExt);

  // The original icmp gets replaced with the overflow value.
  return ExtractValueInst::Create(Call, 1, "sadd.overflow");
}
static Instruction *ProcessUAddIdiom(Instruction &I, Value *OrigAddV,
                                     InstCombiner &IC) {
  // Don't bother doing this transformation for pointers, don't do it for
  // vectors.
  if (!isa<IntegerType>(OrigAddV->getType())) return 0;

  // If the add is a constant expr, then we don't bother transforming it.
  Instruction *OrigAdd = dyn_cast<Instruction>(OrigAddV);
  if (OrigAdd == 0) return 0;

  Value *LHS = OrigAdd->getOperand(0), *RHS = OrigAdd->getOperand(1);

  // Put the new code above the original add, in case there are any uses of the
  // add between the add and the compare.
  InstCombiner::BuilderTy *Builder = IC.Builder;
  Builder->SetInsertPoint(OrigAdd);

  Module *M = I.getParent()->getParent()->getParent();
  const Type *Ty = LHS->getType();
  Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, &Ty, 1);
  CallInst *Call = Builder->CreateCall2(F, LHS, RHS, "uadd");
  Value *Add = Builder->CreateExtractValue(Call, 0);

  IC.ReplaceInstUsesWith(*OrigAdd, Add);

  // The original icmp gets replaced with the overflow value.
  return ExtractValueInst::Create(Call, 1, "uadd.overflow");
}
// DemandedBitsLHSMask - When performing a comparison against a constant,
// it is possible that not all the bits in the LHS are demanded.  This helper
// method computes the mask that IS demanded.
static APInt DemandedBitsLHSMask(ICmpInst &I,
                                 unsigned BitWidth, bool isSignCheck) {
  if (isSignCheck)
    return APInt::getSignBit(BitWidth);

  ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1));
  if (!CI) return APInt::getAllOnesValue(BitWidth);
  const APInt &RHS = CI->getValue();

  switch (I.getPredicate()) {
  // For a UGT comparison, we don't care about any bits that
  // correspond to the trailing ones of the comparand.  The value of these
  // bits doesn't impact the outcome of the comparison, because any value
  // greater than the RHS must differ in a bit higher than these due to carry.
  case ICmpInst::ICMP_UGT: {
    unsigned trailingOnes = RHS.countTrailingOnes();
    APInt lowBitsSet = APInt::getLowBitsSet(BitWidth, trailingOnes);
    return ~lowBitsSet;
  }

  // Similarly, for a ULT comparison, we don't care about the trailing zeros.
  // Any value less than the RHS must differ in a higher bit because of carries.
  case ICmpInst::ICMP_ULT: {
    unsigned trailingZeros = RHS.countTrailingZeros();
    APInt lowBitsSet = APInt::getLowBitsSet(BitWidth, trailingZeros);
    return ~lowBitsSet;
  }

  default:
    return APInt::getAllOnesValue(BitWidth);
  }
}
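
// Worked example (assuming an i8 operand): for 'icmp ugt i8 %x, 7' the RHS
// has three trailing ones, so the low three bits of %x are never demanded and
// the returned mask is 0xF8; 'icmp ult i8 %x, 8' has three trailing zeros and
// produces the same mask.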
Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
  bool Changed = false;
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  /// Orders the operands of the compare so that they are listed from most
  /// complex to least complex.  This puts constants before unary operators,
  /// before binary operators.
  if (getComplexity(Op0) < getComplexity(Op1)) {
    I.swapOperands();
    std::swap(Op0, Op1);
    Changed = true;
  }

  if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))
    return ReplaceInstUsesWith(I, V);

  const Type *Ty = Op0->getType();

  // icmp's with boolean values can always be turned into bitwise operations
  if (Ty->isIntegerTy(1)) {
    switch (I.getPredicate()) {
    default: llvm_unreachable("Invalid icmp instruction!");
    case ICmpInst::ICMP_EQ: {               // icmp eq i1 A, B -> ~(A^B)
      Value *Xor = Builder->CreateXor(Op0, Op1, I.getName()+"tmp");
      return BinaryOperator::CreateNot(Xor);
    }
    case ICmpInst::ICMP_NE:                 // icmp ne i1 A, B -> A^B
      return BinaryOperator::CreateXor(Op0, Op1);

    case ICmpInst::ICMP_UGT:
      std::swap(Op0, Op1);                  // Change icmp ugt -> icmp ult
      // FALL THROUGH
    case ICmpInst::ICMP_ULT: {              // icmp ult i1 A, B -> ~A & B
      Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
      return BinaryOperator::CreateAnd(Not, Op1);
    }
    case ICmpInst::ICMP_SGT:
      std::swap(Op0, Op1);                  // Change icmp sgt -> icmp slt
      // FALL THROUGH
    case ICmpInst::ICMP_SLT: {              // icmp slt i1 A, B -> A & ~B
      Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
      return BinaryOperator::CreateAnd(Not, Op0);
    }
    case ICmpInst::ICMP_UGE:
      std::swap(Op0, Op1);                  // Change icmp uge -> icmp ule
      // FALL THROUGH
    case ICmpInst::ICMP_ULE: {              // icmp ule i1 A, B -> ~A | B
      Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
      return BinaryOperator::CreateOr(Not, Op1);
    }
    case ICmpInst::ICMP_SGE:
      std::swap(Op0, Op1);                  // Change icmp sge -> icmp sle
      // FALL THROUGH
    case ICmpInst::ICMP_SLE: {              // icmp sle i1 A, B -> A | ~B
      Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
      return BinaryOperator::CreateOr(Not, Op0);
    }
    }
  }
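
  // For example, 'icmp ult i1 %a, %b' (operands illustrative) becomes
  // 'and (not %a), %b': on i1 values A <u B holds exactly when A is false
  // and B is true.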
  unsigned BitWidth = 0;
  if (Ty->isIntOrIntVectorTy())
    BitWidth = Ty->getScalarSizeInBits();
  else if (TD)  // Pointers require TD info to get their size.
    BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
  bool isSignBit = false;

  // See if we are doing a comparison with a constant.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    Value *A = 0, *B = 0;

    // Match the following pattern, which is a common idiom when writing
    // overflow-safe integer arithmetic functions.  The source performs an
    // addition in wider type, and explicitly checks for overflow using
    // comparisons against INT_MIN and INT_MAX.  Simplify this by using the
    // sadd_with_overflow intrinsic.
    //
    // TODO: This could probably be generalized to handle other overflow-safe
    // operations if we worked out the formulas to compute the appropriate
    // magic constants.
    //
    //   sum = a + b
    //   if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
    ConstantInt *CI2;    // I = icmp ugt (add (add A, B), CI2), CI
    if (I.getPredicate() == ICmpInst::ICMP_UGT &&
        match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
      if (Instruction *Res = ProcessUGT_ADDCST_ADD(I, A, B, CI2, CI, *this))
        return Res;

    // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B)
    if (I.isEquality() && CI->isZero() &&
        match(Op0, m_Sub(m_Value(A), m_Value(B)))) {
      // (icmp cond A B) if cond is equality
      return new ICmpInst(I.getPredicate(), A, B);
    }

    // If we have an icmp le or icmp ge instruction, turn it into the
    // appropriate icmp lt or icmp gt instruction.  This allows us to rely on
    // them being folded in the code below.  The SimplifyICmpInst code has
    // already handled the edge cases for us, so we just assert on them.
    switch (I.getPredicate()) {
    default: break;
    case ICmpInst::ICMP_ULE:
      assert(!CI->isMaxValue(false));                // A <=u MAX -> TRUE
      return new ICmpInst(ICmpInst::ICMP_ULT, Op0,
                          ConstantInt::get(CI->getContext(), CI->getValue()+1));
    case ICmpInst::ICMP_SLE:
      assert(!CI->isMaxValue(true));                 // A <=s MAX -> TRUE
      return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
                          ConstantInt::get(CI->getContext(), CI->getValue()+1));
    case ICmpInst::ICMP_UGE:
      assert(!CI->isMinValue(false));                // A >=u MIN -> TRUE
      return new ICmpInst(ICmpInst::ICMP_UGT, Op0,
                          ConstantInt::get(CI->getContext(), CI->getValue()-1));
    case ICmpInst::ICMP_SGE:
      assert(!CI->isMinValue(true));                 // A >=s MIN -> TRUE
      return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
                          ConstantInt::get(CI->getContext(), CI->getValue()-1));
    }

    // If this comparison is a normal comparison, it demands all
    // bits, if it is a sign bit comparison, it only demands the sign bit.
    bool UnusedBit;
    isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit);
  }
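
  // For instance (constant chosen for illustration), 'icmp sle i32 %x, 9' is
  // canonicalized above to 'icmp slt i32 %x, 10'; SimplifyICmpInst has already
  // removed the MAX/MIN cases where the +1/-1 would wrap.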
1881 // See if we can fold the comparison based on range information we can get
1882 // by checking whether bits are known to be zero or one in the input.
1883 if (BitWidth
!= 0) {
1884 APInt
Op0KnownZero(BitWidth
, 0), Op0KnownOne(BitWidth
, 0);
1885 APInt
Op1KnownZero(BitWidth
, 0), Op1KnownOne(BitWidth
, 0);
1887 if (SimplifyDemandedBits(I
.getOperandUse(0),
1888 DemandedBitsLHSMask(I
, BitWidth
, isSignBit
),
1889 Op0KnownZero
, Op0KnownOne
, 0))
1891 if (SimplifyDemandedBits(I
.getOperandUse(1),
1892 APInt::getAllOnesValue(BitWidth
),
1893 Op1KnownZero
, Op1KnownOne
, 0))
1896 // Given the known and unknown bits, compute a range that the LHS could be
1897 // in. Compute the Min, Max and RHS values based on the known bits. For the
1898 // EQ and NE we use unsigned values.
1899 APInt
Op0Min(BitWidth
, 0), Op0Max(BitWidth
, 0);
1900 APInt
Op1Min(BitWidth
, 0), Op1Max(BitWidth
, 0);
1902 ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero
, Op0KnownOne
,
1904 ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero
, Op1KnownOne
,
1907 ComputeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero
, Op0KnownOne
,
1909 ComputeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero
, Op1KnownOne
,
1913 // If Min and Max are known to be the same, then SimplifyDemandedBits
1914 // figured out that the LHS is a constant. Just constant fold this now so
1915 // that code below can assume that Min != Max.
1916 if (!isa
<Constant
>(Op0
) && Op0Min
== Op0Max
)
1917 return new ICmpInst(I
.getPredicate(),
1918 ConstantInt::get(Op0
->getType(), Op0Min
), Op1
);
1919 if (!isa
<Constant
>(Op1
) && Op1Min
== Op1Max
)
1920 return new ICmpInst(I
.getPredicate(), Op0
,
1921 ConstantInt::get(Op1
->getType(), Op1Min
));
1923 // Based on the range information we know about the LHS, see if we can
1924 // simplify this comparison. For example, (x&4) < 8 is always true.
1925 switch (I
.getPredicate()) {
1926 default: llvm_unreachable("Unknown icmp opcode!");
1927 case ICmpInst::ICMP_EQ
: {
1928 if (Op0Max
.ult(Op1Min
) || Op0Min
.ugt(Op1Max
))
1929 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getType()));
1931 // If all bits are known zero except for one, then we know at most one
1932 // bit is set. If the comparison is against zero, then this is a check
1933 // to see if *that* bit is set.
1934 APInt Op0KnownZeroInverted
= ~Op0KnownZero
;
1935 if (~Op1KnownZero
== 0 && Op0KnownZeroInverted
.isPowerOf2()) {
1936 // If the LHS is an AND with the same constant, look through it.
1938 ConstantInt
*LHSC
= 0;
1939 if (!match(Op0
, m_And(m_Value(LHS
), m_ConstantInt(LHSC
))) ||
1940 LHSC
->getValue() != Op0KnownZeroInverted
)
1943 // If the LHS is 1 << x, and we know the result is a power of 2 like 8,
1944 // then turn "((1 << x)&8) == 0" into "x != 3".
1946 if (match(LHS
, m_Shl(m_One(), m_Value(X
)))) {
1947 unsigned CmpVal
= Op0KnownZeroInverted
.countTrailingZeros();
1948 return new ICmpInst(ICmpInst::ICMP_NE
, X
,
1949 ConstantInt::get(X
->getType(), CmpVal
));
1952 // If the LHS is 8 >>u x, and we know the result is a power of 2 like 1,
1953 // then turn "((8 >>u x)&1) == 0" into "x != 3".
1955 if (Op0KnownZeroInverted
== 1 &&
1956 match(LHS
, m_LShr(m_Power2(CI
), m_Value(X
))))
1957 return new ICmpInst(ICmpInst::ICMP_NE
, X
,
1958 ConstantInt::get(X
->getType(),
1959 CI
->countTrailingZeros()));
1964 case ICmpInst::ICMP_NE
: {
1965 if (Op0Max
.ult(Op1Min
) || Op0Min
.ugt(Op1Max
))
1966 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getType()));
1968 // If all bits are known zero except for one, then we know at most one
1969 // bit is set. If the comparison is against zero, then this is a check
1970 // to see if *that* bit is set.
1971 APInt Op0KnownZeroInverted
= ~Op0KnownZero
;
1972 if (~Op1KnownZero
== 0 && Op0KnownZeroInverted
.isPowerOf2()) {
1973 // If the LHS is an AND with the same constant, look through it.
1975 ConstantInt
*LHSC
= 0;
1976 if (!match(Op0
, m_And(m_Value(LHS
), m_ConstantInt(LHSC
))) ||
1977 LHSC
->getValue() != Op0KnownZeroInverted
)
1980 // If the LHS is 1 << x, and we know the result is a power of 2 like 8,
1981 // then turn "((1 << x)&8) != 0" into "x == 3".
1983 if (match(LHS
, m_Shl(m_One(), m_Value(X
)))) {
1984 unsigned CmpVal
= Op0KnownZeroInverted
.countTrailingZeros();
1985 return new ICmpInst(ICmpInst::ICMP_EQ
, X
,
1986 ConstantInt::get(X
->getType(), CmpVal
));
1989 // If the LHS is 8 >>u x, and we know the result is a power of 2 like 1,
1990 // then turn "((8 >>u x)&1) != 0" into "x == 3".
1992 if (Op0KnownZeroInverted
== 1 &&
1993 match(LHS
, m_LShr(m_Power2(CI
), m_Value(X
))))
1994 return new ICmpInst(ICmpInst::ICMP_EQ
, X
,
1995 ConstantInt::get(X
->getType(),
1996 CI
->countTrailingZeros()));
2001 case ICmpInst::ICMP_ULT
:
2002 if (Op0Max
.ult(Op1Min
)) // A <u B -> true if max(A) < min(B)
2003 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getType()));
2004 if (Op0Min
.uge(Op1Max
)) // A <u B -> false if min(A) >= max(B)
2005 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getType()));
2006 if (Op1Min
== Op0Max
) // A <u B -> A != B if max(A) == min(B)
2007 return new ICmpInst(ICmpInst::ICMP_NE
, Op0
, Op1
);
2008 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Op1
)) {
2009 if (Op1Max
== Op0Min
+1) // A <u C -> A == C-1 if min(A)+1 == C
2010 return new ICmpInst(ICmpInst::ICMP_EQ
, Op0
,
2011 ConstantInt::get(CI
->getContext(), CI
->getValue()-1));
2013 // (x <u 2147483648) -> (x >s -1) -> true if sign bit clear
2014 if (CI
->isMinValue(true))
2015 return new ICmpInst(ICmpInst::ICMP_SGT
, Op0
,
2016 Constant::getAllOnesValue(Op0
->getType()));
2019 case ICmpInst::ICMP_UGT
:
2020 if (Op0Min
.ugt(Op1Max
)) // A >u B -> true if min(A) > max(B)
2021 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getType()));
2022 if (Op0Max
.ule(Op1Min
)) // A >u B -> false if max(A) <= max(B)
2023 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getType()));
2025 if (Op1Max
== Op0Min
) // A >u B -> A != B if min(A) == max(B)
2026 return new ICmpInst(ICmpInst::ICMP_NE
, Op0
, Op1
);
2027 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Op1
)) {
2028 if (Op1Min
== Op0Max
-1) // A >u C -> A == C+1 if max(a)-1 == C
2029 return new ICmpInst(ICmpInst::ICMP_EQ
, Op0
,
2030 ConstantInt::get(CI
->getContext(), CI
->getValue()+1));
2032 // (x >u 2147483647) -> (x <s 0) -> true if sign bit set
2033 if (CI
->isMaxValue(true))
2034 return new ICmpInst(ICmpInst::ICMP_SLT
, Op0
,
2035 Constant::getNullValue(Op0
->getType()));
2038 case ICmpInst::ICMP_SLT
:
2039 if (Op0Max
.slt(Op1Min
)) // A <s B -> true if max(A) < min(C)
2040 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getType()));
2041 if (Op0Min
.sge(Op1Max
)) // A <s B -> false if min(A) >= max(C)
2042 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getType()));
2043 if (Op1Min
== Op0Max
) // A <s B -> A != B if max(A) == min(B)
2044 return new ICmpInst(ICmpInst::ICMP_NE
, Op0
, Op1
);
2045 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Op1
)) {
2046 if (Op1Max
== Op0Min
+1) // A <s C -> A == C-1 if min(A)+1 == C
2047 return new ICmpInst(ICmpInst::ICMP_EQ
, Op0
,
2048 ConstantInt::get(CI
->getContext(), CI
->getValue()-1));
2051 case ICmpInst::ICMP_SGT
:
2052 if (Op0Min
.sgt(Op1Max
)) // A >s B -> true if min(A) > max(B)
2053 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getType()));
2054 if (Op0Max
.sle(Op1Min
)) // A >s B -> false if max(A) <= min(B)
2055 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getType()));
2057 if (Op1Max
== Op0Min
) // A >s B -> A != B if min(A) == max(B)
2058 return new ICmpInst(ICmpInst::ICMP_NE
, Op0
, Op1
);
2059 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Op1
)) {
2060 if (Op1Min
== Op0Max
-1) // A >s C -> A == C+1 if max(A)-1 == C
2061 return new ICmpInst(ICmpInst::ICMP_EQ
, Op0
,
2062 ConstantInt::get(CI
->getContext(), CI
->getValue()+1));
2065 case ICmpInst::ICMP_SGE
:
2066 assert(!isa
<ConstantInt
>(Op1
) && "ICMP_SGE with ConstantInt not folded!");
2067 if (Op0Min
.sge(Op1Max
)) // A >=s B -> true if min(A) >= max(B)
2068 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getType()));
2069 if (Op0Max
.slt(Op1Min
)) // A >=s B -> false if max(A) < min(B)
2070 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getType()));
2072 case ICmpInst::ICMP_SLE
:
2073 assert(!isa
<ConstantInt
>(Op1
) && "ICMP_SLE with ConstantInt not folded!");
2074 if (Op0Max
.sle(Op1Min
)) // A <=s B -> true if max(A) <= min(B)
2075 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getType()));
2076 if (Op0Min
.sgt(Op1Max
)) // A <=s B -> false if min(A) > max(B)
2077 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getType()));
2079 case ICmpInst::ICMP_UGE
:
2080 assert(!isa
<ConstantInt
>(Op1
) && "ICMP_UGE with ConstantInt not folded!");
2081 if (Op0Min
.uge(Op1Max
)) // A >=u B -> true if min(A) >= max(B)
2082 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getType()));
2083 if (Op0Max
.ult(Op1Min
)) // A >=u B -> false if max(A) < min(B)
2084 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getType()));
2086 case ICmpInst::ICMP_ULE
:
2087 assert(!isa
<ConstantInt
>(Op1
) && "ICMP_ULE with ConstantInt not folded!");
2088 if (Op0Max
.ule(Op1Min
)) // A <=u B -> true if max(A) <= min(B)
2089 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getType()));
2090 if (Op0Min
.ugt(Op1Max
)) // A <=u B -> false if min(A) > max(B)
2091 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getType()));
2095 // Turn a signed comparison into an unsigned one if both operands
2096 // are known to have the same sign.
2098 ((Op0KnownZero
.isNegative() && Op1KnownZero
.isNegative()) ||
2099 (Op0KnownOne
.isNegative() && Op1KnownOne
.isNegative())))
2100 return new ICmpInst(I
.getUnsignedPredicate(), Op0
, Op1
);
2103 // Test if the ICmpInst instruction is used exclusively by a select as
2104 // part of a minimum or maximum operation. If so, refrain from doing
2105 // any other folding. This helps out other analyses which understand
2106 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
2107 // and CodeGen. And in this case, at least one of the comparison
2108 // operands has at least one user besides the compare (the select),
2109 // which would often largely negate the benefit of folding anyway.
2111 if (SelectInst
*SI
= dyn_cast
<SelectInst
>(*I
.use_begin()))
2112 if ((SI
->getOperand(1) == Op0
&& SI
->getOperand(2) == Op1
) ||
2113 (SI
->getOperand(2) == Op0
&& SI
->getOperand(1) == Op1
))
2116 // See if we are doing a comparison between a constant and an instruction that
2117 // can be folded into the comparison.
2118 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Op1
)) {
2119 // Since the RHS is a ConstantInt (CI), if the left hand side is an
2120 // instruction, see if that instruction also has constants so that the
2121 // instruction can be folded into the icmp
2122 if (Instruction
*LHSI
= dyn_cast
<Instruction
>(Op0
))
2123 if (Instruction
*Res
= visitICmpInstWithInstAndIntCst(I
, LHSI
, CI
))
2127 // Handle icmp with constant (but not simple integer constant) RHS
2128 if (Constant
*RHSC
= dyn_cast
<Constant
>(Op1
)) {
2129 if (Instruction
*LHSI
= dyn_cast
<Instruction
>(Op0
))
2130 switch (LHSI
->getOpcode()) {
2131 case Instruction::GetElementPtr
:
2132 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
2133 if (RHSC
->isNullValue() &&
2134 cast
<GetElementPtrInst
>(LHSI
)->hasAllZeroIndices())
2135 return new ICmpInst(I
.getPredicate(), LHSI
->getOperand(0),
2136 Constant::getNullValue(LHSI
->getOperand(0)->getType()));
2138 case Instruction::PHI
:
2139 // Only fold icmp into the PHI if the phi and icmp are in the same
2140 // block. If in the same block, we're encouraging jump threading. If
2141 // not, we are just pessimizing the code by making an i1 phi.
2142 if (LHSI
->getParent() == I
.getParent())
2143 if (Instruction
*NV
= FoldOpIntoPhi(I
))
2146 case Instruction::Select
: {
2147 // If either operand of the select is a constant, we can fold the
2148 // comparison into the select arms, which will cause one to be
2149 // constant folded and the select turned into a bitwise or.
2150 Value
*Op1
= 0, *Op2
= 0;
2151 if (Constant
*C
= dyn_cast
<Constant
>(LHSI
->getOperand(1)))
2152 Op1
= ConstantExpr::getICmp(I
.getPredicate(), C
, RHSC
);
2153 if (Constant
*C
= dyn_cast
<Constant
>(LHSI
->getOperand(2)))
2154 Op2
= ConstantExpr::getICmp(I
.getPredicate(), C
, RHSC
);
2156 // We only want to perform this transformation if it will not lead to
2157 // additional code. This is true if either both sides of the select
2158 // fold to a constant (in which case the icmp is replaced with a select
2159 // which will usually simplify) or this is the only user of the
2160 // select (in which case we are trading a select+icmp for a simpler
2162 if ((Op1
&& Op2
) || (LHSI
->hasOneUse() && (Op1
|| Op2
))) {
2164 Op1
= Builder
->CreateICmp(I
.getPredicate(), LHSI
->getOperand(1),
2167 Op2
= Builder
->CreateICmp(I
.getPredicate(), LHSI
->getOperand(2),
2169 return SelectInst::Create(LHSI
->getOperand(0), Op1
, Op2
);
2173 case Instruction::IntToPtr
:
2174 // icmp pred inttoptr(X), null -> icmp pred X, 0
2175 if (RHSC
->isNullValue() && TD
&&
2176 TD
->getIntPtrType(RHSC
->getContext()) ==
2177 LHSI
->getOperand(0)->getType())
2178 return new ICmpInst(I
.getPredicate(), LHSI
->getOperand(0),
2179 Constant::getNullValue(LHSI
->getOperand(0)->getType()));
2182 case Instruction::Load
:
2183 // Try to optimize things like "A[i] > 4" to index computations.
2184 if (GetElementPtrInst
*GEP
=
2185 dyn_cast
<GetElementPtrInst
>(LHSI
->getOperand(0))) {
2186 if (GlobalVariable
*GV
= dyn_cast
<GlobalVariable
>(GEP
->getOperand(0)))
2187 if (GV
->isConstant() && GV
->hasDefinitiveInitializer() &&
2188 !cast
<LoadInst
>(LHSI
)->isVolatile())
2189 if (Instruction
*Res
= FoldCmpLoadFromIndexedGlobal(GEP
, GV
, I
))
2196 // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
2197 if (GEPOperator
*GEP
= dyn_cast
<GEPOperator
>(Op0
))
2198 if (Instruction
*NI
= FoldGEPICmp(GEP
, Op1
, I
.getPredicate(), I
))
2200 if (GEPOperator
*GEP
= dyn_cast
<GEPOperator
>(Op1
))
2201 if (Instruction
*NI
= FoldGEPICmp(GEP
, Op0
,
2202 ICmpInst::getSwappedPredicate(I
.getPredicate()), I
))
2205 // Test to see if the operands of the icmp are casted versions of other
2206 // values. If the ptr->ptr cast can be stripped off both arguments, we do so
2208 if (BitCastInst
*CI
= dyn_cast
<BitCastInst
>(Op0
)) {
2209 if (Op0
->getType()->isPointerTy() &&
2210 (isa
<Constant
>(Op1
) || isa
<BitCastInst
>(Op1
))) {
2211 // We keep moving the cast from the left operand over to the right
2212 // operand, where it can often be eliminated completely.
2213 Op0
= CI
->getOperand(0);
2215 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
2216 // so eliminate it as well.
2217 if (BitCastInst
*CI2
= dyn_cast
<BitCastInst
>(Op1
))
2218 Op1
= CI2
->getOperand(0);
2220 // If Op1 is a constant, we can fold the cast into the constant.
2221 if (Op0
->getType() != Op1
->getType()) {
2222 if (Constant
*Op1C
= dyn_cast
<Constant
>(Op1
)) {
2223 Op1
= ConstantExpr::getBitCast(Op1C
, Op0
->getType());
2225 // Otherwise, cast the RHS right before the icmp
2226 Op1
= Builder
->CreateBitCast(Op1
, Op0
->getType());
2229 return new ICmpInst(I
.getPredicate(), Op0
, Op1
);
2233 if (isa
<CastInst
>(Op0
)) {
2234 // Handle the special case of: icmp (cast bool to X), <cst>
2235 // This comes up when you have code like
2238 // For generality, we handle any zero-extension of any operand comparison
2239 // with a constant or another cast from the same type.
2240 if (isa
<Constant
>(Op1
) || isa
<CastInst
>(Op1
))
2241 if (Instruction
*R
= visitICmpInstWithCastAndCast(I
))
  // Special logic for binary operators.
  BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
  BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);

  CmpInst::Predicate Pred = I.getPredicate();
  bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
  if (BO0 && isa<OverflowingBinaryOperator>(BO0))
    NoOp0WrapProblem = ICmpInst::isEquality(Pred) ||
      (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
      (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
  if (BO1 && isa<OverflowingBinaryOperator>(BO1))
    NoOp1WrapProblem = ICmpInst::isEquality(Pred) ||
      (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
      (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());
  // Analyze the case when either Op0 or Op1 is an add instruction.
  // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
  Value *A = 0, *B = 0, *C = 0, *D = 0;
  if (BO0 && BO0->getOpcode() == Instruction::Add)
    A = BO0->getOperand(0), B = BO0->getOperand(1);
  if (BO1 && BO1->getOpcode() == Instruction::Add)
    C = BO1->getOperand(0), D = BO1->getOperand(1);

  // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
  if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
    return new ICmpInst(Pred, A == Op1 ? B : A,
                        Constant::getNullValue(Op1->getType()));

  // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
  if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
    return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
                        C == Op0 ? D : C);

  // icmp (X+Y), (X+Z) -> icmp Y, Z for equalities or if there is no overflow.
  if (A && C && (A == C || A == D || B == C || B == D) &&
      NoOp0WrapProblem && NoOp1WrapProblem &&
      // Try not to increase register pressure.
      BO0->hasOneUse() && BO1->hasOneUse()) {
    // Determine Y and Z in the form icmp (X+Y), (X+Z).
    Value *Y = (A == C || A == D) ? B : A;
    Value *Z = (C == A || C == B) ? D : C;
    return new ICmpInst(Pred, Y, Z);
  }

  // Analyze the case when either Op0 or Op1 is a sub instruction.
  // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
  A = 0; B = 0; C = 0; D = 0;
  if (BO0 && BO0->getOpcode() == Instruction::Sub)
    A = BO0->getOperand(0), B = BO0->getOperand(1);
  if (BO1 && BO1->getOpcode() == Instruction::Sub)
    C = BO1->getOperand(0), D = BO1->getOperand(1);

  // icmp (X-Y), X -> icmp 0, Y for equalities or if there is no overflow.
  if (A == Op1 && NoOp0WrapProblem)
    return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);

  // icmp X, (X-Y) -> icmp Y, 0 for equalities or if there is no overflow.
  if (C == Op0 && NoOp1WrapProblem)
    return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));

  // icmp (Y-X), (Z-X) -> icmp Y, Z for equalities or if there is no overflow.
  if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem &&
      // Try not to increase register pressure.
      BO0->hasOneUse() && BO1->hasOneUse())
    return new ICmpInst(Pred, A, C);

  // icmp (X-Y), (X-Z) -> icmp Z, Y for equalities or if there is no overflow.
  if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem &&
      // Try not to increase register pressure.
      BO0->hasOneUse() && BO1->hasOneUse())
    return new ICmpInst(Pred, D, B);
  BinaryOperator *SRem = NULL;
  // icmp (srem X, Y), Y
  if (BO0 && BO0->getOpcode() == Instruction::SRem &&
      Op1 == BO0->getOperand(1))
    SRem = BO0;
  // icmp Y, (srem X, Y)
  else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
           Op0 == BO1->getOperand(1))
    SRem = BO1;
  if (SRem) {
    // We don't check hasOneUse to avoid increasing register pressure because
    // the value we use is the same value this instruction was already using.
    switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
    default: break;
    case ICmpInst::ICMP_EQ:
      return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
    case ICmpInst::ICMP_NE:
      return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
    case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE:
      return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
                          Constant::getAllOnesValue(SRem->getType()));
    case ICmpInst::ICMP_SLT:
    case ICmpInst::ICMP_SLE:
      return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
                          Constant::getNullValue(SRem->getType()));
    }
  }
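
  // Illustrative fold from the srem handling above (names made up):
  //   icmp slt (srem i32 %x, %y), %y   -->   icmp sgt i32 %y, -1
  // because |srem %x, %y| is always less than |%y|, the comparison depends
  // only on the sign of %y.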
2346 if (BO0
&& BO1
&& BO0
->getOpcode() == BO1
->getOpcode() &&
2347 BO0
->hasOneUse() && BO1
->hasOneUse() &&
2348 BO0
->getOperand(1) == BO1
->getOperand(1)) {
2349 switch (BO0
->getOpcode()) {
2351 case Instruction::Add
:
2352 case Instruction::Sub
:
2353 case Instruction::Xor
:
2354 if (I
.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
2355 return new ICmpInst(I
.getPredicate(), BO0
->getOperand(0),
2356 BO1
->getOperand(0));
2357 // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
2358 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(BO0
->getOperand(1))) {
2359 if (CI
->getValue().isSignBit()) {
2360 ICmpInst::Predicate Pred
= I
.isSigned()
2361 ? I
.getUnsignedPredicate()
2362 : I
.getSignedPredicate();
2363 return new ICmpInst(Pred
, BO0
->getOperand(0),
2364 BO1
->getOperand(0));
2367 if (CI
->getValue().isMaxSignedValue()) {
2368 ICmpInst::Predicate Pred
= I
.isSigned()
2369 ? I
.getUnsignedPredicate()
2370 : I
.getSignedPredicate();
2371 Pred
= I
.getSwappedPredicate(Pred
);
2372 return new ICmpInst(Pred
, BO0
->getOperand(0),
2373 BO1
->getOperand(0));
2377 case Instruction::Mul
:
2378 if (!I
.isEquality())
2381 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(BO0
->getOperand(1))) {
2382 // a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask
2383 // Mask = -1 >> count-trailing-zeros(Cst).
2384 if (!CI
->isZero() && !CI
->isOne()) {
2385 const APInt
&AP
= CI
->getValue();
2386 ConstantInt
*Mask
= ConstantInt::get(I
.getContext(),
2387 APInt::getLowBitsSet(AP
.getBitWidth(),
2389 AP
.countTrailingZeros()));
2390 Value
*And1
= Builder
->CreateAnd(BO0
->getOperand(0), Mask
);
2391 Value
*And2
= Builder
->CreateAnd(BO1
->getOperand(0), Mask
);
2392 return new ICmpInst(I
.getPredicate(), And1
, And2
);
2396 case Instruction::UDiv
:
2397 case Instruction::LShr
:
2401 case Instruction::SDiv
:
2402 case Instruction::AShr
:
2403 if (!BO0
->isExact() && !BO1
->isExact())
2405 return new ICmpInst(I
.getPredicate(), BO0
->getOperand(0),
2406 BO1
->getOperand(0));
2407 case Instruction::Shl
: {
2408 bool NUW
= BO0
->hasNoUnsignedWrap() && BO1
->hasNoUnsignedWrap();
2409 bool NSW
= BO0
->hasNoSignedWrap() && BO1
->hasNoSignedWrap();
2412 if (!NSW
&& I
.isSigned())
2414 return new ICmpInst(I
.getPredicate(), BO0
->getOperand(0),
2415 BO1
->getOperand(0));
  // ~x < ~y --> y < x
  // ~x < cst --> ~cst < x
  if (match(Op0, m_Not(m_Value(A)))) {
    if (match(Op1, m_Not(m_Value(B))))
      return new ICmpInst(I.getPredicate(), B, A);
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1))
      return new ICmpInst(I.getPredicate(), ConstantExpr::getNot(RHSC), A);
  }

  // (a+b) <u a  --> llvm.uadd.with.overflow.
  // (a+b) <u b  --> llvm.uadd.with.overflow.
  if (I.getPredicate() == ICmpInst::ICMP_ULT &&
      match(Op0, m_Add(m_Value(A), m_Value(B))) &&
      (Op1 == A || Op1 == B))
    if (Instruction *R = ProcessUAddIdiom(I, Op0, *this))
      return R;

  // a >u (a+b)  --> llvm.uadd.with.overflow.
  // b >u (a+b)  --> llvm.uadd.with.overflow.
  if (I.getPredicate() == ICmpInst::ICMP_UGT &&
      match(Op1, m_Add(m_Value(A), m_Value(B))) &&
      (Op0 == A || Op0 == B))
    if (Instruction *R = ProcessUAddIdiom(I, Op1, *this))
      return R;
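
  // E.g. (constants illustrative): '~%x <s ~%y' becomes '%y <s %x', and
  // 'icmp slt (xor i32 %x, -1), 5' becomes 'icmp slt i32 -6, %x', since
  // ~5 is -6.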
2448 if (I
.isEquality()) {
2449 Value
*A
, *B
, *C
, *D
;
2451 if (match(Op0
, m_Xor(m_Value(A
), m_Value(B
)))) {
2452 if (A
== Op1
|| B
== Op1
) { // (A^B) == A -> B == 0
2453 Value
*OtherVal
= A
== Op1
? B
: A
;
2454 return new ICmpInst(I
.getPredicate(), OtherVal
,
2455 Constant::getNullValue(A
->getType()));
2458 if (match(Op1
, m_Xor(m_Value(C
), m_Value(D
)))) {
2459 // A^c1 == C^c2 --> A == C^(c1^c2)
2460 ConstantInt
*C1
, *C2
;
2461 if (match(B
, m_ConstantInt(C1
)) &&
2462 match(D
, m_ConstantInt(C2
)) && Op1
->hasOneUse()) {
2463 Constant
*NC
= ConstantInt::get(I
.getContext(),
2464 C1
->getValue() ^ C2
->getValue());
2465 Value
*Xor
= Builder
->CreateXor(C
, NC
, "tmp");
2466 return new ICmpInst(I
.getPredicate(), A
, Xor
);
2469 // A^B == A^D -> B == D
2470 if (A
== C
) return new ICmpInst(I
.getPredicate(), B
, D
);
2471 if (A
== D
) return new ICmpInst(I
.getPredicate(), B
, C
);
2472 if (B
== C
) return new ICmpInst(I
.getPredicate(), A
, D
);
2473 if (B
== D
) return new ICmpInst(I
.getPredicate(), A
, C
);
2477 if (match(Op1
, m_Xor(m_Value(A
), m_Value(B
))) &&
2478 (A
== Op0
|| B
== Op0
)) {
2479 // A == (A^B) -> B == 0
2480 Value
*OtherVal
= A
== Op0
? B
: A
;
2481 return new ICmpInst(I
.getPredicate(), OtherVal
,
2482 Constant::getNullValue(A
->getType()));
2485 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
2486 if (Op0
->hasOneUse() && Op1
->hasOneUse() &&
2487 match(Op0
, m_And(m_Value(A
), m_Value(B
))) &&
2488 match(Op1
, m_And(m_Value(C
), m_Value(D
)))) {
2489 Value
*X
= 0, *Y
= 0, *Z
= 0;
2492 X
= B
; Y
= D
; Z
= A
;
2493 } else if (A
== D
) {
2494 X
= B
; Y
= C
; Z
= A
;
2495 } else if (B
== C
) {
2496 X
= A
; Y
= D
; Z
= B
;
2497 } else if (B
== D
) {
2498 X
= A
; Y
= C
; Z
= B
;
2501 if (X
) { // Build (X^Y) & Z
2502 Op1
= Builder
->CreateXor(X
, Y
, "tmp");
2503 Op1
= Builder
->CreateAnd(Op1
, Z
, "tmp");
2504 I
.setOperand(0, Op1
);
2505 I
.setOperand(1, Constant::getNullValue(Op1
->getType()));
  Value *X; ConstantInt *Cst;
  // icmp X+Cst, X
  if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X)
    return FoldICmpAddOpCst(I, X, Cst, I.getPredicate(), Op0);

  // icmp X, X+Cst
  if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
    return FoldICmpAddOpCst(I, X, Cst, I.getSwappedPredicate(), Op1);

  return Changed ? &I : 0;
}
2529 /// FoldFCmp_IntToFP_Cst - Fold fcmp ([us]itofp x, cst) if possible.
2531 Instruction
*InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst
&I
,
2534 if (!isa
<ConstantFP
>(RHSC
)) return 0;
2535 const APFloat
&RHS
= cast
<ConstantFP
>(RHSC
)->getValueAPF();
2537 // Get the width of the mantissa. We don't want to hack on conversions that
2538 // might lose information from the integer, e.g. "i64 -> float"
2539 int MantissaWidth
= LHSI
->getType()->getFPMantissaWidth();
2540 if (MantissaWidth
== -1) return 0; // Unknown.
2542 // Check to see that the input is converted from an integer type that is small
2543 // enough that preserves all bits. TODO: check here for "known" sign bits.
2544 // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
2545 unsigned InputSize
= LHSI
->getOperand(0)->getType()->getScalarSizeInBits();
2547 // If this is a uitofp instruction, we need an extra bit to hold the sign.
2548 bool LHSUnsigned
= isa
<UIToFPInst
>(LHSI
);
2552 // If the conversion would lose info, don't hack on this.
2553 if ((int)InputSize
> MantissaWidth
)
2556 // Otherwise, we can potentially simplify the comparison. We know that it
2557 // will always come through as an integer value and we know the constant is
2558 // not a NAN (it would have been previously simplified).
2559 assert(!RHS
.isNaN() && "NaN comparison not already folded!");
2561 ICmpInst::Predicate Pred
;
2562 switch (I
.getPredicate()) {
2563 default: llvm_unreachable("Unexpected predicate!");
2564 case FCmpInst::FCMP_UEQ
:
2565 case FCmpInst::FCMP_OEQ
:
2566 Pred
= ICmpInst::ICMP_EQ
;
2568 case FCmpInst::FCMP_UGT
:
2569 case FCmpInst::FCMP_OGT
:
2570 Pred
= LHSUnsigned
? ICmpInst::ICMP_UGT
: ICmpInst::ICMP_SGT
;
2572 case FCmpInst::FCMP_UGE
:
2573 case FCmpInst::FCMP_OGE
:
2574 Pred
= LHSUnsigned
? ICmpInst::ICMP_UGE
: ICmpInst::ICMP_SGE
;
2576 case FCmpInst::FCMP_ULT
:
2577 case FCmpInst::FCMP_OLT
:
2578 Pred
= LHSUnsigned
? ICmpInst::ICMP_ULT
: ICmpInst::ICMP_SLT
;
2580 case FCmpInst::FCMP_ULE
:
2581 case FCmpInst::FCMP_OLE
:
2582 Pred
= LHSUnsigned
? ICmpInst::ICMP_ULE
: ICmpInst::ICMP_SLE
;
2584 case FCmpInst::FCMP_UNE
:
2585 case FCmpInst::FCMP_ONE
:
2586 Pred
= ICmpInst::ICMP_NE
;
2588 case FCmpInst::FCMP_ORD
:
2589 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getContext()));
2590 case FCmpInst::FCMP_UNO
:
2591 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getContext()));
2594 const IntegerType
*IntTy
= cast
<IntegerType
>(LHSI
->getOperand(0)->getType());
2596 // Now we know that the APFloat is a normal number, zero or inf.
2598 // See if the FP constant is too large for the integer. For example,
2599 // comparing an i8 to 300.0.
2600 unsigned IntWidth
= IntTy
->getScalarSizeInBits();
2603 // If the RHS value is > SignedMax, fold the comparison. This handles +INF
2604 // and large values.
2605 APFloat
SMax(RHS
.getSemantics(), APFloat::fcZero
, false);
2606 SMax
.convertFromAPInt(APInt::getSignedMaxValue(IntWidth
), true,
2607 APFloat::rmNearestTiesToEven
);
2608 if (SMax
.compare(RHS
) == APFloat::cmpLessThan
) { // smax < 13123.0
2609 if (Pred
== ICmpInst::ICMP_NE
|| Pred
== ICmpInst::ICMP_SLT
||
2610 Pred
== ICmpInst::ICMP_SLE
)
2611 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getContext()));
2612 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getContext()));
2615 // If the RHS value is > UnsignedMax, fold the comparison. This handles
2616 // +INF and large values.
2617 APFloat
UMax(RHS
.getSemantics(), APFloat::fcZero
, false);
2618 UMax
.convertFromAPInt(APInt::getMaxValue(IntWidth
), false,
2619 APFloat::rmNearestTiesToEven
);
2620 if (UMax
.compare(RHS
) == APFloat::cmpLessThan
) { // umax < 13123.0
2621 if (Pred
== ICmpInst::ICMP_NE
|| Pred
== ICmpInst::ICMP_ULT
||
2622 Pred
== ICmpInst::ICMP_ULE
)
2623 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getContext()));
2624 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getContext()));
2629 // See if the RHS value is < SignedMin.
2630 APFloat
SMin(RHS
.getSemantics(), APFloat::fcZero
, false);
2631 SMin
.convertFromAPInt(APInt::getSignedMinValue(IntWidth
), true,
2632 APFloat::rmNearestTiesToEven
);
2633 if (SMin
.compare(RHS
) == APFloat::cmpGreaterThan
) { // smin > 12312.0
2634 if (Pred
== ICmpInst::ICMP_NE
|| Pred
== ICmpInst::ICMP_SGT
||
2635 Pred
== ICmpInst::ICMP_SGE
)
2636 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getContext()));
2637 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getContext()));
2641 // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
2642 // [0, UMAX], but it may still be fractional. See if it is fractional by
2643 // casting the FP value to the integer value and back, checking for equality.
2644 // Don't do this for zero, because -0.0 is not fractional.
2645 Constant
*RHSInt
= LHSUnsigned
2646 ? ConstantExpr::getFPToUI(RHSC
, IntTy
)
2647 : ConstantExpr::getFPToSI(RHSC
, IntTy
);
2648 if (!RHS
.isZero()) {
2649 bool Equal
= LHSUnsigned
2650 ? ConstantExpr::getUIToFP(RHSInt
, RHSC
->getType()) == RHSC
2651 : ConstantExpr::getSIToFP(RHSInt
, RHSC
->getType()) == RHSC
;
2653 // If we had a comparison against a fractional value, we have to adjust
2654 // the compare predicate and sometimes the value. RHSC is rounded towards
2655 // zero at this point.
2657 default: llvm_unreachable("Unexpected integer comparison!");
2658 case ICmpInst::ICMP_NE
: // (float)int != 4.4 --> true
2659 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getContext()));
2660 case ICmpInst::ICMP_EQ
: // (float)int == 4.4 --> false
2661 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getContext()));
2662 case ICmpInst::ICMP_ULE
:
2663 // (float)int <= 4.4 --> int <= 4
2664 // (float)int <= -4.4 --> false
2665 if (RHS
.isNegative())
2666 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getContext()));
2668 case ICmpInst::ICMP_SLE
:
2669 // (float)int <= 4.4 --> int <= 4
2670 // (float)int <= -4.4 --> int < -4
2671 if (RHS
.isNegative())
2672 Pred
= ICmpInst::ICMP_SLT
;
2674 case ICmpInst::ICMP_ULT
:
2675 // (float)int < -4.4 --> false
2676 // (float)int < 4.4 --> int <= 4
2677 if (RHS
.isNegative())
2678 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(I
.getContext()));
2679 Pred
= ICmpInst::ICMP_ULE
;
2681 case ICmpInst::ICMP_SLT
:
2682 // (float)int < -4.4 --> int < -4
2683 // (float)int < 4.4 --> int <= 4
2684 if (!RHS
.isNegative())
2685 Pred
= ICmpInst::ICMP_SLE
;
2687 case ICmpInst::ICMP_UGT
:
2688 // (float)int > 4.4 --> int > 4
2689 // (float)int > -4.4 --> true
2690 if (RHS
.isNegative())
2691 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getContext()));
2693 case ICmpInst::ICMP_SGT
:
2694 // (float)int > 4.4 --> int > 4
2695 // (float)int > -4.4 --> int >= -4
2696 if (RHS
.isNegative())
2697 Pred
= ICmpInst::ICMP_SGE
;
2699 case ICmpInst::ICMP_UGE
:
2700 // (float)int >= -4.4 --> true
2701 // (float)int >= 4.4 --> int > 4
2702 if (!RHS
.isNegative())
2703 return ReplaceInstUsesWith(I
, ConstantInt::getTrue(I
.getContext()));
2704 Pred
= ICmpInst::ICMP_UGT
;
2706 case ICmpInst::ICMP_SGE
:
2707 // (float)int >= -4.4 --> int >= -4
2708 // (float)int >= 4.4 --> int > 4
2709 if (!RHS
.isNegative())
2710 Pred
= ICmpInst::ICMP_SGT
;
2716 // Lower this FP comparison into an appropriate integer version of the
2718 return new ICmpInst(Pred
, LHSI
->getOperand(0), RHSInt
);
Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
  bool Changed = false;

  /// Orders the operands of the compare so that they are listed from most
  /// complex to least complex.  This puts constants before unary operators,
  /// before binary operators.
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
    I.swapOperands();
    Changed = true;
  }

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, TD))
    return ReplaceInstUsesWith(I, V);

  // Simplify 'fcmp pred X, X'
  if (Op0 == Op1) {
    switch (I.getPredicate()) {
    default: llvm_unreachable("Unknown predicate!");
    case FCmpInst::FCMP_UNO:    // True if unordered: isnan(X) | isnan(Y)
    case FCmpInst::FCMP_ULT:    // True if unordered or less than
    case FCmpInst::FCMP_UGT:    // True if unordered or greater than
    case FCmpInst::FCMP_UNE:    // True if unordered or not equal
      // Canonicalize these to be 'fcmp uno %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_UNO);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;

    case FCmpInst::FCMP_ORD:    // True if ordered (no nans)
    case FCmpInst::FCMP_OEQ:    // True if ordered and equal
    case FCmpInst::FCMP_OGE:    // True if ordered and greater than or equal
    case FCmpInst::FCMP_OLE:    // True if ordered and less than or equal
      // Canonicalize these to be 'fcmp ord %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_ORD);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;
    }
  }
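
  // For example, 'fcmp une double %x, %x' is true exactly when %x is a NaN,
  // so it is canonicalized above to 'fcmp uno double %x, 0.0'; the ordered
  // variants become 'fcmp ord double %x, 0.0' in the same way.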
2761 // Handle fcmp with constant RHS
2762 if (Constant
*RHSC
= dyn_cast
<Constant
>(Op1
)) {
2763 if (Instruction
*LHSI
= dyn_cast
<Instruction
>(Op0
))
2764 switch (LHSI
->getOpcode()) {
2765 case Instruction::FPExt
: {
2766 // fcmp (fpext x), C -> fcmp x, (fptrunc C) if fptrunc is lossless
2767 FPExtInst
*LHSExt
= cast
<FPExtInst
>(LHSI
);
2768 ConstantFP
*RHSF
= dyn_cast
<ConstantFP
>(RHSC
);
2772 // We can't convert a PPC double double.
2773 if (RHSF
->getType()->isPPC_FP128Ty())
2776 const fltSemantics
*Sem
;
2777 // FIXME: This shouldn't be here.
2778 if (LHSExt
->getSrcTy()->isFloatTy())
2779 Sem
= &APFloat::IEEEsingle
;
2780 else if (LHSExt
->getSrcTy()->isDoubleTy())
2781 Sem
= &APFloat::IEEEdouble
;
2782 else if (LHSExt
->getSrcTy()->isFP128Ty())
2783 Sem
= &APFloat::IEEEquad
;
2784 else if (LHSExt
->getSrcTy()->isX86_FP80Ty())
2785 Sem
= &APFloat::x87DoubleExtended
;
2790 APFloat F
= RHSF
->getValueAPF();
2791 F
.convert(*Sem
, APFloat::rmNearestTiesToEven
, &Lossy
);
2793 // Avoid lossy conversions and denormals.
2795 F
.compare(APFloat::getSmallestNormalized(*Sem
)) !=
2796 APFloat::cmpLessThan
)
2797 return new FCmpInst(I
.getPredicate(), LHSExt
->getOperand(0),
2798 ConstantFP::get(RHSC
->getContext(), F
));
2801 case Instruction::PHI
:
2802 // Only fold fcmp into the PHI if the phi and fcmp are in the same
2803 // block. If in the same block, we're encouraging jump threading. If
2804 // not, we are just pessimizing the code by making an i1 phi.
2805 if (LHSI
->getParent() == I
.getParent())
2806 if (Instruction
*NV
= FoldOpIntoPhi(I
))
2809 case Instruction::SIToFP
:
2810 case Instruction::UIToFP
:
2811 if (Instruction
*NV
= FoldFCmp_IntToFP_Cst(I
, LHSI
, RHSC
))
2814 case Instruction::Select
: {
2815 // If either operand of the select is a constant, we can fold the
2816 // comparison into the select arms, which will cause one to be
2817 // constant folded and the select turned into a bitwise or.
2818 Value
*Op1
= 0, *Op2
= 0;
2819 if (LHSI
->hasOneUse()) {
2820 if (Constant
*C
= dyn_cast
<Constant
>(LHSI
->getOperand(1))) {
2821 // Fold the known value into the constant operand.
2822 Op1
= ConstantExpr::getCompare(I
.getPredicate(), C
, RHSC
);
2823 // Insert a new FCmp of the other select operand.
2824 Op2
= Builder
->CreateFCmp(I
.getPredicate(),
2825 LHSI
->getOperand(2), RHSC
, I
.getName());
2826 } else if (Constant
*C
= dyn_cast
<Constant
>(LHSI
->getOperand(2))) {
2827 // Fold the known value into the constant operand.
2828 Op2
= ConstantExpr::getCompare(I
.getPredicate(), C
, RHSC
);
2829 // Insert a new FCmp of the other select operand.
2830 Op1
= Builder
->CreateFCmp(I
.getPredicate(), LHSI
->getOperand(1),
2836 return SelectInst::Create(LHSI
->getOperand(0), Op1
, Op2
);
2839 case Instruction::FSub
: {
2840 // fcmp pred (fneg x), C -> fcmp swap(pred) x, -C
2842 if (match(LHSI
, m_FNeg(m_Value(Op
))))
2843 return new FCmpInst(I
.getSwappedPredicate(), Op
,
2844 ConstantExpr::getFNeg(RHSC
));
2847 case Instruction::Load
:
2848 if (GetElementPtrInst
*GEP
=
2849 dyn_cast
<GetElementPtrInst
>(LHSI
->getOperand(0))) {
2850 if (GlobalVariable
*GV
= dyn_cast
<GlobalVariable
>(GEP
->getOperand(0)))
2851 if (GV
->isConstant() && GV
->hasDefinitiveInitializer() &&
2852 !cast
<LoadInst
>(LHSI
)->isVolatile())
2853 if (Instruction
*Res
= FoldCmpLoadFromIndexedGlobal(GEP
, GV
, I
))
  // fcmp pred (fneg x), (fneg y) -> fcmp swap(pred) x, y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return new FCmpInst(I.getSwappedPredicate(), X, Y);

  // fcmp (fpext x), (fpext y) -> fcmp x, y
  if (FPExtInst *LHSExt = dyn_cast<FPExtInst>(Op0))
    if (FPExtInst *RHSExt = dyn_cast<FPExtInst>(Op1))
      if (LHSExt->getSrcTy() == RHSExt->getSrcTy())
        return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
                            RHSExt->getOperand(0));

  return Changed ? &I : 0;
}