1 //===- InstCombineMulDivRem.cpp -------------------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
12 //===----------------------------------------------------------------------===//
14 #include "InstCombineInternal.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/Analysis/InstructionSimplify.h"
18 #include "llvm/Analysis/ValueTracking.h"
19 #include "llvm/IR/BasicBlock.h"
20 #include "llvm/IR/Constant.h"
21 #include "llvm/IR/Constants.h"
22 #include "llvm/IR/InstrTypes.h"
23 #include "llvm/IR/Instruction.h"
24 #include "llvm/IR/Instructions.h"
25 #include "llvm/IR/IntrinsicInst.h"
26 #include "llvm/IR/Intrinsics.h"
27 #include "llvm/IR/Operator.h"
28 #include "llvm/IR/PatternMatch.h"
29 #include "llvm/IR/Type.h"
30 #include "llvm/IR/Value.h"
31 #include "llvm/Support/Casting.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Transforms/InstCombine/InstCombiner.h"
34 #include "llvm/Transforms/Utils/BuildLibCalls.h"
37 #define DEBUG_TYPE "instcombine"
38 #include "llvm/Transforms/Utils/InstructionWorklist.h"
41 using namespace PatternMatch
;
43 /// The specific integer value is used in a context where it is known to be
44 /// non-zero. If this allows us to simplify the computation, do so and return
45 /// the new operand, otherwise return null.
46 static Value
*simplifyValueKnownNonZero(Value
*V
, InstCombinerImpl
&IC
,
48 // If V has multiple uses, then we would have to do more analysis to determine
49 // if this is safe. For example, the use could be in dynamically unreached
51 if (!V
->hasOneUse()) return nullptr;
53 bool MadeChange
= false;
55 // ((1 << A) >>u B) --> (1 << (A-B))
56 // Because V cannot be zero, we know that B is less than A.
57 Value
*A
= nullptr, *B
= nullptr, *One
= nullptr;
58 if (match(V
, m_LShr(m_OneUse(m_Shl(m_Value(One
), m_Value(A
))), m_Value(B
))) &&
59 match(One
, m_One())) {
60 A
= IC
.Builder
.CreateSub(A
, B
);
61 return IC
.Builder
.CreateShl(One
, A
);
64 // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
65 // inexact. Similarly for <<.
66 BinaryOperator
*I
= dyn_cast
<BinaryOperator
>(V
);
67 if (I
&& I
->isLogicalShift() &&
68 IC
.isKnownToBeAPowerOfTwo(I
->getOperand(0), false, 0, &CxtI
)) {
69 // We know that this is an exact/nuw shift and that the input is a
70 // non-zero context as well.
71 if (Value
*V2
= simplifyValueKnownNonZero(I
->getOperand(0), IC
, CxtI
)) {
72 IC
.replaceOperand(*I
, 0, V2
);
76 if (I
->getOpcode() == Instruction::LShr
&& !I
->isExact()) {
81 if (I
->getOpcode() == Instruction::Shl
&& !I
->hasNoUnsignedWrap()) {
82 I
->setHasNoUnsignedWrap();
87 // TODO: Lots more we could do here:
88 // If V is a phi node, we can call this on each of its operands.
89 // "select cond, X, 0" can simplify to "X".
91 return MadeChange
? V
: nullptr;
94 // TODO: This is a specific form of a much more general pattern.
95 // We could detect a select with any binop identity constant, or we
96 // could use SimplifyBinOp to see if either arm of the select reduces.
97 // But that needs to be done carefully and/or while removing potential
98 // reverse canonicalizations as in InstCombiner::foldSelectIntoOp().
99 static Value
*foldMulSelectToNegate(BinaryOperator
&I
,
100 InstCombiner::BuilderTy
&Builder
) {
101 Value
*Cond
, *OtherOp
;
103 // mul (select Cond, 1, -1), OtherOp --> select Cond, OtherOp, -OtherOp
104 // mul OtherOp, (select Cond, 1, -1) --> select Cond, OtherOp, -OtherOp
105 if (match(&I
, m_c_Mul(m_OneUse(m_Select(m_Value(Cond
), m_One(), m_AllOnes())),
106 m_Value(OtherOp
)))) {
107 bool HasAnyNoWrap
= I
.hasNoSignedWrap() || I
.hasNoUnsignedWrap();
108 Value
*Neg
= Builder
.CreateNeg(OtherOp
, "", false, HasAnyNoWrap
);
109 return Builder
.CreateSelect(Cond
, OtherOp
, Neg
);
111 // mul (select Cond, -1, 1), OtherOp --> select Cond, -OtherOp, OtherOp
112 // mul OtherOp, (select Cond, -1, 1) --> select Cond, -OtherOp, OtherOp
113 if (match(&I
, m_c_Mul(m_OneUse(m_Select(m_Value(Cond
), m_AllOnes(), m_One())),
114 m_Value(OtherOp
)))) {
115 bool HasAnyNoWrap
= I
.hasNoSignedWrap() || I
.hasNoUnsignedWrap();
116 Value
*Neg
= Builder
.CreateNeg(OtherOp
, "", false, HasAnyNoWrap
);
117 return Builder
.CreateSelect(Cond
, Neg
, OtherOp
);
120 // fmul (select Cond, 1.0, -1.0), OtherOp --> select Cond, OtherOp, -OtherOp
121 // fmul OtherOp, (select Cond, 1.0, -1.0) --> select Cond, OtherOp, -OtherOp
122 if (match(&I
, m_c_FMul(m_OneUse(m_Select(m_Value(Cond
), m_SpecificFP(1.0),
123 m_SpecificFP(-1.0))),
124 m_Value(OtherOp
)))) {
125 IRBuilder
<>::FastMathFlagGuard
FMFGuard(Builder
);
126 Builder
.setFastMathFlags(I
.getFastMathFlags());
127 return Builder
.CreateSelect(Cond
, OtherOp
, Builder
.CreateFNeg(OtherOp
));
130 // fmul (select Cond, -1.0, 1.0), OtherOp --> select Cond, -OtherOp, OtherOp
131 // fmul OtherOp, (select Cond, -1.0, 1.0) --> select Cond, -OtherOp, OtherOp
132 if (match(&I
, m_c_FMul(m_OneUse(m_Select(m_Value(Cond
), m_SpecificFP(-1.0),
134 m_Value(OtherOp
)))) {
135 IRBuilder
<>::FastMathFlagGuard
FMFGuard(Builder
);
136 Builder
.setFastMathFlags(I
.getFastMathFlags());
137 return Builder
.CreateSelect(Cond
, Builder
.CreateFNeg(OtherOp
), OtherOp
);
143 /// Reduce integer multiplication patterns that contain a (+/-1 << Z) factor.
144 /// Callers are expected to call this twice to handle commuted patterns.
145 static Value
*foldMulShl1(BinaryOperator
&Mul
, bool CommuteOperands
,
146 InstCombiner::BuilderTy
&Builder
) {
147 Value
*X
= Mul
.getOperand(0), *Y
= Mul
.getOperand(1);
151 const bool HasNSW
= Mul
.hasNoSignedWrap();
152 const bool HasNUW
= Mul
.hasNoUnsignedWrap();
154 // X * (1 << Z) --> X << Z
156 if (match(Y
, m_Shl(m_One(), m_Value(Z
)))) {
157 bool PropagateNSW
= HasNSW
&& cast
<ShlOperator
>(Y
)->hasNoSignedWrap();
158 return Builder
.CreateShl(X
, Z
, Mul
.getName(), HasNUW
, PropagateNSW
);
161 // Similar to above, but an increment of the shifted value becomes an add:
162 // X * ((1 << Z) + 1) --> (X * (1 << Z)) + X --> (X << Z) + X
163 // This increases uses of X, so it may require a freeze, but that is still
164 // expected to be an improvement because it removes the multiply.
165 BinaryOperator
*Shift
;
166 if (match(Y
, m_OneUse(m_Add(m_BinOp(Shift
), m_One()))) &&
167 match(Shift
, m_OneUse(m_Shl(m_One(), m_Value(Z
))))) {
168 bool PropagateNSW
= HasNSW
&& Shift
->hasNoSignedWrap();
169 Value
*FrX
= Builder
.CreateFreeze(X
, X
->getName() + ".fr");
170 Value
*Shl
= Builder
.CreateShl(FrX
, Z
, "mulshl", HasNUW
, PropagateNSW
);
171 return Builder
.CreateAdd(Shl
, FrX
, Mul
.getName(), HasNUW
, PropagateNSW
);
174 // Similar to above, but a decrement of the shifted value is disguised as
175 // 'not' and becomes a sub:
176 // X * (~(-1 << Z)) --> X * ((1 << Z) - 1) --> (X << Z) - X
177 // This increases uses of X, so it may require a freeze, but that is still
178 // expected to be an improvement because it removes the multiply.
179 if (match(Y
, m_OneUse(m_Not(m_OneUse(m_Shl(m_AllOnes(), m_Value(Z
))))))) {
180 Value
*FrX
= Builder
.CreateFreeze(X
, X
->getName() + ".fr");
181 Value
*Shl
= Builder
.CreateShl(FrX
, Z
, "mulshl");
182 return Builder
.CreateSub(Shl
, FrX
, Mul
.getName());
188 static Value
*takeLog2(IRBuilderBase
&Builder
, Value
*Op
, unsigned Depth
,
189 bool AssumeNonZero
, bool DoFold
);
191 Instruction
*InstCombinerImpl::visitMul(BinaryOperator
&I
) {
192 Value
*Op0
= I
.getOperand(0), *Op1
= I
.getOperand(1);
194 simplifyMulInst(Op0
, Op1
, I
.hasNoSignedWrap(), I
.hasNoUnsignedWrap(),
195 SQ
.getWithInstruction(&I
)))
196 return replaceInstUsesWith(I
, V
);
198 if (SimplifyAssociativeOrCommutative(I
))
201 if (Instruction
*X
= foldVectorBinop(I
))
204 if (Instruction
*Phi
= foldBinopWithPhiOperands(I
))
207 if (Value
*V
= foldUsingDistributiveLaws(I
))
208 return replaceInstUsesWith(I
, V
);
210 Type
*Ty
= I
.getType();
211 const unsigned BitWidth
= Ty
->getScalarSizeInBits();
212 const bool HasNSW
= I
.hasNoSignedWrap();
213 const bool HasNUW
= I
.hasNoUnsignedWrap();
216 if (match(Op1
, m_AllOnes())) {
217 return HasNSW
? BinaryOperator::CreateNSWNeg(Op0
)
218 : BinaryOperator::CreateNeg(Op0
);
221 // Also allow combining multiply instructions on vectors.
226 if (match(&I
, m_Mul(m_Shl(m_Value(NewOp
), m_Constant(C2
)),
228 match(C1
, m_APInt(IVal
))) {
229 // ((X << C2)*C1) == (X * (C1 << C2))
230 Constant
*Shl
= ConstantExpr::getShl(C1
, C2
);
231 BinaryOperator
*Mul
= cast
<BinaryOperator
>(I
.getOperand(0));
232 BinaryOperator
*BO
= BinaryOperator::CreateMul(NewOp
, Shl
);
233 if (HasNUW
&& Mul
->hasNoUnsignedWrap())
234 BO
->setHasNoUnsignedWrap();
235 if (HasNSW
&& Mul
->hasNoSignedWrap() && Shl
->isNotMinSignedValue())
236 BO
->setHasNoSignedWrap();
240 if (match(&I
, m_Mul(m_Value(NewOp
), m_Constant(C1
)))) {
241 // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
242 if (Constant
*NewCst
= ConstantExpr::getExactLogBase2(C1
)) {
243 BinaryOperator
*Shl
= BinaryOperator::CreateShl(NewOp
, NewCst
);
246 Shl
->setHasNoUnsignedWrap();
249 if (match(NewCst
, m_APInt(V
)) && *V
!= V
->getBitWidth() - 1)
250 Shl
->setHasNoSignedWrap();
258 if (Op0
->hasOneUse() && match(Op1
, m_NegatedPower2())) {
259 // Interpret X * (-1<<C) as (-X) * (1<<C) and try to sink the negation.
260 // The "* (1<<C)" thus becomes a potential shifting opportunity.
262 Negator::Negate(/*IsNegation*/ true, HasNSW
, Op0
, *this)) {
263 auto *Op1C
= cast
<Constant
>(Op1
);
264 return replaceInstUsesWith(
265 I
, Builder
.CreateMul(NegOp0
, ConstantExpr::getNeg(Op1C
), "",
267 HasNSW
&& Op1C
->isNotMinSignedValue()));
270 // Try to convert multiply of extended operand to narrow negate and shift
271 // for better analysis.
272 // This is valid if the shift amount (trailing zeros in the multiplier
273 // constant) clears more high bits than the bitwidth difference between
274 // source and destination types:
275 // ({z/s}ext X) * (-1<<C) --> (zext (-X)) << C
276 const APInt
*NegPow2C
;
278 if (match(Op0
, m_ZExtOrSExt(m_Value(X
))) &&
279 match(Op1
, m_APIntAllowUndef(NegPow2C
))) {
280 unsigned SrcWidth
= X
->getType()->getScalarSizeInBits();
281 unsigned ShiftAmt
= NegPow2C
->countr_zero();
282 if (ShiftAmt
>= BitWidth
- SrcWidth
) {
283 Value
*N
= Builder
.CreateNeg(X
, X
->getName() + ".neg");
284 Value
*Z
= Builder
.CreateZExt(N
, Ty
, N
->getName() + ".z");
285 return BinaryOperator::CreateShl(Z
, ConstantInt::get(Ty
, ShiftAmt
));
290 if (Instruction
*FoldedMul
= foldBinOpIntoSelectOrPhi(I
))
293 if (Value
*FoldedMul
= foldMulSelectToNegate(I
, Builder
))
294 return replaceInstUsesWith(I
, FoldedMul
);
296 // Simplify mul instructions with a constant RHS.
298 if (match(Op1
, m_ImmConstant(MulC
))) {
299 // Canonicalize (X+C1)*MulC -> X*MulC+C1*MulC.
300 // Canonicalize (X|C1)*MulC -> X*MulC+C1*MulC.
303 if ((match(Op0
, m_OneUse(m_Add(m_Value(X
), m_ImmConstant(C1
))))) ||
304 (match(Op0
, m_OneUse(m_Or(m_Value(X
), m_ImmConstant(C1
)))) &&
305 haveNoCommonBitsSet(X
, C1
, SQ
.getWithInstruction(&I
)))) {
306 // C1*MulC simplifies to a tidier constant.
307 Value
*NewC
= Builder
.CreateMul(C1
, MulC
);
308 auto *BOp0
= cast
<BinaryOperator
>(Op0
);
310 (BOp0
->getOpcode() == Instruction::Or
|| BOp0
->hasNoUnsignedWrap());
311 Value
*NewMul
= Builder
.CreateMul(X
, MulC
);
312 auto *BO
= BinaryOperator::CreateAdd(NewMul
, NewC
);
313 if (HasNUW
&& Op0NUW
) {
314 // If NewMulBO is constant we also can set BO to nuw.
315 if (auto *NewMulBO
= dyn_cast
<BinaryOperator
>(NewMul
))
316 NewMulBO
->setHasNoUnsignedWrap();
317 BO
->setHasNoUnsignedWrap();
323 // abs(X) * abs(X) -> X * X
324 // nabs(X) * nabs(X) -> X * X
327 SelectPatternFlavor SPF
= matchSelectPattern(Op0
, X
, Y
).Flavor
;
328 if (SPF
== SPF_ABS
|| SPF
== SPF_NABS
)
329 return BinaryOperator::CreateMul(X
, X
);
331 if (match(Op0
, m_Intrinsic
<Intrinsic::abs
>(m_Value(X
))))
332 return BinaryOperator::CreateMul(X
, X
);
338 if (match(Op0
, m_Neg(m_Value(X
))) && match(Op1
, m_Constant(Op1C
)))
339 return BinaryOperator::CreateMul(X
, ConstantExpr::getNeg(Op1C
));
342 if (match(Op0
, m_Neg(m_Value(X
))) && match(Op1
, m_Neg(m_Value(Y
)))) {
343 auto *NewMul
= BinaryOperator::CreateMul(X
, Y
);
344 if (HasNSW
&& cast
<OverflowingBinaryOperator
>(Op0
)->hasNoSignedWrap() &&
345 cast
<OverflowingBinaryOperator
>(Op1
)->hasNoSignedWrap())
346 NewMul
->setHasNoSignedWrap();
350 // -X * Y --> -(X * Y)
351 // X * -Y --> -(X * Y)
352 if (match(&I
, m_c_Mul(m_OneUse(m_Neg(m_Value(X
))), m_Value(Y
))))
353 return BinaryOperator::CreateNeg(Builder
.CreateMul(X
, Y
));
355 // (X / Y) * Y = X - (X % Y)
356 // (X / Y) * -Y = (X % Y) - X
359 BinaryOperator
*Div
= dyn_cast
<BinaryOperator
>(Op0
);
360 if (!Div
|| (Div
->getOpcode() != Instruction::UDiv
&&
361 Div
->getOpcode() != Instruction::SDiv
)) {
363 Div
= dyn_cast
<BinaryOperator
>(Op1
);
365 Value
*Neg
= dyn_castNegVal(Y
);
366 if (Div
&& Div
->hasOneUse() &&
367 (Div
->getOperand(1) == Y
|| Div
->getOperand(1) == Neg
) &&
368 (Div
->getOpcode() == Instruction::UDiv
||
369 Div
->getOpcode() == Instruction::SDiv
)) {
370 Value
*X
= Div
->getOperand(0), *DivOp1
= Div
->getOperand(1);
372 // If the division is exact, X % Y is zero, so we end up with X or -X.
373 if (Div
->isExact()) {
375 return replaceInstUsesWith(I
, X
);
376 return BinaryOperator::CreateNeg(X
);
379 auto RemOpc
= Div
->getOpcode() == Instruction::UDiv
? Instruction::URem
381 // X must be frozen because we are increasing its number of uses.
382 Value
*XFreeze
= Builder
.CreateFreeze(X
, X
->getName() + ".fr");
383 Value
*Rem
= Builder
.CreateBinOp(RemOpc
, XFreeze
, DivOp1
);
385 return BinaryOperator::CreateSub(XFreeze
, Rem
);
386 return BinaryOperator::CreateSub(Rem
, XFreeze
);
390 // Fold the following two scenarios:
391 // 1) i1 mul -> i1 and.
392 // 2) X * Y --> X & Y, iff X, Y can be only {0,1}.
393 // Note: We could use known bits to generalize this and related patterns with
395 if (Ty
->isIntOrIntVectorTy(1) ||
396 (match(Op0
, m_And(m_Value(), m_One())) &&
397 match(Op1
, m_And(m_Value(), m_One()))))
398 return BinaryOperator::CreateAnd(Op0
, Op1
);
400 if (Value
*R
= foldMulShl1(I
, /* CommuteOperands */ false, Builder
))
401 return replaceInstUsesWith(I
, R
);
402 if (Value
*R
= foldMulShl1(I
, /* CommuteOperands */ true, Builder
))
403 return replaceInstUsesWith(I
, R
);
405 // (zext bool X) * (zext bool Y) --> zext (and X, Y)
406 // (sext bool X) * (sext bool Y) --> zext (and X, Y)
407 // Note: -1 * -1 == 1 * 1 == 1 (if the extends match, the result is the same)
408 if (((match(Op0
, m_ZExt(m_Value(X
))) && match(Op1
, m_ZExt(m_Value(Y
)))) ||
409 (match(Op0
, m_SExt(m_Value(X
))) && match(Op1
, m_SExt(m_Value(Y
))))) &&
410 X
->getType()->isIntOrIntVectorTy(1) && X
->getType() == Y
->getType() &&
411 (Op0
->hasOneUse() || Op1
->hasOneUse() || X
== Y
)) {
412 Value
*And
= Builder
.CreateAnd(X
, Y
, "mulbool");
413 return CastInst::Create(Instruction::ZExt
, And
, Ty
);
415 // (sext bool X) * (zext bool Y) --> sext (and X, Y)
416 // (zext bool X) * (sext bool Y) --> sext (and X, Y)
417 // Note: -1 * 1 == 1 * -1 == -1
418 if (((match(Op0
, m_SExt(m_Value(X
))) && match(Op1
, m_ZExt(m_Value(Y
)))) ||
419 (match(Op0
, m_ZExt(m_Value(X
))) && match(Op1
, m_SExt(m_Value(Y
))))) &&
420 X
->getType()->isIntOrIntVectorTy(1) && X
->getType() == Y
->getType() &&
421 (Op0
->hasOneUse() || Op1
->hasOneUse())) {
422 Value
*And
= Builder
.CreateAnd(X
, Y
, "mulbool");
423 return CastInst::Create(Instruction::SExt
, And
, Ty
);
426 // (zext bool X) * Y --> X ? Y : 0
427 // Y * (zext bool X) --> X ? Y : 0
428 if (match(Op0
, m_ZExt(m_Value(X
))) && X
->getType()->isIntOrIntVectorTy(1))
429 return SelectInst::Create(X
, Op1
, ConstantInt::getNullValue(Ty
));
430 if (match(Op1
, m_ZExt(m_Value(X
))) && X
->getType()->isIntOrIntVectorTy(1))
431 return SelectInst::Create(X
, Op0
, ConstantInt::getNullValue(Ty
));
434 if (match(Op1
, m_ImmConstant(ImmC
))) {
435 // (sext bool X) * C --> X ? -C : 0
436 if (match(Op0
, m_SExt(m_Value(X
))) && X
->getType()->isIntOrIntVectorTy(1)) {
437 Constant
*NegC
= ConstantExpr::getNeg(ImmC
);
438 return SelectInst::Create(X
, NegC
, ConstantInt::getNullValue(Ty
));
441 // (ashr i32 X, 31) * C --> (X < 0) ? -C : 0
443 if (match(Op0
, m_OneUse(m_AShr(m_Value(X
), m_APInt(C
)))) &&
444 *C
== C
->getBitWidth() - 1) {
445 Constant
*NegC
= ConstantExpr::getNeg(ImmC
);
446 Value
*IsNeg
= Builder
.CreateIsNeg(X
, "isneg");
447 return SelectInst::Create(IsNeg
, NegC
, ConstantInt::getNullValue(Ty
));
451 // (lshr X, 31) * Y --> (X < 0) ? Y : 0
452 // TODO: We are not checking one-use because the elimination of the multiply
453 // is better for analysis?
455 if (match(&I
, m_c_BinOp(m_LShr(m_Value(X
), m_APInt(C
)), m_Value(Y
))) &&
456 *C
== C
->getBitWidth() - 1) {
457 Value
*IsNeg
= Builder
.CreateIsNeg(X
, "isneg");
458 return SelectInst::Create(IsNeg
, Y
, ConstantInt::getNullValue(Ty
));
461 // (and X, 1) * Y --> (trunc X) ? Y : 0
462 if (match(&I
, m_c_BinOp(m_OneUse(m_And(m_Value(X
), m_One())), m_Value(Y
)))) {
463 Value
*Tr
= Builder
.CreateTrunc(X
, CmpInst::makeCmpResultType(Ty
));
464 return SelectInst::Create(Tr
, Y
, ConstantInt::getNullValue(Ty
));
467 // ((ashr X, 31) | 1) * X --> abs(X)
468 // X * ((ashr X, 31) | 1) --> abs(X)
469 if (match(&I
, m_c_BinOp(m_Or(m_AShr(m_Value(X
),
470 m_SpecificIntAllowUndef(BitWidth
- 1)),
473 Value
*Abs
= Builder
.CreateBinaryIntrinsic(
474 Intrinsic::abs
, X
, ConstantInt::getBool(I
.getContext(), HasNSW
));
476 return replaceInstUsesWith(I
, Abs
);
479 if (Instruction
*Ext
= narrowMathIfNoOverflow(I
))
482 if (Instruction
*Res
= foldBinOpOfSelectAndCastOfSelectCondition(I
))
485 // min(X, Y) * max(X, Y) => X * Y.
486 if (match(&I
, m_CombineOr(m_c_Mul(m_SMax(m_Value(X
), m_Value(Y
)),
487 m_c_SMin(m_Deferred(X
), m_Deferred(Y
))),
488 m_c_Mul(m_UMax(m_Value(X
), m_Value(Y
)),
489 m_c_UMin(m_Deferred(X
), m_Deferred(Y
))))))
490 return BinaryOperator::CreateWithCopiedFlags(Instruction::Mul
, X
, Y
, &I
);
493 // if Log2(Op0) folds away ->
494 // (shl Op1, Log2(Op0))
495 // if Log2(Op1) folds away ->
496 // (shl Op0, Log2(Op1))
497 if (takeLog2(Builder
, Op0
, /*Depth*/ 0, /*AssumeNonZero*/ false,
499 Value
*Res
= takeLog2(Builder
, Op0
, /*Depth*/ 0, /*AssumeNonZero*/ false,
501 BinaryOperator
*Shl
= BinaryOperator::CreateShl(Op1
, Res
);
502 // We can only propegate nuw flag.
503 Shl
->setHasNoUnsignedWrap(HasNUW
);
506 if (takeLog2(Builder
, Op1
, /*Depth*/ 0, /*AssumeNonZero*/ false,
508 Value
*Res
= takeLog2(Builder
, Op1
, /*Depth*/ 0, /*AssumeNonZero*/ false,
510 BinaryOperator
*Shl
= BinaryOperator::CreateShl(Op0
, Res
);
511 // We can only propegate nuw flag.
512 Shl
->setHasNoUnsignedWrap(HasNUW
);
516 bool Changed
= false;
517 if (!HasNSW
&& willNotOverflowSignedMul(Op0
, Op1
, I
)) {
519 I
.setHasNoSignedWrap(true);
522 if (!HasNUW
&& willNotOverflowUnsignedMul(Op0
, Op1
, I
)) {
524 I
.setHasNoUnsignedWrap(true);
527 return Changed
? &I
: nullptr;
530 Instruction
*InstCombinerImpl::foldFPSignBitOps(BinaryOperator
&I
) {
531 BinaryOperator::BinaryOps Opcode
= I
.getOpcode();
532 assert((Opcode
== Instruction::FMul
|| Opcode
== Instruction::FDiv
) &&
533 "Expected fmul or fdiv");
535 Value
*Op0
= I
.getOperand(0), *Op1
= I
.getOperand(1);
540 if (match(Op0
, m_FNeg(m_Value(X
))) && match(Op1
, m_FNeg(m_Value(Y
))))
541 return BinaryOperator::CreateWithCopiedFlags(Opcode
, X
, Y
, &I
);
543 // fabs(X) * fabs(X) -> X * X
544 // fabs(X) / fabs(X) -> X / X
545 if (Op0
== Op1
&& match(Op0
, m_FAbs(m_Value(X
))))
546 return BinaryOperator::CreateWithCopiedFlags(Opcode
, X
, X
, &I
);
548 // fabs(X) * fabs(Y) --> fabs(X * Y)
549 // fabs(X) / fabs(Y) --> fabs(X / Y)
550 if (match(Op0
, m_FAbs(m_Value(X
))) && match(Op1
, m_FAbs(m_Value(Y
))) &&
551 (Op0
->hasOneUse() || Op1
->hasOneUse())) {
552 IRBuilder
<>::FastMathFlagGuard
FMFGuard(Builder
);
553 Builder
.setFastMathFlags(I
.getFastMathFlags());
554 Value
*XY
= Builder
.CreateBinOp(Opcode
, X
, Y
);
555 Value
*Fabs
= Builder
.CreateUnaryIntrinsic(Intrinsic::fabs
, XY
);
557 return replaceInstUsesWith(I
, Fabs
);
563 Instruction
*InstCombinerImpl::visitFMul(BinaryOperator
&I
) {
564 if (Value
*V
= simplifyFMulInst(I
.getOperand(0), I
.getOperand(1),
565 I
.getFastMathFlags(),
566 SQ
.getWithInstruction(&I
)))
567 return replaceInstUsesWith(I
, V
);
569 if (SimplifyAssociativeOrCommutative(I
))
572 if (Instruction
*X
= foldVectorBinop(I
))
575 if (Instruction
*Phi
= foldBinopWithPhiOperands(I
))
578 if (Instruction
*FoldedMul
= foldBinOpIntoSelectOrPhi(I
))
581 if (Value
*FoldedMul
= foldMulSelectToNegate(I
, Builder
))
582 return replaceInstUsesWith(I
, FoldedMul
);
584 if (Instruction
*R
= foldFPSignBitOps(I
))
588 Value
*Op0
= I
.getOperand(0), *Op1
= I
.getOperand(1);
589 if (match(Op1
, m_SpecificFP(-1.0)))
590 return UnaryOperator::CreateFNegFMF(Op0
, &I
);
592 // With no-nans: X * 0.0 --> copysign(0.0, X)
593 if (I
.hasNoNaNs() && match(Op1
, m_PosZeroFP())) {
594 CallInst
*CopySign
= Builder
.CreateIntrinsic(Intrinsic::copysign
,
595 {I
.getType()}, {Op1
, Op0
}, &I
);
596 return replaceInstUsesWith(I
, CopySign
);
602 if (match(Op0
, m_FNeg(m_Value(X
))) && match(Op1
, m_Constant(C
)))
603 if (Constant
*NegC
= ConstantFoldUnaryOpOperand(Instruction::FNeg
, C
, DL
))
604 return BinaryOperator::CreateFMulFMF(X
, NegC
, &I
);
606 // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
607 if (Value
*V
= SimplifySelectsFeedingBinaryOp(I
, Op0
, Op1
))
608 return replaceInstUsesWith(I
, V
);
610 if (I
.hasAllowReassoc()) {
611 // Reassociate constant RHS with another constant to form constant
613 if (match(Op1
, m_Constant(C
)) && C
->isFiniteNonZeroFP()) {
615 if (match(Op0
, m_OneUse(m_FDiv(m_Constant(C1
), m_Value(X
))))) {
616 // (C1 / X) * C --> (C * C1) / X
618 ConstantFoldBinaryOpOperands(Instruction::FMul
, C
, C1
, DL
);
619 if (CC1
&& CC1
->isNormalFP())
620 return BinaryOperator::CreateFDivFMF(CC1
, X
, &I
);
622 if (match(Op0
, m_FDiv(m_Value(X
), m_Constant(C1
)))) {
623 // (X / C1) * C --> X * (C / C1)
625 ConstantFoldBinaryOpOperands(Instruction::FDiv
, C
, C1
, DL
);
626 if (CDivC1
&& CDivC1
->isNormalFP())
627 return BinaryOperator::CreateFMulFMF(X
, CDivC1
, &I
);
629 // If the constant was a denormal, try reassociating differently.
630 // (X / C1) * C --> X / (C1 / C)
632 ConstantFoldBinaryOpOperands(Instruction::FDiv
, C1
, C
, DL
);
633 if (C1DivC
&& Op0
->hasOneUse() && C1DivC
->isNormalFP())
634 return BinaryOperator::CreateFDivFMF(X
, C1DivC
, &I
);
637 // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
638 // canonicalized to 'fadd X, C'. Distributing the multiply may allow
639 // further folds and (X * C) + C2 is 'fma'.
640 if (match(Op0
, m_OneUse(m_FAdd(m_Value(X
), m_Constant(C1
))))) {
641 // (X + C1) * C --> (X * C) + (C * C1)
642 if (Constant
*CC1
= ConstantFoldBinaryOpOperands(
643 Instruction::FMul
, C
, C1
, DL
)) {
644 Value
*XC
= Builder
.CreateFMulFMF(X
, C
, &I
);
645 return BinaryOperator::CreateFAddFMF(XC
, CC1
, &I
);
648 if (match(Op0
, m_OneUse(m_FSub(m_Constant(C1
), m_Value(X
))))) {
649 // (C1 - X) * C --> (C * C1) - (X * C)
650 if (Constant
*CC1
= ConstantFoldBinaryOpOperands(
651 Instruction::FMul
, C
, C1
, DL
)) {
652 Value
*XC
= Builder
.CreateFMulFMF(X
, C
, &I
);
653 return BinaryOperator::CreateFSubFMF(CC1
, XC
, &I
);
659 if (match(&I
, m_c_FMul(m_OneUse(m_FDiv(m_Value(X
), m_Value(Y
))),
661 // Sink division: (X / Y) * Z --> (X * Z) / Y
662 Value
*NewFMul
= Builder
.CreateFMulFMF(X
, Z
, &I
);
663 return BinaryOperator::CreateFDivFMF(NewFMul
, Y
, &I
);
666 // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
667 // nnan disallows the possibility of returning a number if both operands are
668 // negative (in that case, we should return NaN).
669 if (I
.hasNoNaNs() && match(Op0
, m_OneUse(m_Sqrt(m_Value(X
)))) &&
670 match(Op1
, m_OneUse(m_Sqrt(m_Value(Y
))))) {
671 Value
*XY
= Builder
.CreateFMulFMF(X
, Y
, &I
);
672 Value
*Sqrt
= Builder
.CreateUnaryIntrinsic(Intrinsic::sqrt
, XY
, &I
);
673 return replaceInstUsesWith(I
, Sqrt
);
676 // The following transforms are done irrespective of the number of uses
677 // for the expression "1.0/sqrt(X)".
678 // 1) 1.0/sqrt(X) * X -> X/sqrt(X)
679 // 2) X * 1.0/sqrt(X) -> X/sqrt(X)
680 // We always expect the backend to reduce X/sqrt(X) to sqrt(X), if it
681 // has the necessary (reassoc) fast-math-flags.
682 if (I
.hasNoSignedZeros() &&
683 match(Op0
, (m_FDiv(m_SpecificFP(1.0), m_Value(Y
)))) &&
684 match(Y
, m_Sqrt(m_Value(X
))) && Op1
== X
)
685 return BinaryOperator::CreateFDivFMF(X
, Y
, &I
);
686 if (I
.hasNoSignedZeros() &&
687 match(Op1
, (m_FDiv(m_SpecificFP(1.0), m_Value(Y
)))) &&
688 match(Y
, m_Sqrt(m_Value(X
))) && Op0
== X
)
689 return BinaryOperator::CreateFDivFMF(X
, Y
, &I
);
691 // Like the similar transform in instsimplify, this requires 'nsz' because
692 // sqrt(-0.0) = -0.0, and -0.0 * -0.0 does not simplify to -0.0.
693 if (I
.hasNoNaNs() && I
.hasNoSignedZeros() && Op0
== Op1
&&
695 // Peek through fdiv to find squaring of square root:
696 // (X / sqrt(Y)) * (X / sqrt(Y)) --> (X * X) / Y
697 if (match(Op0
, m_FDiv(m_Value(X
), m_Sqrt(m_Value(Y
))))) {
698 Value
*XX
= Builder
.CreateFMulFMF(X
, X
, &I
);
699 return BinaryOperator::CreateFDivFMF(XX
, Y
, &I
);
701 // (sqrt(Y) / X) * (sqrt(Y) / X) --> Y / (X * X)
702 if (match(Op0
, m_FDiv(m_Sqrt(m_Value(Y
)), m_Value(X
)))) {
703 Value
*XX
= Builder
.CreateFMulFMF(X
, X
, &I
);
704 return BinaryOperator::CreateFDivFMF(Y
, XX
, &I
);
708 // pow(X, Y) * X --> pow(X, Y+1)
709 // X * pow(X, Y) --> pow(X, Y+1)
710 if (match(&I
, m_c_FMul(m_OneUse(m_Intrinsic
<Intrinsic::pow
>(m_Value(X
),
714 Builder
.CreateFAddFMF(Y
, ConstantFP::get(I
.getType(), 1.0), &I
);
715 Value
*Pow
= Builder
.CreateBinaryIntrinsic(Intrinsic::pow
, X
, Y1
, &I
);
716 return replaceInstUsesWith(I
, Pow
);
719 if (I
.isOnlyUserOfAnyOperand()) {
720 // pow(X, Y) * pow(X, Z) -> pow(X, Y + Z)
721 if (match(Op0
, m_Intrinsic
<Intrinsic::pow
>(m_Value(X
), m_Value(Y
))) &&
722 match(Op1
, m_Intrinsic
<Intrinsic::pow
>(m_Specific(X
), m_Value(Z
)))) {
723 auto *YZ
= Builder
.CreateFAddFMF(Y
, Z
, &I
);
724 auto *NewPow
= Builder
.CreateBinaryIntrinsic(Intrinsic::pow
, X
, YZ
, &I
);
725 return replaceInstUsesWith(I
, NewPow
);
727 // pow(X, Y) * pow(Z, Y) -> pow(X * Z, Y)
728 if (match(Op0
, m_Intrinsic
<Intrinsic::pow
>(m_Value(X
), m_Value(Y
))) &&
729 match(Op1
, m_Intrinsic
<Intrinsic::pow
>(m_Value(Z
), m_Specific(Y
)))) {
730 auto *XZ
= Builder
.CreateFMulFMF(X
, Z
, &I
);
731 auto *NewPow
= Builder
.CreateBinaryIntrinsic(Intrinsic::pow
, XZ
, Y
, &I
);
732 return replaceInstUsesWith(I
, NewPow
);
735 // powi(x, y) * powi(x, z) -> powi(x, y + z)
736 if (match(Op0
, m_Intrinsic
<Intrinsic::powi
>(m_Value(X
), m_Value(Y
))) &&
737 match(Op1
, m_Intrinsic
<Intrinsic::powi
>(m_Specific(X
), m_Value(Z
))) &&
738 Y
->getType() == Z
->getType()) {
739 auto *YZ
= Builder
.CreateAdd(Y
, Z
);
740 auto *NewPow
= Builder
.CreateIntrinsic(
741 Intrinsic::powi
, {X
->getType(), YZ
->getType()}, {X
, YZ
}, &I
);
742 return replaceInstUsesWith(I
, NewPow
);
745 // exp(X) * exp(Y) -> exp(X + Y)
746 if (match(Op0
, m_Intrinsic
<Intrinsic::exp
>(m_Value(X
))) &&
747 match(Op1
, m_Intrinsic
<Intrinsic::exp
>(m_Value(Y
)))) {
748 Value
*XY
= Builder
.CreateFAddFMF(X
, Y
, &I
);
749 Value
*Exp
= Builder
.CreateUnaryIntrinsic(Intrinsic::exp
, XY
, &I
);
750 return replaceInstUsesWith(I
, Exp
);
753 // exp2(X) * exp2(Y) -> exp2(X + Y)
754 if (match(Op0
, m_Intrinsic
<Intrinsic::exp2
>(m_Value(X
))) &&
755 match(Op1
, m_Intrinsic
<Intrinsic::exp2
>(m_Value(Y
)))) {
756 Value
*XY
= Builder
.CreateFAddFMF(X
, Y
, &I
);
757 Value
*Exp2
= Builder
.CreateUnaryIntrinsic(Intrinsic::exp2
, XY
, &I
);
758 return replaceInstUsesWith(I
, Exp2
);
762 // (X*Y) * X => (X*X) * Y where Y != X
763 // The purpose is two-fold:
764 // 1) to form a power expression (of X).
765 // 2) potentially shorten the critical path: After transformation, the
766 // latency of the instruction Y is amortized by the expression of X*X,
767 // and therefore Y is in a "less critical" position compared to what it
768 // was before the transformation.
769 if (match(Op0
, m_OneUse(m_c_FMul(m_Specific(Op1
), m_Value(Y
)))) &&
771 Value
*XX
= Builder
.CreateFMulFMF(Op1
, Op1
, &I
);
772 return BinaryOperator::CreateFMulFMF(XX
, Y
, &I
);
774 if (match(Op1
, m_OneUse(m_c_FMul(m_Specific(Op0
), m_Value(Y
)))) &&
776 Value
*XX
= Builder
.CreateFMulFMF(Op0
, Op0
, &I
);
777 return BinaryOperator::CreateFMulFMF(XX
, Y
, &I
);
781 // log2(X * 0.5) * Y = log2(X) * Y - Y
783 IntrinsicInst
*Log2
= nullptr;
784 if (match(Op0
, m_OneUse(m_Intrinsic
<Intrinsic::log2
>(
785 m_OneUse(m_FMul(m_Value(X
), m_SpecificFP(0.5))))))) {
786 Log2
= cast
<IntrinsicInst
>(Op0
);
789 if (match(Op1
, m_OneUse(m_Intrinsic
<Intrinsic::log2
>(
790 m_OneUse(m_FMul(m_Value(X
), m_SpecificFP(0.5))))))) {
791 Log2
= cast
<IntrinsicInst
>(Op1
);
795 Value
*Log2
= Builder
.CreateUnaryIntrinsic(Intrinsic::log2
, X
, &I
);
796 Value
*LogXTimesY
= Builder
.CreateFMulFMF(Log2
, Y
, &I
);
797 return BinaryOperator::CreateFSubFMF(LogXTimesY
, Y
, &I
);
801 // Simplify FMUL recurrences starting with 0.0 to 0.0 if nnan and nsz are set.
802 // Given a phi node with entry value as 0 and it used in fmul operation,
803 // we can replace fmul with 0 safely and eleminate loop operation.
804 PHINode
*PN
= nullptr;
805 Value
*Start
= nullptr, *Step
= nullptr;
806 if (matchSimpleRecurrence(&I
, PN
, Start
, Step
) && I
.hasNoNaNs() &&
807 I
.hasNoSignedZeros() && match(Start
, m_Zero()))
808 return replaceInstUsesWith(I
, Start
);
810 // minimum(X, Y) * maximum(X, Y) => X * Y.
812 m_c_FMul(m_Intrinsic
<Intrinsic::maximum
>(m_Value(X
), m_Value(Y
)),
813 m_c_Intrinsic
<Intrinsic::minimum
>(m_Deferred(X
),
815 BinaryOperator
*Result
= BinaryOperator::CreateFMulFMF(X
, Y
, &I
);
816 // We cannot preserve ninf if nnan flag is not set.
817 // If X is NaN and Y is Inf then in original program we had NaN * NaN,
818 // while in optimized version NaN * Inf and this is a poison with ninf flag.
819 if (!Result
->hasNoNaNs())
820 Result
->setHasNoInfs(false);
827 /// Fold a divide or remainder with a select instruction divisor when one of the
828 /// select operands is zero. In that case, we can use the other select operand
829 /// because div/rem by zero is undefined.
830 bool InstCombinerImpl::simplifyDivRemOfSelectWithZeroOp(BinaryOperator
&I
) {
831 SelectInst
*SI
= dyn_cast
<SelectInst
>(I
.getOperand(1));
836 if (match(SI
->getTrueValue(), m_Zero()))
837 // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
839 else if (match(SI
->getFalseValue(), m_Zero()))
840 // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
845 // Change the div/rem to use 'Y' instead of the select.
846 replaceOperand(I
, 1, SI
->getOperand(NonNullOperand
));
848 // Okay, we know we replace the operand of the div/rem with 'Y' with no
849 // problem. However, the select, or the condition of the select may have
850 // multiple uses. Based on our knowledge that the operand must be non-zero,
851 // propagate the known value for the select into other uses of it, and
852 // propagate a known value of the condition into its other users.
854 // If the select and condition only have a single use, don't bother with this,
856 Value
*SelectCond
= SI
->getCondition();
857 if (SI
->use_empty() && SelectCond
->hasOneUse())
860 // Scan the current block backward, looking for other uses of SI.
861 BasicBlock::iterator BBI
= I
.getIterator(), BBFront
= I
.getParent()->begin();
862 Type
*CondTy
= SelectCond
->getType();
863 while (BBI
!= BBFront
) {
865 // If we found an instruction that we can't assume will return, so
866 // information from below it cannot be propagated above it.
867 if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI
))
870 // Replace uses of the select or its condition with the known values.
871 for (Use
&Op
: BBI
->operands()) {
873 replaceUse(Op
, SI
->getOperand(NonNullOperand
));
874 Worklist
.push(&*BBI
);
875 } else if (Op
== SelectCond
) {
876 replaceUse(Op
, NonNullOperand
== 1 ? ConstantInt::getTrue(CondTy
)
877 : ConstantInt::getFalse(CondTy
));
878 Worklist
.push(&*BBI
);
882 // If we past the instruction, quit looking for it.
885 if (&*BBI
== SelectCond
)
886 SelectCond
= nullptr;
888 // If we ran out of things to eliminate, break out of the loop.
889 if (!SelectCond
&& !SI
)
896 /// True if the multiply can not be expressed in an int this size.
897 static bool multiplyOverflows(const APInt
&C1
, const APInt
&C2
, APInt
&Product
,
900 Product
= IsSigned
? C1
.smul_ov(C2
, Overflow
) : C1
.umul_ov(C2
, Overflow
);
904 /// True if C1 is a multiple of C2. Quotient contains C1/C2.
905 static bool isMultiple(const APInt
&C1
, const APInt
&C2
, APInt
&Quotient
,
907 assert(C1
.getBitWidth() == C2
.getBitWidth() && "Constant widths not equal");
909 // Bail if we will divide by zero.
913 // Bail if we would divide INT_MIN by -1.
914 if (IsSigned
&& C1
.isMinSignedValue() && C2
.isAllOnes())
917 APInt
Remainder(C1
.getBitWidth(), /*val=*/0ULL, IsSigned
);
919 APInt::sdivrem(C1
, C2
, Quotient
, Remainder
);
921 APInt::udivrem(C1
, C2
, Quotient
, Remainder
);
923 return Remainder
.isMinValue();
926 static Value
*foldIDivShl(BinaryOperator
&I
, InstCombiner::BuilderTy
&Builder
) {
927 assert((I
.getOpcode() == Instruction::SDiv
||
928 I
.getOpcode() == Instruction::UDiv
) &&
929 "Expected integer divide");
931 bool IsSigned
= I
.getOpcode() == Instruction::SDiv
;
932 Value
*Op0
= I
.getOperand(0), *Op1
= I
.getOperand(1);
933 Type
*Ty
= I
.getType();
937 // With appropriate no-wrap constraints, remove a common factor in the
938 // dividend and divisor that is disguised as a left-shifted value.
939 if (match(Op1
, m_Shl(m_Value(X
), m_Value(Z
))) &&
940 match(Op0
, m_c_Mul(m_Specific(X
), m_Value(Y
)))) {
941 // Both operands must have the matching no-wrap for this kind of division.
942 auto *Mul
= cast
<OverflowingBinaryOperator
>(Op0
);
943 auto *Shl
= cast
<OverflowingBinaryOperator
>(Op1
);
944 bool HasNUW
= Mul
->hasNoUnsignedWrap() && Shl
->hasNoUnsignedWrap();
945 bool HasNSW
= Mul
->hasNoSignedWrap() && Shl
->hasNoSignedWrap();
947 // (X * Y) u/ (X << Z) --> Y u>> Z
948 if (!IsSigned
&& HasNUW
)
949 return Builder
.CreateLShr(Y
, Z
, "", I
.isExact());
951 // (X * Y) s/ (X << Z) --> Y s/ (1 << Z)
952 if (IsSigned
&& HasNSW
&& (Op0
->hasOneUse() || Op1
->hasOneUse())) {
953 Value
*Shl
= Builder
.CreateShl(ConstantInt::get(Ty
, 1), Z
);
954 return Builder
.CreateSDiv(Y
, Shl
, "", I
.isExact());
958 // With appropriate no-wrap constraints, remove a common factor in the
959 // dividend and divisor that is disguised as a left-shift amount.
960 if (match(Op0
, m_Shl(m_Value(X
), m_Value(Z
))) &&
961 match(Op1
, m_Shl(m_Value(Y
), m_Specific(Z
)))) {
962 auto *Shl0
= cast
<OverflowingBinaryOperator
>(Op0
);
963 auto *Shl1
= cast
<OverflowingBinaryOperator
>(Op1
);
965 // For unsigned div, we need 'nuw' on both shifts or
966 // 'nsw' on both shifts + 'nuw' on the dividend.
967 // (X << Z) / (Y << Z) --> X / Y
969 ((Shl0
->hasNoUnsignedWrap() && Shl1
->hasNoUnsignedWrap()) ||
970 (Shl0
->hasNoUnsignedWrap() && Shl0
->hasNoSignedWrap() &&
971 Shl1
->hasNoSignedWrap())))
972 return Builder
.CreateUDiv(X
, Y
, "", I
.isExact());
974 // For signed div, we need 'nsw' on both shifts + 'nuw' on the divisor.
975 // (X << Z) / (Y << Z) --> X / Y
976 if (IsSigned
&& Shl0
->hasNoSignedWrap() && Shl1
->hasNoSignedWrap() &&
977 Shl1
->hasNoUnsignedWrap())
978 return Builder
.CreateSDiv(X
, Y
, "", I
.isExact());
981 // If X << Y and X << Z does not overflow, then:
982 // (X << Y) / (X << Z) -> (1 << Y) / (1 << Z) -> 1 << Y >> Z
983 if (match(Op0
, m_Shl(m_Value(X
), m_Value(Y
))) &&
984 match(Op1
, m_Shl(m_Specific(X
), m_Value(Z
)))) {
985 auto *Shl0
= cast
<OverflowingBinaryOperator
>(Op0
);
986 auto *Shl1
= cast
<OverflowingBinaryOperator
>(Op1
);
988 if (IsSigned
? (Shl0
->hasNoSignedWrap() && Shl1
->hasNoSignedWrap())
989 : (Shl0
->hasNoUnsignedWrap() && Shl1
->hasNoUnsignedWrap())) {
990 Constant
*One
= ConstantInt::get(X
->getType(), 1);
991 // Only preserve the nsw flag if dividend has nsw
992 // or divisor has nsw and operator is sdiv.
993 Value
*Dividend
= Builder
.CreateShl(
994 One
, Y
, "shl.dividend",
997 IsSigned
? (Shl0
->hasNoUnsignedWrap() || Shl1
->hasNoUnsignedWrap())
998 : Shl0
->hasNoSignedWrap());
999 return Builder
.CreateLShr(Dividend
, Z
, "", I
.isExact());
1006 /// This function implements the transforms common to both integer division
1007 /// instructions (udiv and sdiv). It is called by the visitors to those integer
1008 /// division instructions.
1009 /// Common integer divide transforms
1010 Instruction
*InstCombinerImpl::commonIDivTransforms(BinaryOperator
&I
) {
1011 if (Instruction
*Phi
= foldBinopWithPhiOperands(I
))
1014 Value
*Op0
= I
.getOperand(0), *Op1
= I
.getOperand(1);
1015 bool IsSigned
= I
.getOpcode() == Instruction::SDiv
;
1016 Type
*Ty
= I
.getType();
1018 // The RHS is known non-zero.
1019 if (Value
*V
= simplifyValueKnownNonZero(I
.getOperand(1), *this, I
))
1020 return replaceOperand(I
, 1, V
);
1022 // Handle cases involving: [su]div X, (select Cond, Y, Z)
1023 // This does not apply for fdiv.
1024 if (simplifyDivRemOfSelectWithZeroOp(I
))
1027 // If the divisor is a select-of-constants, try to constant fold all div ops:
1028 // C / (select Cond, TrueC, FalseC) --> select Cond, (C / TrueC), (C / FalseC)
1029 // TODO: Adapt simplifyDivRemOfSelectWithZeroOp to allow this and other folds.
1030 if (match(Op0
, m_ImmConstant()) &&
1031 match(Op1
, m_Select(m_Value(), m_ImmConstant(), m_ImmConstant()))) {
1032 if (Instruction
*R
= FoldOpIntoSelect(I
, cast
<SelectInst
>(Op1
),
1033 /*FoldWithMultiUse*/ true))
1038 if (match(Op1
, m_APInt(C2
))) {
1042 // (X / C1) / C2 -> X / (C1*C2)
1043 if ((IsSigned
&& match(Op0
, m_SDiv(m_Value(X
), m_APInt(C1
)))) ||
1044 (!IsSigned
&& match(Op0
, m_UDiv(m_Value(X
), m_APInt(C1
))))) {
1045 APInt
Product(C1
->getBitWidth(), /*val=*/0ULL, IsSigned
);
1046 if (!multiplyOverflows(*C1
, *C2
, Product
, IsSigned
))
1047 return BinaryOperator::Create(I
.getOpcode(), X
,
1048 ConstantInt::get(Ty
, Product
));
1051 APInt
Quotient(C2
->getBitWidth(), /*val=*/0ULL, IsSigned
);
1052 if ((IsSigned
&& match(Op0
, m_NSWMul(m_Value(X
), m_APInt(C1
)))) ||
1053 (!IsSigned
&& match(Op0
, m_NUWMul(m_Value(X
), m_APInt(C1
))))) {
1055 // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
1056 if (isMultiple(*C2
, *C1
, Quotient
, IsSigned
)) {
1057 auto *NewDiv
= BinaryOperator::Create(I
.getOpcode(), X
,
1058 ConstantInt::get(Ty
, Quotient
));
1059 NewDiv
->setIsExact(I
.isExact());
1063 // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
1064 if (isMultiple(*C1
, *C2
, Quotient
, IsSigned
)) {
1065 auto *Mul
= BinaryOperator::Create(Instruction::Mul
, X
,
1066 ConstantInt::get(Ty
, Quotient
));
1067 auto *OBO
= cast
<OverflowingBinaryOperator
>(Op0
);
1068 Mul
->setHasNoUnsignedWrap(!IsSigned
&& OBO
->hasNoUnsignedWrap());
1069 Mul
->setHasNoSignedWrap(OBO
->hasNoSignedWrap());
1074 if ((IsSigned
&& match(Op0
, m_NSWShl(m_Value(X
), m_APInt(C1
))) &&
1075 C1
->ult(C1
->getBitWidth() - 1)) ||
1076 (!IsSigned
&& match(Op0
, m_NUWShl(m_Value(X
), m_APInt(C1
))) &&
1077 C1
->ult(C1
->getBitWidth()))) {
1078 APInt C1Shifted
= APInt::getOneBitSet(
1079 C1
->getBitWidth(), static_cast<unsigned>(C1
->getZExtValue()));
1081 // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
1082 if (isMultiple(*C2
, C1Shifted
, Quotient
, IsSigned
)) {
1083 auto *BO
= BinaryOperator::Create(I
.getOpcode(), X
,
1084 ConstantInt::get(Ty
, Quotient
));
1085 BO
->setIsExact(I
.isExact());
1089 // (X << C1) / C2 -> X * ((1 << C1) / C2) if 1 << C1 is a multiple of C2.
1090 if (isMultiple(C1Shifted
, *C2
, Quotient
, IsSigned
)) {
1091 auto *Mul
= BinaryOperator::Create(Instruction::Mul
, X
,
1092 ConstantInt::get(Ty
, Quotient
));
1093 auto *OBO
= cast
<OverflowingBinaryOperator
>(Op0
);
1094 Mul
->setHasNoUnsignedWrap(!IsSigned
&& OBO
->hasNoUnsignedWrap());
1095 Mul
->setHasNoSignedWrap(OBO
->hasNoSignedWrap());
1100 // Distribute div over add to eliminate a matching div/mul pair:
1101 // ((X * C2) + C1) / C2 --> X + C1/C2
1102 // We need a multiple of the divisor for a signed add constant, but
1103 // unsigned is fine with any constant pair.
1105 match(Op0
, m_NSWAdd(m_NSWMul(m_Value(X
), m_SpecificInt(*C2
)),
1107 isMultiple(*C1
, *C2
, Quotient
, IsSigned
)) {
1108 return BinaryOperator::CreateNSWAdd(X
, ConstantInt::get(Ty
, Quotient
));
1111 match(Op0
, m_NUWAdd(m_NUWMul(m_Value(X
), m_SpecificInt(*C2
)),
1113 return BinaryOperator::CreateNUWAdd(X
,
1114 ConstantInt::get(Ty
, C1
->udiv(*C2
)));
1117 if (!C2
->isZero()) // avoid X udiv 0
1118 if (Instruction
*FoldedDiv
= foldBinOpIntoSelectOrPhi(I
))
1122 if (match(Op0
, m_One())) {
1123 assert(!Ty
->isIntOrIntVectorTy(1) && "i1 divide not removed?");
1125 // 1 / 0 --> undef ; 1 / 1 --> 1 ; 1 / -1 --> -1 ; 1 / anything else --> 0
1126 // (Op1 + 1) u< 3 ? Op1 : 0
1127 // Op1 must be frozen because we are increasing its number of uses.
1128 Value
*F1
= Builder
.CreateFreeze(Op1
, Op1
->getName() + ".fr");
1129 Value
*Inc
= Builder
.CreateAdd(F1
, Op0
);
1130 Value
*Cmp
= Builder
.CreateICmpULT(Inc
, ConstantInt::get(Ty
, 3));
1131 return SelectInst::Create(Cmp
, F1
, ConstantInt::get(Ty
, 0));
1133 // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
1134 // result is one, otherwise it's zero.
1135 return new ZExtInst(Builder
.CreateICmpEQ(Op1
, Op0
), Ty
);
1139 // See if we can fold away this div instruction.
1140 if (SimplifyDemandedInstructionBits(I
))
1143 // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
1145 if (match(Op0
, m_Sub(m_Value(X
), m_Value(Z
)))) // (X - Z) / Y; Y = Op1
1146 if ((IsSigned
&& match(Z
, m_SRem(m_Specific(X
), m_Specific(Op1
)))) ||
1147 (!IsSigned
&& match(Z
, m_URem(m_Specific(X
), m_Specific(Op1
)))))
1148 return BinaryOperator::Create(I
.getOpcode(), X
, Op1
);
1150 // (X << Y) / X -> 1 << Y
1152 if (IsSigned
&& match(Op0
, m_NSWShl(m_Specific(Op1
), m_Value(Y
))))
1153 return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty
, 1), Y
);
1154 if (!IsSigned
&& match(Op0
, m_NUWShl(m_Specific(Op1
), m_Value(Y
))))
1155 return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty
, 1), Y
);
1157 // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
1158 if (match(Op1
, m_c_Mul(m_Specific(Op0
), m_Value(Y
)))) {
1159 bool HasNSW
= cast
<OverflowingBinaryOperator
>(Op1
)->hasNoSignedWrap();
1160 bool HasNUW
= cast
<OverflowingBinaryOperator
>(Op1
)->hasNoUnsignedWrap();
1161 if ((IsSigned
&& HasNSW
) || (!IsSigned
&& HasNUW
)) {
1162 replaceOperand(I
, 0, ConstantInt::get(Ty
, 1));
1163 replaceOperand(I
, 1, Y
);
1168 // (X << Z) / (X * Y) -> (1 << Z) / Y
1169 // TODO: Handle sdiv.
1170 if (!IsSigned
&& Op1
->hasOneUse() &&
1171 match(Op0
, m_NUWShl(m_Value(X
), m_Value(Z
))) &&
1172 match(Op1
, m_c_Mul(m_Specific(X
), m_Value(Y
))))
1173 if (cast
<OverflowingBinaryOperator
>(Op1
)->hasNoUnsignedWrap()) {
1174 Instruction
*NewDiv
= BinaryOperator::CreateUDiv(
1175 Builder
.CreateShl(ConstantInt::get(Ty
, 1), Z
, "", /*NUW*/ true), Y
);
1176 NewDiv
->setIsExact(I
.isExact());
1180 if (Value
*R
= foldIDivShl(I
, Builder
))
1181 return replaceInstUsesWith(I
, R
);
1183 // With the appropriate no-wrap constraint, remove a multiply by the divisor
1184 // after peeking through another divide:
1185 // ((Op1 * X) / Y) / Op1 --> X / Y
1186 if (match(Op0
, m_BinOp(I
.getOpcode(), m_c_Mul(m_Specific(Op1
), m_Value(X
)),
1188 auto *InnerDiv
= cast
<PossiblyExactOperator
>(Op0
);
1189 auto *Mul
= cast
<OverflowingBinaryOperator
>(InnerDiv
->getOperand(0));
1190 Instruction
*NewDiv
= nullptr;
1191 if (!IsSigned
&& Mul
->hasNoUnsignedWrap())
1192 NewDiv
= BinaryOperator::CreateUDiv(X
, Y
);
1193 else if (IsSigned
&& Mul
->hasNoSignedWrap())
1194 NewDiv
= BinaryOperator::CreateSDiv(X
, Y
);
1196 // Exact propagates only if both of the original divides are exact.
1198 NewDiv
->setIsExact(I
.isExact() && InnerDiv
->isExact());
1206 static const unsigned MaxDepth
= 6;
1208 // Take the exact integer log2 of the value. If DoFold is true, create the
1209 // actual instructions, otherwise return a non-null dummy value. Return nullptr
1211 static Value
*takeLog2(IRBuilderBase
&Builder
, Value
*Op
, unsigned Depth
,
1212 bool AssumeNonZero
, bool DoFold
) {
1213 auto IfFold
= [DoFold
](function_ref
<Value
*()> Fn
) {
1215 return reinterpret_cast<Value
*>(-1);
1219 // FIXME: assert that Op1 isn't/doesn't contain undef.
1222 if (match(Op
, m_Power2()))
1223 return IfFold([&]() {
1224 Constant
*C
= ConstantExpr::getExactLogBase2(cast
<Constant
>(Op
));
1226 llvm_unreachable("Failed to constant fold udiv -> logbase2");
1230 // The remaining tests are all recursive, so bail out if we hit the limit.
1231 if (Depth
++ == MaxDepth
)
1234 // log2(zext X) -> zext log2(X)
1235 // FIXME: Require one use?
1237 if (match(Op
, m_ZExt(m_Value(X
))))
1238 if (Value
*LogX
= takeLog2(Builder
, X
, Depth
, AssumeNonZero
, DoFold
))
1239 return IfFold([&]() { return Builder
.CreateZExt(LogX
, Op
->getType()); });
1241 // log2(X << Y) -> log2(X) + Y
1242 // FIXME: Require one use unless X is 1?
1243 if (match(Op
, m_Shl(m_Value(X
), m_Value(Y
)))) {
1244 auto *BO
= cast
<OverflowingBinaryOperator
>(Op
);
1245 // nuw will be set if the `shl` is trivially non-zero.
1246 if (AssumeNonZero
|| BO
->hasNoUnsignedWrap() || BO
->hasNoSignedWrap())
1247 if (Value
*LogX
= takeLog2(Builder
, X
, Depth
, AssumeNonZero
, DoFold
))
1248 return IfFold([&]() { return Builder
.CreateAdd(LogX
, Y
); });
1251 // log2(Cond ? X : Y) -> Cond ? log2(X) : log2(Y)
1252 // FIXME: missed optimization: if one of the hands of select is/contains
1253 // undef, just directly pick the other one.
1254 // FIXME: can both hands contain undef?
1255 // FIXME: Require one use?
1256 if (SelectInst
*SI
= dyn_cast
<SelectInst
>(Op
))
1257 if (Value
*LogX
= takeLog2(Builder
, SI
->getOperand(1), Depth
,
1258 AssumeNonZero
, DoFold
))
1259 if (Value
*LogY
= takeLog2(Builder
, SI
->getOperand(2), Depth
,
1260 AssumeNonZero
, DoFold
))
1261 return IfFold([&]() {
1262 return Builder
.CreateSelect(SI
->getOperand(0), LogX
, LogY
);
1265 // log2(umin(X, Y)) -> umin(log2(X), log2(Y))
1266 // log2(umax(X, Y)) -> umax(log2(X), log2(Y))
1267 auto *MinMax
= dyn_cast
<MinMaxIntrinsic
>(Op
);
1268 if (MinMax
&& MinMax
->hasOneUse() && !MinMax
->isSigned()) {
1269 // Use AssumeNonZero as false here. Otherwise we can hit case where
1270 // log2(umax(X, Y)) != umax(log2(X), log2(Y)) (because overflow).
1271 if (Value
*LogX
= takeLog2(Builder
, MinMax
->getLHS(), Depth
,
1272 /*AssumeNonZero*/ false, DoFold
))
1273 if (Value
*LogY
= takeLog2(Builder
, MinMax
->getRHS(), Depth
,
1274 /*AssumeNonZero*/ false, DoFold
))
1275 return IfFold([&]() {
1276 return Builder
.CreateBinaryIntrinsic(MinMax
->getIntrinsicID(), LogX
,
1284 /// If we have zero-extended operands of an unsigned div or rem, we may be able
1285 /// to narrow the operation (sink the zext below the math).
1286 static Instruction
*narrowUDivURem(BinaryOperator
&I
,
1287 InstCombinerImpl
&IC
) {
1288 Instruction::BinaryOps Opcode
= I
.getOpcode();
1289 Value
*N
= I
.getOperand(0);
1290 Value
*D
= I
.getOperand(1);
1291 Type
*Ty
= I
.getType();
1293 if (match(N
, m_ZExt(m_Value(X
))) && match(D
, m_ZExt(m_Value(Y
))) &&
1294 X
->getType() == Y
->getType() && (N
->hasOneUse() || D
->hasOneUse())) {
1295 // udiv (zext X), (zext Y) --> zext (udiv X, Y)
1296 // urem (zext X), (zext Y) --> zext (urem X, Y)
1297 Value
*NarrowOp
= IC
.Builder
.CreateBinOp(Opcode
, X
, Y
);
1298 return new ZExtInst(NarrowOp
, Ty
);
1302 if (isa
<Instruction
>(N
) && match(N
, m_OneUse(m_ZExt(m_Value(X
)))) &&
1303 match(D
, m_Constant(C
))) {
1304 // If the constant is the same in the smaller type, use the narrow version.
1305 Constant
*TruncC
= IC
.getLosslessUnsignedTrunc(C
, X
->getType());
1309 // udiv (zext X), C --> zext (udiv X, C')
1310 // urem (zext X), C --> zext (urem X, C')
1311 return new ZExtInst(IC
.Builder
.CreateBinOp(Opcode
, X
, TruncC
), Ty
);
1313 if (isa
<Instruction
>(D
) && match(D
, m_OneUse(m_ZExt(m_Value(X
)))) &&
1314 match(N
, m_Constant(C
))) {
1315 // If the constant is the same in the smaller type, use the narrow version.
1316 Constant
*TruncC
= IC
.getLosslessUnsignedTrunc(C
, X
->getType());
1320 // udiv C, (zext X) --> zext (udiv C', X)
1321 // urem C, (zext X) --> zext (urem C', X)
1322 return new ZExtInst(IC
.Builder
.CreateBinOp(Opcode
, TruncC
, X
), Ty
);
1328 Instruction
*InstCombinerImpl::visitUDiv(BinaryOperator
&I
) {
1329 if (Value
*V
= simplifyUDivInst(I
.getOperand(0), I
.getOperand(1), I
.isExact(),
1330 SQ
.getWithInstruction(&I
)))
1331 return replaceInstUsesWith(I
, V
);
1333 if (Instruction
*X
= foldVectorBinop(I
))
1336 // Handle the integer div common cases
1337 if (Instruction
*Common
= commonIDivTransforms(I
))
1340 Value
*Op0
= I
.getOperand(0), *Op1
= I
.getOperand(1);
1342 const APInt
*C1
, *C2
;
1343 if (match(Op0
, m_LShr(m_Value(X
), m_APInt(C1
))) && match(Op1
, m_APInt(C2
))) {
1344 // (X lshr C1) udiv C2 --> X udiv (C2 << C1)
1346 APInt C2ShlC1
= C2
->ushl_ov(*C1
, Overflow
);
1348 bool IsExact
= I
.isExact() && match(Op0
, m_Exact(m_Value()));
1349 BinaryOperator
*BO
= BinaryOperator::CreateUDiv(
1350 X
, ConstantInt::get(X
->getType(), C2ShlC1
));
1357 // Op0 / C where C is large (negative) --> zext (Op0 >= C)
1358 // TODO: Could use isKnownNegative() to handle non-constant values.
1359 Type
*Ty
= I
.getType();
1360 if (match(Op1
, m_Negative())) {
1361 Value
*Cmp
= Builder
.CreateICmpUGE(Op0
, Op1
);
1362 return CastInst::CreateZExtOrBitCast(Cmp
, Ty
);
1364 // Op0 / (sext i1 X) --> zext (Op0 == -1) (if X is 0, the div is undefined)
1365 if (match(Op1
, m_SExt(m_Value(X
))) && X
->getType()->isIntOrIntVectorTy(1)) {
1366 Value
*Cmp
= Builder
.CreateICmpEQ(Op0
, ConstantInt::getAllOnesValue(Ty
));
1367 return CastInst::CreateZExtOrBitCast(Cmp
, Ty
);
1370 if (Instruction
*NarrowDiv
= narrowUDivURem(I
, *this))
1373 // If the udiv operands are non-overflowing multiplies with a common operand,
1374 // then eliminate the common factor:
1375 // (A * B) / (A * X) --> B / X (and commuted variants)
1376 // TODO: The code would be reduced if we had m_c_NUWMul pattern matching.
1377 // TODO: If -reassociation handled this generally, we could remove this.
1379 if (match(Op0
, m_NUWMul(m_Value(A
), m_Value(B
)))) {
1380 if (match(Op1
, m_NUWMul(m_Specific(A
), m_Value(X
))) ||
1381 match(Op1
, m_NUWMul(m_Value(X
), m_Specific(A
))))
1382 return BinaryOperator::CreateUDiv(B
, X
);
1383 if (match(Op1
, m_NUWMul(m_Specific(B
), m_Value(X
))) ||
1384 match(Op1
, m_NUWMul(m_Value(X
), m_Specific(B
))))
1385 return BinaryOperator::CreateUDiv(A
, X
);
1388 // Look through a right-shift to find the common factor:
1389 // ((Op1 *nuw A) >> B) / Op1 --> A >> B
1390 if (match(Op0
, m_LShr(m_NUWMul(m_Specific(Op1
), m_Value(A
)), m_Value(B
))) ||
1391 match(Op0
, m_LShr(m_NUWMul(m_Value(A
), m_Specific(Op1
)), m_Value(B
)))) {
1392 Instruction
*Lshr
= BinaryOperator::CreateLShr(A
, B
);
1393 if (I
.isExact() && cast
<PossiblyExactOperator
>(Op0
)->isExact())
1398 // Op1 udiv Op2 -> Op1 lshr log2(Op2), if log2() folds away.
1399 if (takeLog2(Builder
, Op1
, /*Depth*/ 0, /*AssumeNonZero*/ true,
1400 /*DoFold*/ false)) {
1401 Value
*Res
= takeLog2(Builder
, Op1
, /*Depth*/ 0,
1402 /*AssumeNonZero*/ true, /*DoFold*/ true);
1403 return replaceInstUsesWith(
1404 I
, Builder
.CreateLShr(Op0
, Res
, I
.getName(), I
.isExact()));
1410 Instruction
*InstCombinerImpl::visitSDiv(BinaryOperator
&I
) {
1411 if (Value
*V
= simplifySDivInst(I
.getOperand(0), I
.getOperand(1), I
.isExact(),
1412 SQ
.getWithInstruction(&I
)))
1413 return replaceInstUsesWith(I
, V
);
1415 if (Instruction
*X
= foldVectorBinop(I
))
1418 // Handle the integer div common cases
1419 if (Instruction
*Common
= commonIDivTransforms(I
))
1422 Value
*Op0
= I
.getOperand(0), *Op1
= I
.getOperand(1);
1423 Type
*Ty
= I
.getType();
1425 // sdiv Op0, -1 --> -Op0
1426 // sdiv Op0, (sext i1 X) --> -Op0 (because if X is 0, the op is undefined)
1427 if (match(Op1
, m_AllOnes()) ||
1428 (match(Op1
, m_SExt(m_Value(X
))) && X
->getType()->isIntOrIntVectorTy(1)))
1429 return BinaryOperator::CreateNeg(Op0
);
1431 // X / INT_MIN --> X == INT_MIN
1432 if (match(Op1
, m_SignMask()))
1433 return new ZExtInst(Builder
.CreateICmpEQ(Op0
, Op1
), Ty
);
1436 // sdiv exact X, 1<<C --> ashr exact X, C iff 1<<C is non-negative
1437 if (match(Op1
, m_Power2()) && match(Op1
, m_NonNegative())) {
1438 Constant
*C
= ConstantExpr::getExactLogBase2(cast
<Constant
>(Op1
));
1439 return BinaryOperator::CreateExactAShr(Op0
, C
);
1442 // sdiv exact X, (1<<ShAmt) --> ashr exact X, ShAmt (if shl is non-negative)
1444 if (match(Op1
, m_NSWShl(m_One(), m_Value(ShAmt
))))
1445 return BinaryOperator::CreateExactAShr(Op0
, ShAmt
);
1447 // sdiv exact X, -1<<C --> -(ashr exact X, C)
1448 if (match(Op1
, m_NegatedPower2())) {
1449 Constant
*NegPow2C
= ConstantExpr::getNeg(cast
<Constant
>(Op1
));
1450 Constant
*C
= ConstantExpr::getExactLogBase2(NegPow2C
);
1451 Value
*Ashr
= Builder
.CreateAShr(Op0
, C
, I
.getName() + ".neg", true);
1452 return BinaryOperator::CreateNeg(Ashr
);
1457 if (match(Op1
, m_APInt(Op1C
))) {
1458 // If the dividend is sign-extended and the constant divisor is small enough
1459 // to fit in the source type, shrink the division to the narrower type:
1460 // (sext X) sdiv C --> sext (X sdiv C)
1462 if (match(Op0
, m_OneUse(m_SExt(m_Value(Op0Src
)))) &&
1463 Op0Src
->getType()->getScalarSizeInBits() >=
1464 Op1C
->getSignificantBits()) {
1466 // In the general case, we need to make sure that the dividend is not the
1467 // minimum signed value because dividing that by -1 is UB. But here, we
1468 // know that the -1 divisor case is already handled above.
1470 Constant
*NarrowDivisor
=
1471 ConstantExpr::getTrunc(cast
<Constant
>(Op1
), Op0Src
->getType());
1472 Value
*NarrowOp
= Builder
.CreateSDiv(Op0Src
, NarrowDivisor
);
1473 return new SExtInst(NarrowOp
, Ty
);
1476 // -X / C --> X / -C (if the negation doesn't overflow).
1477 // TODO: This could be enhanced to handle arbitrary vector constants by
1478 // checking if all elements are not the min-signed-val.
1479 if (!Op1C
->isMinSignedValue() &&
1480 match(Op0
, m_NSWSub(m_Zero(), m_Value(X
)))) {
1481 Constant
*NegC
= ConstantInt::get(Ty
, -(*Op1C
));
1482 Instruction
*BO
= BinaryOperator::CreateSDiv(X
, NegC
);
1483 BO
->setIsExact(I
.isExact());
1488 // -X / Y --> -(X / Y)
1490 if (match(&I
, m_SDiv(m_OneUse(m_NSWSub(m_Zero(), m_Value(X
))), m_Value(Y
))))
1491 return BinaryOperator::CreateNSWNeg(
1492 Builder
.CreateSDiv(X
, Y
, I
.getName(), I
.isExact()));
1494 // abs(X) / X --> X > -1 ? 1 : -1
1495 // X / abs(X) --> X > -1 ? 1 : -1
1496 if (match(&I
, m_c_BinOp(
1497 m_OneUse(m_Intrinsic
<Intrinsic::abs
>(m_Value(X
), m_One())),
1499 Value
*Cond
= Builder
.CreateIsNotNeg(X
);
1500 return SelectInst::Create(Cond
, ConstantInt::get(Ty
, 1),
1501 ConstantInt::getAllOnesValue(Ty
));
1504 KnownBits KnownDividend
= computeKnownBits(Op0
, 0, &I
);
1506 (match(Op1
, m_Power2(Op1C
)) || match(Op1
, m_NegatedPower2(Op1C
))) &&
1507 KnownDividend
.countMinTrailingZeros() >= Op1C
->countr_zero()) {
1512 if (KnownDividend
.isNonNegative()) {
1513 // If both operands are unsigned, turn this into a udiv.
1514 if (isKnownNonNegative(Op1
, DL
, 0, &AC
, &I
, &DT
)) {
1515 auto *BO
= BinaryOperator::CreateUDiv(Op0
, Op1
, I
.getName());
1516 BO
->setIsExact(I
.isExact());
1520 if (match(Op1
, m_NegatedPower2())) {
1521 // X sdiv (-(1 << C)) -> -(X sdiv (1 << C)) ->
1522 // -> -(X udiv (1 << C)) -> -(X u>> C)
1523 Constant
*CNegLog2
= ConstantExpr::getExactLogBase2(
1524 ConstantExpr::getNeg(cast
<Constant
>(Op1
)));
1525 Value
*Shr
= Builder
.CreateLShr(Op0
, CNegLog2
, I
.getName(), I
.isExact());
1526 return BinaryOperator::CreateNeg(Shr
);
1529 if (isKnownToBeAPowerOfTwo(Op1
, /*OrZero*/ true, 0, &I
)) {
1530 // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
1531 // Safe because the only negative value (1 << Y) can take on is
1532 // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
1533 // the sign bit set.
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  return nullptr;
}

/// Remove negation and try to convert division into multiplication.
Instruction *InstCombinerImpl::foldFDivConstantDivisor(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(1), m_Constant(C)))
    return nullptr;

  // -X / C --> X / -C
  Value *X;
  const DataLayout &DL = I.getModule()->getDataLayout();
  if (match(I.getOperand(0), m_FNeg(m_Value(X))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFDivFMF(X, NegC, &I);

  // nnan X / +0.0 -> copysign(inf, X)
  if (I.hasNoNaNs() && match(I.getOperand(1), m_Zero())) {
    IRBuilder<> B(&I);
    // TODO: nnan nsz X / -0.0 -> copysign(inf, X)
    CallInst *CopySign = B.CreateIntrinsic(
        Intrinsic::copysign, {C->getType()},
        {ConstantFP::getInfinity(I.getType()), I.getOperand(0)}, &I);
    CopySign->takeName(&I);
    return replaceInstUsesWith(I, CopySign);
  }

  // If the constant divisor has an exact inverse, this is always safe. If not,
  // then we can still create a reciprocal if fast-math-flags allow it and the
  // constant is a regular number (not zero, infinite, or denormal).
  if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
    return nullptr;

  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  auto *RecipC = ConstantFoldBinaryOpOperands(
      Instruction::FDiv, ConstantFP::get(I.getType(), 1.0), C, DL);
  if (!RecipC || !RecipC->isNormalFP())
    return nullptr;

  // X / C --> X * (1 / C)
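  // e.g. (illustrative): fdiv float %x, 4.0 --> fmul float %x, 0.25
  // (0.25 is an exact inverse, so no fast-math flags are required).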
  return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
}

/// Remove negation and try to reassociate constant math.
static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(0), m_Constant(C)))
    return nullptr;

  // C / -X --> -C / X
  Value *X;
  const DataLayout &DL = I.getModule()->getDataLayout();
  if (match(I.getOperand(1), m_FNeg(m_Value(X))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFDivFMF(NegC, X, &I);

  if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
    return nullptr;

  // Try to reassociate C / X expressions where X includes another constant.
  Constant *C2, *NewC = nullptr;
  if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
    // C / (X * C2) --> (C / C2) / X
    NewC = ConstantFoldBinaryOpOperands(Instruction::FDiv, C, C2, DL);
  } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
    // C / (X / C2) --> (C * C2) / X
    NewC = ConstantFoldBinaryOpOperands(Instruction::FMul, C, C2, DL);
  }
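  // e.g. (illustrative): 10.0 / (X * 2.0) --> 5.0 / X, given reassoc+arcp.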

  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  if (!NewC || !NewC->isNormalFP())
    return nullptr;

  return BinaryOperator::CreateFDivFMF(NewC, X, &I);
}

/// Negate the exponent of pow/exp to fold division-by-pow() into multiply.
static Instruction *foldFDivPowDivisor(BinaryOperator &I,
                                       InstCombiner::BuilderTy &Builder) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  auto *II = dyn_cast<IntrinsicInst>(Op1);
  if (!II || !II->hasOneUse() || !I.hasAllowReassoc() ||
      !I.hasAllowReciprocal())
    return nullptr;

  // Z / pow(X, Y) --> Z * pow(X, -Y)
  // Z / exp{2}(Y) --> Z * exp{2}(-Y)
  // In the general case, this creates an extra instruction, but fmul allows
  // for better canonicalization and optimization than fdiv.
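  // e.g. (illustrative):
  //   %p = call float @llvm.exp2.f32(float %y)
  //   %d = fdiv reassoc arcp float %z, %p
  // becomes:
  //   %n = fneg reassoc arcp float %y
  //   %p2 = call reassoc arcp float @llvm.exp2.f32(float %n)
  //   %d = fmul reassoc arcp float %z, %p2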
  Intrinsic::ID IID = II->getIntrinsicID();
  SmallVector<Value *> Args;
  switch (IID) {
  case Intrinsic::pow:
    Args.push_back(II->getArgOperand(0));
    Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(1), &I));
    break;
  case Intrinsic::powi: {
    // Require 'ninf' assuming that makes powi(X, -INT_MIN) acceptable.
    // That is, X ** (huge negative number) is 0.0, ~1.0, or INF and so
    // dividing by that is INF, ~1.0, or 0.0. Code that uses powi allows
    // non-standard results, so this corner case should be acceptable if the
    // code rules out INF values.
    if (!I.hasNoInfs())
      return nullptr;
    Args.push_back(II->getArgOperand(0));
    Args.push_back(Builder.CreateNeg(II->getArgOperand(1)));
    Type *Tys[] = {I.getType(), II->getArgOperand(1)->getType()};
    Value *Pow = Builder.CreateIntrinsic(IID, Tys, Args, &I);
    return BinaryOperator::CreateFMulFMF(Op0, Pow, &I);
  }
  case Intrinsic::exp:
  case Intrinsic::exp2:
    Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(0), &I));
    break;
  default:
    return nullptr;
  }
  Value *Pow = Builder.CreateIntrinsic(IID, I.getType(), Args, &I);
  return BinaryOperator::CreateFMulFMF(Op0, Pow, &I);
}

Instruction *InstCombinerImpl::visitFDiv(BinaryOperator &I) {
  Module *M = I.getModule();

  if (Value *V = simplifyFDivInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  if (Instruction *R = foldFDivConstantDivisor(I))
    return R;

  if (Instruction *R = foldFDivConstantDividend(I))
    return R;

  if (Instruction *R = foldFPSignBitOps(I))
    return R;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (isa<Constant>(Op1))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
    Value *X, *Y;
    if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
      // (X / Y) / Z => X / (Y * Z)
      Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
      return BinaryOperator::CreateFDivFMF(X, YZ, &I);
    }
    if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
      // Z / (X / Y) => (Y * Z) / X
      Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
      return BinaryOperator::CreateFDivFMF(YZ, X, &I);
    }
    // Z / (1.0 / Y) => (Y * Z)
    //
    // This is a special case of Z / (X / Y) => (Y * Z) / X, with X = 1.0. The
    // m_OneUse check is avoided because even with multiple uses of 1.0/Y,
    // the instruction count remains the same and a division is replaced by
    // a multiplication.
    if (match(Op1, m_FDiv(m_SpecificFP(1.0), m_Value(Y))))
      return BinaryOperator::CreateFMulFMF(Y, Op0, &I);
  }

  if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
    // sin(X) / cos(X) -> tan(X)
    // cos(X) / sin(X) -> 1/tan(X) (cotangent)
    Value *X;
    bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
                 match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
    bool IsCot =
        !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
                  match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));

    if ((IsTan || IsCot) && hasFloatFn(M, &TLI, I.getType(), LibFunc_tan,
                                       LibFunc_tanf, LibFunc_tanl)) {
      IRBuilder<> B(&I);
      IRBuilder<>::FastMathFlagGuard FMFGuard(B);
      B.setFastMathFlags(I.getFastMathFlags());
      AttributeList Attrs =
          cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
      Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
                                        LibFunc_tanl, B, Attrs);
      if (IsCot)
        Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
      return replaceInstUsesWith(I, Res);
    }
  }

  // X / (X * Y) --> 1.0 / Y
  // Reassociate to (X / X -> 1.0) is legal when NaNs are not allowed.
  // We can ignore the possibility that X is infinity because INF/INF is NaN.
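  // Note the nnan requirement: with X == 0.0 the original expression is
  // 0.0 / 0.0 == NaN, while the replacement 1.0 / Y need not be.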
  Value *X, *Y;
  if (I.hasNoNaNs() && I.hasAllowReassoc() &&
      match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
    replaceOperand(I, 0, ConstantFP::get(I.getType(), 1.0));
    replaceOperand(I, 1, Y);
    return &I;
  }

  // X / fabs(X) -> copysign(1.0, X)
  // fabs(X) / X -> copysign(1.0, X)
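  // e.g. (illustrative): with %x == -3.0, both forms compute
  // -3.0 / 3.0 == -1.0 == copysign(1.0, -3.0).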
  if (I.hasNoNaNs() && I.hasNoInfs() &&
      (match(&I, m_FDiv(m_Value(X), m_FAbs(m_Deferred(X)))) ||
       match(&I, m_FDiv(m_FAbs(m_Value(X)), m_Deferred(X))))) {
    Value *V = Builder.CreateBinaryIntrinsic(
        Intrinsic::copysign, ConstantFP::get(I.getType(), 1.0), X, &I);
    return replaceInstUsesWith(I, V);
  }

  if (Instruction *Mul = foldFDivPowDivisor(I, Builder))
    return Mul;

  // pow(X, Y) / X --> pow(X, Y-1)
  if (I.hasAllowReassoc() &&
      match(Op0, m_OneUse(m_Intrinsic<Intrinsic::pow>(m_Specific(Op1),
                                                      m_Value(Y))))) {
    Value *Y1 =
        Builder.CreateFAddFMF(Y, ConstantFP::get(I.getType(), -1.0), &I);
    Value *Pow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, Op1, Y1, &I);
    return replaceInstUsesWith(I, Pow);
  }

  // powi(X, Y) / X --> powi(X, Y-1)
  // This is legal when (Y - 1) can't wrap around, in which case reassoc and
  // nnan are required.
  // TODO: Multi-use may also be better off creating powi(X, Y-1).
  if (I.hasAllowReassoc() && I.hasNoNaNs() &&
      match(Op0, m_OneUse(m_Intrinsic<Intrinsic::powi>(m_Specific(Op1),
                                                       m_Value(Y)))) &&
      willNotOverflowSignedSub(Y, ConstantInt::get(Y->getType(), 1), I)) {
    Constant *NegOne = ConstantInt::getAllOnesValue(Y->getType());
    Value *Y1 = Builder.CreateAdd(Y, NegOne);
    Type *Types[] = {Op1->getType(), Y1->getType()};
    Value *Pow = Builder.CreateIntrinsic(Intrinsic::powi, Types, {Op1, Y1}, &I);
    return replaceInstUsesWith(I, Pow);
  }

  return nullptr;
}

// Variety of transforms for:
// (urem/srem (mul X, Y), (mul X, Z))
// (urem/srem (shl X, Y), (shl X, Z))
// (urem/srem (shl Y, X), (shl Z, X))
// NB: The shift cases are really just extensions of the mul case. We treat
// shift as Val * (1 << Amt).
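// e.g. (illustrative): urem (mul nuw i8 %x, 12), (mul i8 %x, 4) --> 0,
// since 12 % 4 == 0 and nuw on the dividend rules out wrapping.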
static Instruction *simplifyIRemMulShl(BinaryOperator &I,
                                       InstCombinerImpl &IC) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *X = nullptr;
  APInt Y, Z;
  bool ShiftByX = false;

  // If V is not nullptr, it will be matched using m_Specific.
  auto MatchShiftOrMulXC = [](Value *Op, Value *&V, APInt &C) -> bool {
    const APInt *Tmp = nullptr;
    if ((!V && match(Op, m_Mul(m_Value(V), m_APInt(Tmp)))) ||
        (V && match(Op, m_Mul(m_Specific(V), m_APInt(Tmp)))))
      C = *Tmp;
    else if ((!V && match(Op, m_Shl(m_Value(V), m_APInt(Tmp)))) ||
             (V && match(Op, m_Shl(m_Specific(V), m_APInt(Tmp)))))
      C = APInt(Tmp->getBitWidth(), 1) << *Tmp;
    if (Tmp != nullptr)
      return true;

    // Reset `V` so we don't start with specific value on next match attempt.
    V = nullptr;
    return false;
  };

  auto MatchShiftCX = [](Value *Op, APInt &C, Value *&V) -> bool {
    const APInt *Tmp = nullptr;
    if ((!V && match(Op, m_Shl(m_APInt(Tmp), m_Value(V)))) ||
        (V && match(Op, m_Shl(m_APInt(Tmp), m_Specific(V))))) {
      C = *Tmp;
      return true;
    }

    // Reset `V` so we don't start with specific value on next match attempt.
    V = nullptr;
    return false;
  };

  if (MatchShiftOrMulXC(Op0, X, Y) && MatchShiftOrMulXC(Op1, X, Z)) {
    // Matched (mul/shl X, C) on both operands.
  } else if (MatchShiftCX(Op0, Y, X) && MatchShiftCX(Op1, Z, X)) {
    ShiftByX = true;
  } else {
    return nullptr;
  }

  bool IsSRem = I.getOpcode() == Instruction::SRem;

  OverflowingBinaryOperator *BO0 = cast<OverflowingBinaryOperator>(Op0);
  // TODO: We may be able to deduce more about nsw/nuw of BO0/BO1 based on Y >=
  // Z or Z >= Y.
  bool BO0HasNSW = BO0->hasNoSignedWrap();
  bool BO0HasNUW = BO0->hasNoUnsignedWrap();
  bool BO0NoWrap = IsSRem ? BO0HasNSW : BO0HasNUW;

  APInt RemYZ = IsSRem ? Y.srem(Z) : Y.urem(Z);
  // (rem (mul nuw/nsw X, Y), (mul X, Z))
  //      if (rem Y, Z) == 0
  //          -> 0
  if (RemYZ.isZero() && BO0NoWrap)
    return IC.replaceInstUsesWith(I, ConstantInt::getNullValue(I.getType()));

  // Helper function to emit either (RemSimplificationC << X) or
  // (RemSimplificationC * X) depending on whether we matched Op0/Op1 as
  // (shl V, X) or (mul V, X) respectively.
  auto CreateMulOrShift =
      [&](const APInt &RemSimplificationC) -> BinaryOperator * {
    Value *RemSimplification =
        ConstantInt::get(I.getType(), RemSimplificationC);
    return ShiftByX ? BinaryOperator::CreateShl(RemSimplification, X)
                    : BinaryOperator::CreateMul(X, RemSimplification);
  };

  OverflowingBinaryOperator *BO1 = cast<OverflowingBinaryOperator>(Op1);
  bool BO1HasNSW = BO1->hasNoSignedWrap();
  bool BO1HasNUW = BO1->hasNoUnsignedWrap();
  bool BO1NoWrap = IsSRem ? BO1HasNSW : BO1HasNUW;
  // (rem (mul X, Y), (mul nuw/nsw X, Z))
  //      if (rem Y, Z) == Y
  //          -> (mul nuw/nsw X, Y)
  if (RemYZ == Y && BO1NoWrap) {
    BinaryOperator *BO = CreateMulOrShift(Y);
    // Copy any overflow flags from Op0.
    BO->setHasNoSignedWrap(IsSRem || BO0HasNSW);
    BO->setHasNoUnsignedWrap(!IsSRem || BO0HasNUW);
    return BO;
  }

  // (rem (mul nuw/nsw X, Y), (mul {nsw} X, Z))
  //      if Y >= Z
  //          -> (mul {nuw} nsw X, (rem Y, Z))
  if (Y.uge(Z) && (IsSRem ? (BO0HasNSW && BO1HasNSW) : BO0HasNUW)) {
    BinaryOperator *BO = CreateMulOrShift(RemYZ);
    BO->setHasNoSignedWrap();
    BO->setHasNoUnsignedWrap(BO0HasNUW);
    return BO;
  }

  return nullptr;
}

/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those integer
/// remainder instructions.
Instruction *InstCombinerImpl::commonIRemTransforms(BinaryOperator &I) {
  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: rem X, (select Cond, Y, Z)
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  // If the divisor is a select-of-constants, try to constant fold all rem ops:
  // C % (select Cond, TrueC, FalseC) --> select Cond, (C % TrueC), (C % FalseC)
  // TODO: Adapt simplifyDivRemOfSelectWithZeroOp to allow this and other folds.
  if (match(Op0, m_ImmConstant()) &&
      match(Op1, m_Select(m_Value(), m_ImmConstant(), m_ImmConstant()))) {
    if (Instruction *R = FoldOpIntoSelect(I, cast<SelectInst>(Op1),
                                          /*FoldWithMultiUse*/ true))
      return R;
  }

  if (isa<Constant>(Op1)) {
    if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
        if (Instruction *R = FoldOpIntoSelect(I, SI))
          return R;
      } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
        const APInt *Op1Int;
        if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
            (I.getOpcode() == Instruction::URem ||
             !Op1Int->isMinSignedValue())) {
          // foldOpIntoPhi will speculate instructions to the end of the PHI's
          // predecessor blocks, so do this only if we know the srem or urem
          // will not fault.
          if (Instruction *NV = foldOpIntoPhi(I, PN))
            return NV;
        }
      }

      // See if we can fold away this rem instruction.
      if (SimplifyDemandedInstructionBits(I))
        return &I;
    }
  }

  if (Instruction *R = simplifyIRemMulShl(I, *this))
    return R;

  return nullptr;
}

Instruction *InstCombinerImpl::visitURem(BinaryOperator &I) {
  if (Value *V = simplifyURemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *common = commonIRemTransforms(I))
    return common;

  if (Instruction *NarrowRem = narrowUDivURem(I, *this))
    return NarrowRem;

  // X urem Y -> X and Y-1, where Y is a power of 2.
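  // e.g. (illustrative): urem i32 %x, 16 --> and i32 %x, 15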
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
    // This may increase instruction count; we don't enforce that Y is a
    // constant.
    Constant *N1 = Constant::getAllOnesValue(Ty);
    Value *Add = Builder.CreateAdd(Op1, N1);
    return BinaryOperator::CreateAnd(Op0, Add);
  }

  // 1 urem X -> zext(X != 1)
  if (match(Op0, m_One())) {
    Value *Cmp = Builder.CreateICmpNE(Op1, ConstantInt::get(Ty, 1));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  // Op0 urem C -> Op0 < C ? Op0 : Op0 - C, where C >= signbit.
  // Op0 must be frozen because we are increasing its number of uses.
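  // Because C >= signbit, the unsigned quotient can only be 0 or 1, so the
  // remainder is either Op0 itself or Op0 - C; e.g. (illustrative, treating
  // i8 values as unsigned): urem i8 %x, 200 --> %x u< 200 ? %x : %x - 200.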
  if (match(Op1, m_Negative())) {
    Value *F0 = Builder.CreateFreeze(Op0, Op0->getName() + ".fr");
    Value *Cmp = Builder.CreateICmpULT(F0, Op1);
    Value *Sub = Builder.CreateSub(F0, Op1);
    return SelectInst::Create(Cmp, F0, Sub);
  }

  // If the divisor is a sext of a boolean, then the divisor must be max
  // unsigned value (-1). Therefore, the remainder is Op0 unless Op0 is also
  // max unsigned value. In that case, the remainder is 0:
  // urem Op0, (sext i1 X) --> (Op0 == -1) ? 0 : Op0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *FrozenOp0 = Builder.CreateFreeze(Op0, Op0->getName() + ".frozen");
    Value *Cmp =
        Builder.CreateICmpEQ(FrozenOp0, ConstantInt::getAllOnesValue(Ty));
    return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), FrozenOp0);
  }

  // (X + 1) urem Op1 --> (X + 1) == Op1 ? 0 : X + 1, if we know X u< Op1.
  if (match(Op0, m_Add(m_Value(X), m_One()))) {
    Value *Val =
        simplifyICmpInst(ICmpInst::ICMP_ULT, X, Op1, SQ.getWithInstruction(&I));
    if (Val && match(Val, m_One())) {
      Value *FrozenOp0 = Builder.CreateFreeze(Op0, Op0->getName() + ".frozen");
      Value *Cmp = Builder.CreateICmpEQ(FrozenOp0, Op1);
      return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), FrozenOp0);
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitSRem(BinaryOperator &I) {
  if (Value *V = simplifySRemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer rem common cases.
  if (Instruction *Common = commonIRemTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  {
    const APInt *Y;
    // X % -Y -> X % Y
    if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue())
      return replaceOperand(I, 1, ConstantInt::get(I.getType(), -*Y));
  }

  // -X srem Y --> -(X srem Y)
  Value *X, *Y;
  if (match(&I, m_SRem(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(Builder.CreateSRem(X, Y));

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a urem.
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
      MaskedValueIsZero(Op0, Mask, 0, &I)) {
    // X srem Y -> X urem Y, iff X and Y don't have sign bit set
    return BinaryOperator::CreateURem(Op0, Op1, I.getName());
  }

  // If it's a constant vector, flip any negative values positive.
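  // The sign of an srem result follows only the dividend, so negating a
  // divisor element never changes the result; e.g. (illustrative):
  //   srem <2 x i32> %x, <i32 -4, i32 4> --> srem <2 x i32> %x, <i32 4, i32 4>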
  if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
    Constant *C = cast<Constant>(Op1);
    unsigned VWidth = cast<FixedVectorType>(C->getType())->getNumElements();

    bool hasNegative = false;
    bool hasMissing = false;
    for (unsigned i = 0; i != VWidth; ++i) {
      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) {
        hasMissing = true;
        break;
      }

      if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
        if (RHS->isNegative())
          hasNegative = true;
    }

    if (hasNegative && !hasMissing) {
      SmallVector<Constant *, 16> Elts(VWidth);
      for (unsigned i = 0; i != VWidth; ++i) {
        Elts[i] = C->getAggregateElement(i); // Handle undef, etc.
        if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
          if (RHS->isNegative())
            Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
        }
      }

      Constant *NewRHSV = ConstantVector::get(Elts);
      if (NewRHSV != C) // Don't loop on -MININT
        return replaceOperand(I, 1, NewRHSV);
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitFRem(BinaryOperator &I) {
  if (Value *V = simplifyFRemInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;