1 //===- InstCombineMulDivRem.cpp -------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
10 // srem, urem, frem.
12 //===----------------------------------------------------------------------===//
14 #include "InstCombineInternal.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/Analysis/InstructionSimplify.h"
19 #include "llvm/IR/BasicBlock.h"
20 #include "llvm/IR/Constant.h"
21 #include "llvm/IR/Constants.h"
22 #include "llvm/IR/InstrTypes.h"
23 #include "llvm/IR/Instruction.h"
24 #include "llvm/IR/Instructions.h"
25 #include "llvm/IR/IntrinsicInst.h"
26 #include "llvm/IR/Intrinsics.h"
27 #include "llvm/IR/Operator.h"
28 #include "llvm/IR/PatternMatch.h"
29 #include "llvm/IR/Type.h"
30 #include "llvm/IR/Value.h"
31 #include "llvm/Support/Casting.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/KnownBits.h"
34 #include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
35 #include "llvm/Transforms/Utils/BuildLibCalls.h"
36 #include <cassert>
37 #include <cstddef>
38 #include <cstdint>
39 #include <utility>
41 using namespace llvm;
42 using namespace PatternMatch;
44 #define DEBUG_TYPE "instcombine"
46 /// The specific integer value is used in a context where it is known to be
47 /// non-zero. If this allows us to simplify the computation, do so and return
48 /// the new operand, otherwise return null.
49 static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC,
50 Instruction &CxtI) {
51 // If V has multiple uses, then we would have to do more analysis to determine
52 // if this is safe. For example, the use could be in dynamically unreachable
53 // code.
54 if (!V->hasOneUse()) return nullptr;
56 bool MadeChange = false;
58 // ((1 << A) >>u B) --> (1 << (A-B))
59 // Because V cannot be zero, we know that B is less than A.
60 Value *A = nullptr, *B = nullptr, *One = nullptr;
61 if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
62 match(One, m_One())) {
63 A = IC.Builder.CreateSub(A, B);
64 return IC.Builder.CreateShl(One, A);
68 // (PowerOfTwo >>u B) --> isExact, since shifting out any set bit would make
69 // the result inexact. Similarly, (PowerOfTwo << B) --> nuw.
69 BinaryOperator *I = dyn_cast<BinaryOperator>(V);
70 if (I && I->isLogicalShift() &&
71 IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
72 // We know that this is an exact/nuw shift and that the input is a
73 // non-zero context as well.
74 if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
75 I->setOperand(0, V2);
76 MadeChange = true;
79 if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
80 I->setIsExact();
81 MadeChange = true;
84 if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
85 I->setHasNoUnsignedWrap();
86 MadeChange = true;
90 // TODO: Lots more we could do here:
91 // If V is a phi node, we can call this on each of its operands.
92 // "select cond, X, 0" can simplify to "X".
94 return MadeChange ? V : nullptr;
97 /// A helper routine of InstCombiner::visitMul().
98 ///
99 /// If C is a scalar/vector of known powers of 2, then this function returns
100 /// a new scalar/vector obtained from logBase2 of C.
101 /// Return a null pointer otherwise.
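/// For example (illustrative): for a <2 x i32> constant <i32 4, i32 16> this
/// returns <i32 2, i32 4>; undef elements pass through as undef, and any
/// element that is not a power of 2 makes the whole call return null.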
102 static Constant *getLogBase2(Type *Ty, Constant *C) {
103 const APInt *IVal;
104 if (match(C, m_APInt(IVal)) && IVal->isPowerOf2())
105 return ConstantInt::get(Ty, IVal->logBase2());
107 if (!Ty->isVectorTy())
108 return nullptr;
110 SmallVector<Constant *, 4> Elts;
111 for (unsigned I = 0, E = Ty->getVectorNumElements(); I != E; ++I) {
112 Constant *Elt = C->getAggregateElement(I);
113 if (!Elt)
114 return nullptr;
115 if (isa<UndefValue>(Elt)) {
116 Elts.push_back(UndefValue::get(Ty->getScalarType()));
117 continue;
119 if (!match(Elt, m_APInt(IVal)) || !IVal->isPowerOf2())
120 return nullptr;
121 Elts.push_back(ConstantInt::get(Ty->getScalarType(), IVal->logBase2()));
124 return ConstantVector::get(Elts);
127 Instruction *InstCombiner::visitMul(BinaryOperator &I) {
128 if (Value *V = SimplifyMulInst(I.getOperand(0), I.getOperand(1),
129 SQ.getWithInstruction(&I)))
130 return replaceInstUsesWith(I, V);
132 if (SimplifyAssociativeOrCommutative(I))
133 return &I;
135 if (Instruction *X = foldVectorBinop(I))
136 return X;
138 if (Value *V = SimplifyUsingDistributiveLaws(I))
139 return replaceInstUsesWith(I, V);
141 // X * -1 == 0 - X
142 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
143 if (match(Op1, m_AllOnes())) {
144 BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
145 if (I.hasNoSignedWrap())
146 BO->setHasNoSignedWrap();
147 return BO;
150 // Also allow combining multiply instructions on vectors.
152 Value *NewOp;
153 Constant *C1, *C2;
154 const APInt *IVal;
155 if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
156 m_Constant(C1))) &&
157 match(C1, m_APInt(IVal))) {
158 // ((X << C2)*C1) == (X * (C1 << C2))
159 Constant *Shl = ConstantExpr::getShl(C1, C2);
160 BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
161 BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
162 if (I.hasNoUnsignedWrap() && Mul->hasNoUnsignedWrap())
163 BO->setHasNoUnsignedWrap();
164 if (I.hasNoSignedWrap() && Mul->hasNoSignedWrap() &&
165 Shl->isNotMinSignedValue())
166 BO->setHasNoSignedWrap();
167 return BO;
170 if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
171 // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
172 if (Constant *NewCst = getLogBase2(NewOp->getType(), C1)) {
173 BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);
175 if (I.hasNoUnsignedWrap())
176 Shl->setHasNoUnsignedWrap();
177 if (I.hasNoSignedWrap()) {
178 const APInt *V;
179 if (match(NewCst, m_APInt(V)) && *V != V->getBitWidth() - 1)
180 Shl->setHasNoSignedWrap();
183 return Shl;
188 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
189 // (Y - X) * (-(2**n)) -> (X - Y) * (2**n), for positive nonzero n
190 // (Y + const) * (-(2**n)) -> (-const - Y) * (2**n), for positive nonzero n
191 // The "* (2**n)" thus becomes a potential shifting opportunity.
193 const APInt & Val = CI->getValue();
194 const APInt &PosVal = Val.abs();
195 if (Val.isNegative() && PosVal.isPowerOf2()) {
196 Value *X = nullptr, *Y = nullptr;
197 if (Op0->hasOneUse()) {
198 ConstantInt *C1;
199 Value *Sub = nullptr;
200 if (match(Op0, m_Sub(m_Value(Y), m_Value(X))))
201 Sub = Builder.CreateSub(X, Y, "suba");
202 else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1))))
203 Sub = Builder.CreateSub(Builder.CreateNeg(C1), Y, "subc");
204 if (Sub)
205 return
206 BinaryOperator::CreateMul(Sub,
207 ConstantInt::get(Y->getType(), PosVal));
213 if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
214 return FoldedMul;
216 // Simplify mul instructions with a constant RHS.
217 if (isa<Constant>(Op1)) {
218 // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
219 Value *X;
220 Constant *C1;
221 if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
222 Value *Mul = Builder.CreateMul(C1, Op1);
223 // Only go forward with the transform if C1*CI simplifies to a tidier
224 // constant.
225 if (!match(Mul, m_Mul(m_Value(), m_Value())))
226 return BinaryOperator::CreateAdd(Builder.CreateMul(X, Op1), Mul);
230 // -X * C --> X * -C
231 Value *X, *Y;
232 Constant *Op1C;
233 if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
234 return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));
236 // -X * -Y --> X * Y
237 if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
238 auto *NewMul = BinaryOperator::CreateMul(X, Y);
239 if (I.hasNoSignedWrap() &&
240 cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
241 cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
242 NewMul->setHasNoSignedWrap();
243 return NewMul;
246 // -X * Y --> -(X * Y)
247 // X * -Y --> -(X * Y)
248 if (match(&I, m_c_Mul(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))
249 return BinaryOperator::CreateNeg(Builder.CreateMul(X, Y));
251 // (X / Y) * Y = X - (X % Y)
252 // (X / Y) * -Y = (X % Y) - X
254 Value *Y = Op1;
255 BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
256 if (!Div || (Div->getOpcode() != Instruction::UDiv &&
257 Div->getOpcode() != Instruction::SDiv)) {
258 Y = Op0;
259 Div = dyn_cast<BinaryOperator>(Op1);
261 Value *Neg = dyn_castNegVal(Y);
262 if (Div && Div->hasOneUse() &&
263 (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
264 (Div->getOpcode() == Instruction::UDiv ||
265 Div->getOpcode() == Instruction::SDiv)) {
266 Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);
268 // If the division is exact, X % Y is zero, so we end up with X or -X.
269 if (Div->isExact()) {
270 if (DivOp1 == Y)
271 return replaceInstUsesWith(I, X);
272 return BinaryOperator::CreateNeg(X);
275 auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
276 : Instruction::SRem;
277 Value *Rem = Builder.CreateBinOp(RemOpc, X, DivOp1);
278 if (DivOp1 == Y)
279 return BinaryOperator::CreateSub(X, Rem);
280 return BinaryOperator::CreateSub(Rem, X);
284 /// i1 mul -> i1 and.
285 if (I.getType()->isIntOrIntVectorTy(1))
286 return BinaryOperator::CreateAnd(Op0, Op1);
288 // X*(1 << Y) --> X << Y
289 // (1 << Y)*X --> X << Y
291 Value *Y;
292 BinaryOperator *BO = nullptr;
293 bool ShlNSW = false;
294 if (match(Op0, m_Shl(m_One(), m_Value(Y)))) {
295 BO = BinaryOperator::CreateShl(Op1, Y);
296 ShlNSW = cast<ShlOperator>(Op0)->hasNoSignedWrap();
297 } else if (match(Op1, m_Shl(m_One(), m_Value(Y)))) {
298 BO = BinaryOperator::CreateShl(Op0, Y);
299 ShlNSW = cast<ShlOperator>(Op1)->hasNoSignedWrap();
301 if (BO) {
302 if (I.hasNoUnsignedWrap())
303 BO->setHasNoUnsignedWrap();
304 if (I.hasNoSignedWrap() && ShlNSW)
305 BO->setHasNoSignedWrap();
306 return BO;
310 // (bool X) * Y --> X ? Y : 0
311 // Y * (bool X) --> X ? Y : 0
312 if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
313 return SelectInst::Create(X, Op1, ConstantInt::get(I.getType(), 0));
314 if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
315 return SelectInst::Create(X, Op0, ConstantInt::get(I.getType(), 0));
317 // (lshr X, 31) * Y --> (ashr X, 31) & Y
318 // Y * (lshr X, 31) --> (ashr X, 31) & Y
319 // TODO: We are not checking one-use because the elimination of the multiply
320 // is better for analysis?
321 // TODO: Should we canonicalize to '(X < 0) ? Y : 0' instead? That would be
322 // more similar to what we're doing above.
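// Note (illustrative): when the shift amount is bitwidth-1, (lshr X, C) is
// either 0 or 1 while (ashr X, C) is either 0 or -1, so multiplying Y by the
// former is the same as ANDing Y with the latter.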
323 const APInt *C;
324 if (match(Op0, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
325 return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op1);
326 if (match(Op1, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
327 return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op0);
329 if (Instruction *Ext = narrowMathIfNoOverflow(I))
330 return Ext;
332 bool Changed = false;
333 if (!I.hasNoSignedWrap() && willNotOverflowSignedMul(Op0, Op1, I)) {
334 Changed = true;
335 I.setHasNoSignedWrap(true);
338 if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedMul(Op0, Op1, I)) {
339 Changed = true;
340 I.setHasNoUnsignedWrap(true);
343 return Changed ? &I : nullptr;
346 Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
347 if (Value *V = SimplifyFMulInst(I.getOperand(0), I.getOperand(1),
348 I.getFastMathFlags(),
349 SQ.getWithInstruction(&I)))
350 return replaceInstUsesWith(I, V);
352 if (SimplifyAssociativeOrCommutative(I))
353 return &I;
355 if (Instruction *X = foldVectorBinop(I))
356 return X;
358 if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
359 return FoldedMul;
361 // X * -1.0 --> -X
362 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
363 if (match(Op1, m_SpecificFP(-1.0)))
364 return BinaryOperator::CreateFNegFMF(Op0, &I);
366 // -X * -Y --> X * Y
367 Value *X, *Y;
368 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
369 return BinaryOperator::CreateFMulFMF(X, Y, &I);
371 // -X * C --> X * -C
372 Constant *C;
373 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Constant(C)))
374 return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);
376 // fabs(X) * fabs(X) -> X * X
377 if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::fabs>(m_Value(X))))
378 return BinaryOperator::CreateFMulFMF(X, X, &I);
380 // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
381 if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
382 return replaceInstUsesWith(I, V);
384 if (I.hasAllowReassoc()) {
385 // Reassociate constant RHS with another constant to form constant
386 // expression.
387 if (match(Op1, m_Constant(C)) && C->isFiniteNonZeroFP()) {
388 Constant *C1;
389 if (match(Op0, m_OneUse(m_FDiv(m_Constant(C1), m_Value(X))))) {
390 // (C1 / X) * C --> (C * C1) / X
391 Constant *CC1 = ConstantExpr::getFMul(C, C1);
392 if (CC1->isNormalFP())
393 return BinaryOperator::CreateFDivFMF(CC1, X, &I);
395 if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
396 // (X / C1) * C --> X * (C / C1)
397 Constant *CDivC1 = ConstantExpr::getFDiv(C, C1);
398 if (CDivC1->isNormalFP())
399 return BinaryOperator::CreateFMulFMF(X, CDivC1, &I);
401 // If the constant was a denormal, try reassociating differently.
402 // (X / C1) * C --> X / (C1 / C)
403 Constant *C1DivC = ConstantExpr::getFDiv(C1, C);
404 if (Op0->hasOneUse() && C1DivC->isNormalFP())
405 return BinaryOperator::CreateFDivFMF(X, C1DivC, &I);
408 // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
409 // canonicalized to 'fadd X, C'. Distributing the multiply may allow
410 // further folds and (X * C) + C2 is 'fma'.
411 if (match(Op0, m_OneUse(m_FAdd(m_Value(X), m_Constant(C1))))) {
412 // (X + C1) * C --> (X * C) + (C * C1)
413 Constant *CC1 = ConstantExpr::getFMul(C, C1);
414 Value *XC = Builder.CreateFMulFMF(X, C, &I);
415 return BinaryOperator::CreateFAddFMF(XC, CC1, &I);
417 if (match(Op0, m_OneUse(m_FSub(m_Constant(C1), m_Value(X))))) {
418 // (C1 - X) * C --> (C * C1) - (X * C)
419 Constant *CC1 = ConstantExpr::getFMul(C, C1);
420 Value *XC = Builder.CreateFMulFMF(X, C, &I);
421 return BinaryOperator::CreateFSubFMF(CC1, XC, &I);
425 Value *Z;
426 if (match(&I, m_c_FMul(m_OneUse(m_FDiv(m_Value(X), m_Value(Y))),
427 m_Value(Z)))) {
428 // Sink division: (X / Y) * Z --> (X * Z) / Y
429 Value *NewFMul = Builder.CreateFMulFMF(X, Z, &I);
430 return BinaryOperator::CreateFDivFMF(NewFMul, Y, &I);
433 // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
434 // nnan disallows the possibility of returning a number if both operands are
435 // negative (in that case, we should return NaN).
436 if (I.hasNoNaNs() &&
437 match(Op0, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(X)))) &&
438 match(Op1, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
439 Value *XY = Builder.CreateFMulFMF(X, Y, &I);
440 Value *Sqrt = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &I);
441 return replaceInstUsesWith(I, Sqrt);
444 // Like the similar transform in instsimplify, this requires 'nsz' because
445 // sqrt(-0.0) = -0.0, and -0.0 * -0.0 does not simplify to -0.0.
446 if (I.hasNoNaNs() && I.hasNoSignedZeros() && Op0 == Op1 &&
447 Op0->hasNUses(2)) {
448 // Peek through fdiv to find squaring of square root:
449 // (X / sqrt(Y)) * (X / sqrt(Y)) --> (X * X) / Y
450 if (match(Op0, m_FDiv(m_Value(X),
451 m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
452 Value *XX = Builder.CreateFMulFMF(X, X, &I);
453 return BinaryOperator::CreateFDivFMF(XX, Y, &I);
455 // (sqrt(Y) / X) * (sqrt(Y) / X) --> Y / (X * X)
456 if (match(Op0, m_FDiv(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y)),
457 m_Value(X)))) {
458 Value *XX = Builder.CreateFMulFMF(X, X, &I);
459 return BinaryOperator::CreateFDivFMF(Y, XX, &I);
463 // exp(X) * exp(Y) -> exp(X + Y)
464 // Match as long as at least one of exp has only one use.
465 if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
466 match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y))) &&
467 (Op0->hasOneUse() || Op1->hasOneUse())) {
468 Value *XY = Builder.CreateFAddFMF(X, Y, &I);
469 Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
470 return replaceInstUsesWith(I, Exp);
473 // exp2(X) * exp2(Y) -> exp2(X + Y)
474 // Match as long as at least one of exp2 has only one use.
475 if (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) &&
476 match(Op1, m_Intrinsic<Intrinsic::exp2>(m_Value(Y))) &&
477 (Op0->hasOneUse() || Op1->hasOneUse())) {
478 Value *XY = Builder.CreateFAddFMF(X, Y, &I);
479 Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
480 return replaceInstUsesWith(I, Exp2);
483 // (X*Y) * X => (X*X) * Y where Y != X
484 // The purpose is two-fold:
485 // 1) to form a power expression (of X).
486 // 2) potentially shorten the critical path: After transformation, the
487 // latency of the instruction Y is amortized by the expression of X*X,
488 // and therefore Y is in a "less critical" position compared to what it
489 // was before the transformation.
490 if (match(Op0, m_OneUse(m_c_FMul(m_Specific(Op1), m_Value(Y)))) &&
491 Op1 != Y) {
492 Value *XX = Builder.CreateFMulFMF(Op1, Op1, &I);
493 return BinaryOperator::CreateFMulFMF(XX, Y, &I);
495 if (match(Op1, m_OneUse(m_c_FMul(m_Specific(Op0), m_Value(Y)))) &&
496 Op0 != Y) {
497 Value *XX = Builder.CreateFMulFMF(Op0, Op0, &I);
498 return BinaryOperator::CreateFMulFMF(XX, Y, &I);
502 // log2(X * 0.5) * Y = log2(X) * Y - Y
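// The identity holds because (illustrative) log2(X * 0.5) == log2(X) - 1, so
// multiplying both sides by Y gives log2(X) * Y - Y.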
503 if (I.isFast()) {
504 IntrinsicInst *Log2 = nullptr;
505 if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::log2>(
506 m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
507 Log2 = cast<IntrinsicInst>(Op0);
508 Y = Op1;
510 if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::log2>(
511 m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
512 Log2 = cast<IntrinsicInst>(Op1);
513 Y = Op0;
515 if (Log2) {
516 Log2->setArgOperand(0, X);
517 Log2->copyFastMathFlags(&I);
518 Value *LogXTimesY = Builder.CreateFMulFMF(Log2, Y, &I);
519 return BinaryOperator::CreateFSubFMF(LogXTimesY, Y, &I);
523 return nullptr;
526 /// Fold a divide or remainder with a select instruction divisor when one of the
527 /// select operands is zero. In that case, we can use the other select operand
528 /// because div/rem by zero is undefined.
529 bool InstCombiner::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) {
530 SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1));
531 if (!SI)
532 return false;
534 int NonNullOperand;
535 if (match(SI->getTrueValue(), m_Zero()))
536 // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
537 NonNullOperand = 2;
538 else if (match(SI->getFalseValue(), m_Zero()))
539 // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
540 NonNullOperand = 1;
541 else
542 return false;
544 // Change the div/rem to use 'Y' instead of the select.
545 I.setOperand(1, SI->getOperand(NonNullOperand));
547 // Okay, we know we replace the operand of the div/rem with 'Y' with no
548 // problem. However, the select, or the condition of the select may have
549 // multiple uses. Based on our knowledge that the operand must be non-zero,
550 // propagate the known value for the select into other uses of it, and
551 // propagate a known value of the condition into its other users.
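// For example (illustrative): in 'udiv X, (select %c, 0, %y)' the result is
// only defined when %c is false, so uses of the select above the div in this
// block can be replaced with %y and uses of %c with 'false'.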
553 // If the select and condition only have a single use, don't bother with this;
554 // just exit early.
555 Value *SelectCond = SI->getCondition();
556 if (SI->use_empty() && SelectCond->hasOneUse())
557 return true;
559 // Scan the current block backward, looking for other uses of SI.
560 BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin();
561 Type *CondTy = SelectCond->getType();
562 while (BBI != BBFront) {
563 --BBI;
564 // If we find an instruction that we can't assume will return, then
565 // information from below it cannot be propagated above it.
566 if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI))
567 break;
569 // Replace uses of the select or its condition with the known values.
570 for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
571 I != E; ++I) {
572 if (*I == SI) {
573 *I = SI->getOperand(NonNullOperand);
574 Worklist.Add(&*BBI);
575 } else if (*I == SelectCond) {
576 *I = NonNullOperand == 1 ? ConstantInt::getTrue(CondTy)
577 : ConstantInt::getFalse(CondTy);
578 Worklist.Add(&*BBI);
582 // If we passed the instruction, quit looking for it.
583 if (&*BBI == SI)
584 SI = nullptr;
585 if (&*BBI == SelectCond)
586 SelectCond = nullptr;
588 // If we ran out of things to eliminate, break out of the loop.
589 if (!SelectCond && !SI)
590 break;
593 return true;
596 /// True if the multiply cannot be expressed in an int of this size.
597 static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
598 bool IsSigned) {
599 bool Overflow;
600 Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
601 return Overflow;
604 /// True if C1 is a multiple of C2. Quotient contains C1/C2.
605 static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
606 bool IsSigned) {
607 assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal");
609 // Bail if we will divide by zero.
610 if (C2.isNullValue())
611 return false;
613 // Bail if we would divide INT_MIN by -1.
614 if (IsSigned && C1.isMinSignedValue() && C2.isAllOnesValue())
615 return false;
617 APInt Remainder(C1.getBitWidth(), /*val=*/0ULL, IsSigned);
618 if (IsSigned)
619 APInt::sdivrem(C1, C2, Quotient, Remainder);
620 else
621 APInt::udivrem(C1, C2, Quotient, Remainder);
623 return Remainder.isMinValue();
626 /// This function implements the transforms common to both integer division
627 /// instructions (udiv and sdiv). It is called by the visitors to those integer
628 /// division instructions.
629 /// Common integer divide transforms
630 Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
631 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
632 bool IsSigned = I.getOpcode() == Instruction::SDiv;
633 Type *Ty = I.getType();
635 // The RHS is known non-zero.
636 if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I)) {
637 I.setOperand(1, V);
638 return &I;
641 // Handle cases involving: [su]div X, (select Cond, Y, Z)
642 // This does not apply for fdiv.
643 if (simplifyDivRemOfSelectWithZeroOp(I))
644 return &I;
646 const APInt *C2;
647 if (match(Op1, m_APInt(C2))) {
648 Value *X;
649 const APInt *C1;
651 // (X / C1) / C2 -> X / (C1*C2)
652 if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
653 (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
654 APInt Product(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
655 if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
656 return BinaryOperator::Create(I.getOpcode(), X,
657 ConstantInt::get(Ty, Product));
660 if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
661 (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {
662 APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
664 // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
665 if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
666 auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X,
667 ConstantInt::get(Ty, Quotient));
668 NewDiv->setIsExact(I.isExact());
669 return NewDiv;
672 // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
673 if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
674 auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
675 ConstantInt::get(Ty, Quotient));
676 auto *OBO = cast<OverflowingBinaryOperator>(Op0);
677 Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
678 Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
679 return Mul;
683 if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
684 *C1 != C1->getBitWidth() - 1) ||
685 (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))))) {
686 APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
687 APInt C1Shifted = APInt::getOneBitSet(
688 C1->getBitWidth(), static_cast<unsigned>(C1->getLimitedValue()));
690 // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
691 if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
692 auto *BO = BinaryOperator::Create(I.getOpcode(), X,
693 ConstantInt::get(Ty, Quotient));
694 BO->setIsExact(I.isExact());
695 return BO;
698 // (X << C1) / C2 -> X * ((1 << C1) / C2) if 1 << C1 is a multiple of C2.
699 if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
700 auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
701 ConstantInt::get(Ty, Quotient));
702 auto *OBO = cast<OverflowingBinaryOperator>(Op0);
703 Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
704 Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
705 return Mul;
709 if (!C2->isNullValue()) // avoid X udiv 0
710 if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I))
711 return FoldedDiv;
714 if (match(Op0, m_One())) {
715 assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
716 if (IsSigned) {
717 // If Op1 is 0 then it's undefined behaviour; if Op1 is 1 then the
718 // result is one; if Op1 is -1 then the result is minus one; otherwise
719 // it's zero.
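// Implementation note (illustrative): Op1+1 is unsigned-less-than 3 exactly
// when Op1 is -1, 0, or 1. In those cases the select below returns Op1 itself
// (which matches the sdiv result for 1 and -1; 0 is UB anyway), and otherwise
// it returns 0.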
720 Value *Inc = Builder.CreateAdd(Op1, Op0);
721 Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
722 return SelectInst::Create(Cmp, Op1, ConstantInt::get(Ty, 0));
723 } else {
724 // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
725 // result is one, otherwise it's zero.
726 return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty);
730 // See if we can fold away this div instruction.
731 if (SimplifyDemandedInstructionBits(I))
732 return &I;
734 // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
735 Value *X, *Z;
736 if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1
737 if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
738 (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
739 return BinaryOperator::Create(I.getOpcode(), X, Op1);
741 // (X << Y) / X -> 1 << Y
742 Value *Y;
743 if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y))))
744 return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
745 if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y))))
746 return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);
748 // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
749 if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) {
750 bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
751 bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
752 if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
753 I.setOperand(0, ConstantInt::get(Ty, 1));
754 I.setOperand(1, Y);
755 return &I;
759 return nullptr;
762 static const unsigned MaxDepth = 6;
764 namespace {
766 using FoldUDivOperandCb = Instruction *(*)(Value *Op0, Value *Op1,
767 const BinaryOperator &I,
768 InstCombiner &IC);
770 /// Used to maintain state for visitUDivOperand().
771 struct UDivFoldAction {
772 /// Informs visitUDiv() how to fold this operand. This can be zero if this
773 /// action joins two actions together.
774 FoldUDivOperandCb FoldAction;
776 /// Which operand to fold.
777 Value *OperandToFold;
779 union {
780 /// The instruction returned when FoldAction is invoked.
781 Instruction *FoldResult;
783 /// Stores the LHS action index if this action joins two actions together.
784 size_t SelectLHSIdx;
787 UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand)
788 : FoldAction(FA), OperandToFold(InputOperand), FoldResult(nullptr) {}
789 UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS)
790 : FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {}
793 } // end anonymous namespace
795 // X udiv 2^C -> X >> C
796 static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1,
797 const BinaryOperator &I, InstCombiner &IC) {
798 Constant *C1 = getLogBase2(Op0->getType(), cast<Constant>(Op1));
799 if (!C1)
800 llvm_unreachable("Failed to constant fold udiv -> logbase2");
801 BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, C1);
802 if (I.isExact())
803 LShr->setIsExact();
804 return LShr;
807 // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
808 // X udiv (zext (C1 << N)), where C1 is "1<<C2" --> X >> (N+C2)
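// For example (illustrative): X udiv (4 << N) becomes X >> (N + 2), since the
// divisor is 2^(N+2).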
809 static Instruction *foldUDivShl(Value *Op0, Value *Op1, const BinaryOperator &I,
810 InstCombiner &IC) {
811 Value *ShiftLeft;
812 if (!match(Op1, m_ZExt(m_Value(ShiftLeft))))
813 ShiftLeft = Op1;
815 Constant *CI;
816 Value *N;
817 if (!match(ShiftLeft, m_Shl(m_Constant(CI), m_Value(N))))
818 llvm_unreachable("match should never fail here!");
819 Constant *Log2Base = getLogBase2(N->getType(), CI);
820 if (!Log2Base)
821 llvm_unreachable("getLogBase2 should never fail here!");
822 N = IC.Builder.CreateAdd(N, Log2Base);
823 if (Op1 != ShiftLeft)
824 N = IC.Builder.CreateZExt(N, Op1->getType());
825 BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, N);
826 if (I.isExact())
827 LShr->setIsExact();
828 return LShr;
831 // Recursively visits the possible right hand operands of a udiv
832 // instruction, seeing through select instructions, to determine if we can
833 // replace the udiv with something simpler. If we find that an operand is not
834 // able to simplify the udiv, we abort the entire transformation.
835 static size_t visitUDivOperand(Value *Op0, Value *Op1, const BinaryOperator &I,
836 SmallVectorImpl<UDivFoldAction> &Actions,
837 unsigned Depth = 0) {
838 // Check to see if this is an unsigned division with an exact power of 2,
839 // if so, convert to a right shift.
840 if (match(Op1, m_Power2())) {
841 Actions.push_back(UDivFoldAction(foldUDivPow2Cst, Op1));
842 return Actions.size();
845 // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
846 if (match(Op1, m_Shl(m_Power2(), m_Value())) ||
847 match(Op1, m_ZExt(m_Shl(m_Power2(), m_Value())))) {
848 Actions.push_back(UDivFoldAction(foldUDivShl, Op1));
849 return Actions.size();
852 // The remaining tests are all recursive, so bail out if we hit the limit.
853 if (Depth++ == MaxDepth)
854 return 0;
856 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
857 if (size_t LHSIdx =
858 visitUDivOperand(Op0, SI->getOperand(1), I, Actions, Depth))
859 if (visitUDivOperand(Op0, SI->getOperand(2), I, Actions, Depth)) {
860 Actions.push_back(UDivFoldAction(nullptr, Op1, LHSIdx - 1));
861 return Actions.size();
864 return 0;
867 /// If we have zero-extended operands of an unsigned div or rem, we may be able
868 /// to narrow the operation (sink the zext below the math).
869 static Instruction *narrowUDivURem(BinaryOperator &I,
870 InstCombiner::BuilderTy &Builder) {
871 Instruction::BinaryOps Opcode = I.getOpcode();
872 Value *N = I.getOperand(0);
873 Value *D = I.getOperand(1);
874 Type *Ty = I.getType();
875 Value *X, *Y;
876 if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) &&
877 X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
878 // udiv (zext X), (zext Y) --> zext (udiv X, Y)
879 // urem (zext X), (zext Y) --> zext (urem X, Y)
880 Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y);
881 return new ZExtInst(NarrowOp, Ty);
884 Constant *C;
885 if ((match(N, m_OneUse(m_ZExt(m_Value(X)))) && match(D, m_Constant(C))) ||
886 (match(D, m_OneUse(m_ZExt(m_Value(X)))) && match(N, m_Constant(C)))) {
887 // If the constant is the same in the smaller type, use the narrow version.
888 Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
889 if (ConstantExpr::getZExt(TruncC, Ty) != C)
890 return nullptr;
892 // udiv (zext X), C --> zext (udiv X, C')
893 // urem (zext X), C --> zext (urem X, C')
894 // udiv C, (zext X) --> zext (udiv C', X)
895 // urem C, (zext X) --> zext (urem C', X)
896 Value *NarrowOp = isa<Constant>(D) ? Builder.CreateBinOp(Opcode, X, TruncC)
897 : Builder.CreateBinOp(Opcode, TruncC, X);
898 return new ZExtInst(NarrowOp, Ty);
901 return nullptr;
904 Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
905 if (Value *V = SimplifyUDivInst(I.getOperand(0), I.getOperand(1),
906 SQ.getWithInstruction(&I)))
907 return replaceInstUsesWith(I, V);
909 if (Instruction *X = foldVectorBinop(I))
910 return X;
912 // Handle the integer div common cases
913 if (Instruction *Common = commonIDivTransforms(I))
914 return Common;
916 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
917 Value *X;
918 const APInt *C1, *C2;
919 if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) && match(Op1, m_APInt(C2))) {
920 // (X lshr C1) udiv C2 --> X udiv (C2 << C1)
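// (Illustrative) The lshr already divides X by 2^C1, so the whole expression
// divides X by C2 * 2^C1, which is C2 << C1 as long as that shift does not
// overflow (checked below).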
921 bool Overflow;
922 APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow);
923 if (!Overflow) {
924 bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value()));
925 BinaryOperator *BO = BinaryOperator::CreateUDiv(
926 X, ConstantInt::get(X->getType(), C2ShlC1));
927 if (IsExact)
928 BO->setIsExact();
929 return BO;
933 // Op0 / C where C is large (negative) --> zext (Op0 >= C)
934 // TODO: Could use isKnownNegative() to handle non-constant values.
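// (Illustrative) When C has its sign bit set, the unsigned quotient can only
// be 0 or 1, and it is 1 exactly when Op0 u>= C.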
935 Type *Ty = I.getType();
936 if (match(Op1, m_Negative())) {
937 Value *Cmp = Builder.CreateICmpUGE(Op0, Op1);
938 return CastInst::CreateZExtOrBitCast(Cmp, Ty);
940 // Op0 / (sext i1 X) --> zext (Op0 == -1) (if X is 0, the div is undefined)
941 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
942 Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
943 return CastInst::CreateZExtOrBitCast(Cmp, Ty);
946 if (Instruction *NarrowDiv = narrowUDivURem(I, Builder))
947 return NarrowDiv;
949 // If the udiv operands are non-overflowing multiplies with a common operand,
950 // then eliminate the common factor:
951 // (A * B) / (A * X) --> B / X (and commuted variants)
952 // TODO: The code would be reduced if we had m_c_NUWMul pattern matching.
953 // TODO: If -reassociation handled this generally, we could remove this.
954 Value *A, *B;
955 if (match(Op0, m_NUWMul(m_Value(A), m_Value(B)))) {
956 if (match(Op1, m_NUWMul(m_Specific(A), m_Value(X))) ||
957 match(Op1, m_NUWMul(m_Value(X), m_Specific(A))))
958 return BinaryOperator::CreateUDiv(B, X);
959 if (match(Op1, m_NUWMul(m_Specific(B), m_Value(X))) ||
960 match(Op1, m_NUWMul(m_Value(X), m_Specific(B))))
961 return BinaryOperator::CreateUDiv(A, X);
964 // (LHS udiv (select (select (...)))) -> (LHS >> (select (select (...))))
965 SmallVector<UDivFoldAction, 6> UDivActions;
966 if (visitUDivOperand(Op0, Op1, I, UDivActions))
967 for (unsigned i = 0, e = UDivActions.size(); i != e; ++i) {
968 FoldUDivOperandCb Action = UDivActions[i].FoldAction;
969 Value *ActionOp1 = UDivActions[i].OperandToFold;
970 Instruction *Inst;
971 if (Action)
972 Inst = Action(Op0, ActionOp1, I, *this);
973 else {
974 // This action joins two actions together. The RHS of this action is
975 // simply the last action we processed, we saved the LHS action index in
976 // the joining action.
977 size_t SelectRHSIdx = i - 1;
978 Value *SelectRHS = UDivActions[SelectRHSIdx].FoldResult;
979 size_t SelectLHSIdx = UDivActions[i].SelectLHSIdx;
980 Value *SelectLHS = UDivActions[SelectLHSIdx].FoldResult;
981 Inst = SelectInst::Create(cast<SelectInst>(ActionOp1)->getCondition(),
982 SelectLHS, SelectRHS);
985 // If this is the last action to process, return it to the InstCombiner.
986 // Otherwise, we insert it before the UDiv and record it so that we may
987 // use it as part of a joining action (i.e., a SelectInst).
988 if (e - i != 1) {
989 Inst->insertBefore(&I);
990 UDivActions[i].FoldResult = Inst;
991 } else
992 return Inst;
995 return nullptr;
998 Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
999 if (Value *V = SimplifySDivInst(I.getOperand(0), I.getOperand(1),
1000 SQ.getWithInstruction(&I)))
1001 return replaceInstUsesWith(I, V);
1003 if (Instruction *X = foldVectorBinop(I))
1004 return X;
1006 // Handle the integer div common cases
1007 if (Instruction *Common = commonIDivTransforms(I))
1008 return Common;
1010 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1011 Value *X;
1012 // sdiv Op0, -1 --> -Op0
1013 // sdiv Op0, (sext i1 X) --> -Op0 (because if X is 0, the op is undefined)
1014 if (match(Op1, m_AllOnes()) ||
1015 (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
1016 return BinaryOperator::CreateNeg(Op0);
1018 // X / INT_MIN --> X == INT_MIN
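// (Illustrative) Every other dividend has a smaller magnitude than INT_MIN, so
// the quotient is 1 when X == INT_MIN and 0 otherwise.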
1019 if (match(Op1, m_SignMask()))
1020 return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), I.getType());
1022 const APInt *Op1C;
1023 if (match(Op1, m_APInt(Op1C))) {
1024 // sdiv exact X, C --> ashr exact X, log2(C)
1025 if (I.isExact() && Op1C->isNonNegative() && Op1C->isPowerOf2()) {
1026 Value *ShAmt = ConstantInt::get(Op1->getType(), Op1C->exactLogBase2());
1027 return BinaryOperator::CreateExactAShr(Op0, ShAmt, I.getName());
1030 // If the dividend is sign-extended and the constant divisor is small enough
1031 // to fit in the source type, shrink the division to the narrower type:
1032 // (sext X) sdiv C --> sext (X sdiv C)
1033 Value *Op0Src;
1034 if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) &&
1035 Op0Src->getType()->getScalarSizeInBits() >= Op1C->getMinSignedBits()) {
1037 // In the general case, we need to make sure that the dividend is not the
1038 // minimum signed value because dividing that by -1 is UB. But here, we
1039 // know that the -1 divisor case is already handled above.
1041 Constant *NarrowDivisor =
1042 ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
1043 Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
1044 return new SExtInst(NarrowOp, Op0->getType());
1047 // -X / C --> X / -C (if the negation doesn't overflow).
1048 // TODO: This could be enhanced to handle arbitrary vector constants by
1049 // checking if all elements are not the min-signed-val.
1050 if (!Op1C->isMinSignedValue() &&
1051 match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) {
1052 Constant *NegC = ConstantInt::get(I.getType(), -(*Op1C));
1053 Instruction *BO = BinaryOperator::CreateSDiv(X, NegC);
1054 BO->setIsExact(I.isExact());
1055 return BO;
1059 // -X / Y --> -(X / Y)
1060 Value *Y;
1061 if (match(&I, m_SDiv(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
1062 return BinaryOperator::CreateNSWNeg(
1063 Builder.CreateSDiv(X, Y, I.getName(), I.isExact()));
1065 // If the sign bits of both operands are zero (i.e. we can prove they are
1066 // unsigned inputs), turn this into a udiv.
1067 APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
1068 if (MaskedValueIsZero(Op0, Mask, 0, &I)) {
1069 if (MaskedValueIsZero(Op1, Mask, 0, &I)) {
1070 // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
1071 auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
1072 BO->setIsExact(I.isExact());
1073 return BO;
1076 if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
1077 // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
1078 // Safe because the only negative value (1 << Y) can take on is
1079 // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
1080 // the sign bit set.
1081 auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
1082 BO->setIsExact(I.isExact());
1083 return BO;
1087 return nullptr;
1090 /// Remove negation and try to convert division into multiplication.
1091 static Instruction *foldFDivConstantDivisor(BinaryOperator &I) {
1092 Constant *C;
1093 if (!match(I.getOperand(1), m_Constant(C)))
1094 return nullptr;
1096 // -X / C --> X / -C
1097 Value *X;
1098 if (match(I.getOperand(0), m_FNeg(m_Value(X))))
1099 return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);
1101 // If the constant divisor has an exact inverse, this is always safe. If not,
1102 // then we can still create a reciprocal if fast-math-flags allow it and the
1103 // constant is a regular number (not zero, infinite, or denormal).
1104 if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
1105 return nullptr;
1107 // Disallow denormal constants because we don't know what would happen
1108 // on all targets.
1109 // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
1110 // denorms are flushed?
1111 auto *RecipC = ConstantExpr::getFDiv(ConstantFP::get(I.getType(), 1.0), C);
1112 if (!RecipC->isNormalFP())
1113 return nullptr;
1115 // X / C --> X * (1 / C)
1116 return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
1119 /// Remove negation and try to reassociate constant math.
1120 static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
1121 Constant *C;
1122 if (!match(I.getOperand(0), m_Constant(C)))
1123 return nullptr;
1125 // C / -X --> -C / X
1126 Value *X;
1127 if (match(I.getOperand(1), m_FNeg(m_Value(X))))
1128 return BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);
1130 if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
1131 return nullptr;
1133 // Try to reassociate C / X expressions where X includes another constant.
1134 Constant *C2, *NewC = nullptr;
1135 if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
1136 // C / (X * C2) --> (C / C2) / X
1137 NewC = ConstantExpr::getFDiv(C, C2);
1138 } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
1139 // C / (X / C2) --> (C * C2) / X
1140 NewC = ConstantExpr::getFMul(C, C2);
1142 // Disallow denormal constants because we don't know what would happen
1143 // on all targets.
1144 // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
1145 // denorms are flushed?
1146 if (!NewC || !NewC->isNormalFP())
1147 return nullptr;
1149 return BinaryOperator::CreateFDivFMF(NewC, X, &I);
1152 Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
1153 if (Value *V = SimplifyFDivInst(I.getOperand(0), I.getOperand(1),
1154 I.getFastMathFlags(),
1155 SQ.getWithInstruction(&I)))
1156 return replaceInstUsesWith(I, V);
1158 if (Instruction *X = foldVectorBinop(I))
1159 return X;
1161 if (Instruction *R = foldFDivConstantDivisor(I))
1162 return R;
1164 if (Instruction *R = foldFDivConstantDividend(I))
1165 return R;
1167 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1168 if (isa<Constant>(Op0))
1169 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
1170 if (Instruction *R = FoldOpIntoSelect(I, SI))
1171 return R;
1173 if (isa<Constant>(Op1))
1174 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
1175 if (Instruction *R = FoldOpIntoSelect(I, SI))
1176 return R;
1178 if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
1179 Value *X, *Y;
1180 if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
1181 (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
1182 // (X / Y) / Z => X / (Y * Z)
1183 Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
1184 return BinaryOperator::CreateFDivFMF(X, YZ, &I);
1186 if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
1187 (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
1188 // Z / (X / Y) => (Y * Z) / X
1189 Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
1190 return BinaryOperator::CreateFDivFMF(YZ, X, &I);
1194 if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
1195 // sin(X) / cos(X) -> tan(X)
1196 // cos(X) / sin(X) -> 1/tan(X) (cotangent)
1197 Value *X;
1198 bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
1199 match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
1200 bool IsCot =
1201 !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
1202 match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));
1204 if ((IsTan || IsCot) &&
1205 hasFloatFn(&TLI, I.getType(), LibFunc_tan, LibFunc_tanf, LibFunc_tanl)) {
1206 IRBuilder<> B(&I);
1207 IRBuilder<>::FastMathFlagGuard FMFGuard(B);
1208 B.setFastMathFlags(I.getFastMathFlags());
1209 AttributeList Attrs =
1210 cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
1211 Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
1212 LibFunc_tanl, B, Attrs);
1213 if (IsCot)
1214 Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
1215 return replaceInstUsesWith(I, Res);
1219 // -X / -Y -> X / Y
1220 Value *X, *Y;
1221 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y)))) {
1222 I.setOperand(0, X);
1223 I.setOperand(1, Y);
1224 return &I;
1227 // X / (X * Y) --> 1.0 / Y
1228 // Reassociating to (X / X -> 1.0) is legal when NaNs are not allowed.
1229 // We can ignore the possibility that X is infinity because INF/INF is NaN.
1230 if (I.hasNoNaNs() && I.hasAllowReassoc() &&
1231 match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
1232 I.setOperand(0, ConstantFP::get(I.getType(), 1.0));
1233 I.setOperand(1, Y);
1234 return &I;
1237 // X / fabs(X) -> copysign(1.0, X)
1238 // fabs(X) / X -> copysign(1.0, X)
1239 if (I.hasNoNaNs() && I.hasNoInfs() &&
1240 (match(&I,
1241 m_FDiv(m_Value(X), m_Intrinsic<Intrinsic::fabs>(m_Deferred(X)))) ||
1242 match(&I, m_FDiv(m_Intrinsic<Intrinsic::fabs>(m_Value(X)),
1243 m_Deferred(X))))) {
1244 Value *V = Builder.CreateBinaryIntrinsic(
1245 Intrinsic::copysign, ConstantFP::get(I.getType(), 1.0), X, &I);
1246 return replaceInstUsesWith(I, V);
1248 return nullptr;
1251 /// This function implements the transforms common to both integer remainder
1252 /// instructions (urem and srem). It is called by the visitors to those integer
1253 /// remainder instructions.
1254 /// Common integer remainder transforms
1255 Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
1256 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1258 // The RHS is known non-zero.
1259 if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I)) {
1260 I.setOperand(1, V);
1261 return &I;
1264 // Handle cases involving: rem X, (select Cond, Y, Z)
1265 if (simplifyDivRemOfSelectWithZeroOp(I))
1266 return &I;
1268 if (isa<Constant>(Op1)) {
1269 if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
1270 if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
1271 if (Instruction *R = FoldOpIntoSelect(I, SI))
1272 return R;
1273 } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
1274 const APInt *Op1Int;
1275 if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
1276 (I.getOpcode() == Instruction::URem ||
1277 !Op1Int->isMinSignedValue())) {
1278 // foldOpIntoPhi will speculate instructions to the end of the PHI's
1279 // predecessor blocks, so do this only if we know the srem or urem
1280 // will not fault.
1281 if (Instruction *NV = foldOpIntoPhi(I, PN))
1282 return NV;
1286 // See if we can fold away this rem instruction.
1287 if (SimplifyDemandedInstructionBits(I))
1288 return &I;
1292 return nullptr;
1295 Instruction *InstCombiner::visitURem(BinaryOperator &I) {
1296 if (Value *V = SimplifyURemInst(I.getOperand(0), I.getOperand(1),
1297 SQ.getWithInstruction(&I)))
1298 return replaceInstUsesWith(I, V);
1300 if (Instruction *X = foldVectorBinop(I))
1301 return X;
1303 if (Instruction *common = commonIRemTransforms(I))
1304 return common;
1306 if (Instruction *NarrowRem = narrowUDivURem(I, Builder))
1307 return NarrowRem;
1309 // X urem Y -> X & (Y-1), where Y is a power of 2.
1310 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1311 Type *Ty = I.getType();
1312 if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
1313 // This may increase instruction count; we don't enforce that Y is a
1314 // constant.
1315 Constant *N1 = Constant::getAllOnesValue(Ty);
1316 Value *Add = Builder.CreateAdd(Op1, N1);
1317 return BinaryOperator::CreateAnd(Op0, Add);
1320 // 1 urem X -> zext(X != 1)
1321 if (match(Op0, m_One()))
1322 return CastInst::CreateZExtOrBitCast(Builder.CreateICmpNE(Op1, Op0), Ty);
1324 // X urem C -> X < C ? X : X - C, where C >= signbit.
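// (Illustrative) When C u>= 2^(n-1), Op0 is always u< 2*C, so at most one
// subtraction of C is needed to compute the remainder.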
1325 if (match(Op1, m_Negative())) {
1326 Value *Cmp = Builder.CreateICmpULT(Op0, Op1);
1327 Value *Sub = Builder.CreateSub(Op0, Op1);
1328 return SelectInst::Create(Cmp, Op0, Sub);
1331 // If the divisor is a sext of a boolean, then the divisor must be max
1332 // unsigned value (-1). Therefore, the remainder is Op0 unless Op0 is also
1333 // max unsigned value. In that case, the remainder is 0:
1334 // urem Op0, (sext i1 X) --> (Op0 == -1) ? 0 : Op0
1335 Value *X;
1336 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
1337 Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
1338 return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Op0);
1341 return nullptr;
1344 Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
1345 if (Value *V = SimplifySRemInst(I.getOperand(0), I.getOperand(1),
1346 SQ.getWithInstruction(&I)))
1347 return replaceInstUsesWith(I, V);
1349 if (Instruction *X = foldVectorBinop(I))
1350 return X;
1352 // Handle the integer rem common cases
1353 if (Instruction *Common = commonIRemTransforms(I))
1354 return Common;
1356 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1358 const APInt *Y;
1359 // X % -Y -> X % Y
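// (Illustrative) The sign of an srem result follows the dividend and its
// magnitude does not depend on the divisor's sign, so negating a
// non-INT_MIN divisor leaves the result unchanged.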
1360 if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue()) {
1361 Worklist.AddValue(I.getOperand(1));
1362 I.setOperand(1, ConstantInt::get(I.getType(), -*Y));
1363 return &I;
1367 // -X srem Y --> -(X srem Y)
1368 Value *X, *Y;
1369 if (match(&I, m_SRem(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
1370 return BinaryOperator::CreateNSWNeg(Builder.CreateSRem(X, Y));
1372 // If the sign bits of both operands are zero (i.e. we can prove they are
1373 // unsigned inputs), turn this into a urem.
1374 APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
1375 if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
1376 MaskedValueIsZero(Op0, Mask, 0, &I)) {
1377 // X srem Y -> X urem Y, iff X and Y don't have sign bit set
1378 return BinaryOperator::CreateURem(Op0, Op1, I.getName());
1381 // If it's a constant vector, flip any negative values positive.
1382 if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
1383 Constant *C = cast<Constant>(Op1);
1384 unsigned VWidth = C->getType()->getVectorNumElements();
1386 bool hasNegative = false;
1387 bool hasMissing = false;
1388 for (unsigned i = 0; i != VWidth; ++i) {
1389 Constant *Elt = C->getAggregateElement(i);
1390 if (!Elt) {
1391 hasMissing = true;
1392 break;
1395 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
1396 if (RHS->isNegative())
1397 hasNegative = true;
1400 if (hasNegative && !hasMissing) {
1401 SmallVector<Constant *, 16> Elts(VWidth);
1402 for (unsigned i = 0; i != VWidth; ++i) {
1403 Elts[i] = C->getAggregateElement(i); // Handle undef, etc.
1404 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
1405 if (RHS->isNegative())
1406 Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
1410 Constant *NewRHSV = ConstantVector::get(Elts);
1411 if (NewRHSV != C) { // Don't loop on -MININT
1412 Worklist.AddValue(I.getOperand(1));
1413 I.setOperand(1, NewRHSV);
1414 return &I;
1419 return nullptr;
1422 Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
1423 if (Value *V = SimplifyFRemInst(I.getOperand(0), I.getOperand(1),
1424 I.getFastMathFlags(),
1425 SQ.getWithInstruction(&I)))
1426 return replaceInstUsesWith(I, V);
1428 if (Instruction *X = foldVectorBinop(I))
1429 return X;
1431 return nullptr;