//===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for add, fadd, sub, and fsub.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"
namespace {

/// Class representing the coefficient of a floating-point addend.
/// This class needs to be highly efficient, which is especially true for
/// the constructor. As of this writing, the cost of the default constructor
/// is merely a 4-byte zero store (assuming the compiler is able to
/// perform write-merging).
class FAddendCoef {
public:
  // The constructor has to initialize an APFloat, which is unnecessary for
  // most addends, whose coefficient is either 1 or -1. So, the constructor
  // is expensive. In order to avoid the cost of the constructor, we should
  // reuse some instances whenever possible. The pre-created instances
  // FAddCombine::Add[0-5] embody this idea.
  FAddendCoef() = default;
  ~FAddendCoef();

  // If possible, don't define operator+/operator- etc because these
  // operators inevitably call FAddendCoef's constructor which is not cheap.
  void operator=(const FAddendCoef &A);
  void operator+=(const FAddendCoef &A);
  void operator*=(const FAddendCoef &S);

  void set(short C) {
    assert(!insaneIntVal(C) && "Insane coefficient");
    IsFp = false; IntVal = C;
  }

  void set(const APFloat &C);

  void negate();

  bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
  Value *getValue(Type *) const;

  bool isOne() const { return isInt() && IntVal == 1; }
  bool isTwo() const { return isInt() && IntVal == 2; }
  bool isMinusOne() const { return isInt() && IntVal == -1; }
  bool isMinusTwo() const { return isInt() && IntVal == -2; }

private:
  bool insaneIntVal(int V) { return V > 4 || V < -4; }

  APFloat *getFpValPtr() { return reinterpret_cast<APFloat *>(&FpValBuf); }

  const APFloat *getFpValPtr() const {
    return reinterpret_cast<const APFloat *>(&FpValBuf);
  }

  const APFloat &getFpVal() const {
    assert(IsFp && BufHasFpVal && "Incorrect state");
    return *getFpValPtr();
  }

  APFloat &getFpVal() {
    assert(IsFp && BufHasFpVal && "Incorrect state");
    return *getFpValPtr();
  }

  bool isInt() const { return !IsFp; }

  // If the coefficient is represented by an integer, promote it to a
  // floating point.
  void convertToFpType(const fltSemantics &Sem);

  // Construct an APFloat from a signed integer.
  // TODO: We should get rid of this function when APFloat can be constructed
  // from a *SIGNED* integer.
  APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);

  // True iff the coefficient is held in FpValBuf rather than IntVal.
  bool IsFp = false;

  // True iff FpValBuf contains an instance of APFloat.
  bool BufHasFpVal = false;

  // The integer coefficient of an individual addend is either 1 or -1,
  // and we try to simplify at most 4 addends from at most two neighboring
  // instructions. So the range of <IntVal> falls in [-4, 4]. APInt
  // is overkill for this purpose.
  short IntVal = 0;

  AlignedCharArrayUnion<APFloat> FpValBuf;
};
/// FAddend is used to represent a floating-point addend. An addend is
/// represented as <C, V>, where V is a symbolic value, and C is a
/// constant coefficient. A constant addend is represented as <C, 0>.
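/// For example, the addend 2.3*X is represented as <2.3, X>, the constant
/// 4.7 as <4.7, nullptr>, and a plain value X as <1, X>.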
class FAddend {
public:
  FAddend() = default;

  void operator+=(const FAddend &T) {
    assert((Val == T.Val) && "Symbolic-values disagree");
    Coeff += T.Coeff;
  }

  Value *getSymVal() const { return Val; }
  const FAddendCoef &getCoef() const { return Coeff; }

  bool isConstant() const { return Val == nullptr; }
  bool isZero() const { return Coeff.isZero(); }

  void set(short Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }

  void set(const APFloat &Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }

  void set(const ConstantFP *Coefficient, Value *V) {
    Coeff.set(Coefficient->getValueAPF());
    Val = V;
  }

  void negate() { Coeff.negate(); }

  /// Drill down the U-D chain one step to find the definition of V, and
  /// try to break the definition into one or two addends.
  static unsigned drillValueDownOneStep(Value *V, FAddend &A0, FAddend &A1);

  /// Similar to FAddend::drillValueDownOneStep() except that the value being
  /// split is the addend itself.
  unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;

private:
  void Scale(const FAddendCoef &ScaleAmt) { Coeff *= ScaleAmt; }

  // This addend has the value of "Coeff * Val".
  Value *Val = nullptr;
  FAddendCoef Coeff;
};
/// FAddCombine is the class for optimizing an unsafe fadd/fsub along
/// with its at most two neighboring instructions.
class FAddCombine {
public:
  FAddCombine(InstCombiner::BuilderTy &B) : Builder(B) {}

  Value *simplify(Instruction *FAdd);

private:
  using AddendVect = SmallVector<const FAddend *, 4>;

  Value *simplifyFAdd(AddendVect &V, unsigned InstrQuota);

  /// Convert given addend to a Value
  Value *createAddendVal(const FAddend &A, bool &NeedNeg);

  /// Return the number of instructions needed to emit the N-ary addition.
  unsigned calcInstrNumber(const AddendVect &Vect);

  Value *createFSub(Value *Opnd0, Value *Opnd1);
  Value *createFAdd(Value *Opnd0, Value *Opnd1);
  Value *createFMul(Value *Opnd0, Value *Opnd1);
  Value *createFNeg(Value *V);
  Value *createNaryFAdd(const AddendVect &Opnds, unsigned InstrQuota);
  void createInstPostProc(Instruction *NewInst, bool NoNumber = false);

  // Debugging support is clustered here.
#ifndef NDEBUG
  unsigned CreateInstrNum;
  void initCreateInstNum() { CreateInstrNum = 0; }
  void incCreateInstNum() { CreateInstrNum++; }
#else
  void initCreateInstNum() {}
  void incCreateInstNum() {}
#endif

  InstCombiner::BuilderTy &Builder;
  Instruction *Instr = nullptr;
};

} // end anonymous namespace
//===----------------------------------------------------------------------===//
//
// Implementation of
//    {FAddendCoef, FAddend, FAddition, FAddCombine}.
//
//===----------------------------------------------------------------------===//
FAddendCoef::~FAddendCoef() {
  if (BufHasFpVal)
    getFpValPtr()->~APFloat();
}

void FAddendCoef::set(const APFloat &C) {
  APFloat *P = getFpValPtr();

  if (isInt()) {
    // As the buffer is a meaningless byte stream, we cannot call
    // APFloat::operator=().
    new (P) APFloat(C);
  } else
    *P = C;

  IsFp = BufHasFpVal = true;
}

void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
  if (!isInt())
    return;

  APFloat *P = getFpValPtr();
  if (IntVal > 0)
    new (P) APFloat(Sem, IntVal);
  else {
    new (P) APFloat(Sem, 0 - IntVal);
    P->changeSign();
  }

  IsFp = BufHasFpVal = true;
}

APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
  if (Val >= 0)
    return APFloat(Sem, Val);

  APFloat T(Sem, 0 - Val);
  T.changeSign();

  return T;
}
void FAddendCoef::operator=(const FAddendCoef &That) {
  if (That.isInt())
    set(That.IntVal);
  else
    set(That.getFpVal());
}

void FAddendCoef::operator+=(const FAddendCoef &That) {
  RoundingMode RndMode = RoundingMode::NearestTiesToEven;
  if (isInt() == That.isInt()) {
    if (isInt())
      IntVal += That.IntVal;
    else
      getFpVal().add(That.getFpVal(), RndMode);
    return;
  }

  if (isInt()) {
    const APFloat &T = That.getFpVal();
    convertToFpType(T.getSemantics());
    getFpVal().add(T, RndMode);
    return;
  }

  APFloat &T = getFpVal();
  T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
}
void FAddendCoef::operator*=(const FAddendCoef &That) {
  if (That.isOne())
    return;

  if (That.isMinusOne()) {
    negate();
    return;
  }

  if (isInt() && That.isInt()) {
    int Res = IntVal * (int)That.IntVal;
    assert(!insaneIntVal(Res) && "Insane int value");
    IntVal = Res;
    return;
  }

  const fltSemantics &Semantic =
      isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();

  if (isInt())
    convertToFpType(Semantic);
  APFloat &F0 = getFpVal();

  if (That.isInt())
    F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
                APFloat::rmNearestTiesToEven);
  else
    F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
}

void FAddendCoef::negate() {
  if (isInt())
    IntVal = 0 - IntVal;
  else
    getFpVal().changeSign();
}

Value *FAddendCoef::getValue(Type *Ty) const {
  return isInt() ?
    ConstantFP::get(Ty, float(IntVal)) :
    ConstantFP::get(Ty->getContext(), getFpVal());
}
// The definition of <Val>     Addends
// =========================================
//  A +  B                     <1, A>, <1, B>
//  A -  B                     <1, A>, <-1, B>
//  0 -  B                     <-1, B>
//  C *  A                     <C, A>
//  A +  C                     <1, A>, <C, NULL>
//  0 +/- 0                    <0, NULL> (corner case)
//
// Legend: A and B are not constant, C is constant
unsigned FAddend::drillValueDownOneStep
  (Value *Val, FAddend &Addend0, FAddend &Addend1) {
  Instruction *I = nullptr;
  if (!Val || !(I = dyn_cast<Instruction>(Val)))
    return 0;

  unsigned Opcode = I->getOpcode();

  if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
    ConstantFP *C0, *C1;
    Value *Opnd0 = I->getOperand(0);
    Value *Opnd1 = I->getOperand(1);
    if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
      Opnd0 = nullptr;

    if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
      Opnd1 = nullptr;

    if (Opnd0) {
      if (!C0)
        Addend0.set(1, Opnd0);
      else
        Addend0.set(C0, nullptr);
    }

    if (Opnd1) {
      FAddend &Addend = Opnd0 ? Addend1 : Addend0;
      if (!C1)
        Addend.set(1, Opnd1);
      else
        Addend.set(C1, nullptr);
      if (Opcode == Instruction::FSub)
        Addend.negate();
    }

    if (Opnd0 || Opnd1)
      return Opnd0 && Opnd1 ? 2 : 1;

    // Both operands are zero. Weird!
    Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
    return 1;
  }

  if (I->getOpcode() == Instruction::FMul) {
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
      Addend0.set(C, V1);
      return 1;
    }

    if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
      Addend0.set(C, V0);
      return 1;
    }
  }

  return 0;
}
// Try to break *this* addend into two addends. e.g. Suppose this addend is
// <2.3, V>, and V = X + Y; by calling this function, we obtain two addends,
// i.e. <2.3, X> and <2.3, Y>.
unsigned FAddend::drillAddendDownOneStep
  (FAddend &Addend0, FAddend &Addend1) const {
  if (isConstant())
    return 0;

  unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
  if (!BreakNum || Coeff.isOne())
    return BreakNum;

  Addend0.Scale(Coeff);

  if (BreakNum == 2)
    Addend1.Scale(Coeff);

  return BreakNum;
}
Value *FAddCombine::simplify(Instruction *I) {
  assert(I->hasAllowReassoc() && I->hasNoSignedZeros() &&
         "Expected 'reassoc'+'nsz' instruction");

  // Currently we are not able to handle vector type.
  if (I->getType()->isVectorTy())
    return nullptr;

  assert((I->getOpcode() == Instruction::FAdd ||
          I->getOpcode() == Instruction::FSub) && "Expect add/sub");

  // Save the instruction before calling other member-functions.
  Instr = I;

  FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;

  unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);

  // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1.
  unsigned Opnd0_ExpNum = 0;
  unsigned Opnd1_ExpNum = 0;

  if (!Opnd0.isConstant())
    Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);

  // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1.
  if (OpndNum == 2 && !Opnd1.isConstant())
    Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);

  // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1
  if (Opnd0_ExpNum && Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0_0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    // Compute instruction quota. We should save at least one instruction.
    unsigned InstQuota = 0;

    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
                 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;

    if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
      return R;
  }

  if (OpndNum != 2) {
    // The input instruction is : "I = 0.0 +/- V". If the "V" were able to be
    // split into two addends, say "V = X - Y", the instruction would have
    // been optimized into "I = Y - X" in the previous steps.
    const FAddendCoef &CE = Opnd0.getCoef();
    return CE.isOne() ? Opnd0.getSymVal() : nullptr;
  }

  // Step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
  if (Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1]
  if (Opnd0_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd1);
    AllOpnds.push_back(&Opnd0_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  return nullptr;
}
Value *FAddCombine::simplifyFAdd(AddendVect &Addends, unsigned InstrQuota) {
  unsigned AddendNum = Addends.size();
  assert(AddendNum <= 4 && "Too many addends");

  // For saving intermediate results.
  unsigned NextTmpIdx = 0;
  FAddend TmpResult[3];

  // Simplified addends are placed in <SimpVect>.
  AddendVect SimpVect;

  // The outer loop works on one symbolic-value at a time. Suppose the input
  // addends are : <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
  // The symbolic-values will be processed in this order: x, y, z.
  for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {

    const FAddend *ThisAddend = Addends[SymIdx];
    if (!ThisAddend) {
      // This addend was processed before.
      continue;
    }

    Value *Val = ThisAddend->getSymVal();

    // If the resulting expr has a constant addend, it is desirable for this
    // constant addend to reside at the top of the resulting expression tree.
    // Placing constants close to super-expr(s) will potentially reveal some
    // optimization opportunities in super-expr(s). Here we do not implement
    // this logic intentionally and rely on the SimplifyAssociativeOrCommutative
    // call later.

    unsigned StartIdx = SimpVect.size();
    SimpVect.push_back(ThisAddend);

    // The inner loop collects addends sharing the same symbolic value, and
    // these addends will later be folded into a single addend. Following the
    // above example, if the symbolic value "y" is being processed, the inner
    // loop will collect the two addends "<b1, y>" and "<b2, y>". These two
    // addends will later be folded into "<b1+b2, y>".
    for (unsigned SameSymIdx = SymIdx + 1;
         SameSymIdx < AddendNum; SameSymIdx++) {
      const FAddend *T = Addends[SameSymIdx];
      if (T && T->getSymVal() == Val) {
        // Set null such that next iteration of the outer loop will not process
        // this addend again.
        Addends[SameSymIdx] = nullptr;
        SimpVect.push_back(T);
      }
    }

    // If multiple addends share the same symbolic value, fold them together.
    if (StartIdx + 1 != SimpVect.size()) {
      FAddend &R = TmpResult[NextTmpIdx++];
      R = *SimpVect[StartIdx];
      for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
        R += *SimpVect[Idx];

      // Pop all addends being folded and push the resulting folded addend.
      SimpVect.resize(StartIdx);
      if (!R.isZero())
        SimpVect.push_back(&R);
    }
  }

  assert((NextTmpIdx <= std::size(TmpResult) + 1) && "out-of-bound access");

  Value *Result;
  if (!SimpVect.empty())
    Result = createNaryFAdd(SimpVect, InstrQuota);
  else
    // The addition is folded to 0.0.
    Result = ConstantFP::get(Instr->getType(), 0.0);

  return Result;
}
Value *FAddCombine::createNaryFAdd
  (const AddendVect &Opnds, unsigned InstrQuota) {
  assert(!Opnds.empty() && "Expect at least one addend");

  // Step 1: Check if the # of instructions needed exceeds the quota.

  unsigned InstrNeeded = calcInstrNumber(Opnds);
  if (InstrNeeded > InstrQuota)
    return nullptr;

  initCreateInstNum();

  // Step 2: Emit the N-ary addition.
  // Note that at most three instructions are involved in Fadd-InstCombine: the
  // addition in question, and at most two neighboring instructions.
  // The resulting optimized addition should have at least one less instruction
  // than the original addition expression tree. This implies that the resulting
  // N-ary addition has at most two instructions, and we don't need to worry
  // about tree-height when constructing the N-ary addition.

  Value *LastVal = nullptr;
  bool LastValNeedNeg = false;

  // Iterate the addends, creating fadd/fsub using two adjacent addends.
  for (const FAddend *Opnd : Opnds) {
    bool NeedNeg;
    Value *V = createAddendVal(*Opnd, NeedNeg);
    if (!LastVal) {
      LastVal = V;
      LastValNeedNeg = NeedNeg;
      continue;
    }

    if (LastValNeedNeg == NeedNeg) {
      LastVal = createFAdd(LastVal, V);
      continue;
    }

    if (LastValNeedNeg)
      LastVal = createFSub(V, LastVal);
    else
      LastVal = createFSub(LastVal, V);

    LastValNeedNeg = false;
  }

  if (LastValNeedNeg)
    LastVal = createFNeg(LastVal);

#ifndef NDEBUG
  assert(CreateInstrNum == InstrNeeded &&
         "Inconsistent instruction numbers");
#endif

  return LastVal;
}
Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFSub(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFNeg(Value *V) {
  Value *NewV = Builder.CreateFNeg(V);
  if (Instruction *I = dyn_cast<Instruction>(NewV))
    createInstPostProc(I, true); // fneg's don't receive instruction numbers.
  return NewV;
}

Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFAdd(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFMul(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}
void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
  NewInstr->setDebugLoc(Instr->getDebugLoc());

  // Keep track of the number of instructions created.
  if (!NoNumber)
    incCreateInstNum();

  // Propagate fast-math flags
  NewInstr->setFastMathFlags(Instr->getFastMathFlags());
}
// Return the number of instructions needed to emit the N-ary addition.
// NOTE: Keep this function in sync with createAddendVal().
unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
  unsigned OpndNum = Opnds.size();
  unsigned InstrNeeded = OpndNum - 1;

  // Adjust the number of instructions needed to emit the N-ary add.
  for (const FAddend *Opnd : Opnds) {
    if (Opnd->isConstant())
      continue;

    // The constant check above is really for a few special constant
    // coefficients.
    if (isa<UndefValue>(Opnd->getSymVal()))
      continue;

    const FAddendCoef &CE = Opnd->getCoef();
    // Let the addend be "c * x". If "c == +/-1", the value of the addend
    // is immediately available; otherwise, it needs exactly one instruction
    // to evaluate the value.
    if (!CE.isMinusOne() && !CE.isOne())
      InstrNeeded++;
  }
  return InstrNeeded;
}
// Input Addend        Value           NeedNeg(output)
// ================================================================
// Constant C          C               false
// <+/-1, V>           V               coefficient is -1
// <2/-2, V>           "fadd V, V"     coefficient is -2
// <C, V>              "fmul V, C"     false
//
// NOTE: Keep this function in sync with FAddCombine::calcInstrNumber.
Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
  const FAddendCoef &Coeff = Opnd.getCoef();

  if (Opnd.isConstant()) {
    NeedNeg = false;
    return Coeff.getValue(Instr->getType());
  }

  Value *OpndVal = Opnd.getSymVal();

  if (Coeff.isMinusOne() || Coeff.isOne()) {
    NeedNeg = Coeff.isMinusOne();
    return OpndVal;
  }

  if (Coeff.isTwo() || Coeff.isMinusTwo()) {
    NeedNeg = Coeff.isMinusTwo();
    return createFAdd(OpndVal, OpndVal);
  }

  NeedNeg = false;
  return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}
// Checks if any operand is negative and we can convert add to sub.
// This function checks for the following negative patterns
//   ADD(XOR(OR(Z, NOT(C)), C), 1) == NEG(AND(Z, C))
//   ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
//   XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
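// For example, with C == 0x0F (so NOT(C) == 0xF0), the first pattern reads
//   ((Z | 0xF0) ^ 0x0F) + 1 == -(Z & 0x0F)
// because XOR(OR(Z, NOT(C)), C) computes NOT(AND(Z, C)) bit by bit, and
// NOT(V) + 1 == NEG(V).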
static Value *checkForNegativeOperand(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  // This function creates 2 instructions to replace ADD, so we need at least
  // one of LHS or RHS to have one use to ensure the transform is a benefit.
  if (!LHS->hasOneUse() && !RHS->hasOneUse())
    return nullptr;

  Value *X = nullptr, *Y = nullptr, *Z = nullptr;
  const APInt *C1 = nullptr, *C2 = nullptr;

  // if ONE is on other side, swap
  if (match(RHS, m_Add(m_Value(X), m_One())))
    std::swap(LHS, RHS);

  if (match(LHS, m_Add(m_Value(X), m_One()))) {
    // if XOR on other side, swap
    if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
      std::swap(X, RHS);

    if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
      // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
      // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
      if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
        Value *NewAnd = Builder.CreateAnd(Z, *C1);
        return Builder.CreateSub(RHS, NewAnd, "sub");
      } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
        // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
        // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
        Value *NewOr = Builder.CreateOr(Z, ~(*C1));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
    }
  }

  // Restore LHS and RHS
  LHS = I.getOperand(0);
  RHS = I.getOperand(1);

  // if XOR is on other side, swap
  if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
    std::swap(LHS, RHS);

  // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
  // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
  if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
    if (C1->countr_zero() == 0)
      if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
        Value *NewOr = Builder.CreateOr(Z, ~(*C2));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
  return nullptr;
}
/// Wrapping flags may allow combining constants separated by an extend.
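/// For example, with X : i8 zero-extended to i32:
///   (zext (X +nuw 5)) + (-3) --> zext (X +nuw 2)
/// The nuw flag guarantees the narrow add did not wrap, so the wide constant
/// can be folded back into the narrow type.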
static Instruction *foldNoWrapAdd(BinaryOperator &Add,
                                  InstCombiner::BuilderTy &Builder) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  Type *Ty = Add.getType();
  Constant *Op1C;
  if (!match(Op1, m_Constant(Op1C)))
    return nullptr;

  // Try this match first because it results in an add in the narrow type.
  // (zext (X +nuw C2)) + C1 --> zext (X + (C2 + trunc(C1)))
  Value *X;
  const APInt *C1, *C2;
  if (match(Op1, m_APInt(C1)) &&
      match(Op0, m_ZExt(m_NUWAddLike(m_Value(X), m_APInt(C2)))) &&
      C1->isNegative() && C1->sge(-C2->sext(C1->getBitWidth()))) {
    APInt NewC = *C2 + C1->trunc(C2->getBitWidth());
    // If the smaller add will fold to zero, we don't need to check one use.
    if (NewC.isZero())
      return new ZExtInst(X, Ty);
    // Otherwise only do this if the existing zero extend will be removed.
    if (Op0->hasOneUse())
      return new ZExtInst(
          Builder.CreateNUWAdd(X, ConstantInt::get(X->getType(), NewC)), Ty);
  }

  // More general combining of constants in the wide type.
  // (sext (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C)
  // or (zext nneg (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C)
  Constant *NarrowC;
  if (match(Op0, m_OneUse(m_SExtLike(
                     m_NSWAddLike(m_Value(X), m_Constant(NarrowC)))))) {
    Value *WideC = Builder.CreateSExt(NarrowC, Ty);
    Value *NewC = Builder.CreateAdd(WideC, Op1C);
    Value *WideX = Builder.CreateSExt(X, Ty);
    return BinaryOperator::CreateAdd(WideX, NewC);
  }
  // (zext (X +nuw NarrowC)) + C --> (zext X) + (zext(NarrowC) + C)
  if (match(Op0,
            m_OneUse(m_ZExt(m_NUWAddLike(m_Value(X), m_Constant(NarrowC)))))) {
    Value *WideC = Builder.CreateZExt(NarrowC, Ty);
    Value *NewC = Builder.CreateAdd(WideC, Op1C);
    Value *WideX = Builder.CreateZExt(X, Ty);
    return BinaryOperator::CreateAdd(WideX, NewC);
  }

  return nullptr;
}
*InstCombinerImpl::foldAddWithConstant(BinaryOperator
&Add
) {
857 Value
*Op0
= Add
.getOperand(0), *Op1
= Add
.getOperand(1);
858 Type
*Ty
= Add
.getType();
860 if (!match(Op1
, m_ImmConstant(Op1C
)))
863 if (Instruction
*NV
= foldBinOpIntoSelectOrPhi(Add
))
869 // add (sub C1, X), C2 --> sub (add C1, C2), X
870 if (match(Op0
, m_Sub(m_Constant(Op00C
), m_Value(X
))))
871 return BinaryOperator::CreateSub(ConstantExpr::getAdd(Op00C
, Op1C
), X
);
875 // add (sub X, Y), -1 --> add (not Y), X
876 if (match(Op0
, m_OneUse(m_Sub(m_Value(X
), m_Value(Y
)))) &&
877 match(Op1
, m_AllOnes()))
878 return BinaryOperator::CreateAdd(Builder
.CreateNot(Y
), X
);
880 // zext(bool) + C -> bool ? C + 1 : C
881 if (match(Op0
, m_ZExt(m_Value(X
))) &&
882 X
->getType()->getScalarSizeInBits() == 1)
883 return SelectInst::Create(X
, InstCombiner::AddOne(Op1C
), Op1
);
884 // sext(bool) + C -> bool ? C - 1 : C
885 if (match(Op0
, m_SExt(m_Value(X
))) &&
886 X
->getType()->getScalarSizeInBits() == 1)
887 return SelectInst::Create(X
, InstCombiner::SubOne(Op1C
), Op1
);
889 // ~X + C --> (C-1) - X
890 if (match(Op0
, m_Not(m_Value(X
)))) {
891 // ~X + C has NSW and (C-1) won't oveflow => (C-1)-X can have NSW
892 auto *COne
= ConstantInt::get(Op1C
->getType(), 1);
893 bool WillNotSOV
= willNotOverflowSignedSub(Op1C
, COne
, Add
);
894 BinaryOperator
*Res
=
895 BinaryOperator::CreateSub(ConstantExpr::getSub(Op1C
, COne
), X
);
896 Res
->setHasNoSignedWrap(Add
.hasNoSignedWrap() && WillNotSOV
);
900 // (iN X s>> (N - 1)) + 1 --> zext (X > -1)
902 unsigned BitWidth
= Ty
->getScalarSizeInBits();
903 if (match(Op0
, m_OneUse(m_AShr(m_Value(X
),
904 m_SpecificIntAllowPoison(BitWidth
- 1)))) &&
906 return new ZExtInst(Builder
.CreateIsNotNeg(X
, "isnotneg"), Ty
);
908 if (!match(Op1
, m_APInt(C
)))
911 // (X | Op01C) + Op1C --> X + (Op01C + Op1C) iff the `or` is actually an `add`
913 if (match(Op0
, m_DisjointOr(m_Value(X
), m_ImmConstant(Op01C
)))) {
914 BinaryOperator
*NewAdd
=
915 BinaryOperator::CreateAdd(X
, ConstantExpr::getAdd(Op01C
, Op1C
));
916 NewAdd
->setHasNoSignedWrap(Add
.hasNoSignedWrap() &&
917 willNotOverflowSignedAdd(Op01C
, Op1C
, Add
));
918 NewAdd
->setHasNoUnsignedWrap(Add
.hasNoUnsignedWrap());
922 // (X | C2) + C --> (X | C2) ^ C2 iff (C2 == -C)
924 if (match(Op0
, m_Or(m_Value(), m_APInt(C2
))) && *C2
== -*C
)
925 return BinaryOperator::CreateXor(Op0
, ConstantInt::get(Add
.getType(), *C2
));
927 if (C
->isSignMask()) {
928 // If wrapping is not allowed, then the addition must set the sign bit:
929 // X + (signmask) --> X | signmask
930 if (Add
.hasNoSignedWrap() || Add
.hasNoUnsignedWrap())
931 return BinaryOperator::CreateOr(Op0
, Op1
);
933 // If wrapping is allowed, then the addition flips the sign bit of LHS:
934 // X + (signmask) --> X ^ signmask
935 return BinaryOperator::CreateXor(Op0
, Op1
);
938 // Is this add the last step in a convoluted sext?
939 // add(zext(xor i16 X, -32768), -32768) --> sext X
940 if (match(Op0
, m_ZExt(m_Xor(m_Value(X
), m_APInt(C2
)))) &&
941 C2
->isMinSignedValue() && C2
->sext(Ty
->getScalarSizeInBits()) == *C
)
942 return CastInst::Create(Instruction::SExt
, X
, Ty
);
944 if (match(Op0
, m_Xor(m_Value(X
), m_APInt(C2
)))) {
945 // (X ^ signmask) + C --> (X + (signmask ^ C))
946 if (C2
->isSignMask())
947 return BinaryOperator::CreateAdd(X
, ConstantInt::get(Ty
, *C2
^ *C
));
949 // If X has no high-bits set above an xor mask:
950 // add (xor X, LowMaskC), C --> sub (LowMaskC + C), X
952 KnownBits LHSKnown
= computeKnownBits(X
, 0, &Add
);
953 if ((*C2
| LHSKnown
.Zero
).isAllOnes())
954 return BinaryOperator::CreateSub(ConstantInt::get(Ty
, *C2
+ *C
), X
);
957 // Look for a math+logic pattern that corresponds to sext-in-register of a
958 // value with cleared high bits. Convert that into a pair of shifts:
959 // add (xor X, 0x80), 0xF..F80 --> (X << ShAmtC) >>s ShAmtC
960 // add (xor X, 0xF..F80), 0x80 --> (X << ShAmtC) >>s ShAmtC
961 if (Op0
->hasOneUse() && *C2
== -(*C
)) {
962 unsigned BitWidth
= Ty
->getScalarSizeInBits();
965 ShAmt
= BitWidth
- C
->logBase2() - 1;
966 else if (C2
->isPowerOf2())
967 ShAmt
= BitWidth
- C2
->logBase2() - 1;
968 if (ShAmt
&& MaskedValueIsZero(X
, APInt::getHighBitsSet(BitWidth
, ShAmt
),
970 Constant
*ShAmtC
= ConstantInt::get(Ty
, ShAmt
);
971 Value
*NewShl
= Builder
.CreateShl(X
, ShAmtC
, "sext");
972 return BinaryOperator::CreateAShr(NewShl
, ShAmtC
);
977 if (C
->isOne() && Op0
->hasOneUse()) {
978 // add (sext i1 X), 1 --> zext (not X)
979 // TODO: The smallest IR representation is (select X, 0, 1), and that would
980 // not require the one-use check. But we need to remove a transform in
981 // visitSelect and make sure that IR value tracking for select is equal or
982 // better than for these ops.
983 if (match(Op0
, m_SExt(m_Value(X
))) &&
984 X
->getType()->getScalarSizeInBits() == 1)
985 return new ZExtInst(Builder
.CreateNot(X
), Ty
);
987 // Shifts and add used to flip and mask off the low bit:
988 // add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1
990 if (match(Op0
, m_AShr(m_Shl(m_Value(X
), m_APInt(C2
)), m_APInt(C3
))) &&
991 C2
== C3
&& *C2
== Ty
->getScalarSizeInBits() - 1) {
992 Value
*NotX
= Builder
.CreateNot(X
);
993 return BinaryOperator::CreateAnd(NotX
, ConstantInt::get(Ty
, 1));
997 // umax(X, C) + -C --> usub.sat(X, C)
998 if (match(Op0
, m_OneUse(m_UMax(m_Value(X
), m_SpecificInt(-*C
)))))
999 return replaceInstUsesWith(
1000 Add
, Builder
.CreateBinaryIntrinsic(
1001 Intrinsic::usub_sat
, X
, ConstantInt::get(Add
.getType(), -*C
)));
1003 // Fold (add (zext (add X, -1)), 1) -> (zext X) if X is non-zero.
1004 // TODO: There's a general form for any constant on the outer add.
1006 if (match(Op0
, m_ZExt(m_Add(m_Value(X
), m_AllOnes())))) {
1007 const SimplifyQuery Q
= SQ
.getWithInstruction(&Add
);
1008 if (llvm::isKnownNonZero(X
, Q
))
1009 return new ZExtInst(X
, Ty
);
// Match variations of a^2 + 2*a*b + b^2.
//
// To reuse the code between the FP and Int versions, the instruction OpCodes
// and constant types have been turned into template parameters.
//
// Mul2Rhs: The constant to perform the multiplicative equivalent of X*2 with;
// should be `m_SpecificFP(2.0)` for FP and `m_SpecificInt(1)` for Int
// (we're matching `X<<1` instead of `X*2` for Int).
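// For integers, a*a + 2*a*b + b*b == (a+b)*(a+b) holds modulo 2^BitWidth, so
// no nsw/nuw is required; the FP version additionally needs the reassoc and
// nsz fast-math flags (see foldSquareSumFP below).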
template <bool FP, typename Mul2Rhs>
static bool matchesSquareSum(BinaryOperator &I, Mul2Rhs M2Rhs, Value *&A,
                             Value *&B) {
  constexpr unsigned MulOp = FP ? Instruction::FMul : Instruction::Mul;
  constexpr unsigned AddOp = FP ? Instruction::FAdd : Instruction::Add;
  constexpr unsigned Mul2Op = FP ? Instruction::FMul : Instruction::Shl;

  // (a * a) + (((a * 2) + b) * b)
  if (match(&I, m_c_BinOp(
                    AddOp, m_OneUse(m_BinOp(MulOp, m_Value(A), m_Deferred(A))),
                    m_OneUse(m_c_BinOp(
                        MulOp,
                        m_c_BinOp(AddOp, m_BinOp(Mul2Op, m_Deferred(A), M2Rhs),
                                  m_Value(B)),
                        m_Deferred(B))))))
    return true;

  // ((a * b) * 2)  or  ((a * 2) * b)
  // +
  // (a * a + b * b)  or  (b * b + a * a)
  return match(
      &I,
      m_c_BinOp(AddOp,
                m_CombineOr(m_OneUse(m_BinOp(
                                Mul2Op, m_BinOp(MulOp, m_Value(A), m_Value(B)),
                                M2Rhs)),
                            m_OneUse(m_c_BinOp(MulOp,
                                               m_BinOp(Mul2Op, m_Value(A),
                                                       M2Rhs),
                                               m_Value(B)))),
                m_OneUse(m_c_BinOp(
                    AddOp, m_BinOp(MulOp, m_Deferred(A), m_Deferred(A)),
                    m_BinOp(MulOp, m_Deferred(B), m_Deferred(B))))));
}
// Fold integer variations of a^2 + 2*a*b + b^2 -> (a + b)^2
Instruction *InstCombinerImpl::foldSquareSumInt(BinaryOperator &I) {
  Value *A, *B;
  if (matchesSquareSum</*FP*/ false>(I, m_SpecificInt(1), A, B)) {
    Value *AB = Builder.CreateAdd(A, B);
    return BinaryOperator::CreateMul(AB, AB);
  }
  return nullptr;
}

// Fold floating point variations of a^2 + 2*a*b + b^2 -> (a + b)^2
// Requires `nsz` and `reassoc`.
Instruction *InstCombinerImpl::foldSquareSumFP(BinaryOperator &I) {
  assert(I.hasAllowReassoc() && I.hasNoSignedZeros() && "Assumption mismatch");
  Value *A, *B;
  if (matchesSquareSum</*FP*/ true>(I, m_SpecificFP(2.0), A, B)) {
    Value *AB = Builder.CreateFAddFMF(A, B, &I);
    return BinaryOperator::CreateFMulFMF(AB, AB, &I);
  }
  return nullptr;
}
// Matches multiplication expression Op * C where C is a constant. Returns the
// constant value in C and the other operand in Op. Returns true if such a
// match is found.
static bool MatchMul(Value *E, Value *&Op, APInt &C) {
  const APInt *AI;
  if (match(E, m_Mul(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (match(E, m_Shl(m_Value(Op), m_APInt(AI)))) {
    C = APInt(AI->getBitWidth(), 1);
    C <<= *AI;
    return true;
  }
  return false;
}

// Matches remainder expression Op % C where C is a constant. Returns the
// constant value in C and the other operand in Op. Returns the signedness of
// the remainder operation in IsSigned. Returns true if such a match is
// found.
static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned) {
  const APInt *AI;
  IsSigned = false;
  if (match(E, m_SRem(m_Value(Op), m_APInt(AI)))) {
    IsSigned = true;
    C = *AI;
    return true;
  }
  if (match(E, m_URem(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (match(E, m_And(m_Value(Op), m_APInt(AI))) && (*AI + 1).isPowerOf2()) {
    C = *AI + 1;
    return true;
  }
  return false;
}

// Matches division expression Op / C with the given signedness as indicated
// by IsSigned, where C is a constant. Returns the constant value in C and the
// other operand in Op. Returns true if such a match is found.
static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned) {
  const APInt *AI;
  if (IsSigned && match(E, m_SDiv(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (!IsSigned) {
    if (match(E, m_UDiv(m_Value(Op), m_APInt(AI)))) {
      C = *AI;
      return true;
    }
    if (match(E, m_LShr(m_Value(Op), m_APInt(AI)))) {
      C = APInt(AI->getBitWidth(), 1);
      C <<= *AI;
      return true;
    }
  }
  return false;
}

// Returns whether C0 * C1 with the given signedness overflows.
static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned) {
  bool overflow;
  if (IsSigned)
    (void)C0.smul_ov(C1, overflow);
  else
    (void)C0.umul_ov(C1, overflow);
  return overflow;
}
// Simplifies X % C0 + (( X / C0 ) % C1) * C0 to X % (C0 * C1), where (C0 * C1)
// does not overflow.
// Simplifies (X / C0) * C1 + (X % C0) * C2 to
// (X / C0) * (C1 - C2 * C0) + X * C2.
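// For example (unsigned): X % 3 + ((X / 3) % 5) * 3 --> X % 15. Writing
// X = 15*q + r with 0 <= r < 15 gives X % 3 == r % 3, (X / 3) % 5 == r / 3,
// and r % 3 + (r / 3) * 3 == r.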
Value *InstCombinerImpl::SimplifyAddWithRemainder(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *X, *MulOpV;
  APInt C0, MulOpC;
  bool IsSigned;
  // Match I = X % C0 + MulOpV * C0
  if (((MatchRem(LHS, X, C0, IsSigned) && MatchMul(RHS, MulOpV, MulOpC)) ||
       (MatchRem(RHS, X, C0, IsSigned) && MatchMul(LHS, MulOpV, MulOpC))) &&
      C0 == MulOpC) {
    Value *RemOpV;
    APInt C1;
    bool Rem2IsSigned;
    // Match MulOpC = RemOpV % C1
    if (MatchRem(MulOpV, RemOpV, C1, Rem2IsSigned) &&
        IsSigned == Rem2IsSigned) {
      Value *DivOpV;
      APInt DivOpC;
      // Match RemOpV = X / C0
      if (MatchDiv(RemOpV, DivOpV, DivOpC, IsSigned) && X == DivOpV &&
          C0 == DivOpC && !MulWillOverflow(C0, C1, IsSigned)) {
        Value *NewDivisor = ConstantInt::get(X->getType(), C0 * C1);
        return IsSigned ? Builder.CreateSRem(X, NewDivisor, "srem")
                        : Builder.CreateURem(X, NewDivisor, "urem");
      }
    }
  }

  // Match I = (X / C0) * C1 + (X % C0) * C2
  Value *Div, *Rem;
  APInt C1, C2;
  if (!LHS->hasOneUse() || !MatchMul(LHS, Div, C1))
    Div = LHS, C1 = APInt(I.getType()->getScalarSizeInBits(), 1);
  if (!RHS->hasOneUse() || !MatchMul(RHS, Rem, C2))
    Rem = RHS, C2 = APInt(I.getType()->getScalarSizeInBits(), 1);
  if (match(Div, m_IRem(m_Value(), m_Value()))) {
    std::swap(Div, Rem);
    std::swap(C1, C2);
  }

  Value *DivOpV;
  APInt DivOpC;
  if (MatchRem(Rem, X, C0, IsSigned) &&
      MatchDiv(Div, DivOpV, DivOpC, IsSigned) && X == DivOpV && C0 == DivOpC) {
    APInt NewC = C1 - C2 * C0;
    if (!NewC.isZero() && !Rem->hasOneUse())
      return nullptr;
    if (!isGuaranteedNotToBeUndef(X, &AC, &I, &DT))
      return nullptr;
    Value *MulXC2 = Builder.CreateMul(X, ConstantInt::get(X->getType(), C2));
    if (NewC.isZero())
      return MulXC2;
    return Builder.CreateAdd(
        Builder.CreateMul(Div, ConstantInt::get(X->getType(), NewC)), MulXC2);
  }

  return nullptr;
}
/// Fold
///   (1 << NBits) - 1
/// into:
///   ~(-(1 << NBits))
/// because a 'not' is better for bit-tracking analysis and other transforms
/// than an 'add'. The new shl is always nsw, and is nuw if the old `add` was.
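/// For example:
///   %mask = add (shl 1, %n), -1      ; (1 << n) - 1
/// becomes:
///   %notmask = shl -1, %n
///   %mask = xor %notmask, -1         ; ~(-1 << n)
/// Both set exactly the low %n bits.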
static Instruction *canonicalizeLowbitMask(BinaryOperator &I,
                                           InstCombiner::BuilderTy &Builder) {
  Value *NBits;
  if (!match(&I, m_Add(m_OneUse(m_Shl(m_One(), m_Value(NBits))), m_AllOnes())))
    return nullptr;

  Constant *MinusOne = Constant::getAllOnesValue(NBits->getType());
  Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask");
  // Be wary of constant folding.
  if (auto *BOp = dyn_cast<BinaryOperator>(NotMask)) {
    // Always NSW. But NUW propagates from `add`.
    BOp->setHasNoSignedWrap();
    BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
  }

  return BinaryOperator::CreateNot(NotMask, I.getName());
}
static Instruction *foldToUnsignedSaturatedAdd(BinaryOperator &I) {
  assert(I.getOpcode() == Instruction::Add && "Expecting add instruction");
  Type *Ty = I.getType();
  auto getUAddSat = [&]() {
    return Intrinsic::getOrInsertDeclaration(I.getModule(),
                                             Intrinsic::uadd_sat, Ty);
  };

  // add (umin X, ~Y), Y --> uaddsat X, Y
  Value *X, *Y;
  if (match(&I, m_c_Add(m_c_UMin(m_Value(X), m_Not(m_Value(Y))),
                        m_Deferred(Y))))
    return CallInst::Create(getUAddSat(), { X, Y });

  // add (umin X, ~C), C --> uaddsat X, C
  const APInt *C, *NotC;
  if (match(&I, m_Add(m_UMin(m_Value(X), m_APInt(NotC)), m_APInt(C))) &&
      *C == ~*NotC)
    return CallInst::Create(getUAddSat(), { X, ConstantInt::get(Ty, *C) });

  return nullptr;
}
// Transform:
//  (add A, (shl (neg B), Y))
//      -> (sub A, (shl B, Y))
static Instruction *combineAddSubWithShlAddSub(InstCombiner::BuilderTy &Builder,
                                               const BinaryOperator &I) {
  Value *A, *B, *Cnt;
  if (match(&I,
            m_c_Add(m_OneUse(m_Shl(m_OneUse(m_Neg(m_Value(B))), m_Value(Cnt))),
                    m_Value(A)))) {
    Value *NewShl = Builder.CreateShl(B, Cnt);
    return BinaryOperator::CreateSub(A, NewShl);
  }
  return nullptr;
}
/// Try to reduce signed division by power-of-2 to an arithmetic shift right.
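/// For example, for i32 and DivC == 4:
///   (X sdiv 4) + sext (icmp ugt (X & 0x80000003), 0x80000000) --> X ashr 2
/// The sext term is -1 exactly when X is negative and (X & 3) != 0, which
/// converts the round-toward-zero sdiv into the round-down ashr.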
static Instruction *foldAddToAshr(BinaryOperator &Add) {
  // Division must be by power-of-2, but not the minimum signed value.
  Value *X;
  const APInt *DivC;
  if (!match(Add.getOperand(0), m_SDiv(m_Value(X), m_Power2(DivC))) ||
      DivC->isNegative())
    return nullptr;

  // Rounding is done by adding -1 if the dividend (X) is negative and has any
  // low bits set. It recognizes two canonical patterns:
  // 1. For an 'ugt' cmp with the signed minimum value (SMIN), the
  //    pattern is: sext (icmp ugt (X & (DivC - 1)), SMIN).
  // 2. For an 'eq' cmp, the pattern's: sext (icmp eq X & (SMIN + 1), SMIN + 1).
  // Note that, by the time we end up here, if possible, ugt has been
  // canonicalized into eq.
  const APInt *MaskC, *MaskCCmp;
  CmpPredicate Pred;
  if (!match(Add.getOperand(1),
             m_SExt(m_ICmp(Pred, m_And(m_Specific(X), m_APInt(MaskC)),
                           m_APInt(MaskCCmp)))))
    return nullptr;

  if ((Pred != ICmpInst::ICMP_UGT || !MaskCCmp->isSignMask()) &&
      (Pred != ICmpInst::ICMP_EQ || *MaskCCmp != *MaskC))
    return nullptr;

  APInt SMin = APInt::getSignedMinValue(Add.getType()->getScalarSizeInBits());
  bool IsMaskValid = Pred == ICmpInst::ICMP_UGT
                         ? (*MaskC == (SMin | (*DivC - 1)))
                         : (*DivC == 2 && *MaskC == SMin + 1);
  if (!IsMaskValid)
    return nullptr;

  // (X / DivC) + sext ((X & (SMin | (DivC - 1)) >u SMin) --> X >>s log2(DivC)
  return BinaryOperator::CreateAShr(
      X, ConstantInt::get(Add.getType(), DivC->exactLogBase2()));
}
Instruction *InstCombinerImpl::foldAddLikeCommutative(Value *LHS, Value *RHS,
                                                      bool NSW, bool NUW) {
  // (A - B) + (C - A) --> C - B
  Value *A, *B, *C;
  if (match(LHS, m_Sub(m_Value(A), m_Value(B))) &&
      match(RHS, m_Sub(m_Value(C), m_Specific(A)))) {
    Instruction *R = BinaryOperator::CreateSub(C, B);
    bool NSWOut = NSW && match(LHS, m_NSWSub(m_Value(), m_Value())) &&
                  match(RHS, m_NSWSub(m_Value(), m_Value()));

    bool NUWOut = match(LHS, m_NUWSub(m_Value(), m_Value())) &&
                  match(RHS, m_NUWSub(m_Value(), m_Value()));
    R->setHasNoSignedWrap(NSWOut);
    R->setHasNoUnsignedWrap(NUWOut);
    return R;
  }

  // ((X s/ C1) << C2) + X => X s% -C1 where -C1 is 1 << C2
  const APInt *C1, *C2;
  if (match(LHS, m_Shl(m_SDiv(m_Specific(RHS), m_APInt(C1)), m_APInt(C2)))) {
    APInt One(C2->getBitWidth(), 1);
    APInt MinusC1 = -(*C1);
    if (MinusC1 == (One << *C2)) {
      Constant *NewRHS = ConstantInt::get(RHS->getType(), MinusC1);
      return BinaryOperator::CreateSRem(RHS, NewRHS);
    }
  }

  return nullptr;
}
Instruction *InstCombinerImpl::
    canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
        BinaryOperator &I) {
  assert((I.getOpcode() == Instruction::Add ||
          I.getOpcode() == Instruction::Or ||
          I.getOpcode() == Instruction::Sub) &&
         "Expecting add/or/sub instruction");

  // We have a subtraction/addition between a (potentially truncated) *logical*
  // right-shift of X and a "select".
  Value *X, *Select;
  Instruction *LowBitsToSkip, *Extract;
  if (!match(&I, m_c_BinOp(m_TruncOrSelf(m_CombineAnd(
                               m_LShr(m_Value(X), m_Instruction(LowBitsToSkip)),
                               m_Instruction(Extract))),
                           m_Value(Select))))
    return nullptr;

  // `add`/`or` is commutative; but for `sub`, "select" *must* be on RHS.
  if (I.getOpcode() == Instruction::Sub && I.getOperand(1) != Select)
    return nullptr;

  Type *XTy = X->getType();
  bool HadTrunc = I.getType() != XTy;

  // If there was a truncation of extracted value, then we'll need to produce
  // one extra instruction, so we need to ensure one instruction will go away.
  if (HadTrunc && !match(&I, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
    return nullptr;

  // Extraction should extract high NBits bits, with shift amount calculated as:
  //   low bits to skip = shift bitwidth - high bits to extract
  // The shift amount itself may be extended, and we need to look past zero-ext
  // when matching NBits; that will matter for matching later.
  Value *NBits;
  if (!match(LowBitsToSkip,
             m_ZExtOrSelf(m_Sub(m_SpecificInt(XTy->getScalarSizeInBits()),
                                m_ZExtOrSelf(m_Value(NBits))))))
    return nullptr;

  // Sign-extending value can be zero-extended if we `sub`tract it,
  // or sign-extended otherwise.
  auto SkipExtInMagic = [&I](Value *&V) {
    if (I.getOpcode() == Instruction::Sub)
      match(V, m_ZExtOrSelf(m_Value(V)));
    else
      match(V, m_SExtOrSelf(m_Value(V)));
  };

  // Now, finally validate the sign-extending magic.
  // `select` itself may be appropriately extended, look past that.
  SkipExtInMagic(Select);

  CmpPredicate Pred;
  const APInt *Thr;
  Value *SignExtendingValue, *Zero;
  bool ShouldSignext;
  // It must be a select between two values we will later establish to be a
  // sign-extending value and a zero constant. The condition guarding the
  // sign-extension must be based on a sign bit of the same X we had in `lshr`.
  if (!match(Select, m_Select(m_ICmp(Pred, m_Specific(X), m_APInt(Thr)),
                              m_Value(SignExtendingValue), m_Value(Zero))) ||
      !isSignBitCheck(Pred, *Thr, ShouldSignext))
    return nullptr;

  // icmp-select pair is commutative.
  if (!ShouldSignext)
    std::swap(SignExtendingValue, Zero);

  // If we should not perform sign-extension then we must add/or/subtract zero.
  if (!match(Zero, m_Zero()))
    return nullptr;
  // Otherwise, it should be some constant, left-shifted by the same NBits we
  // had in `lshr`. Said left-shift can also be appropriately extended.
  // Again, we must look past zero-ext when looking for NBits.
  SkipExtInMagic(SignExtendingValue);
  Constant *SignExtendingValueBaseConstant;
  if (!match(SignExtendingValue,
             m_Shl(m_Constant(SignExtendingValueBaseConstant),
                   m_ZExtOrSelf(m_Specific(NBits)))))
    return nullptr;
  // If we `sub`, then the constant should be one, else it should be all-ones.
  if (I.getOpcode() == Instruction::Sub
          ? !match(SignExtendingValueBaseConstant, m_One())
          : !match(SignExtendingValueBaseConstant, m_AllOnes()))
    return nullptr;

  auto *NewAShr = BinaryOperator::CreateAShr(X, LowBitsToSkip,
                                             Extract->getName() + ".sext");
  NewAShr->copyIRFlags(Extract); // Preserve `exact`-ness.
  if (!HadTrunc)
    return NewAShr;

  Builder.Insert(NewAShr);
  return TruncInst::CreateTruncOrBitCast(NewAShr, I.getType());
}
/// This is a specialization of a more general transform from
/// foldUsingDistributiveLaws. If that code can be made to work optimally
/// for multi-use cases or propagating nsw/nuw, then we would not need this.
static Instruction *factorizeMathWithShlOps(BinaryOperator &I,
                                            InstCombiner::BuilderTy &Builder) {
  // TODO: Also handle mul by doubling the shift amount?
  assert((I.getOpcode() == Instruction::Add ||
          I.getOpcode() == Instruction::Sub) &&
         "Expected add/sub");
  auto *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
  auto *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
  if (!Op0 || !Op1 || !(Op0->hasOneUse() || Op1->hasOneUse()))
    return nullptr;

  Value *X, *Y, *ShAmt;
  if (!match(Op0, m_Shl(m_Value(X), m_Value(ShAmt))) ||
      !match(Op1, m_Shl(m_Value(Y), m_Specific(ShAmt))))
    return nullptr;

  // No-wrap propagates only when all ops have no-wrap.
  bool HasNSW = I.hasNoSignedWrap() && Op0->hasNoSignedWrap() &&
                Op1->hasNoSignedWrap();
  bool HasNUW = I.hasNoUnsignedWrap() && Op0->hasNoUnsignedWrap() &&
                Op1->hasNoUnsignedWrap();

  // add/sub (X << ShAmt), (Y << ShAmt) --> (add/sub X, Y) << ShAmt
  Value *NewMath = Builder.CreateBinOp(I.getOpcode(), X, Y);
  if (auto *NewI = dyn_cast<BinaryOperator>(NewMath)) {
    NewI->setHasNoSignedWrap(HasNSW);
    NewI->setHasNoUnsignedWrap(HasNUW);
  }
  auto *NewShl = BinaryOperator::CreateShl(NewMath, ShAmt);
  NewShl->setHasNoSignedWrap(HasNSW);
  NewShl->setHasNoUnsignedWrap(HasNUW);
  return NewShl;
}
/// Reduce a sequence of masked half-width multiplies to a single multiply.
/// (((XLow * YHigh) + (YLow * XHigh)) << HalfBits) + (XLow * YLow) --> X * Y
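/// For i32 (HalfBits == 16) this is the schoolbook decomposition with
/// XLow = X & 0xFFFF and XHigh = X >> 16:
///   X * Y == XLow*YLow + ((XLow*YHigh + YLow*XHigh) << 16)  (mod 2^32)
/// The XHigh*YHigh partial product is shifted entirely out of the low word.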
static Instruction *foldBoxMultiply(BinaryOperator &I) {
  unsigned BitWidth = I.getType()->getScalarSizeInBits();
  // Skip the odd bitwidth types.
  if ((BitWidth & 0x1))
    return nullptr;

  unsigned HalfBits = BitWidth >> 1;
  APInt HalfMask = APInt::getMaxValue(HalfBits);

  // ResLo = (CrossSum << HalfBits) + (YLo * XLo)
  Value *XLo, *YLo;
  Value *CrossSum;
  // Require one-use on the multiply to avoid increasing the number of
  // multiplications.
  if (!match(&I, m_c_Add(m_Shl(m_Value(CrossSum), m_SpecificInt(HalfBits)),
                         m_OneUse(m_Mul(m_Value(YLo), m_Value(XLo))))))
    return nullptr;

  // XLo = X & HalfMask
  // YLo = Y & HalfMask
  // TODO: Refactor with SimplifyDemandedBits or KnownBits known leading zeros
  // to enhance robustness
  Value *X, *Y;
  if (!match(XLo, m_And(m_Value(X), m_SpecificInt(HalfMask))) ||
      !match(YLo, m_And(m_Value(Y), m_SpecificInt(HalfMask))))
    return nullptr;

  // CrossSum = (X' * (Y >> HalfBits)) + (Y' * (X >> HalfBits))
  // X' can be either X or XLo in the pattern (and the same for Y')
  if (match(CrossSum,
            m_c_Add(m_c_Mul(m_LShr(m_Specific(Y), m_SpecificInt(HalfBits)),
                            m_CombineOr(m_Specific(X), m_Specific(XLo))),
                    m_c_Mul(m_LShr(m_Specific(X), m_SpecificInt(HalfBits)),
                            m_CombineOr(m_Specific(Y), m_Specific(YLo))))))
    return BinaryOperator::CreateMul(X, Y);

  return nullptr;
}
1519 Instruction
*InstCombinerImpl::visitAdd(BinaryOperator
&I
) {
1520 if (Value
*V
= simplifyAddInst(I
.getOperand(0), I
.getOperand(1),
1521 I
.hasNoSignedWrap(), I
.hasNoUnsignedWrap(),
1522 SQ
.getWithInstruction(&I
)))
1523 return replaceInstUsesWith(I
, V
);
1525 if (SimplifyAssociativeOrCommutative(I
))
1528 if (Instruction
*X
= foldVectorBinop(I
))
1531 if (Instruction
*Phi
= foldBinopWithPhiOperands(I
))
1534 // (A*B)+(A*C) -> A*(B+C) etc
1535 if (Value
*V
= foldUsingDistributiveLaws(I
))
1536 return replaceInstUsesWith(I
, V
);
1538 if (Instruction
*R
= foldBoxMultiply(I
))
1541 if (Instruction
*R
= factorizeMathWithShlOps(I
, Builder
))
1544 if (Instruction
*X
= foldAddWithConstant(I
))
1547 if (Instruction
*X
= foldNoWrapAdd(I
, Builder
))
1550 if (Instruction
*R
= foldBinOpShiftWithShift(I
))
1553 if (Instruction
*R
= combineAddSubWithShlAddSub(Builder
, I
))
1556 Value
*LHS
= I
.getOperand(0), *RHS
= I
.getOperand(1);
1557 if (Instruction
*R
= foldAddLikeCommutative(LHS
, RHS
, I
.hasNoSignedWrap(),
1558 I
.hasNoUnsignedWrap()))
1560 if (Instruction
*R
= foldAddLikeCommutative(RHS
, LHS
, I
.hasNoSignedWrap(),
1561 I
.hasNoUnsignedWrap()))
1563 Type
*Ty
= I
.getType();
1564 if (Ty
->isIntOrIntVectorTy(1))
1565 return BinaryOperator::CreateXor(LHS
, RHS
);
1569 auto *Shl
= BinaryOperator::CreateShl(LHS
, ConstantInt::get(Ty
, 1));
1570 Shl
->setHasNoSignedWrap(I
.hasNoSignedWrap());
1571 Shl
->setHasNoUnsignedWrap(I
.hasNoUnsignedWrap());
1576 if (match(LHS
, m_Neg(m_Value(A
)))) {
1577 // -A + -B --> -(A + B)
1578 if (match(RHS
, m_Neg(m_Value(B
))))
1579 return BinaryOperator::CreateNeg(Builder
.CreateAdd(A
, B
));
1582 auto *Sub
= BinaryOperator::CreateSub(RHS
, A
);
1583 auto *OB0
= cast
<OverflowingBinaryOperator
>(LHS
);
1584 Sub
->setHasNoSignedWrap(I
.hasNoSignedWrap() && OB0
->hasNoSignedWrap());
1590 if (match(RHS
, m_Neg(m_Value(B
)))) {
1591 auto *Sub
= BinaryOperator::CreateSub(LHS
, B
);
1592 auto *OBO
= cast
<OverflowingBinaryOperator
>(RHS
);
1593 Sub
->setHasNoSignedWrap(I
.hasNoSignedWrap() && OBO
->hasNoSignedWrap());
1597 if (Value
*V
= checkForNegativeOperand(I
, Builder
))
1598 return replaceInstUsesWith(I
, V
);
1600 // (A + 1) + ~B --> A - B
1601 // ~B + (A + 1) --> A - B
1602 // (~B + A) + 1 --> A - B
1603 // (A + ~B) + 1 --> A - B
1604 if (match(&I
, m_c_BinOp(m_Add(m_Value(A
), m_One()), m_Not(m_Value(B
)))) ||
1605 match(&I
, m_BinOp(m_c_Add(m_Not(m_Value(B
)), m_Value(A
)), m_One())))
1606 return BinaryOperator::CreateSub(A
, B
);
1608 // (A + RHS) + RHS --> A + (RHS << 1)
1609 if (match(LHS
, m_OneUse(m_c_Add(m_Value(A
), m_Specific(RHS
)))))
1610 return BinaryOperator::CreateAdd(A
, Builder
.CreateShl(RHS
, 1, "reass.add"));
1612 // LHS + (A + LHS) --> A + (LHS << 1)
1613 if (match(RHS
, m_OneUse(m_c_Add(m_Value(A
), m_Specific(LHS
)))))
1614 return BinaryOperator::CreateAdd(A
, Builder
.CreateShl(LHS
, 1, "reass.add"));
1617 // (A + C1) + (C2 - B) --> (A - B) + (C1 + C2)
1619 if (match(&I
, m_c_Add(m_Add(m_Value(A
), m_ImmConstant(C1
)),
1620 m_Sub(m_ImmConstant(C2
), m_Value(B
)))) &&
1621 (LHS
->hasOneUse() || RHS
->hasOneUse())) {
1622 Value
*Sub
= Builder
.CreateSub(A
, B
);
1623 return BinaryOperator::CreateAdd(Sub
, ConstantExpr::getAdd(C1
, C2
));
1626 // Canonicalize a constant sub operand as an add operand for better folding:
1627 // (C1 - A) + B --> (B - A) + C1
1628 if (match(&I
, m_c_Add(m_OneUse(m_Sub(m_ImmConstant(C1
), m_Value(A
))),
1630 Value
*Sub
= Builder
.CreateSub(B
, A
, "reass.sub");
1631 return BinaryOperator::CreateAdd(Sub
, C1
);
1635 // X % C0 + (( X / C0 ) % C1) * C0 => X % (C0 * C1)
1636 if (Value
*V
= SimplifyAddWithRemainder(I
)) return replaceInstUsesWith(I
, V
);
1639 // (A & 2^C1) + A => A & (2^C1 - 1) iff bit C1 in A is a sign bit
1640 if (match(&I
, m_c_Add(m_And(m_Value(A
), m_APInt(C1
)), m_Deferred(A
))) &&
1641 C1
->isPowerOf2() && (ComputeNumSignBits(A
) > C1
->countl_zero())) {
1642 Constant
*NewMask
= ConstantInt::get(RHS
->getType(), *C1
- 1);
1643 return BinaryOperator::CreateAnd(A
, NewMask
);
1646 // ZExt (B - A) + ZExt(A) --> ZExt(B)
1647 if ((match(RHS
, m_ZExt(m_Value(A
))) &&
1648 match(LHS
, m_ZExt(m_NUWSub(m_Value(B
), m_Specific(A
))))) ||
1649 (match(LHS
, m_ZExt(m_Value(A
))) &&
1650 match(RHS
, m_ZExt(m_NUWSub(m_Value(B
), m_Specific(A
))))))
1651 return new ZExtInst(B
, LHS
->getType());
1653 // zext(A) + sext(A) --> 0 if A is i1
1654 if (match(&I
, m_c_BinOp(m_ZExt(m_Value(A
)), m_SExt(m_Deferred(A
)))) &&
1655 A
->getType()->isIntOrIntVectorTy(1))
1656 return replaceInstUsesWith(I
, Constant::getNullValue(I
.getType()));
1658 // sext(A < B) + zext(A > B) => ucmp/scmp(A, B)
1659 CmpPredicate LTPred
, GTPred
;
1661 m_c_Add(m_SExt(m_c_ICmp(LTPred
, m_Value(A
), m_Value(B
))),
1662 m_ZExt(m_c_ICmp(GTPred
, m_Deferred(A
), m_Deferred(B
))))) &&
1663 A
->getType()->isIntOrIntVectorTy()) {
1664 if (ICmpInst::isGT(LTPred
)) {
1665 std::swap(LTPred
, GTPred
);
1669 if (ICmpInst::isLT(LTPred
) && ICmpInst::isGT(GTPred
) &&
1670 ICmpInst::isSigned(LTPred
) == ICmpInst::isSigned(GTPred
))
1671 return replaceInstUsesWith(
1672 I
, Builder
.CreateIntrinsic(
1674 ICmpInst::isSigned(LTPred
) ? Intrinsic::scmp
: Intrinsic::ucmp
,
1678 // A+B --> A|B iff A and B have no bits set in common.
1679 WithCache
<const Value
*> LHSCache(LHS
), RHSCache(RHS
);
1680 if (haveNoCommonBitsSet(LHSCache
, RHSCache
, SQ
.getWithInstruction(&I
)))
1681 return BinaryOperator::CreateDisjointOr(LHS
, RHS
);
1683 if (Instruction
*Ext
= narrowMathIfNoOverflow(I
))
1686 // (add (xor A, B) (and A, B)) --> (or A, B)
1687 // (add (and A, B) (xor A, B)) --> (or A, B)
1688 if (match(&I
, m_c_BinOp(m_Xor(m_Value(A
), m_Value(B
)),
1689 m_c_And(m_Deferred(A
), m_Deferred(B
)))))
1690 return BinaryOperator::CreateOr(A
, B
);
1692 // (add (or A, B) (and A, B)) --> (add A, B)
1693 // (add (and A, B) (or A, B)) --> (add A, B)
1694 if (match(&I
, m_c_BinOp(m_Or(m_Value(A
), m_Value(B
)),
1695 m_c_And(m_Deferred(A
), m_Deferred(B
))))) {
1696 // Replacing operands in-place to preserve nuw/nsw flags.
1697 replaceOperand(I
, 0, A
);
1698 replaceOperand(I
, 1, B
);
1702 // (add A (or A, -A)) --> (and (add A, -1) A)
1703 // (add A (or -A, A)) --> (and (add A, -1) A)
1704 // (add (or A, -A) A) --> (and (add A, -1) A)
1705 // (add (or -A, A) A) --> (and (add A, -1) A)
1706 if (match(&I
, m_c_BinOp(m_Value(A
), m_OneUse(m_c_Or(m_Neg(m_Deferred(A
)),
1707 m_Deferred(A
)))))) {
1709 Builder
.CreateAdd(A
, Constant::getAllOnesValue(A
->getType()), "",
1710 I
.hasNoUnsignedWrap(), I
.hasNoSignedWrap());
1711 return BinaryOperator::CreateAnd(Add
, A
);
1714 // Canonicalize ((A & -A) - 1) --> ((A - 1) & ~A)
1715 // Forms all commutable operations, and simplifies ctpop -> cttz folds.
1717 m_Add(m_OneUse(m_c_And(m_Value(A
), m_OneUse(m_Neg(m_Deferred(A
))))),
1719 Constant
*AllOnes
= ConstantInt::getAllOnesValue(RHS
->getType());
1720 Value
*Dec
= Builder
.CreateAdd(A
, AllOnes
);
1721 Value
*Not
= Builder
.CreateXor(A
, AllOnes
);
1722 return BinaryOperator::CreateAnd(Dec
, Not
);
1725 // Disguised reassociation/factorization:
1727 // ((A * -C1) - 1) + A
1728 // ((A * -C1) + A) - 1
1729 // (A * (1 - C1)) - 1
1731 m_c_Add(m_OneUse(m_Not(m_OneUse(m_Mul(m_Value(A
), m_APInt(C1
))))),
1733 Type
*Ty
= I
.getType();
1734 Constant
*NewMulC
= ConstantInt::get(Ty
, 1 - *C1
);
1735 Value
*NewMul
= Builder
.CreateMul(A
, NewMulC
);
1736 return BinaryOperator::CreateAdd(NewMul
, ConstantInt::getAllOnesValue(Ty
));
1739 // (A * -2**C) + B --> B - (A << C)
1740 const APInt
*NegPow2C
;
1741 if (match(&I
, m_c_Add(m_OneUse(m_Mul(m_Value(A
), m_NegatedPower2(NegPow2C
))),
1743 Constant
*ShiftAmtC
= ConstantInt::get(Ty
, NegPow2C
->countr_zero());
1744 Value
*Shl
= Builder
.CreateShl(A
, ShiftAmtC
);
1745 return BinaryOperator::CreateSub(B
, Shl
);
1748 // Canonicalize signum variant that ends in add:
1749 // (A s>> (BW - 1)) + (zext (A s> 0)) --> (A s>> (BW - 1)) | (zext (A != 0))
1750 uint64_t BitWidth
= Ty
->getScalarSizeInBits();
1751 if (match(LHS
, m_AShr(m_Value(A
), m_SpecificIntAllowPoison(BitWidth
- 1))) &&
1752 match(RHS
, m_OneUse(m_ZExt(m_OneUse(m_SpecificICmp(
1753 CmpInst::ICMP_SGT
, m_Specific(A
), m_ZeroInt())))))) {
1754 Value
*NotZero
= Builder
.CreateIsNotNull(A
, "isnotnull");
1755 Value
*Zext
= Builder
.CreateZExt(NotZero
, Ty
, "isnotnull.zext");
1756 return BinaryOperator::CreateOr(LHS
, Zext
);
1762 // (add X, (sext/zext (icmp eq X, C)))
1763 // -> (select (icmp eq X, C), (add C, (sext/zext 1)), X)
1764 auto CondMatcher
= m_CombineAnd(
1766 m_SpecificICmp(ICmpInst::ICMP_EQ
, m_Deferred(A
), m_ImmConstant(C
)));
1770 m_CombineAnd(m_Value(Ext
), m_ZExtOrSExt(CondMatcher
)))) &&
1772 Value
*Add
= isa
<ZExtInst
>(Ext
) ? InstCombiner::AddOne(C
)
1773 : InstCombiner::SubOne(C
);
1774 return replaceInstUsesWith(I
, Builder
.CreateSelect(Cond
, Add
, A
));
1778 if (Instruction
*Ashr
= foldAddToAshr(I
))
1781 // (~X) + (~Y) --> -2 - (X + Y)
1783 // To ensure we can save instructions we need to ensure that we consume both
1784 // LHS/RHS (i.e they have a `not`).
1785 bool ConsumesLHS
, ConsumesRHS
;
1786 if (isFreeToInvert(LHS
, LHS
->hasOneUse(), ConsumesLHS
) && ConsumesLHS
&&
1787 isFreeToInvert(RHS
, RHS
->hasOneUse(), ConsumesRHS
) && ConsumesRHS
) {
1788 Value
*NotLHS
= getFreelyInverted(LHS
, LHS
->hasOneUse(), &Builder
);
1789 Value
*NotRHS
= getFreelyInverted(RHS
, RHS
->hasOneUse(), &Builder
);
1790 assert(NotLHS
!= nullptr && NotRHS
!= nullptr &&
1791 "isFreeToInvert desynced with getFreelyInverted");
1792 Value
*LHSPlusRHS
= Builder
.CreateAdd(NotLHS
, NotRHS
);
1793 return BinaryOperator::CreateSub(
1794 ConstantInt::getSigned(RHS
->getType(), -2), LHSPlusRHS
);
1798 if (Instruction
*R
= tryFoldInstWithCtpopWithNot(&I
))
  // TODO(jingyue): Consider willNotOverflowSignedAdd and
  // willNotOverflowUnsignedAdd to reduce the number of invocations of
  // computeKnownBits.
  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHSCache, RHSCache, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() &&
      willNotOverflowUnsignedAdd(LHSCache, RHSCache, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  if (Instruction *V = canonicalizeLowbitMask(I, Builder))
    return V;

  if (Instruction *V =
          canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
    return V;

  if (Instruction *SatAdd = foldToUnsignedSaturatedAdd(I))
    return SatAdd;

  // usub.sat(A, B) + B => umax(A, B)
  if (match(&I, m_c_BinOp(
          m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Value(A), m_Value(B))),
          m_Deferred(B)))) {
    return replaceInstUsesWith(I,
        Builder.CreateIntrinsic(Intrinsic::umax, {I.getType()}, {A, B}));
  }
  // ctpop(A) + ctpop(B) => ctpop(A | B) if A and B have no bits set in common.
  if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(A)))) &&
      match(RHS, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(B)))) &&
      haveNoCommonBitsSet(A, B, SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
                                   {Builder.CreateOr(A, B)}));
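  // Illustrative check: for A == 0b0011 and B == 0b1100 (no common bits),
  // ctpop(A) + ctpop(B) == 2 + 2 == 4 == ctpop(0b1111) == ctpop(A | B).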
  // Fold the log2_ceil idiom:
  // zext(ctpop(A) >u/!= 1) + (ctlz(A, true) ^ (BW - 1))
  // -->
  // BW - ctlz(A - 1, false)
  const APInt *XorC;
  CmpPredicate Pred;
  if (match(&I,
            m_c_Add(
                m_ZExt(m_ICmp(Pred, m_Intrinsic<Intrinsic::ctpop>(m_Value(A)),
                              m_One())),
                m_OneUse(m_ZExtOrSelf(m_OneUse(m_Xor(
                    m_OneUse(m_TruncOrSelf(m_OneUse(
                        m_Intrinsic<Intrinsic::ctlz>(m_Deferred(A), m_One())))),
                    m_APInt(XorC))))))) &&
      (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_NE) &&
      *XorC == A->getType()->getScalarSizeInBits() - 1) {
    Value *Sub = Builder.CreateAdd(A, Constant::getAllOnesValue(A->getType()));
    Value *Ctlz = Builder.CreateIntrinsic(Intrinsic::ctlz, {A->getType()},
                                          {Sub, Builder.getFalse()});
    Value *Ret = Builder.CreateSub(
        ConstantInt::get(A->getType(), A->getType()->getScalarSizeInBits()),
        Ctlz, "", /*HasNUW*/ true, /*HasNSW*/ true);
    return replaceInstUsesWith(I, Builder.CreateZExtOrTrunc(Ret, I.getType()));
  }
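  // Worked example (illustrative): A == 8, BW == 32. ctpop(8) == 1, so the
  // zext term is 0; ctlz(8, true) == 28 and 28 ^ 31 == 3, i.e. log2_ceil(8).
  // The replacement computes 32 - ctlz(7, false) == 32 - 29 == 3 as well.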
  if (Instruction *Res = foldSquareSumInt(I))
    return Res;

  if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
    return Res;

  if (Instruction *Res = foldBinOpOfSelectAndCastOfSelectCondition(I))
    return Res;

  // Re-enqueue users of the induction variable of add recurrence if we infer
  // new nuw/nsw flags.
  if (Changed) {
    PHINode *PHI;
    Value *Start, *Step;
    if (matchSimpleRecurrence(&I, PHI, Start, Step))
      Worklist.pushUsersToWorkList(*PHI);
  }

  return Changed ? &I : nullptr;
}
/// Eliminate an op from a linear interpolation (lerp) pattern.
static Instruction *factorizeLerp(BinaryOperator &I,
                                  InstCombiner::BuilderTy &Builder) {
  Value *X, *Y, *Z;
  if (!match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_Value(Y),
                                            m_OneUse(m_FSub(m_FPOne(),
                                                            m_Value(Z))))),
                          m_OneUse(m_c_FMul(m_Value(X), m_Deferred(Z))))))
    return nullptr;

  // (Y * (1.0 - Z)) + (X * Z) --> Y + Z * (X - Y) [8 commuted variants]
  Value *XY = Builder.CreateFSubFMF(X, Y, &I);
  Value *MulZ = Builder.CreateFMulFMF(Z, XY, &I);
  return BinaryOperator::CreateFAddFMF(Y, MulZ, &I);
}
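// The identity behind the fold (valid under the reassoc/nsz flags that the
// caller checks): Y * (1.0 - Z) + X * Z == Y - Y*Z + X*Z == Y + Z * (X - Y).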
/// Factor a common operand out of fadd/fsub of fmul/fdiv.
static Instruction *factorizeFAddFSub(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  assert((I.getOpcode() == Instruction::FAdd ||
          I.getOpcode() == Instruction::FSub) && "Expecting fadd/fsub");
  assert(I.hasAllowReassoc() && I.hasNoSignedZeros() &&
         "FP factorization requires FMF");

  if (Instruction *Lerp = factorizeLerp(I, Builder))
    return Lerp;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (!Op0->hasOneUse() || !Op1->hasOneUse())
    return nullptr;

  Value *X, *Y, *Z;
  bool IsFMul;
  if ((match(Op0, m_FMul(m_Value(X), m_Value(Z))) &&
       match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))) ||
      (match(Op0, m_FMul(m_Value(Z), m_Value(X))) &&
       match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))))
    IsFMul = true;
  else if (match(Op0, m_FDiv(m_Value(X), m_Value(Z))) &&
           match(Op1, m_FDiv(m_Value(Y), m_Specific(Z))))
    IsFMul = false;
  else
    return nullptr;

  // (X * Z) + (Y * Z) --> (X + Y) * Z
  // (X * Z) - (Y * Z) --> (X - Y) * Z
  // (X / Z) + (Y / Z) --> (X + Y) / Z
  // (X / Z) - (Y / Z) --> (X - Y) / Z
  bool IsFAdd = I.getOpcode() == Instruction::FAdd;
  Value *XY = IsFAdd ? Builder.CreateFAddFMF(X, Y, &I)
                     : Builder.CreateFSubFMF(X, Y, &I);

  // Bail out if we just created a denormal constant.
  // TODO: This is copied from a previous implementation. Is it necessary?
  const APFloat *C;
  if (match(XY, m_APFloat(C)) && !C->isNormal())
    return nullptr;

  return IsFMul ? BinaryOperator::CreateFMulFMF(XY, Z, &I)
                : BinaryOperator::CreateFDivFMF(XY, Z, &I);
}
Instruction *InstCombinerImpl::visitFAdd(BinaryOperator &I) {
  if (Value *V = simplifyFAddInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  if (Instruction *FoldedFAdd = foldBinOpIntoSelectOrPhi(I))
    return FoldedFAdd;

  // (-X) + Y --> Y - X
  Value *X, *Y;
  if (match(&I, m_c_FAdd(m_FNeg(m_Value(X)), m_Value(Y))))
    return BinaryOperator::CreateFSubFMF(Y, X, &I);

  // Similar to above, but look through fmul/fdiv for the negated term.
  // (-X * Y) + Z --> Z - (X * Y) [4 commuted variants]
  Value *Z;
  if (match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))),
                         m_Value(Z)))) {
    Value *XY = Builder.CreateFMulFMF(X, Y, &I);
    return BinaryOperator::CreateFSubFMF(Z, XY, &I);
  }
  // (-X / Y) + Z --> Z - (X / Y) [2 commuted variants]
  // (X / -Y) + Z --> Z - (X / Y) [2 commuted variants]
  if (match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y))),
                         m_Value(Z))) ||
      match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))),
                         m_Value(Z)))) {
    Value *XY = Builder.CreateFDivFMF(X, Y, &I);
    return BinaryOperator::CreateFSubFMF(Z, XY, &I);
  }

  // Check for (fadd double (sitofp x), y), see if we can merge this into an
  // integer add followed by a promotion.
  if (Instruction *R = foldFBinOpOfIntCasts(I))
    return R;

  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // Handle special cases for FAdd with selects feeding the operation.
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS))
    return replaceInstUsesWith(I, V);
  if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
    if (Instruction *F = factorizeFAddFSub(I, Builder))
      return F;

    if (Instruction *F = foldSquareSumFP(I))
      return F;

    // Try to fold fadd into start value of reduction intrinsic.
    if (match(&I, m_c_FAdd(m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
                               m_AnyZeroFP(), m_Value(X))),
                           m_Value(Y)))) {
      // fadd (rdx 0.0, X), Y --> rdx Y, X
      return replaceInstUsesWith(
          I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
                                     {X->getType()}, {Y, X}, &I));
    }
    const APFloat *StartC, *C;
    if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
                       m_APFloat(StartC), m_Value(X)))) &&
        match(RHS, m_APFloat(C))) {
      // fadd (rdx StartC, X), C --> rdx (C + StartC), X
      Constant *NewStartC = ConstantFP::get(I.getType(), *C + *StartC);
      return replaceInstUsesWith(
          I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
                                     {X->getType()}, {NewStartC, X}, &I));
    }

    // (X * MulC) + X --> X * (MulC + 1.0)
    Constant *MulC;
    if (match(&I, m_c_FAdd(m_FMul(m_Value(X), m_ImmConstant(MulC)),
                           m_Deferred(X)))) {
      if (Constant *NewMulC = ConstantFoldBinaryOpOperands(
              Instruction::FAdd, MulC, ConstantFP::get(I.getType(), 1.0), DL))
        return BinaryOperator::CreateFMulFMF(X, NewMulC, &I);
    }

    // (-X - Y) + (X + Z) --> Z - Y
    if (match(&I, m_c_FAdd(m_FSub(m_FNeg(m_Value(X)), m_Value(Y)),
                           m_c_FAdd(m_Deferred(X), m_Value(Z)))))
      return BinaryOperator::CreateFSubFMF(Z, Y, &I);

    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }
  // minimum(X, Y) + maximum(X, Y) => X + Y.
  if (match(&I,
            m_c_FAdd(m_Intrinsic<Intrinsic::maximum>(m_Value(X), m_Value(Y)),
                     m_c_Intrinsic<Intrinsic::minimum>(m_Deferred(X),
                                                       m_Deferred(Y))))) {
    BinaryOperator *Result = BinaryOperator::CreateFAddFMF(X, Y, &I);
    // We cannot preserve ninf if the nnan flag is not set.
    // If X is NaN and Y is Inf, the original program computed NaN + NaN, while
    // the optimized version computes NaN + Inf, which is poison under ninf.
    if (!Result->hasNoNaNs())
      Result->setHasNoInfs(false);
    return Result;
  }

  return nullptr;
}
/// Optimize pointer differences within the same array into a size. Consider:
///  &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
Value *InstCombinerImpl::OptimizePointerDifference(Value *LHS, Value *RHS,
                                                   Type *Ty, bool IsNUW) {
  // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
  // this.
  bool Swapped = false;
  GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;
  if (!isa<GEPOperator>(LHS) && isa<GEPOperator>(RHS)) {
    std::swap(LHS, RHS);
    Swapped = true;
  }

  // Require at least one GEP with a common base pointer on both sides.
  if (auto *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
    // (gep X, ...) - X
    if (LHSGEP->getOperand(0)->stripPointerCasts() ==
        RHS->stripPointerCasts()) {
      GEP1 = LHSGEP;
    } else if (auto *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
      // (gep X, ...) - (gep X, ...)
      if (LHSGEP->getOperand(0)->stripPointerCasts() ==
          RHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP1 = LHSGEP;
        GEP2 = RHSGEP;
      }
    }
  }

  if (!GEP1)
    return nullptr;

  // To avoid duplicating the offset arithmetic, rewrite the GEP to use the
  // computed offset. This may erase the original GEP, so be sure to cache the
  // nowrap flags before emitting the offset.
  // TODO: We should probably do this even if there is only one GEP.
  bool RewriteGEPs = GEP2 != nullptr;

  // Emit the offset of the GEP as an intptr_t.
  GEPNoWrapFlags GEP1NW = GEP1->getNoWrapFlags();
  Value *Result = EmitGEPOffset(GEP1, RewriteGEPs);

  // If this is a single inbounds GEP and the original sub was nuw,
  // then the final multiplication is also nuw.
  if (auto *I = dyn_cast<Instruction>(Result))
    if (IsNUW && !GEP2 && !Swapped && GEP1NW.isInBounds() &&
        I->getOpcode() == Instruction::Mul)
      I->setHasNoUnsignedWrap();

  // If we have a 2nd GEP of the same base pointer, subtract the offsets.
  // If both GEPs are inbounds, then the subtract does not have signed overflow.
  // If both GEPs are nuw and the original sub is nuw, the new sub is also nuw.
  if (GEP2) {
    GEPNoWrapFlags GEP2NW = GEP2->getNoWrapFlags();
    Value *Offset = EmitGEPOffset(GEP2, RewriteGEPs);
    Result = Builder.CreateSub(Result, Offset, "gepdiff",
                               IsNUW && GEP1NW.hasNoUnsignedWrap() &&
                                   GEP2NW.hasNoUnsignedWrap(),
                               GEP1NW.isInBounds() && GEP2NW.isInBounds());
  }

  // If we have p - gep(p, ...) then we have to negate the result.
  if (Swapped)
    Result = Builder.CreateNeg(Result, "diff.neg");

  return Builder.CreateIntCast(Result, Ty, true);
}
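// Illustrative IR for the single-GEP case (hypothetical i32 array on a
// 64-bit target):
//   %end  = getelementptr inbounds i32, ptr %a, i64 10
//   %lhs  = ptrtoint ptr %end to i64
//   %rhs  = ptrtoint ptr %a to i64
//   %diff = sub i64 %lhs, %rhs
// GEP1 is %end and GEP2 is null, so %diff becomes the emitted GEP offset,
// which constant-folds to 40 (10 elements * 4 bytes).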
static Instruction *foldSubOfMinMax(BinaryOperator &I,
                                    InstCombiner::BuilderTy &Builder) {
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op1);
  if (!MinMax)
    return nullptr;

  // sub(add(X,Y), s/umin(X,Y)) --> s/umax(X,Y)
  // sub(add(X,Y), s/umax(X,Y)) --> s/umin(X,Y)
  Value *X = MinMax->getLHS();
  Value *Y = MinMax->getRHS();
  if (match(Op0, m_c_Add(m_Specific(X), m_Specific(Y))) &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMax->getIntrinsicID());
    Function *F = Intrinsic::getOrInsertDeclaration(I.getModule(), InvID, Ty);
    return CallInst::Create(F, {X, Y});
  }
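  // This rewrite uses the identity min(X, Y) + max(X, Y) == X + Y, so
  // sub(add(X, Y), umin(X, Y)) == umax(X, Y); e.g. (illustrative) X == 3,
  // Y == 5: 8 - 3 == 5 == umax(3, 5).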
  // sub(add(X,Y),umin(Y,Z)) --> add(X,usub.sat(Y,Z))
  // sub(add(X,Z),umin(Y,Z)) --> add(X,usub.sat(Z,Y))
  Value *Z;
  if (match(Op1, m_OneUse(m_UMin(m_Value(Y), m_Value(Z))))) {
    if (match(Op0, m_OneUse(m_c_Add(m_Specific(Y), m_Value(X))))) {
      Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Y, Z});
      return BinaryOperator::CreateAdd(X, USub);
    }
    if (match(Op0, m_OneUse(m_c_Add(m_Specific(Z), m_Value(X))))) {
      Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Z, Y});
      return BinaryOperator::CreateAdd(X, USub);
    }
  }

  // sub Op0, smin((sub nsw Op0, Z), 0) --> smax Op0, Z
  // sub Op0, smax((sub nsw Op0, Z), 0) --> smin Op0, Z
  if (MinMax->isSigned() && match(Y, m_ZeroInt()) &&
      match(X, m_NSWSub(m_Specific(Op0), m_Value(Z)))) {
    Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMax->getIntrinsicID());
    Function *F = Intrinsic::getOrInsertDeclaration(I.getModule(), InvID, Ty);
    return CallInst::Create(F, {Op0, Z});
  }

  return nullptr;
}
Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
  if (Value *V = simplifySubInst(I.getOperand(0), I.getOperand(1),
                                 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // If this is a 'B = x-(-A)', change to B = x+A.
  // We deal with this without involving Negator to preserve NSW flag.
  if (Value *V = dyn_castNegVal(Op1)) {
    BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);

    if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
      assert(BO->getOpcode() == Instruction::Sub &&
             "Expected a subtraction operator!");
      if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    } else {
      if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    }

    return Res;
  }

  // Try this before Negator to preserve NSW flag.
  if (Instruction *R = factorizeMathWithShlOps(I, Builder))
    return R;
  Constant *C;
  if (match(Op0, m_ImmConstant(C))) {
    Value *X;
    Constant *C2;

    // C-(X+C2) --> (C-C2)-X
    if (match(Op1, m_Add(m_Value(X), m_ImmConstant(C2)))) {
      // C-C2 never overflows. If both C-(X+C2) and (X+C2) have NSW/NUW, then
      // (C-C2)-X can also have NSW/NUW.
      bool WillNotSOV = willNotOverflowSignedSub(C, C2, I);
      BinaryOperator *Res =
          BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);
      auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
      Res->setHasNoSignedWrap(I.hasNoSignedWrap() && OBO1->hasNoSignedWrap() &&
                              WillNotSOV);
      Res->setHasNoUnsignedWrap(I.hasNoUnsignedWrap() &&
                                OBO1->hasNoUnsignedWrap());
      return Res;
    }
  }

  auto TryToNarrowDeduceFlags = [this, &I, &Op0, &Op1]() -> Instruction * {
    if (Instruction *Ext = narrowMathIfNoOverflow(I))
      return Ext;

    bool Changed = false;
    if (!I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1, I)) {
      Changed = true;
      I.setHasNoSignedWrap(true);
    }
    if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1, I)) {
      Changed = true;
      I.setHasNoUnsignedWrap(true);
    }

    return Changed ? &I : nullptr;
  };
  // First, let's try to interpret `sub a, b` as `add a, (sub 0, b)`,
  // and let's try to sink `(sub 0, b)` into `b` itself. But only if this isn't
  // a pure negation used by a select that looks like abs/nabs.
  bool IsNegation = match(Op0, m_ZeroInt());
  if (!IsNegation || none_of(I.users(), [&I, Op1](const User *U) {
        const Instruction *UI = dyn_cast<Instruction>(U);
        if (!UI)
          return false;
        return match(UI, m_c_Select(m_Specific(Op1), m_Specific(&I)));
      })) {
    if (Value *NegOp1 = Negator::Negate(IsNegation, /* IsNSW */ IsNegation &&
                                                        I.hasNoSignedWrap(),
                                        Op1, *this))
      return BinaryOperator::CreateAdd(NegOp1, Op0);
  }
  if (IsNegation)
    return TryToNarrowDeduceFlags(); // Should have been handled in Negator!

  // (A*B)-(A*C) -> A*(B-C) etc
  if (Value *V = foldUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  if (I.getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateXor(Op0, Op1);

  // Replace (-1 - A) with (~A).
  if (match(Op0, m_AllOnes()))
    return BinaryOperator::CreateNot(Op1);

  // (X + -1) - Y --> ~Y + X
  Value *X, *Y;
  if (match(Op0, m_OneUse(m_Add(m_Value(X), m_AllOnes()))))
    return BinaryOperator::CreateAdd(Builder.CreateNot(Op1), X);
  // if (C1 & C2) == C2 then (X & C1) - (X & C2) -> X & (C1 ^ C2)
  Constant *C1, *C2;
  if (match(Op0, m_And(m_Value(X), m_ImmConstant(C1))) &&
      match(Op1, m_And(m_Specific(X), m_ImmConstant(C2)))) {
    Value *AndC = ConstantFoldBinaryInstruction(Instruction::And, C1, C2);
    if (C2->isElementWiseEqual(AndC))
      return BinaryOperator::CreateAnd(
          X, ConstantFoldBinaryInstruction(Instruction::Xor, C1, C2));
  }

  // Reassociate sub/add sequences to create more add instructions and
  // reduce dependency chains:
  // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
  Value *Z;
  if (match(Op0, m_OneUse(m_c_Add(m_OneUse(m_Sub(m_Value(X), m_Value(Y))),
                                  m_Value(Z))))) {
    Value *XZ = Builder.CreateAdd(X, Z);
    Value *YW = Builder.CreateAdd(Y, Op1);
    return BinaryOperator::CreateSub(XZ, YW);
  }

  // ((X - Y) - Op1) --> X - (Y + Op1)
  if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y))))) {
    OverflowingBinaryOperator *LHSSub = cast<OverflowingBinaryOperator>(Op0);
    bool HasNUW = I.hasNoUnsignedWrap() && LHSSub->hasNoUnsignedWrap();
    bool HasNSW = HasNUW && I.hasNoSignedWrap() && LHSSub->hasNoSignedWrap();
    Value *Add = Builder.CreateAdd(Y, Op1, "", /* HasNUW */ HasNUW,
                                   /* HasNSW */ HasNSW);
    BinaryOperator *Sub = BinaryOperator::CreateSub(X, Add);
    Sub->setHasNoUnsignedWrap(HasNUW);
    Sub->setHasNoSignedWrap(HasNSW);
    return Sub;
  }
  // (X + Z) - (Y + Z) --> (X - Y)
  // This is done in other passes, but we want to be able to consume this
  // pattern in InstCombine so we can generate it without creating infinite
  // loops.
  if (match(Op0, m_Add(m_Value(X), m_Value(Z))) &&
      match(Op1, m_c_Add(m_Value(Y), m_Specific(Z))))
    return BinaryOperator::CreateSub(X, Y);

  // (X + C0) - (Y + C1) --> (X - Y) + (C0 - C1)
  Constant *CX, *CY;
  if (match(Op0, m_OneUse(m_Add(m_Value(X), m_ImmConstant(CX)))) &&
      match(Op1, m_OneUse(m_Add(m_Value(Y), m_ImmConstant(CY))))) {
    Value *OpsSub = Builder.CreateSub(X, Y);
    Constant *ConstsSub = ConstantExpr::getSub(CX, CY);
    return BinaryOperator::CreateAdd(OpsSub, ConstsSub);
  }

  {
    // (W + X) - (Y + Z) folds to one of X - Z, X - Y, W - Z, or W - Y when
    // one operand of each add matches.
    Value *W;
    if (match(Op0, m_AddLike(m_Value(W), m_Value(X))) &&
        match(Op1, m_AddLike(m_Value(Y), m_Value(Z)))) {
      Instruction *R = nullptr;
      if (W == Y)
        R = BinaryOperator::CreateSub(X, Z);
      else if (W == Z)
        R = BinaryOperator::CreateSub(X, Y);
      else if (X == Y)
        R = BinaryOperator::CreateSub(W, Z);
      else if (X == Z)
        R = BinaryOperator::CreateSub(W, Y);
      if (R) {
        bool NSW = I.hasNoSignedWrap() &&
                   match(Op0, m_NSWAddLike(m_Value(), m_Value())) &&
                   match(Op1, m_NSWAddLike(m_Value(), m_Value()));

        bool NUW = I.hasNoUnsignedWrap() &&
                   match(Op1, m_NUWAddLike(m_Value(), m_Value()));
        R->setHasNoSignedWrap(NSW);
        R->setHasNoUnsignedWrap(NUW);
        return R;
      }
    }
  }
  {
    // (~X) - (~Y) --> Y - X
    //
    // Need to ensure we can consume at least one of the `not` instructions,
    // otherwise this can loop infinitely.
    bool ConsumesOp0, ConsumesOp1;
    if (isFreeToInvert(Op0, Op0->hasOneUse(), ConsumesOp0) &&
        isFreeToInvert(Op1, Op1->hasOneUse(), ConsumesOp1) &&
        (ConsumesOp0 || ConsumesOp1)) {
      Value *NotOp0 = getFreelyInverted(Op0, Op0->hasOneUse(), &Builder);
      Value *NotOp1 = getFreelyInverted(Op1, Op1->hasOneUse(), &Builder);
      assert(NotOp0 != nullptr && NotOp1 != nullptr &&
             "isFreeToInvert desynced with getFreelyInverted");
      return BinaryOperator::CreateSub(NotOp1, NotOp0);
    }
  }

  auto m_AddRdx = [](Value *&Vec) {
    return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_add>(m_Value(Vec)));
  };
  Value *V0, *V1;
  if (match(Op0, m_AddRdx(V0)) && match(Op1, m_AddRdx(V1)) &&
      V0->getType() == V1->getType()) {
    // Difference of sums is sum of differences:
    // add_rdx(V0) - add_rdx(V1) --> add_rdx(V0 - V1)
    Value *Sub = Builder.CreateSub(V0, V1);
    Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_add,
                                         {Sub->getType()}, {Sub});
    return replaceInstUsesWith(I, Rdx);
  }
  if (Constant *C = dyn_cast<Constant>(Op0)) {
    if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
      // C - (zext bool) --> bool ? C - 1 : C
      return SelectInst::Create(X, InstCombiner::SubOne(C), C);
    if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
      // C - (sext bool) --> bool ? C + 1 : C
      return SelectInst::Create(X, InstCombiner::AddOne(C), C);

    // C - ~X == X + (1+C)
    if (match(Op1, m_Not(m_Value(X))))
      return BinaryOperator::CreateAdd(X, InstCombiner::AddOne(C));

    // Try to fold constant sub into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

    // Try to fold constant sub into PHI values.
    if (PHINode *PN = dyn_cast<PHINode>(Op1))
      if (Instruction *R = foldOpIntoPhi(I, PN))
        return R;

    Constant *C2;

    // C-(C2-X) --> X+(C-C2)
    if (match(Op1, m_Sub(m_ImmConstant(C2), m_Value(X))))
      return BinaryOperator::CreateAdd(X, ConstantExpr::getSub(C, C2));
  }
  const APInt *Op0C;
  if (match(Op0, m_APInt(Op0C))) {
    if (Op0C->isMask()) {
      // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
      // zero. We don't use information from dominating conditions so this
      // transform is easier to reverse if necessary.
      KnownBits RHSKnown = llvm::computeKnownBits(
          Op1, 0, SQ.getWithInstruction(&I).getWithoutDomCondCache());
      if ((*Op0C | RHSKnown.Zero).isAllOnes())
        return BinaryOperator::CreateXor(Op1, Op0);
    }

    // C - ((C3 -nuw X) & C2) --> (C - (C2 & C3)) + (X & C2) when:
    // (C3 - ((C2 & C3) - 1)) is pow2
    // ((C2 + C3) & ((C2 & C3) - 1)) == ((C2 & C3) - 1)
    // C2 is negative pow2 || sub nuw
    const APInt *C2, *C3;
    BinaryOperator *InnerSub;
    if (match(Op1, m_OneUse(m_And(m_BinOp(InnerSub), m_APInt(C2)))) &&
        match(InnerSub, m_Sub(m_APInt(C3), m_Value(X))) &&
        (InnerSub->hasNoUnsignedWrap() || C2->isNegatedPowerOf2())) {
      APInt C2AndC3 = *C2 & *C3;
      APInt C2AndC3Minus1 = C2AndC3 - 1;
      APInt C2AddC3 = *C2 + *C3;
      if ((*C3 - C2AndC3Minus1).isPowerOf2() &&
          C2AndC3Minus1.isSubsetOf(C2AddC3)) {
        Value *And = Builder.CreateAnd(X, ConstantInt::get(I.getType(), *C2));
        return BinaryOperator::CreateAdd(
            And, ConstantInt::get(I.getType(), *Op0C - C2AndC3));
      }
    }
  }
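  // Worked example (illustrative i8 constants): Op0C == 10, C2 == -4,
  // C3 == 7. Then C2 & C3 == 4, C3 - 3 == 4 is a power of 2, and 3 is a
  // subset of C2 + C3 == 3, so the guards hold. For X == 5:
  // 10 - ((7 - 5) & -4) == 10 - 0 == 10, and (5 & -4) + (10 - 4) == 4 + 6
  // == 10.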
  // X-(X+Y) == -Y    X-(Y+X) == -Y
  if (match(Op1, m_c_Add(m_Specific(Op0), m_Value(Y))))
    return BinaryOperator::CreateNeg(Y);

  // (X-Y)-X == -Y
  if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNeg(Y);

  // (sub (or A, B) (and A, B)) --> (xor A, B)
  {
    Value *A, *B;
    if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
        match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateXor(A, B);
  }

  // (sub (add A, B) (or A, B)) --> (and A, B)
  {
    Value *A, *B;
    if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
        match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(A, B);
  }

  // (sub (add A, B) (and A, B)) --> (or A, B)
  {
    Value *A, *B;
    if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
        match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateOr(A, B);
  }

  // (sub (and A, B) (or A, B)) --> neg (xor A, B)
  {
    Value *A, *B;
    if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
        match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
        (Op0->hasOneUse() || Op1->hasOneUse()))
      return BinaryOperator::CreateNeg(Builder.CreateXor(A, B));
  }

  // (sub (or A, B), (xor A, B)) --> (and A, B)
  {
    Value *A, *B;
    if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
        match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(A, B);
  }

  // (sub (xor A, B) (or A, B)) --> neg (and A, B)
  {
    Value *A, *B;
    if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
        match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
        (Op0->hasOneUse() || Op1->hasOneUse()))
      return BinaryOperator::CreateNeg(Builder.CreateAnd(A, B));
  }

  // ((X | Y) - X) --> (~X & Y)
  if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
    return BinaryOperator::CreateAnd(
        Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));
  // (sub (and Op1, (neg X)), Op1) --> neg (and Op1, (add X, -1))
  if (match(Op0, m_OneUse(m_c_And(m_Specific(Op1),
                                  m_OneUse(m_Neg(m_Value(X))))))) {
    return BinaryOperator::CreateNeg(Builder.CreateAnd(
        Op1, Builder.CreateAdd(X, Constant::getAllOnesValue(I.getType()))));
  }

  // (sub (and Op1, C), Op1) --> neg (and Op1, ~C)
  if (match(Op0, m_OneUse(m_And(m_Specific(Op1), m_Constant(C))))) {
    return BinaryOperator::CreateNeg(
        Builder.CreateAnd(Op1, Builder.CreateNot(C)));
  }

  {
    // (sub (xor X, (sext C)), (sext C)) => (select C, (neg X), X)
    // (sub (sext C), (xor X, (sext C))) => (select C, X, (neg X))
    Value *C;
    auto m_SubXorCmp = [&C, &X](Value *LHS, Value *RHS) {
      return match(LHS, m_OneUse(m_c_Xor(m_Value(X), m_Specific(RHS)))) &&
             match(RHS, m_SExt(m_Value(C))) &&
             (C->getType()->getScalarSizeInBits() == 1);
    };
    if (m_SubXorCmp(Op0, Op1))
      return SelectInst::Create(C, Builder.CreateNeg(X), X);
    if (m_SubXorCmp(Op1, Op0))
      return SelectInst::Create(C, X, Builder.CreateNeg(X));
  }

  if (Instruction *R = tryFoldInstWithCtpopWithNot(&I))
    return R;

  if (Instruction *R = foldSubOfMinMax(I, Builder))
    return R;
  // If we have a subtraction between some value and a select between
  // said value and something else, sink subtraction into select hands, i.e.:
  // sub (select %Cond, %TrueVal, %FalseVal), %Op1
  //   ->
  // select %Cond, (sub %TrueVal, %Op1), (sub %FalseVal, %Op1)
  // or
  // sub %Op0, (select %Cond, %TrueVal, %FalseVal)
  //   ->
  // select %Cond, (sub %Op0, %TrueVal), (sub %Op0, %FalseVal)
  // This will result in select between new subtraction and 0.
  auto SinkSubIntoSelect =
      [Ty = I.getType()](Value *Select, Value *OtherHandOfSub,
                         auto SubBuilder) -> Instruction * {
    Value *Cond, *TrueVal, *FalseVal;
    if (!match(Select, m_OneUse(m_Select(m_Value(Cond), m_Value(TrueVal),
                                         m_Value(FalseVal)))))
      return nullptr;
    if (OtherHandOfSub != TrueVal && OtherHandOfSub != FalseVal)
      return nullptr;
    // While it is really tempting to just create two subtractions and let
    // InstCombine fold one of those to 0, it isn't possible to do so
    // because of worklist visitation order. So ugly it is.
    bool OtherHandOfSubIsTrueVal = OtherHandOfSub == TrueVal;
    Value *NewSub = SubBuilder(OtherHandOfSubIsTrueVal ? FalseVal : TrueVal);
    Constant *Zero = Constant::getNullValue(Ty);
    SelectInst *NewSel =
        SelectInst::Create(Cond, OtherHandOfSubIsTrueVal ? Zero : NewSub,
                           OtherHandOfSubIsTrueVal ? NewSub : Zero);
    // Preserve prof metadata if any.
    NewSel->copyMetadata(cast<Instruction>(*Select));
    return NewSel;
  };
  if (Instruction *NewSel = SinkSubIntoSelect(
          /*Select=*/Op0, /*OtherHandOfSub=*/Op1,
          [Builder = &Builder, Op1](Value *OtherHandOfSelect) {
            return Builder->CreateSub(OtherHandOfSelect,
                                      /*OtherHandOfSub=*/Op1);
          }))
    return NewSel;
  if (Instruction *NewSel = SinkSubIntoSelect(
          /*Select=*/Op1, /*OtherHandOfSub=*/Op0,
          [Builder = &Builder, Op0](Value *OtherHandOfSelect) {
            return Builder->CreateSub(/*OtherHandOfSub=*/Op0,
                                      OtherHandOfSelect);
          }))
    return NewSel;
  // (X - (X & Y)) --> (X & ~Y)
  if (match(Op1, m_c_And(m_Specific(Op0), m_Value(Y))) &&
      (Op1->hasOneUse() || isa<Constant>(Y)))
    return BinaryOperator::CreateAnd(
        Op0, Builder.CreateNot(Y, Y->getName() + ".not"));

  // ~X - Min/Max(~X, Y) -> ~Min/Max(X, ~Y) - X
  // ~X - Min/Max(Y, ~X) -> ~Min/Max(X, ~Y) - X
  // Min/Max(~X, Y) - ~X -> X - ~Min/Max(X, ~Y)
  // Min/Max(Y, ~X) - ~X -> X - ~Min/Max(X, ~Y)
  // As long as Y is freely invertible, this will be neutral or a win.
  // Note: We don't generate the inverse max/min, just create the 'not' of
  // it and let other folds do the rest.
  if (match(Op0, m_Not(m_Value(X))) &&
      match(Op1, m_c_MaxOrMin(m_Specific(Op0), m_Value(Y))) &&
      !Op0->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
    Value *Not = Builder.CreateNot(Op1);
    return BinaryOperator::CreateSub(Not, X);
  }
  if (match(Op1, m_Not(m_Value(X))) &&
      match(Op0, m_c_MaxOrMin(m_Specific(Op1), m_Value(Y))) &&
      !Op1->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
    Value *Not = Builder.CreateNot(Op0);
    return BinaryOperator::CreateSub(X, Not);
  }

  // Optimize pointer differences within the same array into a size. Consider:
  //  &A[10] - &A[0]: we should compile this to "10".
  Value *LHSOp, *RHSOp;
  if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
      match(Op1, m_PtrToInt(m_Value(RHSOp))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
                                               I.hasNoUnsignedWrap()))
      return replaceInstUsesWith(I, Res);

  // trunc(p)-trunc(q) -> trunc(p-q)
  if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
      match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
                                               /* IsNUW */ false))
      return replaceInstUsesWith(I, Res);
  // sub(zext(ptrtoint(gep p, off)), zext(ptrtoint p)) --> zext/sext(off)
  if (match(Op0, m_ZExt(m_PtrToIntSameSize(DL, m_Value(LHSOp)))) &&
      match(Op1, m_ZExtOrSelf(m_PtrToInt(m_Value(RHSOp))))) {
    if (auto *GEP = dyn_cast<GEPOperator>(LHSOp)) {
      if (GEP->getPointerOperand() == RHSOp) {
        if (GEP->hasNoUnsignedWrap() || GEP->hasNoUnsignedSignedWrap()) {
          Value *Offset = EmitGEPOffset(GEP);
          Value *Res = GEP->hasNoUnsignedWrap()
                           ? Builder.CreateZExt(
                                 Offset, I.getType(), "",
                                 /*IsNonNeg=*/GEP->hasNoUnsignedSignedWrap())
                           : Builder.CreateSExt(Offset, I.getType());
          return replaceInstUsesWith(I, Res);
        }
      }
    }
  }

  // Canonicalize a shifty way to code absolute value to the common pattern.
  // There are 2 potential commuted variants.
  // We're relying on the fact that we only do this transform when the shift has
  // exactly 2 uses and the xor has exactly 1 use (otherwise, we might increase
  // the instruction count).
  Value *A;
  const APInt *ShAmt;
  Type *Ty = I.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
      Op1->hasNUses(2) && *ShAmt == BitWidth - 1 &&
      match(Op0, m_OneUse(m_c_Xor(m_Specific(A), m_Specific(Op1))))) {
    // B = ashr i32 A, 31 ; smear the sign bit
    // sub (xor A, B), B  ; flip bits if negative and subtract -1 (add 1)
    // --> (A < 0) ? -A : A
    Value *IsNeg = Builder.CreateIsNeg(A);
    // Copy the nsw flags from the sub to the negate.
    Value *NegA = I.hasNoUnsignedWrap()
                      ? Constant::getNullValue(A->getType())
                      : Builder.CreateNeg(A, "", I.hasNoSignedWrap());
    return SelectInst::Create(IsNeg, NegA, A);
  }
  // If we are subtracting a low-bit masked subset of some value from an add
  // of that same value with no low bits changed, that is clearing some low
  // bits of the sum:
  // sub (X + AddC), (X & AndC) --> and (X + AddC), ~AndC
  const APInt *AddC, *AndC;
  if (match(Op0, m_Add(m_Value(X), m_APInt(AddC))) &&
      match(Op1, m_And(m_Specific(X), m_APInt(AndC)))) {
    unsigned Cttz = AddC->countr_zero();
    APInt HighMask(APInt::getHighBitsSet(BitWidth, BitWidth - Cttz));
    if ((HighMask & *AndC).isZero())
      return BinaryOperator::CreateAnd(Op0, ConstantInt::get(Ty, ~(*AndC)));
  }

  if (Instruction *V =
          canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
    return V;

  // X - usub.sat(X, Y) => umin(X, Y)
  if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Specific(Op0),
                                                           m_Value(Y)))))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::umin, {I.getType()}, {Op0, Y}));

  // umax(X, Op1) - Op1 --> usub.sat(X, Op1)
  // TODO: The one-use restriction is not strictly necessary, but it may
  //       require improving other pattern matching and/or codegen.
  if (match(Op0, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op1)))))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op1}));

  // Op0 - umin(X, Op0) --> usub.sat(Op0, X)
  if (match(Op1, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op0)))))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op0, X}));

  // Op0 - umax(X, Op0) --> 0 - usub.sat(X, Op0)
  if (match(Op1, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op0))))) {
    Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op0});
    return BinaryOperator::CreateNeg(USub);
  }

  // umin(X, Op1) - Op1 --> 0 - usub.sat(Op1, X)
  if (match(Op0, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op1))))) {
    Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op1, X});
    return BinaryOperator::CreateNeg(USub);
  }

  // C - ctpop(X) => ctpop(~X) if C is bitwidth
  if (match(Op0, m_SpecificInt(BitWidth)) &&
      match(Op1, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(X)))))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
                                   {Builder.CreateNot(X)}));
  // Reduce multiplies for difference-of-squares by factoring:
  // (X * X) - (Y * Y) --> (X + Y) * (X - Y)
  if (match(Op0, m_OneUse(m_Mul(m_Value(X), m_Deferred(X)))) &&
      match(Op1, m_OneUse(m_Mul(m_Value(Y), m_Deferred(Y))))) {
    auto *OBO0 = cast<OverflowingBinaryOperator>(Op0);
    auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
    bool PropagateNSW = I.hasNoSignedWrap() && OBO0->hasNoSignedWrap() &&
                        OBO1->hasNoSignedWrap() && BitWidth > 2;
    bool PropagateNUW = I.hasNoUnsignedWrap() && OBO0->hasNoUnsignedWrap() &&
                        OBO1->hasNoUnsignedWrap() && BitWidth > 1;
    Value *Add = Builder.CreateAdd(X, Y, "add", PropagateNUW, PropagateNSW);
    Value *Sub = Builder.CreateSub(X, Y, "sub", PropagateNUW, PropagateNSW);
    Value *Mul = Builder.CreateMul(Add, Sub, "", PropagateNUW, PropagateNSW);
    return replaceInstUsesWith(I, Mul);
  }
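  // Quick check of the factoring above (illustrative): X == 5, Y == 3 gives
  // 25 - 9 == 16 and (5 + 3) * (5 - 3) == 8 * 2 == 16.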
  // max(X,Y) nsw/nuw - min(X,Y) --> abs(X nsw - Y)
  if (match(Op0, m_OneUse(m_c_SMax(m_Value(X), m_Value(Y)))) &&
      match(Op1, m_OneUse(m_c_SMin(m_Specific(X), m_Specific(Y))))) {
    if (I.hasNoUnsignedWrap() || I.hasNoSignedWrap()) {
      Value *Sub =
          Builder.CreateSub(X, Y, "sub", /*HasNUW=*/false, /*HasNSW=*/true);
      Value *Call =
          Builder.CreateBinaryIntrinsic(Intrinsic::abs, Sub, Builder.getTrue());
      return replaceInstUsesWith(I, Call);
    }
  }

  if (Instruction *Res = foldBinOpOfSelectAndCastOfSelectCondition(I))
    return Res;

  return TryToNarrowDeduceFlags();
}
/// This eliminates floating-point negation in either 'fneg(X)' or
/// 'fsub(-0.0, X)' form by combining into a constant operand.
static Instruction *foldFNegIntoConstant(Instruction &I, const DataLayout &DL) {
  // This is limited with one-use because fneg is assumed better for
  // reassociation and cheaper in codegen than fmul/fdiv.
  // TODO: Should the m_OneUse restriction be removed?
  Instruction *FNegOp;
  if (!match(&I, m_FNeg(m_OneUse(m_Instruction(FNegOp)))))
    return nullptr;

  Value *X;
  Constant *C;

  // Fold negation into constant operand.
  // -(X * C) --> X * (-C)
  if (match(FNegOp, m_FMul(m_Value(X), m_Constant(C))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFMulFMF(X, NegC, &I);
  // -(X / C) --> X / (-C)
  if (match(FNegOp, m_FDiv(m_Value(X), m_Constant(C))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFDivFMF(X, NegC, &I);
  // -(C / X) --> (-C) / X
  if (match(FNegOp, m_FDiv(m_Constant(C), m_Value(X))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) {
      Instruction *FDiv = BinaryOperator::CreateFDivFMF(NegC, X, &I);

      // Intersect 'nsz' and 'ninf' because those special value exceptions may
      // not apply to the fdiv. Everything else propagates from the fneg.
      // TODO: We could propagate nsz/ninf from fdiv alone?
      FastMathFlags FMF = I.getFastMathFlags();
      FastMathFlags OpFMF = FNegOp->getFastMathFlags();
      FDiv->setHasNoSignedZeros(FMF.noSignedZeros() && OpFMF.noSignedZeros());
      FDiv->setHasNoInfs(FMF.noInfs() && OpFMF.noInfs());
      return FDiv;
    }
  // With NSZ [ counter-example with -0.0: -(-0.0 + 0.0) != 0.0 + -0.0 ]:
  // -(X + C) --> -X + -C --> -C - X
  if (I.hasNoSignedZeros() && match(FNegOp, m_FAdd(m_Value(X), m_Constant(C))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFSubFMF(NegC, X, &I);

  return nullptr;
}
Instruction *InstCombinerImpl::hoistFNegAboveFMulFDiv(Value *FNegOp,
                                                      Instruction &FMFSource) {
  Value *X, *Y;
  if (match(FNegOp, m_FMul(m_Value(X), m_Value(Y)))) {
    // Push into RHS which is more likely to simplify (const or another fneg).
    // FIXME: It would be better to invert the transform.
    return cast<Instruction>(Builder.CreateFMulFMF(
        X, Builder.CreateFNegFMF(Y, &FMFSource), &FMFSource));
  }

  if (match(FNegOp, m_FDiv(m_Value(X), m_Value(Y)))) {
    return cast<Instruction>(Builder.CreateFDivFMF(
        Builder.CreateFNegFMF(X, &FMFSource), Y, &FMFSource));
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(FNegOp)) {
    // Make sure to preserve flags and metadata on the call.
    if (II->getIntrinsicID() == Intrinsic::ldexp) {
      FastMathFlags FMF = FMFSource.getFastMathFlags() | II->getFastMathFlags();
      CallInst *New =
          Builder.CreateCall(II->getCalledFunction(),
                             {Builder.CreateFNegFMF(II->getArgOperand(0), FMF),
                              II->getArgOperand(1)});
      New->setFastMathFlags(FMF);
      New->copyMetadata(*II);
      return New;
    }
  }

  return nullptr;
}
Instruction *InstCombinerImpl::visitFNeg(UnaryOperator &I) {
  Value *Op = I.getOperand(0);

  if (Value *V = simplifyFNegInst(Op, I.getFastMathFlags(),
                                  getSimplifyQuery().getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldFNegIntoConstant(I, DL))
    return X;

  Value *X, *Y;

  // If we can ignore the sign of zeros: -(X - Y) --> (Y - X)
  if (I.hasNoSignedZeros() &&
      match(Op, m_OneUse(m_FSub(m_Value(X), m_Value(Y)))))
    return BinaryOperator::CreateFSubFMF(Y, X, &I);

  Value *OneUse;
  if (!match(Op, m_OneUse(m_Value(OneUse))))
    return nullptr;

  if (Instruction *R = hoistFNegAboveFMulFDiv(OneUse, I))
    return replaceInstUsesWith(I, R);

  // Try to eliminate fneg if at least 1 arm of the select is negated.
  Value *Cond;
  if (match(OneUse, m_Select(m_Value(Cond), m_Value(X), m_Value(Y)))) {
    // Unlike most transforms, this one is not safe to propagate nsz unless
    // it is present on the original select. We union the flags from the select
    // and fneg and then remove nsz if needed.
    auto propagateSelectFMF = [&](SelectInst *S, bool CommonOperand) {
      S->copyFastMathFlags(&I);
      if (auto *OldSel = dyn_cast<SelectInst>(Op)) {
        FastMathFlags FMF = I.getFastMathFlags() | OldSel->getFastMathFlags();
        S->setFastMathFlags(FMF);
        if (!OldSel->hasNoSignedZeros() && !CommonOperand &&
            !isGuaranteedNotToBeUndefOrPoison(OldSel->getCondition()))
          S->setHasNoSignedZeros(false);
      }
    };
    // -(Cond ? -P : Y) --> Cond ? P : -Y
    Value *P;
    if (match(X, m_FNeg(m_Value(P)))) {
      Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");
      SelectInst *NewSel = SelectInst::Create(Cond, P, NegY);
      propagateSelectFMF(NewSel, P == Y);
      return NewSel;
    }
    // -(Cond ? X : -P) --> Cond ? -X : P
    if (match(Y, m_FNeg(m_Value(P)))) {
      Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");
      SelectInst *NewSel = SelectInst::Create(Cond, NegX, P);
      propagateSelectFMF(NewSel, P == X);
      return NewSel;
    }

    // -(Cond ? X : C) --> Cond ? -X : -C
    // -(Cond ? C : Y) --> Cond ? -C : -Y
    if (match(X, m_ImmConstant()) || match(Y, m_ImmConstant())) {
      Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");
      Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");
      SelectInst *NewSel = SelectInst::Create(Cond, NegX, NegY);
      propagateSelectFMF(NewSel, /*CommonOperand=*/true);
      return NewSel;
    }
  }
  // fneg (copysign x, y) -> copysign x, (fneg y)
  if (match(OneUse, m_CopySign(m_Value(X), m_Value(Y)))) {
    // The source copysign has an additional value input, so we can't propagate
    // flags the copysign doesn't also have.
    FastMathFlags FMF = I.getFastMathFlags();
    FMF &= cast<FPMathOperator>(OneUse)->getFastMathFlags();
    Value *NegY = Builder.CreateFNegFMF(Y, FMF);
    Value *NewCopySign = Builder.CreateCopySign(X, NegY, FMF);
    return replaceInstUsesWith(I, NewCopySign);
  }

  return nullptr;
}
Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) {
  if (Value *V = simplifyFSubInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  getSimplifyQuery().getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  // Subtraction from -0.0 is the canonical form of fneg.
  // fsub -0.0, X ==> fneg X
  // fsub nsz 0.0, X ==> fneg nsz X
  //
  // FIXME This matcher does not respect FTZ or DAZ yet:
  // fsub -0.0, Denorm ==> +-0
  // fneg Denorm ==> -Denorm
  Value *Op;
  if (match(&I, m_FNeg(m_Value(Op))))
    return UnaryOperator::CreateFNegFMF(Op, &I);

  if (Instruction *X = foldFNegIntoConstant(I, DL))
    return X;

  if (Instruction *R = foldFBinOpOfIntCasts(I))
    return R;

  Value *X, *Y;
  Constant *C;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  // If Op0 is not -0.0 or we can ignore -0.0: Z - (X - Y) --> Z + (Y - X)
  // Canonicalize to fadd to make analysis easier.
  // This can also help codegen because fadd is commutative.
  // Note that if this fsub was really an fneg, the fadd with -0.0 will get
  // killed later. We still limit that particular transform with 'hasOneUse'
  // because an fneg is assumed better/cheaper than a generic fsub.
  if (I.hasNoSignedZeros() ||
      cannotBeNegativeZero(Op0, 0, getSimplifyQuery().getWithInstruction(&I))) {
    if (match(Op1, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
      Value *NewSub = Builder.CreateFSubFMF(Y, X, &I);
      return BinaryOperator::CreateFAddFMF(Op0, NewSub, &I);
    }
  }

  // (-X) - Op1 --> -(X + Op1)
  if (I.hasNoSignedZeros() && !isa<ConstantExpr>(Op0) &&
      match(Op0, m_OneUse(m_FNeg(m_Value(X))))) {
    Value *FAdd = Builder.CreateFAddFMF(X, Op1, &I);
    return UnaryOperator::CreateFNegFMF(FAdd, &I);
  }
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *NV = FoldOpIntoSelect(I, SI))
        return NV;

  // X - C --> X + (-C)
  // But don't transform constant expressions because there's an inverse fold
  // for X + (-Y) --> X - Y.
  if (match(Op1, m_ImmConstant(C)))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFAddFMF(Op0, NegC, &I);

  // X - (-Y) --> X + Y
  if (match(Op1, m_FNeg(m_Value(Y))))
    return BinaryOperator::CreateFAddFMF(Op0, Y, &I);

  // Similar to above, but look through a cast of the negated value:
  // X - (fptrunc(-Y)) --> X + fptrunc(Y)
  Type *Ty = I.getType();
  if (match(Op1, m_OneUse(m_FPTrunc(m_FNeg(m_Value(Y))))))
    return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPTrunc(Y, Ty), &I);

  // X - (fpext(-Y)) --> X + fpext(Y)
  if (match(Op1, m_OneUse(m_FPExt(m_FNeg(m_Value(Y))))))
    return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPExt(Y, Ty), &I);

  // Similar to above, but look through fmul/fdiv of the negated value:
  // Op0 - (-X * Y) --> Op0 + (X * Y)
  // Op0 - (Y * -X) --> Op0 + (X * Y)
  if (match(Op1, m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))))) {
    Value *FMul = Builder.CreateFMulFMF(X, Y, &I);
    return BinaryOperator::CreateFAddFMF(Op0, FMul, &I);
  }

  // Op0 - (-X / Y) --> Op0 + (X / Y)
  // Op0 - (X / -Y) --> Op0 + (X / Y)
  if (match(Op1, m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y)))) ||
      match(Op1, m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))))) {
    Value *FDiv = Builder.CreateFDivFMF(X, Y, &I);
    return BinaryOperator::CreateFAddFMF(Op0, FDiv, &I);
  }

  // Handle special cases for FSub with selects feeding the operation.
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);
  if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
    // (Y - X) - Y --> -X
    if (match(Op0, m_FSub(m_Specific(Op1), m_Value(X))))
      return UnaryOperator::CreateFNegFMF(X, &I);

    // Y - (X + Y) --> -X
    // Y - (Y + X) --> -X
    if (match(Op1, m_c_FAdd(m_Specific(Op0), m_Value(X))))
      return UnaryOperator::CreateFNegFMF(X, &I);

    // (X * C) - X --> X * (C - 1.0)
    if (match(Op0, m_FMul(m_Specific(Op1), m_Constant(C)))) {
      if (Constant *CSubOne = ConstantFoldBinaryOpOperands(
              Instruction::FSub, C, ConstantFP::get(Ty, 1.0), DL))
        return BinaryOperator::CreateFMulFMF(Op1, CSubOne, &I);
    }
    // X - (X * C) --> X * (1.0 - C)
    if (match(Op1, m_FMul(m_Specific(Op0), m_Constant(C)))) {
      if (Constant *OneSubC = ConstantFoldBinaryOpOperands(
              Instruction::FSub, ConstantFP::get(Ty, 1.0), C, DL))
        return BinaryOperator::CreateFMulFMF(Op0, OneSubC, &I);
    }

    // Reassociate fsub/fadd sequences to create more fadd instructions and
    // reduce dependency chains:
    // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
    Value *Z;
    if (match(Op0, m_OneUse(m_c_FAdd(m_OneUse(m_FSub(m_Value(X), m_Value(Y))),
                                     m_Value(Z))))) {
      Value *XZ = Builder.CreateFAddFMF(X, Z, &I);
      Value *YW = Builder.CreateFAddFMF(Y, Op1, &I);
      return BinaryOperator::CreateFSubFMF(XZ, YW, &I);
    }

    auto m_FaddRdx = [](Value *&Sum, Value *&Vec) {
      return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(m_Value(Sum),
                                                                 m_Value(Vec)));
    };
    Value *A0, *A1, *V0, *V1;
    if (match(Op0, m_FaddRdx(A0, V0)) && match(Op1, m_FaddRdx(A1, V1)) &&
        V0->getType() == V1->getType()) {
      // Difference of sums is sum of differences:
      // add_rdx(A0, V0) - add_rdx(A1, V1) --> add_rdx(A0, V0 - V1) - A1
      Value *Sub = Builder.CreateFSubFMF(V0, V1, &I);
      Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
                                           {Sub->getType()}, {A0, Sub}, &I);
      return BinaryOperator::CreateFSubFMF(Rdx, A1, &I);
    }

    if (Instruction *F = factorizeFAddFSub(I, Builder))
      return F;

    // TODO: This performs reassociative folds for FP ops. Some fraction of the
    // functionality has been subsumed by simple pattern matching here and in
    // InstSimplify. We should let a dedicated reassociation pass handle more
    // complex pattern matching and remove this from InstCombine.
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);

    // (X - Y) - Op1 --> X - (Y + Op1)
    if (match(Op0, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
      Value *FAdd = Builder.CreateFAddFMF(Y, Op1, &I);
      return BinaryOperator::CreateFSubFMF(X, FAdd, &I);