llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
//===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for add, fadd, sub, and fsub.
//
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <cassert>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"
namespace {

/// Class representing the coefficient of a floating-point addend.
/// This class needs to be highly efficient; this is especially true for
/// the constructor. At the time of writing, the cost of the default
/// constructor is merely a 4-byte zero store (assuming the compiler is
/// able to perform write-merging).
///
class FAddendCoef {
public:
  // The constructor has to initialize an APFloat, which is unnecessary for
  // most addends, whose coefficient is either 1 or -1. So the constructor
  // is expensive. In order to avoid the cost of the constructor, we should
  // reuse some instances whenever possible. The pre-created instances
  // FAddCombine::Add[0-5] embody this idea.
  FAddendCoef() = default;
  ~FAddendCoef();

  // If possible, don't define operator+/operator- etc., because these
  // operators inevitably call FAddendCoef's constructor, which is not cheap.
  void operator=(const FAddendCoef &A);
  void operator+=(const FAddendCoef &A);
  void operator*=(const FAddendCoef &S);

  void set(short C) {
    assert(!insaneIntVal(C) && "Insane coefficient");
    IsFp = false; IntVal = C;
  }

  void set(const APFloat& C);

  void negate();

  bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
  Value *getValue(Type *) const;

  bool isOne() const { return isInt() && IntVal == 1; }
  bool isTwo() const { return isInt() && IntVal == 2; }
  bool isMinusOne() const { return isInt() && IntVal == -1; }
  bool isMinusTwo() const { return isInt() && IntVal == -2; }

private:
  bool insaneIntVal(int V) { return V > 4 || V < -4; }

  APFloat *getFpValPtr() { return reinterpret_cast<APFloat *>(&FpValBuf); }

  const APFloat *getFpValPtr() const {
    return reinterpret_cast<const APFloat *>(&FpValBuf);
  }

  const APFloat &getFpVal() const {
    assert(IsFp && BufHasFpVal && "Incorrect state");
    return *getFpValPtr();
  }

  APFloat &getFpVal() {
    assert(IsFp && BufHasFpVal && "Incorrect state");
    return *getFpValPtr();
  }

  bool isInt() const { return !IsFp; }

  // If the coefficient is represented by an integer, promote it to a
  // floating point.
  void convertToFpType(const fltSemantics &Sem);

  // Construct an APFloat from a signed integer.
  // TODO: We should get rid of this function when APFloat can be constructed
  // from a *SIGNED* integer.
  APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);

  bool IsFp = false;

  // True iff FpValBuf contains an instance of APFloat.
  bool BufHasFpVal = false;

  // The integer coefficient of an individual addend is either 1 or -1,
  // and we try to simplify at most 4 addends drawn from at most two
  // neighboring instructions. So the range of <IntVal> falls in [-4, 4].
  // APInt would be overkill for this purpose.
  short IntVal = 0;

  AlignedCharArrayUnion<APFloat> FpValBuf;
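  // FpValBuf is raw, suitably aligned storage; the APFloat is constructed in
  // it lazily via placement new (see set() and convertToFpType()), which is
  // what keeps the default constructor cheap.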
};

/// FAddend is used to represent a floating-point addend. An addend is
/// represented as <C, V>, where V is a symbolic value, and C is a
/// constant coefficient. A constant addend is represented as <C, 0>.
class FAddend {
public:
  FAddend() = default;

  void operator+=(const FAddend &T) {
    assert((Val == T.Val) && "Symbolic-values disagree");
    Coeff += T.Coeff;
  }

  Value *getSymVal() const { return Val; }
  const FAddendCoef &getCoef() const { return Coeff; }

  bool isConstant() const { return Val == nullptr; }
  bool isZero() const { return Coeff.isZero(); }

  void set(short Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }

  void set(const APFloat &Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }

  void set(const ConstantFP *Coefficient, Value *V) {
    Coeff.set(Coefficient->getValueAPF());
    Val = V;
  }

  void negate() { Coeff.negate(); }

  /// Drill down the U-D chain one step to find the definition of V, and
  /// try to break the definition into one or two addends.
  static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);

  /// Similar to FAddend::drillDownOneStep() except that the value being
  /// split is the addend itself.
  unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;

private:
  void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }

  // This addend has the value of "Coeff * Val".
  Value *Val = nullptr;
  FAddendCoef Coeff;
};

/// FAddCombine is the class for optimizing an unsafe fadd/fsub along
/// with at most two of its neighboring instructions.
///
class FAddCombine {
public:
  FAddCombine(InstCombiner::BuilderTy &B) : Builder(B) {}

  Value *simplify(Instruction *FAdd);

private:
  using AddendVect = SmallVector<const FAddend *, 4>;

  Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);

  /// Convert given addend to a Value
  Value *createAddendVal(const FAddend &A, bool& NeedNeg);

  /// Return the number of instructions needed to emit the N-ary addition.
  unsigned calcInstrNumber(const AddendVect& Vect);

  Value *createFSub(Value *Opnd0, Value *Opnd1);
  Value *createFAdd(Value *Opnd0, Value *Opnd1);
  Value *createFMul(Value *Opnd0, Value *Opnd1);
  Value *createFNeg(Value *V);
  Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
  void createInstPostProc(Instruction *NewInst, bool NoNumber = false);

  // Debugging support is clustered here.
#ifndef NDEBUG
  unsigned CreateInstrNum;
  void initCreateInstNum() { CreateInstrNum = 0; }
  void incCreateInstNum() { CreateInstrNum++; }
#else
  void initCreateInstNum() {}
  void incCreateInstNum() {}
#endif

  InstCombiner::BuilderTy &Builder;
  Instruction *Instr = nullptr;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//
// Implementation of
//    {FAddendCoef, FAddend, FAddition, FAddCombine}.
//
//===----------------------------------------------------------------------===//
FAddendCoef::~FAddendCoef() {
  if (BufHasFpVal)
    getFpValPtr()->~APFloat();
}

void FAddendCoef::set(const APFloat& C) {
  APFloat *P = getFpValPtr();

  if (isInt()) {
    // As the buffer is a meaningless byte stream, we cannot call
    // APFloat::operator=().
    new(P) APFloat(C);
  } else
    *P = C;

  IsFp = BufHasFpVal = true;
}

void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
  if (!isInt())
    return;

  APFloat *P = getFpValPtr();
  if (IntVal > 0)
    new(P) APFloat(Sem, IntVal);
  else {
    new(P) APFloat(Sem, 0 - IntVal);
    P->changeSign();
  }
  IsFp = BufHasFpVal = true;
}

APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
  if (Val >= 0)
    return APFloat(Sem, Val);

  APFloat T(Sem, 0 - Val);
  T.changeSign();

  return T;
}

void FAddendCoef::operator=(const FAddendCoef &That) {
  if (That.isInt())
    set(That.IntVal);
  else
    set(That.getFpVal());
}

void FAddendCoef::operator+=(const FAddendCoef &That) {
  RoundingMode RndMode = RoundingMode::NearestTiesToEven;
  if (isInt() == That.isInt()) {
    if (isInt())
      IntVal += That.IntVal;
    else
      getFpVal().add(That.getFpVal(), RndMode);
    return;
  }

  if (isInt()) {
    const APFloat &T = That.getFpVal();
    convertToFpType(T.getSemantics());
    getFpVal().add(T, RndMode);
    return;
  }

  APFloat &T = getFpVal();
  T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
}

void FAddendCoef::operator*=(const FAddendCoef &That) {
  if (That.isOne())
    return;

  if (That.isMinusOne()) {
    negate();
    return;
  }

  if (isInt() && That.isInt()) {
    int Res = IntVal * (int)That.IntVal;
    assert(!insaneIntVal(Res) && "Insane int value");
    IntVal = Res;
    return;
  }

  const fltSemantics &Semantic =
      isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();

  if (isInt())
    convertToFpType(Semantic);
  APFloat &F0 = getFpVal();

  if (That.isInt())
    F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
                APFloat::rmNearestTiesToEven);
  else
    F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
}

void FAddendCoef::negate() {
  if (isInt())
    IntVal = 0 - IntVal;
  else
    getFpVal().changeSign();
}

Value *FAddendCoef::getValue(Type *Ty) const {
  return isInt() ?
    ConstantFP::get(Ty, float(IntVal)) :
    ConstantFP::get(Ty->getContext(), getFpVal());
}

// The definition of <Val>     Addends
// =========================================
//  A + B                     <1, A>, <1, B>
//  A - B                     <1, A>, <-1, B>
//  0 - B                     <-1, B>
//  C * A                     <C, A>
//  A + C                     <1, A>, <C, NULL>
//  0 +/- 0                   <0, NULL> (corner case)
//
// Legend: A and B are not constant, C is constant
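//
// For example (illustrative): "%t = fsub float 0.0, %B" drills down to the
// single addend <-1, B>, and "%t = fmul float 2.0, %A" drills down to <2.0, A>.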
unsigned FAddend::drillValueDownOneStep
  (Value *Val, FAddend &Addend0, FAddend &Addend1) {
  Instruction *I = nullptr;
  if (!Val || !(I = dyn_cast<Instruction>(Val)))
    return 0;

  unsigned Opcode = I->getOpcode();

  if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
    ConstantFP *C0, *C1;
    Value *Opnd0 = I->getOperand(0);
    Value *Opnd1 = I->getOperand(1);
    if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
      Opnd0 = nullptr;

    if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
      Opnd1 = nullptr;

    if (Opnd0) {
      if (!C0)
        Addend0.set(1, Opnd0);
      else
        Addend0.set(C0, nullptr);
    }

    if (Opnd1) {
      FAddend &Addend = Opnd0 ? Addend1 : Addend0;
      if (!C1)
        Addend.set(1, Opnd1);
      else
        Addend.set(C1, nullptr);
      if (Opcode == Instruction::FSub)
        Addend.negate();
    }

    if (Opnd0 || Opnd1)
      return Opnd0 && Opnd1 ? 2 : 1;

    // Both operands are zero. Weird!
    Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
    return 1;
  }

  if (I->getOpcode() == Instruction::FMul) {
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
      Addend0.set(C, V1);
      return 1;
    }

    if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
      Addend0.set(C, V0);
      return 1;
    }
  }

  return 0;
}

// Try to break *this* addend into two addends. E.g., suppose this addend is
// <2.3, V>, and V = X + Y; by calling this function, we obtain the two
// addends <2.3, X> and <2.3, Y>.
unsigned FAddend::drillAddendDownOneStep
  (FAddend &Addend0, FAddend &Addend1) const {
  if (isConstant())
    return 0;

  unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
  if (!BreakNum || Coeff.isOne())
    return BreakNum;

  Addend0.Scale(Coeff);

  if (BreakNum == 2)
    Addend1.Scale(Coeff);

  return BreakNum;
}

Value *FAddCombine::simplify(Instruction *I) {
  assert(I->hasAllowReassoc() && I->hasNoSignedZeros() &&
         "Expected 'reassoc'+'nsz' instruction");

  // Currently we are not able to handle vector types.
  if (I->getType()->isVectorTy())
    return nullptr;

  assert((I->getOpcode() == Instruction::FAdd ||
          I->getOpcode() == Instruction::FSub) && "Expect add/sub");

  // Save the instruction before calling other member functions.
  Instr = I;

  FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;

  unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);

  // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1.
  unsigned Opnd0_ExpNum = 0;
  unsigned Opnd1_ExpNum = 0;

  if (!Opnd0.isConstant())
    Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);

  // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1.
  if (OpndNum == 2 && !Opnd1.isConstant())
    Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);

  // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1
  if (Opnd0_ExpNum && Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0_0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    // Compute the instruction quota. We should save at least one instruction.
    unsigned InstQuota = 0;

    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
                 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;

    if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
      return R;
  }

  if (OpndNum != 2) {
    // The input instruction is "I = 0.0 +/- V". If "V" could have been
    // split into two addends, say "V = X - Y", the instruction would have
    // been optimized into "I = Y - X" in the previous steps.
    const FAddendCoef &CE = Opnd0.getCoef();
    return CE.isOne() ? Opnd0.getSymVal() : nullptr;
  }

  // Step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
  if (Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1]
  if (Opnd0_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd1);
    AllOpnds.push_back(&Opnd0_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  return nullptr;
}

Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
  unsigned AddendNum = Addends.size();
  assert(AddendNum <= 4 && "Too many addends");

  // For saving intermediate results.
  unsigned NextTmpIdx = 0;
  FAddend TmpResult[3];

  // Points to the constant addend of the resulting simplified expression.
  // If the resulting expression has a constant addend, it is desirable to
  // place that constant at the top of the resulting expression tree. Placing
  // constants close to super-expr(s) will potentially reveal some
  // optimization opportunities in the super-expr(s).
  const FAddend *ConstAdd = nullptr;

  // Simplified addends are placed in <SimpVect>.
  AddendVect SimpVect;

  // The outer loop works on one symbolic value at a time. Suppose the input
  // addends are: <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
  // The symbolic values will be processed in this order: x, y, z.
  for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {

    const FAddend *ThisAddend = Addends[SymIdx];
    if (!ThisAddend) {
      // This addend was processed before.
      continue;
    }

    Value *Val = ThisAddend->getSymVal();
    unsigned StartIdx = SimpVect.size();
    SimpVect.push_back(ThisAddend);

    // The inner loop collects addends sharing the same symbolic value; these
    // addends will later be folded into a single addend. Following the above
    // example, if the symbolic value "y" is being processed, the inner loop
    // will collect the two addends "<b1, y>" and "<b2, y>", which will later
    // be folded into "<b1+b2, y>".
    for (unsigned SameSymIdx = SymIdx + 1;
         SameSymIdx < AddendNum; SameSymIdx++) {
      const FAddend *T = Addends[SameSymIdx];
      if (T && T->getSymVal() == Val) {
        // Set null such that the next iteration of the outer loop will not
        // process this addend again.
        Addends[SameSymIdx] = nullptr;
        SimpVect.push_back(T);
      }
    }

    // If multiple addends share the same symbolic value, fold them together.
    if (StartIdx + 1 != SimpVect.size()) {
      FAddend &R = TmpResult[NextTmpIdx++];
      R = *SimpVect[StartIdx];
      for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
        R += *SimpVect[Idx];

      // Pop all addends being folded and push the resulting folded addend.
      SimpVect.resize(StartIdx);
      if (Val) {
        if (!R.isZero()) {
          SimpVect.push_back(&R);
        }
      } else {
        // Don't push the constant addend at this time. It will be the last
        // element of <SimpVect>.
        ConstAdd = &R;
      }
    }
  }

  assert((NextTmpIdx <= array_lengthof(TmpResult) + 1) &&
         "out-of-bound access");

  if (ConstAdd)
    SimpVect.push_back(ConstAdd);

  Value *Result;
  if (!SimpVect.empty())
    Result = createNaryFAdd(SimpVect, InstrQuota);
  else {
    // The addition is folded to 0.0.
    Result = ConstantFP::get(Instr->getType(), 0.0);
  }

  return Result;
}

Value *FAddCombine::createNaryFAdd
  (const AddendVect &Opnds, unsigned InstrQuota) {
  assert(!Opnds.empty() && "Expect at least one addend");

  // Step 1: Check if the # of instructions needed exceeds the quota.

  unsigned InstrNeeded = calcInstrNumber(Opnds);
  if (InstrNeeded > InstrQuota)
    return nullptr;

  initCreateInstNum();

  // Step 2: Emit the N-ary addition.
  // Note that at most three instructions are involved in Fadd-InstCombine:
  // the addition in question, and at most two neighboring instructions.
  // The resulting optimized addition should have at least one less
  // instruction than the original addition expression tree. This implies
  // that the resulting N-ary addition has at most two instructions, and we
  // don't need to worry about tree-height when constructing the N-ary
  // addition.

  Value *LastVal = nullptr;
  bool LastValNeedNeg = false;

  // Iterate the addends, creating fadd/fsub using two adjacent addends.
  for (const FAddend *Opnd : Opnds) {
    bool NeedNeg;
    Value *V = createAddendVal(*Opnd, NeedNeg);
    if (!LastVal) {
      LastVal = V;
      LastValNeedNeg = NeedNeg;
      continue;
    }

    if (LastValNeedNeg == NeedNeg) {
      LastVal = createFAdd(LastVal, V);
      continue;
    }

    if (LastValNeedNeg)
      LastVal = createFSub(V, LastVal);
    else
      LastVal = createFSub(LastVal, V);

    LastValNeedNeg = false;
  }

  if (LastValNeedNeg) {
    LastVal = createFNeg(LastVal);
  }

#ifndef NDEBUG
  assert(CreateInstrNum == InstrNeeded &&
         "Inconsistent instruction numbers");
#endif

  return LastVal;
}

Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFSub(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFNeg(Value *V) {
  Value *NewV = Builder.CreateFNeg(V);
  if (Instruction *I = dyn_cast<Instruction>(NewV))
    createInstPostProc(I, true); // fneg's don't receive instruction numbers.
  return NewV;
}

Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFAdd(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFMul(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
  NewInstr->setDebugLoc(Instr->getDebugLoc());

  // Keep track of the number of instructions created.
  if (!NoNumber)
    incCreateInstNum();

  // Propagate fast-math flags.
  NewInstr->setFastMathFlags(Instr->getFastMathFlags());
}

// Return the number of instructions needed to emit the N-ary addition.
// NOTE: Keep this function in sync with createAddendVal().
unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
  unsigned OpndNum = Opnds.size();
  unsigned InstrNeeded = OpndNum - 1;

  // The number of addends in the form of "(-1)*x".
  unsigned NegOpndNum = 0;

  // Adjust the number of instructions needed to emit the N-ary add.
  for (const FAddend *Opnd : Opnds) {
    if (Opnd->isConstant())
      continue;

    // The constant check above is really for a few special constant
    // coefficients.
    if (isa<UndefValue>(Opnd->getSymVal()))
      continue;

    const FAddendCoef &CE = Opnd->getCoef();
    if (CE.isMinusOne() || CE.isMinusTwo())
      NegOpndNum++;

    // Let the addend be "c * x". If "c == +/-1", the value of the addend
    // is immediately available; otherwise, it needs exactly one instruction
    // to evaluate the value.
    if (!CE.isMinusOne() && !CE.isOne())
      InstrNeeded++;
  }
  return InstrNeeded;
}

// Input Addend        Value           NeedNeg(output)
// ================================================================
// Constant C          C               false
// <+/-1, V>           V               coefficient is -1
// <2/-2, V>           "fadd V, V"     coefficient is -2
// <C, V>              "fmul V, C"     false
//
// NOTE: Keep this function in sync with FAddCombine::calcInstrNumber.
Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
  const FAddendCoef &Coeff = Opnd.getCoef();

  if (Opnd.isConstant()) {
    NeedNeg = false;
    return Coeff.getValue(Instr->getType());
  }

  Value *OpndVal = Opnd.getSymVal();

  if (Coeff.isMinusOne() || Coeff.isOne()) {
    NeedNeg = Coeff.isMinusOne();
    return OpndVal;
  }

  if (Coeff.isTwo() || Coeff.isMinusTwo()) {
    NeedNeg = Coeff.isMinusTwo();
    return createFAdd(OpndVal, OpndVal);
  }

  NeedNeg = false;
  return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}

// Checks if any operand is negative and we can convert add to sub.
// This function checks for the following negative patterns:
//   ADD(XOR(OR(Z, NOT(C)), C), 1) == NEG(AND(Z, C))
//   ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
//   XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
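//
// Worked 8-bit example (illustrative): with C = 0x0F,
//   add (xor (or Z, 0xF0), 0x0F), 1
// computes ~(Z & 0x0F) + 1, i.e. -(Z & 0x0F), matching the first pattern.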
static Value *checkForNegativeOperand(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  // This function creates 2 instructions to replace ADD, so we need at least
  // one of LHS or RHS to have one use to ensure there is benefit in the
  // transform.
  if (!LHS->hasOneUse() && !RHS->hasOneUse())
    return nullptr;

  Value *X = nullptr, *Y = nullptr, *Z = nullptr;
  const APInt *C1 = nullptr, *C2 = nullptr;

  // If ONE is on the other side, swap.
  if (match(RHS, m_Add(m_Value(X), m_One())))
    std::swap(LHS, RHS);

  if (match(LHS, m_Add(m_Value(X), m_One()))) {
    // If XOR is on the other side, swap.
    if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
      std::swap(X, RHS);

    if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
      // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
      // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
      if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
        Value *NewAnd = Builder.CreateAnd(Z, *C1);
        return Builder.CreateSub(RHS, NewAnd, "sub");
      } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
        // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
        // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
        Value *NewOr = Builder.CreateOr(Z, ~(*C1));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
    }
  }

  // Restore LHS and RHS.
  LHS = I.getOperand(0);
  RHS = I.getOperand(1);

  // If XOR is on the other side, swap.
  if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
    std::swap(LHS, RHS);

  // C1 is ODD (hence C2 is even).
  // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
  // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
  if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
    if (C1->countTrailingZeros() == 0)
      if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
        Value *NewOr = Builder.CreateOr(Z, ~(*C2));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
  return nullptr;
}

/// Wrapping flags may allow combining constants separated by an extend.
static Instruction *foldNoWrapAdd(BinaryOperator &Add,
                                  InstCombiner::BuilderTy &Builder) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  Type *Ty = Add.getType();
  Constant *Op1C;
  if (!match(Op1, m_Constant(Op1C)))
    return nullptr;

  // Try this match first because it results in an add in the narrow type.
  // (zext (X +nuw C2)) + C1 --> zext (X + (C2 + trunc(C1)))
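  // For instance (illustrative): (zext i8 (add nuw %x, 5) to i32) + (-2)
  // --> zext i8 (add nuw %x, 3) to i32; since %x + 5 does not wrap, the
  // adjusted narrow add cannot wrap either.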
  Value *X;
  const APInt *C1, *C2;
  if (match(Op1, m_APInt(C1)) &&
      match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C2))))) &&
      C1->isNegative() && C1->sge(-C2->sext(C1->getBitWidth()))) {
    Constant *NewC =
        ConstantInt::get(X->getType(), *C2 + C1->trunc(C2->getBitWidth()));
    return new ZExtInst(Builder.CreateNUWAdd(X, NewC), Ty);
  }

  // More general combining of constants in the wide type.
  // (sext (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C)
  Constant *NarrowC;
  if (match(Op0, m_OneUse(m_SExt(m_NSWAdd(m_Value(X), m_Constant(NarrowC)))))) {
    Constant *WideC = ConstantExpr::getSExt(NarrowC, Ty);
    Constant *NewC = ConstantExpr::getAdd(WideC, Op1C);
    Value *WideX = Builder.CreateSExt(X, Ty);
    return BinaryOperator::CreateAdd(WideX, NewC);
  }
  // (zext (X +nuw NarrowC)) + C --> (zext X) + (zext(NarrowC) + C)
  if (match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_Constant(NarrowC)))))) {
    Constant *WideC = ConstantExpr::getZExt(NarrowC, Ty);
    Constant *NewC = ConstantExpr::getAdd(WideC, Op1C);
    Value *WideX = Builder.CreateZExt(X, Ty);
    return BinaryOperator::CreateAdd(WideX, NewC);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  Constant *Op1C;
  if (!match(Op1, m_ImmConstant(Op1C)))
    return nullptr;

  if (Instruction *NV = foldBinOpIntoSelectOrPhi(Add))
    return NV;

  Value *X;
  Constant *Op00C;

  // add (sub C1, X), C2 --> sub (add C1, C2), X
  if (match(Op0, m_Sub(m_Constant(Op00C), m_Value(X))))
    return BinaryOperator::CreateSub(ConstantExpr::getAdd(Op00C, Op1C), X);

  Value *Y;

  // add (sub X, Y), -1 --> add (not Y), X
  if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y)))) &&
      match(Op1, m_AllOnes()))
    return BinaryOperator::CreateAdd(Builder.CreateNot(Y), X);

  // zext(bool) + C -> bool ? C + 1 : C
  if (match(Op0, m_ZExt(m_Value(X))) &&
      X->getType()->getScalarSizeInBits() == 1)
    return SelectInst::Create(X, InstCombiner::AddOne(Op1C), Op1);
  // sext(bool) + C -> bool ? C - 1 : C
  if (match(Op0, m_SExt(m_Value(X))) &&
      X->getType()->getScalarSizeInBits() == 1)
    return SelectInst::Create(X, InstCombiner::SubOne(Op1C), Op1);

  // ~X + C --> (C-1) - X
  if (match(Op0, m_Not(m_Value(X))))
    return BinaryOperator::CreateSub(InstCombiner::SubOne(Op1C), X);

  const APInt *C;
  if (!match(Op1, m_APInt(C)))
    return nullptr;

  // (X | Op01C) + Op1C --> X + (Op01C + Op1C) iff the `or` is actually an `add`
  Constant *Op01C;
  if (match(Op0, m_Or(m_Value(X), m_ImmConstant(Op01C))) &&
      haveNoCommonBitsSet(X, Op01C, DL, &AC, &Add, &DT))
    return BinaryOperator::CreateAdd(X, ConstantExpr::getAdd(Op01C, Op1C));

  // (X | C2) + C --> (X | C2) ^ C2 iff (C2 == -C)
  const APInt *C2;
  if (match(Op0, m_Or(m_Value(), m_APInt(C2))) && *C2 == -*C)
    return BinaryOperator::CreateXor(Op0, ConstantInt::get(Add.getType(), *C2));

  if (C->isSignMask()) {
    // If wrapping is not allowed, then the addition must set the sign bit:
    // X + (signmask) --> X | signmask
    if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap())
      return BinaryOperator::CreateOr(Op0, Op1);

    // If wrapping is allowed, then the addition flips the sign bit of LHS:
    // X + (signmask) --> X ^ signmask
    return BinaryOperator::CreateXor(Op0, Op1);
  }

  // Is this add the last step in a convoluted sext?
  // add(zext(xor i16 X, -32768), -32768) --> sext X
  Type *Ty = Add.getType();
  if (match(Op0, m_ZExt(m_Xor(m_Value(X), m_APInt(C2)))) &&
      C2->isMinSignedValue() && C2->sext(Ty->getScalarSizeInBits()) == *C)
    return CastInst::Create(Instruction::SExt, X, Ty);

  if (match(Op0, m_Xor(m_Value(X), m_APInt(C2)))) {
    // (X ^ signmask) + C --> (X + (signmask ^ C))
    if (C2->isSignMask())
      return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C2 ^ *C));

    // If X has no high-bits set above an xor mask:
    // add (xor X, LowMaskC), C --> sub (LowMaskC + C), X
    if (C2->isMask()) {
      KnownBits LHSKnown = computeKnownBits(X, 0, &Add);
      if ((*C2 | LHSKnown.Zero).isAllOnesValue())
        return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C2 + *C), X);
    }

    // Look for a math+logic pattern that corresponds to sext-in-register of a
    // value with cleared high bits. Convert that into a pair of shifts:
    // add (xor X, 0x80), 0xF..F80 --> (X << ShAmtC) >>s ShAmtC
    // add (xor X, 0xF..F80), 0x80 --> (X << ShAmtC) >>s ShAmtC
    if (Op0->hasOneUse() && *C2 == -(*C)) {
      unsigned BitWidth = Ty->getScalarSizeInBits();
      unsigned ShAmt = 0;
      if (C->isPowerOf2())
        ShAmt = BitWidth - C->logBase2() - 1;
      else if (C2->isPowerOf2())
        ShAmt = BitWidth - C2->logBase2() - 1;
      if (ShAmt && MaskedValueIsZero(X, APInt::getHighBitsSet(BitWidth, ShAmt),
                                     0, &Add)) {
        Constant *ShAmtC = ConstantInt::get(Ty, ShAmt);
        Value *NewShl = Builder.CreateShl(X, ShAmtC, "sext");
        return BinaryOperator::CreateAShr(NewShl, ShAmtC);
      }
    }
  }

  if (C->isOneValue() && Op0->hasOneUse()) {
    // add (sext i1 X), 1 --> zext (not X)
    // TODO: The smallest IR representation is (select X, 0, 1), and that would
    // not require the one-use check. But we need to remove a transform in
    // visitSelect and make sure that IR value tracking for select is equal or
    // better than for these ops.
    if (match(Op0, m_SExt(m_Value(X))) &&
        X->getType()->getScalarSizeInBits() == 1)
      return new ZExtInst(Builder.CreateNot(X), Ty);

    // Shifts and add used to flip and mask off the low bit:
    // add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1
    const APInt *C3;
    if (match(Op0, m_AShr(m_Shl(m_Value(X), m_APInt(C2)), m_APInt(C3))) &&
        C2 == C3 && *C2 == Ty->getScalarSizeInBits() - 1) {
      Value *NotX = Builder.CreateNot(X);
      return BinaryOperator::CreateAnd(NotX, ConstantInt::get(Ty, 1));
    }
  }

  // If all bits affected by the add are included in a high-bit-mask, do the
  // add before the mask op:
  // (X & 0xFF00) + xx00 --> (X + xx00) & 0xFF00
  if (match(Op0, m_OneUse(m_And(m_Value(X), m_APInt(C2)))) &&
      C2->isNegative() && C2->isShiftedMask() && *C == (*C & *C2)) {
    Value *NewAdd = Builder.CreateAdd(X, ConstantInt::get(Ty, *C));
    return BinaryOperator::CreateAnd(NewAdd, ConstantInt::get(Ty, *C2));
  }

  return nullptr;
}

// Matches multiplication expression Op * C where C is a constant. Returns the
// constant value in C and the other operand in Op. Returns true if such a
// match is found.
static bool MatchMul(Value *E, Value *&Op, APInt &C) {
  const APInt *AI;
  if (match(E, m_Mul(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (match(E, m_Shl(m_Value(Op), m_APInt(AI)))) {
    C = APInt(AI->getBitWidth(), 1);
    C <<= *AI;
    return true;
  }
  return false;
}

// Matches remainder expression Op % C where C is a constant. Returns the
// constant value in C and the other operand in Op. Returns the signedness of
// the remainder operation in IsSigned. Returns true if such a match is
// found.
static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned) {
  const APInt *AI;
  IsSigned = false;
  if (match(E, m_SRem(m_Value(Op), m_APInt(AI)))) {
    IsSigned = true;
    C = *AI;
    return true;
  }
  if (match(E, m_URem(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (match(E, m_And(m_Value(Op), m_APInt(AI))) && (*AI + 1).isPowerOf2()) {
    C = *AI + 1;
    return true;
  }
  return false;
}

// Matches division expression Op / C with the given signedness as indicated
// by IsSigned, where C is a constant. Returns the constant value in C and the
// other operand in Op. Returns true if such a match is found.
static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned) {
  const APInt *AI;
  if (IsSigned && match(E, m_SDiv(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (!IsSigned) {
    if (match(E, m_UDiv(m_Value(Op), m_APInt(AI)))) {
      C = *AI;
      return true;
    }
    if (match(E, m_LShr(m_Value(Op), m_APInt(AI)))) {
      C = APInt(AI->getBitWidth(), 1);
      C <<= *AI;
      return true;
    }
  }
  return false;
}

// Returns whether C0 * C1 with the given signedness overflows.
static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned) {
  bool overflow;
  if (IsSigned)
    (void)C0.smul_ov(C1, overflow);
  else
    (void)C0.umul_ov(C1, overflow);
  return overflow;
}

// Simplifies X % C0 + (( X / C0 ) % C1) * C0 to X % (C0 * C1), where (C0 * C1)
// does not overflow.
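// E.g. (illustrative, unsigned case): X % 3 + ((X / 3) % 5) * 3 --> X % 15;
// the first term is the low base-3 digit of X and the second term contributes
// the next digit, together giving X mod 15.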
Value *InstCombinerImpl::SimplifyAddWithRemainder(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *X, *MulOpV;
  APInt C0, MulOpC;
  bool IsSigned;
  // Match I = X % C0 + MulOpV * C0
  if (((MatchRem(LHS, X, C0, IsSigned) && MatchMul(RHS, MulOpV, MulOpC)) ||
       (MatchRem(RHS, X, C0, IsSigned) && MatchMul(LHS, MulOpV, MulOpC))) &&
      C0 == MulOpC) {
    Value *RemOpV;
    APInt C1;
    bool Rem2IsSigned;
    // Match MulOpV = RemOpV % C1
    if (MatchRem(MulOpV, RemOpV, C1, Rem2IsSigned) &&
        IsSigned == Rem2IsSigned) {
      Value *DivOpV;
      APInt DivOpC;
      // Match RemOpV = X / C0
      if (MatchDiv(RemOpV, DivOpV, DivOpC, IsSigned) && X == DivOpV &&
          C0 == DivOpC && !MulWillOverflow(C0, C1, IsSigned)) {
        Value *NewDivisor = ConstantInt::get(X->getType(), C0 * C1);
        return IsSigned ? Builder.CreateSRem(X, NewDivisor, "srem")
                        : Builder.CreateURem(X, NewDivisor, "urem");
      }
    }
  }

  return nullptr;
}

/// Fold
///   (1 << NBits) - 1
/// Into:
///   ~(-(1 << NBits))
/// Because a 'not' is better for bit-tracking analysis and other transforms
/// than an 'add'. The new shl is always nsw, and is nuw if the old `add` was.
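/// E.g. (illustrative) with NBits == 3 in i8: (1 << 3) - 1 == 0b00000111, and
/// ~(-(1 << 3)) == ~0b11111000 == 0b00000111.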
static Instruction *canonicalizeLowbitMask(BinaryOperator &I,
                                           InstCombiner::BuilderTy &Builder) {
  Value *NBits;
  if (!match(&I, m_Add(m_OneUse(m_Shl(m_One(), m_Value(NBits))), m_AllOnes())))
    return nullptr;

  Constant *MinusOne = Constant::getAllOnesValue(NBits->getType());
  Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask");
  // Be wary of constant folding.
  if (auto *BOp = dyn_cast<BinaryOperator>(NotMask)) {
    // Always NSW. But NUW propagates from `add`.
    BOp->setHasNoSignedWrap();
    BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
  }

  return BinaryOperator::CreateNot(NotMask, I.getName());
}

static Instruction *foldToUnsignedSaturatedAdd(BinaryOperator &I) {
  assert(I.getOpcode() == Instruction::Add && "Expecting add instruction");
  Type *Ty = I.getType();
  auto getUAddSat = [&]() {
    return Intrinsic::getDeclaration(I.getModule(), Intrinsic::uadd_sat, Ty);
  };

  // add (umin X, ~Y), Y --> uaddsat X, Y
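  // This is sound because umin(X, ~Y) <= ~Y, so the add cannot wrap; it
  // saturates exactly when X > ~Y, i.e. exactly when X + Y would overflow.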
  Value *X, *Y;
  if (match(&I, m_c_Add(m_c_UMin(m_Value(X), m_Not(m_Value(Y))),
                        m_Deferred(Y))))
    return CallInst::Create(getUAddSat(), { X, Y });

  // add (umin X, ~C), C --> uaddsat X, C
  const APInt *C, *NotC;
  if (match(&I, m_Add(m_UMin(m_Value(X), m_APInt(NotC)), m_APInt(C))) &&
      *C == ~*NotC)
    return CallInst::Create(getUAddSat(), { X, ConstantInt::get(Ty, *C) });

  return nullptr;
}

Instruction *InstCombinerImpl::
    canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
        BinaryOperator &I) {
  assert((I.getOpcode() == Instruction::Add ||
          I.getOpcode() == Instruction::Or ||
          I.getOpcode() == Instruction::Sub) &&
         "Expecting add/or/sub instruction");

  // We have a subtraction/addition between a (potentially truncated) *logical*
  // right-shift of X and a "select".
  Value *X, *Select;
  Instruction *LowBitsToSkip, *Extract;
  if (!match(&I, m_c_BinOp(m_TruncOrSelf(m_CombineAnd(
                               m_LShr(m_Value(X), m_Instruction(LowBitsToSkip)),
                               m_Instruction(Extract))),
                           m_Value(Select))))
    return nullptr;

  // `add`/`or` is commutative; but for `sub`, "select" *must* be on RHS.
  if (I.getOpcode() == Instruction::Sub && I.getOperand(1) != Select)
    return nullptr;

  Type *XTy = X->getType();
  bool HadTrunc = I.getType() != XTy;

  // If there was a truncation of the extracted value, then we'll need to
  // produce one extra instruction, so we need to ensure one instruction will
  // go away.
  if (HadTrunc && !match(&I, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
    return nullptr;

  // Extraction should extract the high NBits bits, with the shift amount
  // calculated as:
  //   low bits to skip = shift bitwidth - high bits to extract
  // The shift amount itself may be extended, and we need to look past zero-ext
  // when matching NBits; that will matter for matching later.
  Constant *C;
  Value *NBits;
  if (!match(
          LowBitsToSkip,
          m_ZExtOrSelf(m_Sub(m_Constant(C), m_ZExtOrSelf(m_Value(NBits))))) ||
      !match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
                                   APInt(C->getType()->getScalarSizeInBits(),
                                         X->getType()->getScalarSizeInBits()))))
    return nullptr;

  // The sign-extending value can be zero-extended if we `sub`tract it,
  // or sign-extended otherwise.
  auto SkipExtInMagic = [&I](Value *&V) {
    if (I.getOpcode() == Instruction::Sub)
      match(V, m_ZExtOrSelf(m_Value(V)));
    else
      match(V, m_SExtOrSelf(m_Value(V)));
  };

  // Now, finally validate the sign-extending magic.
  // `select` itself may be appropriately extended; look past that.
  SkipExtInMagic(Select);

  ICmpInst::Predicate Pred;
  const APInt *Thr;
  Value *SignExtendingValue, *Zero;
  bool ShouldSignext;
  // It must be a select between two values we will later establish to be a
  // sign-extending value and a zero constant. The condition guarding the
  // sign-extension must be based on a sign bit of the same X we had in `lshr`.
  if (!match(Select, m_Select(m_ICmp(Pred, m_Specific(X), m_APInt(Thr)),
                              m_Value(SignExtendingValue), m_Value(Zero))) ||
      !isSignBitCheck(Pred, *Thr, ShouldSignext))
    return nullptr;

  // icmp-select pair is commutative.
  if (!ShouldSignext)
    std::swap(SignExtendingValue, Zero);

  // If we should not perform sign-extension then we must add/or/subtract zero.
  if (!match(Zero, m_Zero()))
    return nullptr;
  // Otherwise, it should be some constant, left-shifted by the same NBits we
  // had in `lshr`. Said left-shift can also be appropriately extended.
  // Again, we must look past zero-ext when looking for NBits.
  SkipExtInMagic(SignExtendingValue);
  Constant *SignExtendingValueBaseConstant;
  if (!match(SignExtendingValue,
             m_Shl(m_Constant(SignExtendingValueBaseConstant),
                   m_ZExtOrSelf(m_Specific(NBits)))))
    return nullptr;
  // If we `sub`, then the constant should be one, else it should be all-ones.
  if (I.getOpcode() == Instruction::Sub
          ? !match(SignExtendingValueBaseConstant, m_One())
          : !match(SignExtendingValueBaseConstant, m_AllOnes()))
    return nullptr;

  auto *NewAShr = BinaryOperator::CreateAShr(X, LowBitsToSkip,
                                             Extract->getName() + ".sext");
  NewAShr->copyIRFlags(Extract); // Preserve `exact`-ness.
  if (!HadTrunc)
    return NewAShr;

  Builder.Insert(NewAShr);
  return TruncInst::CreateTruncOrBitCast(NewAShr, I.getType());
}

/// This is a specialization of a more general transform from
/// SimplifyUsingDistributiveLaws. If that code can be made to work optimally
/// for multi-use cases or propagating nsw/nuw, then we would not need this.
static Instruction *factorizeMathWithShlOps(BinaryOperator &I,
                                            InstCombiner::BuilderTy &Builder) {
  // TODO: Also handle mul by doubling the shift amount?
  assert((I.getOpcode() == Instruction::Add ||
          I.getOpcode() == Instruction::Sub) &&
         "Expected add/sub");
  auto *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
  auto *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
  if (!Op0 || !Op1 || !(Op0->hasOneUse() || Op1->hasOneUse()))
    return nullptr;

  Value *X, *Y, *ShAmt;
  if (!match(Op0, m_Shl(m_Value(X), m_Value(ShAmt))) ||
      !match(Op1, m_Shl(m_Value(Y), m_Specific(ShAmt))))
    return nullptr;

  // No-wrap propagates only when all ops have no-wrap.
  bool HasNSW = I.hasNoSignedWrap() && Op0->hasNoSignedWrap() &&
                Op1->hasNoSignedWrap();
  bool HasNUW = I.hasNoUnsignedWrap() && Op0->hasNoUnsignedWrap() &&
                Op1->hasNoUnsignedWrap();

  // add/sub (X << ShAmt), (Y << ShAmt) --> (add/sub X, Y) << ShAmt
  Value *NewMath = Builder.CreateBinOp(I.getOpcode(), X, Y);
  if (auto *NewI = dyn_cast<BinaryOperator>(NewMath)) {
    NewI->setHasNoSignedWrap(HasNSW);
    NewI->setHasNoUnsignedWrap(HasNUW);
  }
  auto *NewShl = BinaryOperator::CreateShl(NewMath, ShAmt);
  NewShl->setHasNoSignedWrap(HasNSW);
  NewShl->setHasNoUnsignedWrap(HasNUW);
  return NewShl;
}

Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
  if (Value *V = SimplifyAddInst(I.getOperand(0), I.getOperand(1),
                                 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // (A*B)+(A*C) -> A*(B+C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  if (Instruction *R = factorizeMathWithShlOps(I, Builder))
    return R;

  if (Instruction *X = foldAddWithConstant(I))
    return X;

  if (Instruction *X = foldNoWrapAdd(I, Builder))
    return X;

  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Type *Ty = I.getType();
  if (Ty->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateXor(LHS, RHS);

  // X + X --> X << 1
  if (LHS == RHS) {
    auto *Shl = BinaryOperator::CreateShl(LHS, ConstantInt::get(Ty, 1));
    Shl->setHasNoSignedWrap(I.hasNoSignedWrap());
    Shl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    return Shl;
  }

  Value *A, *B;
  if (match(LHS, m_Neg(m_Value(A)))) {
    // -A + -B --> -(A + B)
    if (match(RHS, m_Neg(m_Value(B))))
      return BinaryOperator::CreateNeg(Builder.CreateAdd(A, B));

    // -A + B --> B - A
    return BinaryOperator::CreateSub(RHS, A);
  }

  // A + -B --> A - B
  if (match(RHS, m_Neg(m_Value(B))))
    return BinaryOperator::CreateSub(LHS, B);

  if (Value *V = checkForNegativeOperand(I, Builder))
    return replaceInstUsesWith(I, V);

  // (A + 1) + ~B --> A - B
  // ~B + (A + 1) --> A - B
  // (~B + A) + 1 --> A - B
  // (A + ~B) + 1 --> A - B
  if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_One()), m_Not(m_Value(B)))) ||
      match(&I, m_BinOp(m_c_Add(m_Not(m_Value(B)), m_Value(A)), m_One())))
    return BinaryOperator::CreateSub(A, B);

  // (A + RHS) + RHS --> A + (RHS << 1)
  if (match(LHS, m_OneUse(m_c_Add(m_Value(A), m_Specific(RHS)))))
    return BinaryOperator::CreateAdd(A, Builder.CreateShl(RHS, 1, "reass.add"));

  // LHS + (A + LHS) --> A + (LHS << 1)
  if (match(RHS, m_OneUse(m_c_Add(m_Value(A), m_Specific(LHS)))))
    return BinaryOperator::CreateAdd(A, Builder.CreateShl(LHS, 1, "reass.add"));

  {
    // (A + C1) + (C2 - B) --> (A - B) + (C1 + C2)
    Constant *C1, *C2;
    if (match(&I, m_c_Add(m_Add(m_Value(A), m_ImmConstant(C1)),
                          m_Sub(m_ImmConstant(C2), m_Value(B)))) &&
        (LHS->hasOneUse() || RHS->hasOneUse())) {
      Value *Sub = Builder.CreateSub(A, B);
      return BinaryOperator::CreateAdd(Sub, ConstantExpr::getAdd(C1, C2));
    }
  }

  // X % C0 + (( X / C0 ) % C1) * C0 => X % (C0 * C1)
  if (Value *V = SimplifyAddWithRemainder(I)) return replaceInstUsesWith(I, V);

  // ((X s/ C1) << C2) + X => X s% -C1 where -C1 is 1 << C2
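  // For instance (illustrative): ((X s/ -8) << 3) + X --> X s% 8, because
  // X + 8 * (X s/ -8) == X - 8 * (X s/ 8), which is the srem remainder.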
  const APInt *C1, *C2;
  if (match(LHS, m_Shl(m_SDiv(m_Specific(RHS), m_APInt(C1)), m_APInt(C2)))) {
    APInt one(C2->getBitWidth(), 1);
    APInt minusC1 = -(*C1);
    if (minusC1 == (one << *C2)) {
      Constant *NewRHS = ConstantInt::get(RHS->getType(), minusC1);
      return BinaryOperator::CreateSRem(RHS, NewRHS);
    }
  }

  // A+B --> A|B iff A and B have no bits set in common.
  if (haveNoCommonBitsSet(LHS, RHS, DL, &AC, &I, &DT))
    return BinaryOperator::CreateOr(LHS, RHS);

  // add (select X 0 (sub n A)) A  -->  select X A n
  {
    SelectInst *SI = dyn_cast<SelectInst>(LHS);
    Value *A = RHS;
    if (!SI) {
      SI = dyn_cast<SelectInst>(RHS);
      A = LHS;
    }
    if (SI && SI->hasOneUse()) {
      Value *TV = SI->getTrueValue();
      Value *FV = SI->getFalseValue();
      Value *N;

      // Can we fold the add into the argument of the select?
      // We check both true and false select arguments for a matching subtract.
      if (match(FV, m_Zero()) && match(TV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the true select value.
        return SelectInst::Create(SI->getCondition(), N, A);

      if (match(TV, m_Zero()) && match(FV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the false select value.
        return SelectInst::Create(SI->getCondition(), A, N);
    }
  }

  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  // (add (xor A, B) (and A, B)) --> (or A, B)
  // (add (and A, B) (xor A, B)) --> (or A, B)
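  // No carries can occur here: (A ^ B) & (A & B) == 0, so the add cannot
  // carry and equals the bitwise or.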
  if (match(&I, m_c_BinOp(m_Xor(m_Value(A), m_Value(B)),
                          m_c_And(m_Deferred(A), m_Deferred(B)))))
    return BinaryOperator::CreateOr(A, B);

  // (add (or A, B) (and A, B)) --> (add A, B)
  // (add (and A, B) (or A, B)) --> (add A, B)
  if (match(&I, m_c_BinOp(m_Or(m_Value(A), m_Value(B)),
                          m_c_And(m_Deferred(A), m_Deferred(B))))) {
    // Replacing operands in-place to preserve nuw/nsw flags.
    replaceOperand(I, 0, A);
    replaceOperand(I, 1, B);
    return &I;
  }

  // TODO(jingyue): Consider willNotOverflowSignedAdd and
  // willNotOverflowUnsignedAdd to reduce the number of invocations of
  // computeKnownBits.
  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  if (Instruction *V = canonicalizeLowbitMask(I, Builder))
    return V;

  if (Instruction *V =
          canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
    return V;

  if (Instruction *SatAdd = foldToUnsignedSaturatedAdd(I))
    return SatAdd;

  // usub.sat(A, B) + B => umax(A, B)
  if (match(&I, m_c_BinOp(
          m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Value(A), m_Value(B))),
          m_Deferred(B)))) {
    return replaceInstUsesWith(I,
        Builder.CreateIntrinsic(Intrinsic::umax, {I.getType()}, {A, B}));
  }

  // ctpop(A) + ctpop(B) => ctpop(A | B) if A and B have no bits set in common.
  if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(A)))) &&
      match(RHS, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(B)))) &&
      haveNoCommonBitsSet(A, B, DL, &AC, &I, &DT))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
                                   {Builder.CreateOr(A, B)}));

  return Changed ? &I : nullptr;
}

/// Eliminate an op from a linear interpolation (lerp) pattern.
static Instruction *factorizeLerp(BinaryOperator &I,
                                  InstCombiner::BuilderTy &Builder) {
  Value *X, *Y, *Z;
  if (!match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_Value(Y),
                                            m_OneUse(m_FSub(m_FPOne(),
                                                            m_Value(Z))))),
                          m_OneUse(m_c_FMul(m_Value(X), m_Deferred(Z))))))
    return nullptr;

  // (Y * (1.0 - Z)) + (X * Z) --> Y + Z * (X - Y) [8 commuted variants]
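  // Algebraically: Y*(1-Z) + X*Z == Y - Y*Z + X*Z == Y + Z*(X - Y), which
  // needs one fewer floating-point operation (valid under the reassoc FMF
  // required by the caller).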
  Value *XY = Builder.CreateFSubFMF(X, Y, &I);
  Value *MulZ = Builder.CreateFMulFMF(Z, XY, &I);
  return BinaryOperator::CreateFAddFMF(Y, MulZ, &I);
}

/// Factor a common operand out of fadd/fsub of fmul/fdiv.
static Instruction *factorizeFAddFSub(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  assert((I.getOpcode() == Instruction::FAdd ||
          I.getOpcode() == Instruction::FSub) && "Expecting fadd/fsub");
  assert(I.hasAllowReassoc() && I.hasNoSignedZeros() &&
         "FP factorization requires FMF");

  if (Instruction *Lerp = factorizeLerp(I, Builder))
    return Lerp;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X, *Y, *Z;
  bool IsFMul;
  if ((match(Op0, m_OneUse(m_FMul(m_Value(X), m_Value(Z)))) &&
       match(Op1, m_OneUse(m_c_FMul(m_Value(Y), m_Specific(Z))))) ||
      (match(Op0, m_OneUse(m_FMul(m_Value(Z), m_Value(X)))) &&
       match(Op1, m_OneUse(m_c_FMul(m_Value(Y), m_Specific(Z))))))
    IsFMul = true;
  else if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Z)))) &&
           match(Op1, m_OneUse(m_FDiv(m_Value(Y), m_Specific(Z)))))
    IsFMul = false;
  else
    return nullptr;

  // (X * Z) + (Y * Z) --> (X + Y) * Z
  // (X * Z) - (Y * Z) --> (X - Y) * Z
  // (X / Z) + (Y / Z) --> (X + Y) / Z
  // (X / Z) - (Y / Z) --> (X - Y) / Z
  bool IsFAdd = I.getOpcode() == Instruction::FAdd;
  Value *XY = IsFAdd ? Builder.CreateFAddFMF(X, Y, &I)
                     : Builder.CreateFSubFMF(X, Y, &I);

  // Bail out if we just created a denormal constant.
  // TODO: This is copied from a previous implementation. Is it necessary?
  const APFloat *C;
  if (match(XY, m_APFloat(C)) && !C->isNormal())
    return nullptr;

  return IsFMul ? BinaryOperator::CreateFMulFMF(XY, Z, &I)
                : BinaryOperator::CreateFDivFMF(XY, Z, &I);
}

Instruction *InstCombinerImpl::visitFAdd(BinaryOperator &I) {
  if (Value *V = SimplifyFAddInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *FoldedFAdd = foldBinOpIntoSelectOrPhi(I))
    return FoldedFAdd;

  // (-X) + Y --> Y - X
  Value *X, *Y;
  if (match(&I, m_c_FAdd(m_FNeg(m_Value(X)), m_Value(Y))))
    return BinaryOperator::CreateFSubFMF(Y, X, &I);

  // Similar to above, but look through fmul/fdiv for the negated term.
  // (-X * Y) + Z --> Z - (X * Y) [4 commuted variants]
  Value *Z;
  if (match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))),
                         m_Value(Z)))) {
    Value *XY = Builder.CreateFMulFMF(X, Y, &I);
    return BinaryOperator::CreateFSubFMF(Z, XY, &I);
  }
  // (-X / Y) + Z --> Z - (X / Y) [2 commuted variants]
  // (X / -Y) + Z --> Z - (X / Y) [2 commuted variants]
  if (match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y))),
                         m_Value(Z))) ||
      match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))),
                         m_Value(Z)))) {
    Value *XY = Builder.CreateFDivFMF(X, Y, &I);
    return BinaryOperator::CreateFSubFMF(Z, XY, &I);
  }

  // Check for (fadd double (sitofp x), y); see if we can merge this into an
  // integer add followed by a promotion.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
    Value *LHSIntVal = LHSConv->getOperand(0);
    Type *FPType = LHSConv->getType();

    // TODO: This check is overly conservative. In many cases known-bits
    // analysis can tell us that the result of the addition has fewer
    // significant bits than the integer type can hold.
    auto IsValidPromotion = [](Type *FTy, Type *ITy) {
      Type *FScalarTy = FTy->getScalarType();
      Type *IScalarTy = ITy->getScalarType();

      // Do we have enough bits in the significand to represent the result of
      // the integer addition?
      unsigned MaxRepresentableBits =
          APFloat::semanticsPrecision(FScalarTy->getFltSemantics());
      return IScalarTy->getIntegerBitWidth() <= MaxRepresentableBits;
    };
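
    // E.g. (illustrative): double carries a 53-bit significand, so any i32
    // (or narrower) sum is exactly representable and the promotion is safe.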

    // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
    // ... if the constant fits in the integer value. This is useful for things
    // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
    // requires a constant pool load, and generally allows the add to be better
    // instcombined.
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
        Constant *CI =
            ConstantExpr::getFPToSI(CFP, LHSIntVal->getType());
        if (LHSConv->hasOneUse() &&
            ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
            willNotOverflowSignedAdd(LHSIntVal, CI, I)) {
          // Insert the new integer add.
          Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, CI, "addconv");
          return new SIToFPInst(NewAdd, I.getType());
        }
      }

    // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
    if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
      Value *RHSIntVal = RHSConv->getOperand(0);
      // It's enough to check LHS types only because we require int types to
      // be the same for this transform.
      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
        // Only do this if x/y have the same type, if at least one of them has a
        // single use (so we don't increase the number of int->fp conversions),
        // and if the integer add will not overflow.
        if (LHSIntVal->getType() == RHSIntVal->getType() &&
            (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
            willNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) {
          // Insert the new integer add.
          Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, RHSIntVal, "addconv");
          return new SIToFPInst(NewAdd, I.getType());
        }
      }
    }
  }

  // Handle special cases for FAdd with selects feeding the operation.
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
    if (Instruction *F = factorizeFAddFSub(I, Builder))
      return F;

    // Try to fold fadd into the start value of a reduction intrinsic.
    if (match(&I, m_c_FAdd(m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
                               m_AnyZeroFP(), m_Value(X))),
                           m_Value(Y)))) {
      // fadd (rdx 0.0, X), Y --> rdx Y, X
      return replaceInstUsesWith(
          I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
                                     {X->getType()}, {Y, X}, &I));
    }
    const APFloat *StartC, *C;
    if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
                       m_APFloat(StartC), m_Value(X)))) &&
        match(RHS, m_APFloat(C))) {
      // fadd (rdx StartC, X), C --> rdx (C + StartC), X
      Constant *NewStartC = ConstantFP::get(I.getType(), *C + *StartC);
      return replaceInstUsesWith(
          I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
                                     {X->getType()}, {NewStartC, X}, &I));
    }

    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }

  return nullptr;
}
1664 /// Optimize pointer differences into the same array into a size. Consider:
1665 /// &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
1666 /// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
1667 Value *InstCombinerImpl::OptimizePointerDifference(Value *LHS, Value *RHS,
1668 Type *Ty, bool IsNUW) {
1669 // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
1670 // this.
1671 bool Swapped = false;
1672 GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;
1673 if (!isa<GEPOperator>(LHS) && isa<GEPOperator>(RHS)) {
1674 std::swap(LHS, RHS);
1675 Swapped = true;
1678 // Require at least one GEP with a common base pointer on both sides.
1679 if (auto *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
1680 // (gep X, ...) - X
1681 if (LHSGEP->getOperand(0) == RHS) {
1682 GEP1 = LHSGEP;
1683 } else if (auto *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
1684 // (gep X, ...) - (gep X, ...)
1685 if (LHSGEP->getOperand(0)->stripPointerCasts() ==
1686 RHSGEP->getOperand(0)->stripPointerCasts()) {
1687 GEP1 = LHSGEP;
1688 GEP2 = RHSGEP;
1693 if (!GEP1)
1694 return nullptr;
1696 if (GEP2) {
1697 // (gep X, ...) - (gep X, ...)
1699 // Avoid duplicating the arithmetic if there is more than one non-constant
1700 // index between the two GEPs and either GEP has a non-constant index and
1701 // multiple users. With zero non-constant indices, the result is a constant
1702 // and there is no duplication. With one non-constant index, the result is
1703 // an add or sub with a constant, which is no larger than the original
1704 // code, and there is no duplicated arithmetic even if either GEP has
1705 // multiple users. With more than one non-constant index, as long as each
1706 // GEP with at least one non-constant index has a single user, there is no
1707 // duplication.
1708 unsigned NumNonConstantIndices1 = GEP1->countNonConstantIndices();
1709 unsigned NumNonConstantIndices2 = GEP2->countNonConstantIndices();
1710 if (NumNonConstantIndices1 + NumNonConstantIndices2 > 1 &&
1711 ((NumNonConstantIndices1 > 0 && !GEP1->hasOneUse()) ||
1712 (NumNonConstantIndices2 > 0 && !GEP2->hasOneUse()))) {
1713 return nullptr;
1718 // Emit the offset of the GEP as an intptr_t.
1718 Value *Result = EmitGEPOffset(GEP1);
1720 // If this is a single inbounds GEP and the original sub was nuw,
1721 // then the final multiplication is also nuw.
1722 if (auto *I = dyn_cast<Instruction>(Result))
1723 if (IsNUW && !GEP2 && !Swapped && GEP1->isInBounds() &&
1724 I->getOpcode() == Instruction::Mul)
1725 I->setHasNoUnsignedWrap();
1727 // If we have a 2nd GEP of the same base pointer, subtract the offsets.
1728 // If both GEPs are inbounds, then the subtract does not have signed overflow.
1729 if (GEP2) {
1730 Value *Offset = EmitGEPOffset(GEP2);
1731 Result = Builder.CreateSub(Result, Offset, "gepdiff", /* NUW */ false,
1732 GEP1->isInBounds() && GEP2->isInBounds());
1735 // If we have p - gep(p, ...) then we have to negate the result.
1736 if (Swapped)
1737 Result = Builder.CreateNeg(Result, "diff.neg");
1739 return Builder.CreateIntCast(Result, Ty, true);
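// A minimal sketch of the single-GEP case (i32 elements and a 64-bit index
// assumed):
//   %g = getelementptr inbounds i32, i32* %A, i64 %i
//   %x = ptrtoint i32* %g to i64
//   %y = ptrtoint i32* %A to i64
//   %d = sub nuw i64 %x, %y
// Here GEP2 is null and %d becomes the emitted offset, roughly
// "mul nuw i64 %i, 4", with nuw preserved per the rule above.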
1742 Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
1743 if (Value *V = SimplifySubInst(I.getOperand(0), I.getOperand(1),
1744 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
1745 SQ.getWithInstruction(&I)))
1746 return replaceInstUsesWith(I, V);
1748 if (Instruction *X = foldVectorBinop(I))
1749 return X;
1751 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1753 // If this is a 'B = x-(-A)', change to B = x+A.
1754 // We deal with this without involving Negator to preserve NSW flag.
1755 if (Value *V = dyn_castNegVal(Op1)) {
1756 BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);
1758 if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
1759 assert(BO->getOpcode() == Instruction::Sub &&
1760 "Expected a subtraction operator!");
1761 if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
1762 Res->setHasNoSignedWrap(true);
1763 } else {
1764 if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
1765 Res->setHasNoSignedWrap(true);
1768 return Res;
1771 // Try this before Negator to preserve NSW flag.
1772 if (Instruction *R = factorizeMathWithShlOps(I, Builder))
1773 return R;
1775 Constant *C;
1776 if (match(Op0, m_ImmConstant(C))) {
1777 Value *X;
1778 Constant *C2;
1780 // C-(X+C2) --> (C-C2)-X
1781 if (match(Op1, m_Add(m_Value(X), m_ImmConstant(C2))))
1782 return BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);
1785 auto TryToNarrowDeduceFlags = [this, &I, &Op0, &Op1]() -> Instruction * {
1786 if (Instruction *Ext = narrowMathIfNoOverflow(I))
1787 return Ext;
1789 bool Changed = false;
1790 if (!I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1, I)) {
1791 Changed = true;
1792 I.setHasNoSignedWrap(true);
1794 if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1, I)) {
1795 Changed = true;
1796 I.setHasNoUnsignedWrap(true);
1799 return Changed ? &I : nullptr;
1802 // First, let's try to interpret `sub a, b` as `add a, (sub 0, b)`,
1803 // and let's try to sink `(sub 0, b)` into `b` itself. But only if this isn't
1804 // a pure negation used by a select that looks like abs/nabs.
1805 bool IsNegation = match(Op0, m_ZeroInt());
1806 if (!IsNegation || none_of(I.users(), [&I, Op1](const User *U) {
1807 const Instruction *UI = dyn_cast<Instruction>(U);
1808 if (!UI)
1809 return false;
1810 return match(UI,
1811 m_Select(m_Value(), m_Specific(Op1), m_Specific(&I))) ||
1812 match(UI, m_Select(m_Value(), m_Specific(&I), m_Specific(Op1)));
1813 })) {
1814 if (Value *NegOp1 = Negator::Negate(IsNegation, Op1, *this))
1815 return BinaryOperator::CreateAdd(NegOp1, Op0);
1817 if (IsNegation)
1818 return TryToNarrowDeduceFlags(); // Should have been handled in Negator!
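  // For instance, given:
  //   %neg = sub i32 0, %x
  //   %sel = select i1 %c, i32 %x, i32 %neg
  // the sub is a pure negation feeding an abs/nabs-shaped select, so it is
  // deliberately left for the select-based abs folds rather than sunk by
  // Negator; only the flag-narrowing above is attempted.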
1820 // (A*B)-(A*C) -> A*(B-C) etc
1821 if (Value *V = SimplifyUsingDistributiveLaws(I))
1822 return replaceInstUsesWith(I, V);
1824 if (I.getType()->isIntOrIntVectorTy(1))
1825 return BinaryOperator::CreateXor(Op0, Op1);
1827 // Replace (-1 - A) with (~A).
1828 if (match(Op0, m_AllOnes()))
1829 return BinaryOperator::CreateNot(Op1);
1831 // (~X) - (~Y) --> Y - X
1832 Value *X, *Y;
1833 if (match(Op0, m_Not(m_Value(X))) && match(Op1, m_Not(m_Value(Y))))
1834 return BinaryOperator::CreateSub(Y, X);
1836 // (X + -1) - Y --> ~Y + X
1837 if (match(Op0, m_OneUse(m_Add(m_Value(X), m_AllOnes()))))
1838 return BinaryOperator::CreateAdd(Builder.CreateNot(Op1), X);
1840 // Reassociate sub/add sequences to create more add instructions and
1841 // reduce dependency chains:
1842 // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
1843 Value *Z;
1844 if (match(Op0, m_OneUse(m_c_Add(m_OneUse(m_Sub(m_Value(X), m_Value(Y))),
1845 m_Value(Z))))) {
1846 Value *XZ = Builder.CreateAdd(X, Z);
1847 Value *YW = Builder.CreateAdd(Y, Op1);
1848 return BinaryOperator::CreateSub(XZ, YW);
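  // E.g. ((%x - %y) + %z) - %w becomes (%x + %z) - (%y + %w): the two adds
  // are now independent and can execute in parallel ahead of the final sub.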
1851 // ((X - Y) - Op1) --> X - (Y + Op1)
1852 if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y))))) {
1853 Value *Add = Builder.CreateAdd(Y, Op1);
1854 return BinaryOperator::CreateSub(X, Add);
1857 auto m_AddRdx = [](Value *&Vec) {
1858 return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_add>(m_Value(Vec)));
1860 Value *V0, *V1;
1861 if (match(Op0, m_AddRdx(V0)) && match(Op1, m_AddRdx(V1)) &&
1862 V0->getType() == V1->getType()) {
1863 // Difference of sums is sum of differences:
1864 // add_rdx(V0) - add_rdx(V1) --> add_rdx(V0 - V1)
1865 Value *Sub = Builder.CreateSub(V0, V1);
1866 Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_add,
1867 {Sub->getType()}, {Sub});
1868 return replaceInstUsesWith(I, Rdx);
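  // E.g. (illustrative, <4 x i32> assumed):
  //   sub (reduce_add %v0), (reduce_add %v1) --> reduce_add (sub %v0, %v1)
  // This is exact for integer adds, which reassociate freely.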
1871 if (Constant *C = dyn_cast<Constant>(Op0)) {
1872 Value *X;
1873 if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
1874 // C - (zext bool) --> bool ? C - 1 : C
1875 return SelectInst::Create(X, InstCombiner::SubOne(C), C);
1876 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
1877 // C - (sext bool) --> bool ? C + 1 : C
1878 return SelectInst::Create(X, InstCombiner::AddOne(C), C);
1880 // C - ~X == X + (1+C)
1881 if (match(Op1, m_Not(m_Value(X))))
1882 return BinaryOperator::CreateAdd(X, InstCombiner::AddOne(C));
1884 // Try to fold constant sub into select arguments.
1885 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
1886 if (Instruction *R = FoldOpIntoSelect(I, SI))
1887 return R;
1889 // Try to fold constant sub into PHI values.
1890 if (PHINode *PN = dyn_cast<PHINode>(Op1))
1891 if (Instruction *R = foldOpIntoPhi(I, PN))
1892 return R;
1894 Constant *C2;
1896 // C-(C2-X) --> X+(C-C2)
1897 if (match(Op1, m_Sub(m_ImmConstant(C2), m_Value(X))))
1898 return BinaryOperator::CreateAdd(X, ConstantExpr::getSub(C, C2));
1901 const APInt *Op0C;
1902 if (match(Op0, m_APInt(Op0C)) && Op0C->isMask()) {
1903 // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
1904 // zero.
1905 KnownBits RHSKnown = computeKnownBits(Op1, 0, &I);
1906 if ((*Op0C | RHSKnown.Zero).isAllOnesValue())
1907 return BinaryOperator::CreateXor(Op1, Op0);
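    // E.g. if Op0 is 15 (0b1111) and Op1 is known to fit in its low 4 bits,
    // then 15 - Op1 produces no borrows and merely flips those 4 bits, i.e.
    // xor Op1, 15.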
1911 Value *Y;
1912 // X-(X+Y) == -Y and X-(Y+X) == -Y
1913 if (match(Op1, m_c_Add(m_Specific(Op0), m_Value(Y))))
1914 return BinaryOperator::CreateNeg(Y);
1916 // (X-Y)-X == -Y
1917 if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
1918 return BinaryOperator::CreateNeg(Y);
1921 // (sub (or A, B) (and A, B)) --> (xor A, B)
1923 Value *A, *B;
1924 if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
1925 match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
1926 return BinaryOperator::CreateXor(A, B);
1929 // (sub (add A, B) (or A, B)) --> (and A, B)
1931 Value *A, *B;
1932 if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
1933 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
1934 return BinaryOperator::CreateAnd(A, B);
1937 // (sub (add A, B) (and A, B)) --> (or A, B)
1939 Value *A, *B;
1940 if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
1941 match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
1942 return BinaryOperator::CreateOr(A, B);
1945 // (sub (and A, B) (or A, B)) --> neg (xor A, B)
1947 Value *A, *B;
1948 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1949 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
1950 (Op0->hasOneUse() || Op1->hasOneUse()))
1951 return BinaryOperator::CreateNeg(Builder.CreateXor(A, B));
1954 // (sub (or A, B), (xor A, B)) --> (and A, B)
1956 Value *A, *B;
1957 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
1958 match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
1959 return BinaryOperator::CreateAnd(A, B);
1962 // (sub (xor A, B) (or A, B)) --> neg (and A, B)
1964 Value *A, *B;
1965 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
1966 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
1967 (Op0->hasOneUse() || Op1->hasOneUse()))
1968 return BinaryOperator::CreateNeg(Builder.CreateAnd(A, B));
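  // All six or/and/xor identities above follow from two facts:
  //   A + B == (A | B) + (A & B)  and  A | B == (A ^ B) + (A & B).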
1972 Value *Y;
1973 // ((X | Y) - X) --> (~X & Y)
1974 if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
1975 return BinaryOperator::CreateAnd(
1976 Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));
1980 // (sub (and Op1, (neg X)), Op1) --> neg (and Op1, (add X, -1))
1981 Value *X;
1982 if (match(Op0, m_OneUse(m_c_And(m_Specific(Op1),
1983 m_OneUse(m_Neg(m_Value(X))))))) {
1984 return BinaryOperator::CreateNeg(Builder.CreateAnd(
1985 Op1, Builder.CreateAdd(X, Constant::getAllOnesValue(I.getType()))));
1990 // (sub (and Op1, C), Op1) --> neg (and Op1, ~C)
1991 Constant *C;
1992 if (match(Op0, m_OneUse(m_And(m_Specific(Op1), m_Constant(C))))) {
1993 return BinaryOperator::CreateNeg(
1994 Builder.CreateAnd(Op1, Builder.CreateNot(C)));
1999 // If we have a subtraction between some value and a select between
2000 // said value and something else, sink subtraction into select hands, i.e.:
2001 // sub (select %Cond, %TrueVal, %FalseVal), %Op1
2002 // ->
2003 // select %Cond, (sub %TrueVal, %Op1), (sub %FalseVal, %Op1)
2004 // or
2005 // sub %Op0, (select %Cond, %TrueVal, %FalseVal)
2006 // ->
2007 // select %Cond, (sub %Op0, %TrueVal), (sub %Op0, %FalseVal)
2008 // This will result in a select between the new subtraction and 0.
2009 auto SinkSubIntoSelect =
2010 [Ty = I.getType()](Value *Select, Value *OtherHandOfSub,
2011 auto SubBuilder) -> Instruction * {
2012 Value *Cond, *TrueVal, *FalseVal;
2013 if (!match(Select, m_OneUse(m_Select(m_Value(Cond), m_Value(TrueVal),
2014 m_Value(FalseVal)))))
2015 return nullptr;
2016 if (OtherHandOfSub != TrueVal && OtherHandOfSub != FalseVal)
2017 return nullptr;
2018 // While it is really tempting to just create two subtractions and let
2019 // InstCombine fold one of those to 0, it isn't possible to do so
2020 // because of worklist visitation order. So ugly it is.
2021 bool OtherHandOfSubIsTrueVal = OtherHandOfSub == TrueVal;
2022 Value *NewSub = SubBuilder(OtherHandOfSubIsTrueVal ? FalseVal : TrueVal);
2023 Constant *Zero = Constant::getNullValue(Ty);
2024 SelectInst *NewSel =
2025 SelectInst::Create(Cond, OtherHandOfSubIsTrueVal ? Zero : NewSub,
2026 OtherHandOfSubIsTrueVal ? NewSub : Zero);
2027 // Preserve prof metadata if any.
2028 NewSel->copyMetadata(cast<Instruction>(*Select));
2029 return NewSel;
2031 if (Instruction *NewSel = SinkSubIntoSelect(
2032 /*Select=*/Op0, /*OtherHandOfSub=*/Op1,
2033 [Builder = &Builder, Op1](Value *OtherHandOfSelect) {
2034 return Builder->CreateSub(OtherHandOfSelect,
2035 /*OtherHandOfSub=*/Op1);
2037 return NewSel;
2038 if (Instruction *NewSel = SinkSubIntoSelect(
2039 /*Select=*/Op1, /*OtherHandOfSub=*/Op0,
2040 [Builder = &Builder, Op0](Value *OtherHandOfSelect) {
2041 return Builder->CreateSub(/*OtherHandOfSub=*/Op0,
2042 OtherHandOfSelect);
2044 return NewSel;
2047 // (X - (X & Y)) --> (X & ~Y)
2048 if (match(Op1, m_c_And(m_Specific(Op0), m_Value(Y))) &&
2049 (Op1->hasOneUse() || isa<Constant>(Y)))
2050 return BinaryOperator::CreateAnd(
2051 Op0, Builder.CreateNot(Y, Y->getName() + ".not"));
2054 // ~A - Min/Max(~A, O) -> Max/Min(A, ~O) - A
2055 // ~A - Min/Max(O, ~A) -> Max/Min(A, ~O) - A
2056 // Min/Max(~A, O) - ~A -> A - Max/Min(A, ~O)
2057 // Min/Max(O, ~A) - ~A -> A - Max/Min(A, ~O)
2058 // So long as O here is freely invertible, this will be neutral or a win.
2059 Value *LHS, *RHS, *A;
2060 Value *NotA = Op0, *MinMax = Op1;
2061 SelectPatternFlavor SPF = matchSelectPattern(MinMax, LHS, RHS).Flavor;
2062 if (!SelectPatternResult::isMinOrMax(SPF)) {
2063 NotA = Op1;
2064 MinMax = Op0;
2065 SPF = matchSelectPattern(MinMax, LHS, RHS).Flavor;
2067 if (SelectPatternResult::isMinOrMax(SPF) &&
2068 match(NotA, m_Not(m_Value(A))) && (NotA == LHS || NotA == RHS)) {
2069 if (NotA == LHS)
2070 std::swap(LHS, RHS);
2071 // LHS is now O above and expected to have at least 2 uses (the min/max).
2072 // NotA is expected to have 2 uses from the min/max and 1 from the sub.
2073 if (isFreeToInvert(LHS, !LHS->hasNUsesOrMore(3)) &&
2074 !NotA->hasNUsesOrMore(4)) {
2075 // Note: We don't generate the inverse max/min, just create the not of
2076 // it and let other folds do the rest.
2077 Value *Not = Builder.CreateNot(MinMax);
2078 if (NotA == Op0)
2079 return BinaryOperator::CreateSub(Not, A);
2080 else
2081 return BinaryOperator::CreateSub(A, Not);
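  // This rests on the identity ~A - M == ~M - A (both equal -A - 1 - M in
  // two's complement). E.g., assuming a smin: ~A - smin(~A, O) becomes
  // not(smin(~A, O)) - A, and the not later folds to smax(A, ~O).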
2086 // Optimize the difference of two pointers into the same array into a size.
2087 // &A[10] - &A[0]: we should compile this to "10".
2088 Value *LHSOp, *RHSOp;
2089 if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
2090 match(Op1, m_PtrToInt(m_Value(RHSOp))))
2091 if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
2092 I.hasNoUnsignedWrap()))
2093 return replaceInstUsesWith(I, Res);
2095 // trunc(p)-trunc(q) -> trunc(p-q)
2096 if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
2097 match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
2098 if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
2099 /* IsNUW */ false))
2100 return replaceInstUsesWith(I, Res);
2102 // Canonicalize a shifty way to code absolute value to the common pattern.
2103 // There are 2 potential commuted variants.
2104 // We're relying on the fact that we only do this transform when the shift has
2105 // exactly 2 uses and the xor has exactly 1 use (otherwise, we might increase
2106 // the instruction count).
2107 Value *A;
2108 const APInt *ShAmt;
2109 Type *Ty = I.getType();
2110 if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
2111 Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
2112 match(Op0, m_OneUse(m_c_Xor(m_Specific(A), m_Specific(Op1))))) {
2113 // B = ashr i32 A, 31 ; smear the sign bit
2114 // sub (xor A, B), B ; flip bits if negative and subtract -1 (add 1)
2115 // --> (A < 0) ? -A : A
2116 Value *Cmp = Builder.CreateICmpSLT(A, ConstantInt::getNullValue(Ty));
2117 // Copy the nuw/nsw flags from the sub to the negate.
2118 Value *Neg = Builder.CreateNeg(A, "", I.hasNoUnsignedWrap(),
2119 I.hasNoSignedWrap());
2120 return SelectInst::Create(Cmp, Neg, A);
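  // Concretely (i32 assumed):
  //   %b = ashr i32 %a, 31
  //   %x = xor i32 %a, %b
  //   %r = sub i32 %x, %b        ; the classic branchless abs
  // -->
  //   %c = icmp slt i32 %a, 0
  //   %n = sub i32 0, %a
  //   %r = select i1 %c, i32 %n, i32 %a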
2123 // If we are subtracting a low-bit masked subset of some value from an add
2124 // of that same value with no low bits changed, that is clearing some low bits
2125 // of the sum:
2126 // sub (X + AddC), (X & AndC) --> and (X + AddC), ~AndC
2127 const APInt *AddC, *AndC;
2128 if (match(Op0, m_Add(m_Value(X), m_APInt(AddC))) &&
2129 match(Op1, m_And(m_Specific(X), m_APInt(AndC)))) {
2130 unsigned BitWidth = Ty->getScalarSizeInBits();
2131 unsigned Cttz = AddC->countTrailingZeros();
2132 APInt HighMask(APInt::getHighBitsSet(BitWidth, BitWidth - Cttz));
2133 if ((HighMask & *AndC).isNullValue())
2134 return BinaryOperator::CreateAnd(Op0, ConstantInt::get(Ty, ~(*AndC)));
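  // E.g. with AddC == 16 and AndC == 7 (i32 assumed): adding 16 leaves the
  // low 4 bits of X intact, so subtracting (X & 7) just clears the low 3
  // bits of the sum:
  //   sub (add %x, 16), (and %x, 7) --> and (add %x, 16), -8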
2137 if (Instruction *V =
2138 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
2139 return V;
2141 // X - usub.sat(X, Y) => umin(X, Y)
2142 if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Specific(Op0),
2143 m_Value(Y)))))
2144 return replaceInstUsesWith(
2145 I, Builder.CreateIntrinsic(Intrinsic::umin, {I.getType()}, {Op0, Y}));
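  // Proof sketch: if X >= Y then usub.sat(X, Y) == X - Y and the result is
  // Y; otherwise usub.sat(X, Y) == 0 and the result is X. Either way that
  // is umin(X, Y).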
2147 // C - ctpop(X) => ctpop(~X) if C is bitwidth
2148 if (match(Op0, m_SpecificInt(Ty->getScalarSizeInBits())) &&
2149 match(Op1, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(X)))))
2150 return replaceInstUsesWith(
2151 I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
2152 {Builder.CreateNot(X)}));
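  // This uses ctpop(X) + ctpop(~X) == bitwidth, so bitwidth - ctpop(X) is
  // exactly ctpop(~X). E.g. for i8: 8 - ctpop(%x) --> ctpop(xor %x, -1).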
2154 return TryToNarrowDeduceFlags();
2157 /// This eliminates floating-point negation in either 'fneg(X)' or
2158 /// 'fsub(-0.0, X)' form by folding the negation into a constant operand.
2159 static Instruction *foldFNegIntoConstant(Instruction &I) {
2160 // This is limited with one-use because fneg is assumed better for
2161 // reassociation and cheaper in codegen than fmul/fdiv.
2162 // TODO: Should the m_OneUse restriction be removed?
2163 Instruction *FNegOp;
2164 if (!match(&I, m_FNeg(m_OneUse(m_Instruction(FNegOp)))))
2165 return nullptr;
2167 Value *X;
2168 Constant *C;
2170 // Fold negation into constant operand.
2171 // -(X * C) --> X * (-C)
2172 if (match(FNegOp, m_FMul(m_Value(X), m_Constant(C))))
2173 return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);
2174 // -(X / C) --> X / (-C)
2175 if (match(FNegOp, m_FDiv(m_Value(X), m_Constant(C))))
2176 return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);
2177 // -(C / X) --> (-C) / X
2178 if (match(FNegOp, m_FDiv(m_Constant(C), m_Value(X)))) {
2179 Instruction *FDiv =
2180 BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);
2182 // Intersect 'nsz' and 'ninf' because those special value exceptions may not
2183 // apply to the fdiv. Everything else propagates from the fneg.
2184 // TODO: We could propagate nsz/ninf from fdiv alone?
2185 FastMathFlags FMF = I.getFastMathFlags();
2186 FastMathFlags OpFMF = FNegOp->getFastMathFlags();
2187 FDiv->setHasNoSignedZeros(FMF.noSignedZeros() & OpFMF.noSignedZeros());
2188 FDiv->setHasNoInfs(FMF.noInfs() & OpFMF.noInfs());
2189 return FDiv;
2191 // With NSZ [ counter-example with -0.0: -(-0.0 + 0.0) != 0.0 + -0.0 ]:
2192 // -(X + C) --> -X + -C --> -C - X
2193 if (I.hasNoSignedZeros() && match(FNegOp, m_FAdd(m_Value(X), m_Constant(C))))
2194 return BinaryOperator::CreateFSubFMF(ConstantExpr::getFNeg(C), X, &I);
2196 return nullptr;
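/// Hoist a unary fneg above a one-use fmul or fdiv, e.g.
/// fneg (fmul X, Y) --> fmul (fneg X), Y, with the fneg's fast-math flags
/// applied to both new instructions.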
2199 static Instruction *hoistFNegAboveFMulFDiv(Instruction &I,
2200 InstCombiner::BuilderTy &Builder) {
2201 Value *FNeg;
2202 if (!match(&I, m_FNeg(m_Value(FNeg))))
2203 return nullptr;
2205 Value *X, *Y;
2206 if (match(FNeg, m_OneUse(m_FMul(m_Value(X), m_Value(Y)))))
2207 return BinaryOperator::CreateFMulFMF(Builder.CreateFNegFMF(X, &I), Y, &I);
2209 if (match(FNeg, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))))
2210 return BinaryOperator::CreateFDivFMF(Builder.CreateFNegFMF(X, &I), Y, &I);
2212 return nullptr;
2215 Instruction *InstCombinerImpl::visitFNeg(UnaryOperator &I) {
2216 Value *Op = I.getOperand(0);
2218 if (Value *V = SimplifyFNegInst(Op, I.getFastMathFlags(),
2219 getSimplifyQuery().getWithInstruction(&I)))
2220 return replaceInstUsesWith(I, V);
2222 if (Instruction *X = foldFNegIntoConstant(I))
2223 return X;
2225 Value *X, *Y;
2227 // If we can ignore the sign of zeros: -(X - Y) --> (Y - X)
2228 if (I.hasNoSignedZeros() &&
2229 match(Op, m_OneUse(m_FSub(m_Value(X), m_Value(Y)))))
2230 return BinaryOperator::CreateFSubFMF(Y, X, &I);
2232 if (Instruction *R = hoistFNegAboveFMulFDiv(I, Builder))
2233 return R;
2235 // Try to eliminate fneg if at least 1 arm of the select is negated.
2236 Value *Cond;
2237 if (match(Op, m_OneUse(m_Select(m_Value(Cond), m_Value(X), m_Value(Y))))) {
2238 // Unlike most transforms, this one is not safe to propagate nsz unless
2239 // it is present on the original select. (We are conservatively intersecting
2240 // the nsz flags from the select and root fneg instruction.)
2241 auto propagateSelectFMF = [&](SelectInst *S) {
2242 S->copyFastMathFlags(&I);
2243 if (auto *OldSel = dyn_cast<SelectInst>(Op))
2244 if (!OldSel->hasNoSignedZeros())
2245 S->setHasNoSignedZeros(false);
2247 // -(Cond ? -P : Y) --> Cond ? P : -Y
2248 Value *P;
2249 if (match(X, m_FNeg(m_Value(P)))) {
2250 Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");
2251 SelectInst *NewSel = SelectInst::Create(Cond, P, NegY);
2252 propagateSelectFMF(NewSel);
2253 return NewSel;
2255 // -(Cond ? X : -P) --> Cond ? -X : P
2256 if (match(Y, m_FNeg(m_Value(P)))) {
2257 Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");
2258 SelectInst *NewSel = SelectInst::Create(Cond, NegX, P);
2259 propagateSelectFMF(NewSel);
2260 return NewSel;
2264 return nullptr;
2267 Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) {
2268 if (Value *V = SimplifyFSubInst(I.getOperand(0), I.getOperand(1),
2269 I.getFastMathFlags(),
2270 getSimplifyQuery().getWithInstruction(&I)))
2271 return replaceInstUsesWith(I, V);
2273 if (Instruction *X = foldVectorBinop(I))
2274 return X;
2276 // Subtraction from -0.0 is the canonical form of fneg.
2277 // fsub -0.0, X ==> fneg X
2278 // fsub nsz 0.0, X ==> fneg nsz X
2280 // FIXME: This matcher does not respect FTZ or DAZ yet:
2281 // fsub -0.0, Denorm ==> +-0
2282 // fneg Denorm ==> -Denorm
2283 Value *Op;
2284 if (match(&I, m_FNeg(m_Value(Op))))
2285 return UnaryOperator::CreateFNegFMF(Op, &I);
2287 if (Instruction *X = foldFNegIntoConstant(I))
2288 return X;
2290 if (Instruction *R = hoistFNegAboveFMulFDiv(I, Builder))
2291 return R;
2293 Value *X, *Y;
2294 Constant *C;
2296 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2297 // If Op0 is not -0.0 or we can ignore -0.0: Z - (X - Y) --> Z + (Y - X)
2298 // Canonicalize to fadd to make analysis easier.
2299 // This can also help codegen because fadd is commutative.
2300 // Note that if this fsub was really an fneg, the fadd with -0.0 will get
2301 // killed later. We still limit that particular transform with 'hasOneUse'
2302 // because an fneg is assumed better/cheaper than a generic fsub.
2303 if (I.hasNoSignedZeros() || CannotBeNegativeZero(Op0, SQ.TLI)) {
2304 if (match(Op1, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
2305 Value *NewSub = Builder.CreateFSubFMF(Y, X, &I);
2306 return BinaryOperator::CreateFAddFMF(Op0, NewSub, &I);
2310 // (-X) - Op1 --> -(X + Op1)
2311 if (I.hasNoSignedZeros() && !isa<ConstantExpr>(Op0) &&
2312 match(Op0, m_OneUse(m_FNeg(m_Value(X))))) {
2313 Value *FAdd = Builder.CreateFAddFMF(X, Op1, &I);
2314 return UnaryOperator::CreateFNegFMF(FAdd, &I);
2317 if (isa<Constant>(Op0))
2318 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
2319 if (Instruction *NV = FoldOpIntoSelect(I, SI))
2320 return NV;
2322 // X - C --> X + (-C)
2323 // But don't transform constant expressions because there's an inverse fold
2324 // for X + (-Y) --> X - Y.
2325 if (match(Op1, m_ImmConstant(C)))
2326 return BinaryOperator::CreateFAddFMF(Op0, ConstantExpr::getFNeg(C), &I);
2328 // X - (-Y) --> X + Y
2329 if (match(Op1, m_FNeg(m_Value(Y))))
2330 return BinaryOperator::CreateFAddFMF(Op0, Y, &I);
2332 // Similar to above, but look through a cast of the negated value:
2333 // X - (fptrunc(-Y)) --> X + fptrunc(Y)
2334 Type *Ty = I.getType();
2335 if (match(Op1, m_OneUse(m_FPTrunc(m_FNeg(m_Value(Y))))))
2336 return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPTrunc(Y, Ty), &I);
2338 // X - (fpext(-Y)) --> X + fpext(Y)
2339 if (match(Op1, m_OneUse(m_FPExt(m_FNeg(m_Value(Y))))))
2340 return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPExt(Y, Ty), &I);
2342 // Similar to above, but look through fmul/fdiv of the negated value:
2343 // Op0 - (-X * Y) --> Op0 + (X * Y)
2344 // Op0 - (Y * -X) --> Op0 + (X * Y)
2345 if (match(Op1, m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))))) {
2346 Value *FMul = Builder.CreateFMulFMF(X, Y, &I);
2347 return BinaryOperator::CreateFAddFMF(Op0, FMul, &I);
2349 // Op0 - (-X / Y) --> Op0 + (X / Y)
2350 // Op0 - (X / -Y) --> Op0 + (X / Y)
2351 if (match(Op1, m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y)))) ||
2352 match(Op1, m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))))) {
2353 Value *FDiv = Builder.CreateFDivFMF(X, Y, &I);
2354 return BinaryOperator::CreateFAddFMF(Op0, FDiv, &I);
2357 // Handle special cases for FSub with selects feeding the operation
2358 if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
2359 return replaceInstUsesWith(I, V);
2361 if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
2362 // (Y - X) - Y --> -X
2363 if (match(Op0, m_FSub(m_Specific(Op1), m_Value(X))))
2364 return UnaryOperator::CreateFNegFMF(X, &I);
2366 // Y - (X + Y) --> -X
2367 // Y - (Y + X) --> -X
2368 if (match(Op1, m_c_FAdd(m_Specific(Op0), m_Value(X))))
2369 return UnaryOperator::CreateFNegFMF(X, &I);
2371 // (X * C) - X --> X * (C - 1.0)
2372 if (match(Op0, m_FMul(m_Specific(Op1), m_Constant(C)))) {
2373 Constant *CSubOne = ConstantExpr::getFSub(C, ConstantFP::get(Ty, 1.0));
2374 return BinaryOperator::CreateFMulFMF(Op1, CSubOne, &I);
2376 // X - (X * C) --> X * (1.0 - C)
2377 if (match(Op1, m_FMul(m_Specific(Op0), m_Constant(C)))) {
2378 Constant *OneSubC = ConstantExpr::getFSub(ConstantFP::get(Ty, 1.0), C);
2379 return BinaryOperator::CreateFMulFMF(Op0, OneSubC, &I);
2382 // Reassociate fsub/fadd sequences to create more fadd instructions and
2383 // reduce dependency chains:
2384 // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
2385 Value *Z;
2386 if (match(Op0, m_OneUse(m_c_FAdd(m_OneUse(m_FSub(m_Value(X), m_Value(Y))),
2387 m_Value(Z))))) {
2388 Value *XZ = Builder.CreateFAddFMF(X, Z, &I);
2389 Value *YW = Builder.CreateFAddFMF(Y, Op1, &I);
2390 return BinaryOperator::CreateFSubFMF(XZ, YW, &I);
2393 auto m_FaddRdx = [](Value *&Sum, Value *&Vec) {
2394 return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(m_Value(Sum),
2395 m_Value(Vec)));
2397 Value *A0, *A1, *V0, *V1;
2398 if (match(Op0, m_FaddRdx(A0, V0)) && match(Op1, m_FaddRdx(A1, V1)) &&
2399 V0->getType() == V1->getType()) {
2400 // Difference of sums is sum of differences:
2401 // add_rdx(A0, V0) - add_rdx(A1, V1) --> add_rdx(A0, V0 - V1) - A1
2402 Value *Sub = Builder.CreateFSubFMF(V0, V1, &I);
2403 Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
2404 {Sub->getType()}, {A0, Sub}, &I);
2405 return BinaryOperator::CreateFSubFMF(Rdx, A1, &I);
2408 if (Instruction *F = factorizeFAddFSub(I, Builder))
2409 return F;
2411 // TODO: This performs reassociative folds for FP ops. Some fraction of the
2412 // functionality has been subsumed by simple pattern matching here and in
2413 // InstSimplify. We should let a dedicated reassociation pass handle more
2414 // complex pattern matching and remove this from InstCombine.
2415 if (Value *V = FAddCombine(Builder).simplify(&I))
2416 return replaceInstUsesWith(I, V);
2418 // (X - Y) - Op1 --> X - (Y + Op1)
2419 if (match(Op0, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
2420 Value *FAdd = Builder.CreateFAddFMF(Y, Op1, &I);
2421 return BinaryOperator::CreateFSubFMF(X, FAdd, &I);
2425 return nullptr;