1 //===- InstCombineAndOrXor.cpp --------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitAnd, visitOr, and visitXor functions.
11 //===----------------------------------------------------------------------===//
13 #include "InstCombineInternal.h"
14 #include "llvm/Analysis/CmpInstAnalysis.h"
15 #include "llvm/Analysis/InstructionSimplify.h"
16 #include "llvm/Transforms/Utils/Local.h"
17 #include "llvm/IR/ConstantRange.h"
18 #include "llvm/IR/Intrinsics.h"
19 #include "llvm/IR/PatternMatch.h"
20 using namespace llvm;
21 using namespace PatternMatch;
23 #define DEBUG_TYPE "instcombine"
25 /// Similar to getICmpCode but for FCmpInst. This encodes a fcmp predicate into
26 /// a four bit mask.
27 static unsigned getFCmpCode(FCmpInst::Predicate CC) {
28 assert(FCmpInst::FCMP_FALSE <= CC && CC <= FCmpInst::FCMP_TRUE &&
29 "Unexpected FCmp predicate!");
30 // Take advantage of the bit pattern of FCmpInst::Predicate here.
31 // U L G E
32 static_assert(FCmpInst::FCMP_FALSE == 0, ""); // 0 0 0 0
33 static_assert(FCmpInst::FCMP_OEQ == 1, ""); // 0 0 0 1
34 static_assert(FCmpInst::FCMP_OGT == 2, ""); // 0 0 1 0
35 static_assert(FCmpInst::FCMP_OGE == 3, ""); // 0 0 1 1
36 static_assert(FCmpInst::FCMP_OLT == 4, ""); // 0 1 0 0
37 static_assert(FCmpInst::FCMP_OLE == 5, ""); // 0 1 0 1
38 static_assert(FCmpInst::FCMP_ONE == 6, ""); // 0 1 1 0
39 static_assert(FCmpInst::FCMP_ORD == 7, ""); // 0 1 1 1
40 static_assert(FCmpInst::FCMP_UNO == 8, ""); // 1 0 0 0
41 static_assert(FCmpInst::FCMP_UEQ == 9, ""); // 1 0 0 1
42 static_assert(FCmpInst::FCMP_UGT == 10, ""); // 1 0 1 0
43 static_assert(FCmpInst::FCMP_UGE == 11, ""); // 1 0 1 1
44 static_assert(FCmpInst::FCMP_ULT == 12, ""); // 1 1 0 0
45 static_assert(FCmpInst::FCMP_ULE == 13, ""); // 1 1 0 1
46 static_assert(FCmpInst::FCMP_UNE == 14, ""); // 1 1 1 0
47 static_assert(FCmpInst::FCMP_TRUE == 15, ""); // 1 1 1 1
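// Note that with this encoding, the bitwise AND/OR of two predicate codes is
// the code of the conjunction/disjunction of the comparisons, e.g.
// OGT (0010) | OEQ (0001) == OGE (0011), and OLT (0100) & ULE (1101) == OLT.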
48 return CC;
51 /// This is the complement of getICmpCode, which turns an opcode and two
52 /// operands into either a constant true or false, or a brand new ICmp
53 /// instruction. The sign is passed in to determine which kind of predicate to
54 /// use in the new icmp instruction.
55 static Value *getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS,
56 InstCombiner::BuilderTy &Builder) {
57 ICmpInst::Predicate NewPred;
58 if (Constant *TorF = getPredForICmpCode(Code, Sign, LHS->getType(), NewPred))
59 return TorF;
60 return Builder.CreateICmp(NewPred, LHS, RHS);
63 /// This is the complement of getFCmpCode, which turns an opcode and two
64 /// operands into either a FCmp instruction, or a true/false constant.
65 static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
66 InstCombiner::BuilderTy &Builder) {
67 const auto Pred = static_cast<FCmpInst::Predicate>(Code);
68 assert(FCmpInst::FCMP_FALSE <= Pred && Pred <= FCmpInst::FCMP_TRUE &&
69 "Unexpected FCmp predicate!");
70 if (Pred == FCmpInst::FCMP_FALSE)
71 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
72 if (Pred == FCmpInst::FCMP_TRUE)
73 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
74 return Builder.CreateFCmp(Pred, LHS, RHS);
77 /// Transform BITWISE_OP(BSWAP(A),BSWAP(B)) or
78 /// BITWISE_OP(BSWAP(A), Constant) to BSWAP(BITWISE_OP(A, B))
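/// For example, with i32 operands:
///   or (bswap X), 0xFF --> bswap (or X, 0xFF000000)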
79 /// \param I Binary operator to transform.
80 /// \return Pointer to node that must replace the original binary operator, or
81 /// null pointer if no transformation was made.
82 static Value *SimplifyBSwap(BinaryOperator &I,
83 InstCombiner::BuilderTy &Builder) {
84 assert(I.isBitwiseLogicOp() && "Unexpected opcode for bswap simplifying");
86 Value *OldLHS = I.getOperand(0);
87 Value *OldRHS = I.getOperand(1);
89 Value *NewLHS;
90 if (!match(OldLHS, m_BSwap(m_Value(NewLHS))))
91 return nullptr;
93 Value *NewRHS;
94 const APInt *C;
96 if (match(OldRHS, m_BSwap(m_Value(NewRHS)))) {
97 // OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
98 if (!OldLHS->hasOneUse() && !OldRHS->hasOneUse())
99 return nullptr;
100 // NewRHS initialized by the matcher.
101 } else if (match(OldRHS, m_APInt(C))) {
102 // OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
103 if (!OldLHS->hasOneUse())
104 return nullptr;
105 NewRHS = ConstantInt::get(I.getType(), C->byteSwap());
106 } else
107 return nullptr;
109 Value *BinOp = Builder.CreateBinOp(I.getOpcode(), NewLHS, NewRHS);
110 Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap,
111 I.getType());
112 return Builder.CreateCall(F, BinOp);
115 /// This handles expressions of the form ((val OP C1) & C2). Where
116 /// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'.
117 Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
118 ConstantInt *OpRHS,
119 ConstantInt *AndRHS,
120 BinaryOperator &TheAnd) {
121 Value *X = Op->getOperand(0);
123 switch (Op->getOpcode()) {
124 default: break;
125 case Instruction::Add:
126 if (Op->hasOneUse()) {
127 // Adding a one to a single bit bit-field should be turned into an XOR
128 // of the bit. First thing to check is to see if this AND is with a
129 // single bit constant.
130 const APInt &AndRHSV = AndRHS->getValue();
132 // If there is only one bit set.
133 if (AndRHSV.isPowerOf2()) {
134 // Ok, at this point, we know that we are masking the result of the
135 // ADD down to exactly one bit. If the constant we are adding has
136 // no bits set below this bit, then we can eliminate the ADD.
137 const APInt& AddRHS = OpRHS->getValue();
139 // Check to see if any bits below the one bit set in AndRHSV are set.
140 if ((AddRHS & (AndRHSV - 1)).isNullValue()) {
141 // If not, the only thing that can affect the output of the AND is
142 // the bit specified by AndRHSV. If that bit is set, the effect of
143 // the XOR is to toggle the bit. If it is clear, then the ADD has
144 // no effect.
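// For example, with the single-bit mask 4:
//   ((X + 8) & 4) --> (X & 4)         (the masked bit of the add constant is clear)
//   ((X + 4) & 4) --> ((X & 4) ^ 4)   (the masked bit of the add constant is set)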
145 if ((AddRHS & AndRHSV).isNullValue()) { // Bit is not set, noop
146 TheAnd.setOperand(0, X);
147 return &TheAnd;
148 } else {
149 // Pull the XOR out of the AND.
150 Value *NewAnd = Builder.CreateAnd(X, AndRHS);
151 NewAnd->takeName(Op);
152 return BinaryOperator::CreateXor(NewAnd, AndRHS);
157 break;
159 return nullptr;
162 /// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
163 /// (V < Lo || V >= Hi). This method expects that Lo <= Hi. IsSigned indicates
164 /// whether to treat V, Lo, and Hi as signed or not.
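/// For example, insertRangeTest(V, 5, 10, /*isSigned=*/false, /*Inside=*/true)
/// emits (V - 5) u< 5, the single-compare form of V u>= 5 && V u< 10.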
165 Value *InstCombiner::insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
166 bool isSigned, bool Inside) {
167 assert((isSigned ? Lo.sle(Hi) : Lo.ule(Hi)) &&
168 "Lo is not <= Hi in range emission code!");
170 Type *Ty = V->getType();
171 if (Lo == Hi)
172 return Inside ? ConstantInt::getFalse(Ty) : ConstantInt::getTrue(Ty);
174 // V >= Min && V < Hi --> V < Hi
175 // V < Min || V >= Hi --> V >= Hi
176 ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
177 if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
178 Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
179 return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
182 // V >= Lo && V < Hi --> V - Lo u< Hi - Lo
183 // V < Lo || V >= Hi --> V - Lo u>= Hi - Lo
184 Value *VMinusLo =
185 Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
186 Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
187 return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
190 /// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
191 /// that can be simplified.
192 /// One of A and B is considered the mask. The other is the value. This is
193 /// described as the "AMask" or "BMask" part of the enum. If the enum contains
194 /// only "Mask", then both A and B can be considered masks. If A is the mask,
195 /// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
196 /// If both A and C are constants, this proof is also easy.
197 /// For the following explanations, we assume that A is the mask.
199 /// "AllOnes" declares that the comparison is true only if (A & B) == A or all
200 /// bits of A are set in B.
201 /// Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
203 /// "AllZeros" declares that the comparison is true only if (A & B) == 0 or all
204 /// bits of A are cleared in B.
205 /// Example: (icmp eq (A & 3), 0) -> Mask_AllZeros
207 /// "Mixed" declares that (A & B) == C and C might or might not contain any
208 /// number of one bits and zero bits.
209 /// Example: (icmp eq (A & 3), 1) -> AMask_Mixed
211 /// "Not" means that in above descriptions "==" should be replaced by "!=".
212 /// Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
214 /// If the mask A contains a single bit, then the following is equivalent:
215 /// (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
216 /// (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
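/// For example, with A == 4: (icmp eq (X & 4), 4) is (icmp ne (X & 4), 0).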
217 enum MaskedICmpType {
218 AMask_AllOnes = 1,
219 AMask_NotAllOnes = 2,
220 BMask_AllOnes = 4,
221 BMask_NotAllOnes = 8,
222 Mask_AllZeros = 16,
223 Mask_NotAllZeros = 32,
224 AMask_Mixed = 64,
225 AMask_NotMixed = 128,
226 BMask_Mixed = 256,
227 BMask_NotMixed = 512
230 /// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
231 /// satisfies.
232 static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
233 ICmpInst::Predicate Pred) {
234 ConstantInt *ACst = dyn_cast<ConstantInt>(A);
235 ConstantInt *BCst = dyn_cast<ConstantInt>(B);
236 ConstantInt *CCst = dyn_cast<ConstantInt>(C);
237 bool IsEq = (Pred == ICmpInst::ICMP_EQ);
238 bool IsAPow2 = (ACst && !ACst->isZero() && ACst->getValue().isPowerOf2());
239 bool IsBPow2 = (BCst && !BCst->isZero() && BCst->getValue().isPowerOf2());
240 unsigned MaskVal = 0;
241 if (CCst && CCst->isZero()) {
242 // if C is zero, then both A and B qualify as mask
243 MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed)
244 : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed));
245 if (IsAPow2)
246 MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed)
247 : (AMask_AllOnes | AMask_Mixed));
248 if (IsBPow2)
249 MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed)
250 : (BMask_AllOnes | BMask_Mixed));
251 return MaskVal;
254 if (A == C) {
255 MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed)
256 : (AMask_NotAllOnes | AMask_NotMixed));
257 if (IsAPow2)
258 MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed)
259 : (Mask_AllZeros | AMask_Mixed));
260 } else if (ACst && CCst && ConstantExpr::getAnd(ACst, CCst) == CCst) {
261 MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed);
264 if (B == C) {
265 MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed)
266 : (BMask_NotAllOnes | BMask_NotMixed));
267 if (IsBPow2)
268 MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed)
269 : (Mask_AllZeros | BMask_Mixed));
270 } else if (BCst && CCst && ConstantExpr::getAnd(BCst, CCst) == CCst) {
271 MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed);
274 return MaskVal;
277 /// Convert an analysis of a masked ICmp into its equivalent if all boolean
278 /// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
279 /// is adjacent to the corresponding normal flag (recording ==), this just
280 /// involves swapping those bits over.
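/// For example, conjugateICmpMask(AMask_AllOnes | BMask_Mixed) returns
/// AMask_NotAllOnes | BMask_NotMixed, and vice versa.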
281 static unsigned conjugateICmpMask(unsigned Mask) {
282 unsigned NewMask;
283 NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
284 AMask_Mixed | BMask_Mixed))
285 << 1;
287 NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
288 AMask_NotMixed | BMask_NotMixed))
289 >> 1;
291 return NewMask;
294 // Adapts the external decomposeBitTestICmp for local use.
295 static bool decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate &Pred,
296 Value *&X, Value *&Y, Value *&Z) {
297 APInt Mask;
298 if (!llvm::decomposeBitTestICmp(LHS, RHS, Pred, X, Mask))
299 return false;
301 Y = ConstantInt::get(X->getType(), Mask);
302 Z = ConstantInt::get(X->getType(), 0);
303 return true;
306 /// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
307 /// Return the pattern classes (from MaskedICmpType) for the left hand side and
308 /// the right hand side as a pair.
309 /// LHS and RHS are the left hand side and the right hand side ICmps and PredL
310 /// and PredR are their predicates, respectively.
311 static
312 Optional<std::pair<unsigned, unsigned>>
313 getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C,
314 Value *&D, Value *&E, ICmpInst *LHS,
315 ICmpInst *RHS,
316 ICmpInst::Predicate &PredL,
317 ICmpInst::Predicate &PredR) {
318 // Vectors are not (yet?) supported. Don't support pointers either.
319 if (!LHS->getOperand(0)->getType()->isIntegerTy() ||
320 !RHS->getOperand(0)->getType()->isIntegerTy())
321 return None;
323 // Here comes the tricky part:
324 // LHS might be of the form L11 & L12 == X, X == L21 & L22,
325 // and L11 & L12 == L21 & L22. The same goes for RHS.
326 // Now we must find those components L** and R**, that are equal, so
327 // that we can extract the parameters A, B, C, D, and E for the canonical
328 // form above.
329 Value *L1 = LHS->getOperand(0);
330 Value *L2 = LHS->getOperand(1);
331 Value *L11, *L12, *L21, *L22;
332 // Check whether the icmp can be decomposed into a bit test.
333 if (decomposeBitTestICmp(L1, L2, PredL, L11, L12, L2)) {
334 L21 = L22 = L1 = nullptr;
335 } else {
336 // Look for ANDs in the LHS icmp.
337 if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
338 // Any icmp can be viewed as being trivially masked; if it allows us to
339 // remove one, it's worth it.
340 L11 = L1;
341 L12 = Constant::getAllOnesValue(L1->getType());
344 if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
345 L21 = L2;
346 L22 = Constant::getAllOnesValue(L2->getType());
350 // Bail if LHS was an icmp that can't be decomposed into an equality.
351 if (!ICmpInst::isEquality(PredL))
352 return None;
354 Value *R1 = RHS->getOperand(0);
355 Value *R2 = RHS->getOperand(1);
356 Value *R11, *R12;
357 bool Ok = false;
358 if (decomposeBitTestICmp(R1, R2, PredR, R11, R12, R2)) {
359 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
360 A = R11;
361 D = R12;
362 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
363 A = R12;
364 D = R11;
365 } else {
366 return None;
368 E = R2;
369 R1 = nullptr;
370 Ok = true;
371 } else {
372 if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
373 // As before, model no mask as a trivial mask if it'll let us do an
374 // optimization.
375 R11 = R1;
376 R12 = Constant::getAllOnesValue(R1->getType());
379 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
380 A = R11;
381 D = R12;
382 E = R2;
383 Ok = true;
384 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
385 A = R12;
386 D = R11;
387 E = R2;
388 Ok = true;
392 // Bail if RHS was an icmp that can't be decomposed into an equality.
393 if (!ICmpInst::isEquality(PredR))
394 return None;
396 // Look for ANDs on the right side of the RHS icmp.
397 if (!Ok) {
398 if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) {
399 R11 = R2;
400 R12 = Constant::getAllOnesValue(R2->getType());
403 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
404 A = R11;
405 D = R12;
406 E = R1;
407 Ok = true;
408 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
409 A = R12;
410 D = R11;
411 E = R1;
412 Ok = true;
413 } else {
414 return None;
417 if (!Ok)
418 return None;
420 if (L11 == A) {
421 B = L12;
422 C = L2;
423 } else if (L12 == A) {
424 B = L11;
425 C = L2;
426 } else if (L21 == A) {
427 B = L22;
428 C = L1;
429 } else if (L22 == A) {
430 B = L21;
431 C = L1;
434 unsigned LeftType = getMaskedICmpType(A, B, C, PredL);
435 unsigned RightType = getMaskedICmpType(A, D, E, PredR);
436 return Optional<std::pair<unsigned, unsigned>>(std::make_pair(LeftType, RightType));
439 /// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single
440 /// (icmp(A & X) ==/!= Y), where the left-hand side is of type Mask_NotAllZeros
441 /// and the right hand side is of type BMask_Mixed. For example,
442 /// (icmp (A & 12) != 0) & (icmp (A & 15) == 8) -> (icmp (A & 15) == 8).
443 static Value * foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
444 ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
445 Value *A, Value *B, Value *C, Value *D, Value *E,
446 ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
447 llvm::InstCombiner::BuilderTy &Builder) {
448 // We are given the canonical form:
449 // (icmp ne (A & B), 0) & (icmp eq (A & D), E).
450 // where D & E == E.
452 // If IsAnd is false, we get it in negated form:
453 // (icmp eq (A & B), 0) | (icmp ne (A & D), E) ->
454 // !((icmp ne (A & B), 0) & (icmp eq (A & D), E)).
456 // We currently handle the case where B, C, D, and E are all constants.
458 ConstantInt *BCst = dyn_cast<ConstantInt>(B);
459 if (!BCst)
460 return nullptr;
461 ConstantInt *CCst = dyn_cast<ConstantInt>(C);
462 if (!CCst)
463 return nullptr;
464 ConstantInt *DCst = dyn_cast<ConstantInt>(D);
465 if (!DCst)
466 return nullptr;
467 ConstantInt *ECst = dyn_cast<ConstantInt>(E);
468 if (!ECst)
469 return nullptr;
471 ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
473 // Update E to the canonical form when D is a power of two and RHS is
474 // canonicalized as,
475 // (icmp ne (A & D), 0) -> (icmp eq (A & D), D) or
476 // (icmp ne (A & D), D) -> (icmp eq (A & D), 0).
477 if (PredR != NewCC)
478 ECst = cast<ConstantInt>(ConstantExpr::getXor(DCst, ECst));
480 // If B or D is zero, skip: LHS or RHS can then be trivially folded by
481 // other folding rules, and this pattern won't apply any more.
482 if (BCst->getValue() == 0 || DCst->getValue() == 0)
483 return nullptr;
485 // If B and D don't intersect, ie. (B & D) == 0, no folding because we can't
486 // deduce anything from it.
487 // For example,
488 // (icmp ne (A & 12), 0) & (icmp eq (A & 3), 1) -> no folding.
489 if ((BCst->getValue() & DCst->getValue()) == 0)
490 return nullptr;
492 // If the following two conditions are met:
494 // 1. mask B covers only a single bit that's not covered by mask D, that is,
495 // (B & (B ^ D)) is a power of 2 (in other words, B minus the intersection of
496 // B and D has only one bit set) and,
498 // 2. RHS (and E) indicates that the rest of B's bits are zero (in other
499 // words, the intersection of B and D is zero), that is, ((B & D) & E) == 0
501 // then that single bit in B must be one and thus the whole expression can be
502 // folded to
503 // (A & (B | D)) == (B & (B ^ D)) | E.
505 // For example,
506 // (icmp ne (A & 12), 0) & (icmp eq (A & 7), 1) -> (icmp eq (A & 15), 9)
507 // (icmp ne (A & 15), 0) & (icmp eq (A & 7), 0) -> (icmp eq (A & 15), 8)
508 if ((((BCst->getValue() & DCst->getValue()) & ECst->getValue()) == 0) &&
509 (BCst->getValue() & (BCst->getValue() ^ DCst->getValue())).isPowerOf2()) {
510 APInt BorD = BCst->getValue() | DCst->getValue();
511 APInt BandBxorDorE = (BCst->getValue() & (BCst->getValue() ^ DCst->getValue())) |
512 ECst->getValue();
513 Value *NewMask = ConstantInt::get(BCst->getType(), BorD);
514 Value *NewMaskedValue = ConstantInt::get(BCst->getType(), BandBxorDorE);
515 Value *NewAnd = Builder.CreateAnd(A, NewMask);
516 return Builder.CreateICmp(NewCC, NewAnd, NewMaskedValue);
519 auto IsSubSetOrEqual = [](ConstantInt *C1, ConstantInt *C2) {
520 return (C1->getValue() & C2->getValue()) == C1->getValue();
522 auto IsSuperSetOrEqual = [](ConstantInt *C1, ConstantInt *C2) {
523 return (C1->getValue() & C2->getValue()) == C2->getValue();
526 // In the following, we consider only the cases where B is a superset of D, B
527 // is a subset of D, or B == D because otherwise there's at least one bit
528 // covered by B but not D, in which case we can't deduce much from it, so
529 // no folding (aside from the single must-be-one bit case right above.)
530 // For example,
531 // (icmp ne (A & 14), 0) & (icmp eq (A & 3), 1) -> no folding.
532 if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst))
533 return nullptr;
535 // At this point, either B is a superset of D, B is a subset of D or B == D.
537 // If E is zero, if B is a subset of (or equal to) D, LHS and RHS contradict
538 // and the whole expression becomes false (or true if negated), otherwise, no
539 // folding.
540 // For example,
541 // (icmp ne (A & 3), 0) & (icmp eq (A & 7), 0) -> false.
542 // (icmp ne (A & 15), 0) & (icmp eq (A & 3), 0) -> no folding.
543 if (ECst->isZero()) {
544 if (IsSubSetOrEqual(BCst, DCst))
545 return ConstantInt::get(LHS->getType(), !IsAnd);
546 return nullptr;
549 // At this point, B, D, E aren't zero and (B & D) == B, (B & D) == D or B ==
550 // D. If B is a superset of (or equal to) D, since E is not zero, LHS is
551 // subsumed by RHS (RHS implies LHS.) So the whole expression becomes
552 // RHS. For example,
553 // (icmp ne (A & 255), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
554 // (icmp ne (A & 15), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
555 if (IsSuperSetOrEqual(BCst, DCst))
556 return RHS;
557 // Otherwise, B is a subset of D. If B and E have a common bit set,
558 // ie. (B & E) != 0, then LHS is subsumed by RHS. For example.
559 // (icmp ne (A & 12), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
560 assert(IsSubSetOrEqual(BCst, DCst) && "Precondition due to above code");
561 if ((BCst->getValue() & ECst->getValue()) != 0)
562 return RHS;
563 // Otherwise, LHS and RHS contradict and the whole expression becomes false
564 // (or true if negated.) For example,
565 // (icmp ne (A & 7), 0) & (icmp eq (A & 15), 8) -> false.
566 // (icmp ne (A & 6), 0) & (icmp eq (A & 15), 8) -> false.
567 return ConstantInt::get(LHS->getType(), !IsAnd);
570 /// Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single
571 /// (icmp(A & X) ==/!= Y), where the left-hand side and the right hand side
572 /// aren't of the common mask pattern type.
573 static Value *foldLogOpOfMaskedICmpsAsymmetric(
574 ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
575 Value *A, Value *B, Value *C, Value *D, Value *E,
576 ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
577 unsigned LHSMask, unsigned RHSMask,
578 llvm::InstCombiner::BuilderTy &Builder) {
579 assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
580 "Expected equality predicates for masked type of icmps.");
581 // Handle Mask_NotAllZeros-BMask_Mixed cases.
582 // (icmp ne/eq (A & B), C) &/| (icmp eq/ne (A & D), E), or
583 // (icmp eq/ne (A & B), C) &/| (icmp ne/eq (A & D), E)
584 // which gets swapped to
585 // (icmp ne/eq (A & D), E) &/| (icmp eq/ne (A & B), C).
586 if (!IsAnd) {
587 LHSMask = conjugateICmpMask(LHSMask);
588 RHSMask = conjugateICmpMask(RHSMask);
590 if ((LHSMask & Mask_NotAllZeros) && (RHSMask & BMask_Mixed)) {
591 if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
592 LHS, RHS, IsAnd, A, B, C, D, E,
593 PredL, PredR, Builder)) {
594 return V;
596 } else if ((LHSMask & BMask_Mixed) && (RHSMask & Mask_NotAllZeros)) {
597 if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
598 RHS, LHS, IsAnd, A, D, E, B, C,
599 PredR, PredL, Builder)) {
600 return V;
603 return nullptr;
606 /// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
607 /// into a single (icmp(A & X) ==/!= Y).
608 static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
609 llvm::InstCombiner::BuilderTy &Builder) {
610 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
611 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
612 Optional<std::pair<unsigned, unsigned>> MaskPair =
613 getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR);
614 if (!MaskPair)
615 return nullptr;
616 assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
617 "Expected equality predicates for masked type of icmps.");
618 unsigned LHSMask = MaskPair->first;
619 unsigned RHSMask = MaskPair->second;
620 unsigned Mask = LHSMask & RHSMask;
621 if (Mask == 0) {
622 // Even if the two sides don't share a common pattern, check if folding can
623 // still happen.
624 if (Value *V = foldLogOpOfMaskedICmpsAsymmetric(
625 LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask,
626 Builder))
627 return V;
628 return nullptr;
631 // In full generality:
632 // (icmp (A & B) Op C) | (icmp (A & D) Op E)
633 // == ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
635 // If the latter can be converted into (icmp (A & X) Op Y) then the former is
636 // equivalent to (icmp (A & X) !Op Y).
638 // Therefore, we can pretend for the rest of this function that we're dealing
639 // with the conjunction, provided we flip the sense of any comparisons (both
640 // input and output).
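// For example:
//   (icmp ne (A & B), 0) | (icmp ne (A & D), 0)
//     == !((icmp eq (A & B), 0) & (icmp eq (A & D), 0))
//     -> !(icmp eq (A & (B|D)), 0) == (icmp ne (A & (B|D)), 0)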
642 // In most cases we're going to produce an EQ for the "&&" case.
643 ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
644 if (!IsAnd) {
645 // Convert the masking analysis into its equivalent with negated
646 // comparisons.
647 Mask = conjugateICmpMask(Mask);
650 if (Mask & Mask_AllZeros) {
651 // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
652 // -> (icmp eq (A & (B|D)), 0)
653 Value *NewOr = Builder.CreateOr(B, D);
654 Value *NewAnd = Builder.CreateAnd(A, NewOr);
655 // We can't use C as zero because we might actually handle
656 // (icmp ne (A & B), B) & (icmp ne (A & D), D)
657 // with B and D having a single bit set.
658 Value *Zero = Constant::getNullValue(A->getType());
659 return Builder.CreateICmp(NewCC, NewAnd, Zero);
661 if (Mask & BMask_AllOnes) {
662 // (icmp eq (A & B), B) & (icmp eq (A & D), D)
663 // -> (icmp eq (A & (B|D)), (B|D))
664 Value *NewOr = Builder.CreateOr(B, D);
665 Value *NewAnd = Builder.CreateAnd(A, NewOr);
666 return Builder.CreateICmp(NewCC, NewAnd, NewOr);
668 if (Mask & AMask_AllOnes) {
669 // (icmp eq (A & B), A) & (icmp eq (A & D), A)
670 // -> (icmp eq (A & (B&D)), A)
671 Value *NewAnd1 = Builder.CreateAnd(B, D);
672 Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1);
673 return Builder.CreateICmp(NewCC, NewAnd2, A);
676 // Remaining cases assume at least that B and D are constant, and depend on
677 // their actual values. This isn't strictly necessary, just a "handle the
678 // easy cases for now" decision.
679 ConstantInt *BCst = dyn_cast<ConstantInt>(B);
680 if (!BCst)
681 return nullptr;
682 ConstantInt *DCst = dyn_cast<ConstantInt>(D);
683 if (!DCst)
684 return nullptr;
686 if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) {
687 // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
688 // (icmp ne (A & B), B) & (icmp ne (A & D), D)
689 // -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
690 // Only valid if one of the masks is a superset of the other (check "B&D" is
691 // the same as either B or D).
692 APInt NewMask = BCst->getValue() & DCst->getValue();
694 if (NewMask == BCst->getValue())
695 return LHS;
696 else if (NewMask == DCst->getValue())
697 return RHS;
700 if (Mask & AMask_NotAllOnes) {
701 // (icmp ne (A & B), B) & (icmp ne (A & D), D)
702 // -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
703 // Only valid if one of the masks is a superset of the other (check "B|D" is
704 // the same as either B or D).
705 APInt NewMask = BCst->getValue() | DCst->getValue();
707 if (NewMask == BCst->getValue())
708 return LHS;
709 else if (NewMask == DCst->getValue())
710 return RHS;
713 if (Mask & BMask_Mixed) {
714 // (icmp eq (A & B), C) & (icmp eq (A & D), E)
715 // We already know that B & C == C && D & E == E.
716 // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
717 // C and E, which are shared by both the mask B and the mask D, don't
718 // contradict, then we can transform to
719 // -> (icmp eq (A & (B|D)), (C|E))
720 // Currently, we only handle the case of B, C, D, and E being constant.
721 // We can't simply use C and E because we might actually handle
722 // (icmp ne (A & B), B) & (icmp eq (A & D), D)
723 // with B and D having a single bit set.
724 ConstantInt *CCst = dyn_cast<ConstantInt>(C);
725 if (!CCst)
726 return nullptr;
727 ConstantInt *ECst = dyn_cast<ConstantInt>(E);
728 if (!ECst)
729 return nullptr;
730 if (PredL != NewCC)
731 CCst = cast<ConstantInt>(ConstantExpr::getXor(BCst, CCst));
732 if (PredR != NewCC)
733 ECst = cast<ConstantInt>(ConstantExpr::getXor(DCst, ECst));
735 // If there is a conflict, we should actually return a false for the
736 // whole construct.
737 if (((BCst->getValue() & DCst->getValue()) &
738 (CCst->getValue() ^ ECst->getValue())).getBoolValue())
739 return ConstantInt::get(LHS->getType(), !IsAnd);
741 Value *NewOr1 = Builder.CreateOr(B, D);
742 Value *NewOr2 = ConstantExpr::getOr(CCst, ECst);
743 Value *NewAnd = Builder.CreateAnd(A, NewOr1);
744 return Builder.CreateICmp(NewCC, NewAnd, NewOr2);
747 return nullptr;
750 /// Try to fold a signed range checked with lower bound 0 to an unsigned icmp.
751 /// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
752 /// If \p Inverted is true then the check is for the inverted range, e.g.
753 /// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
754 Value *InstCombiner::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
755 bool Inverted) {
756 // Check the lower range comparison, e.g. x >= 0
757 // InstCombine already ensured that if there is a constant it's on the RHS.
758 ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1));
759 if (!RangeStart)
760 return nullptr;
762 ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate() :
763 Cmp0->getPredicate());
765 // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
766 if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
767 (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
768 return nullptr;
770 ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate() :
771 Cmp1->getPredicate());
773 Value *Input = Cmp0->getOperand(0);
774 Value *RangeEnd;
775 if (Cmp1->getOperand(0) == Input) {
776 // For the upper range compare we have: icmp x, n
777 RangeEnd = Cmp1->getOperand(1);
778 } else if (Cmp1->getOperand(1) == Input) {
779 // For the upper range compare we have: icmp n, x
780 RangeEnd = Cmp1->getOperand(0);
781 Pred1 = ICmpInst::getSwappedPredicate(Pred1);
782 } else {
783 return nullptr;
786 // Check the upper range comparison, e.g. x < n
787 ICmpInst::Predicate NewPred;
788 switch (Pred1) {
789 case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
790 case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
791 default: return nullptr;
794 // This simplification is only valid if the upper range is not negative.
795 KnownBits Known = computeKnownBits(RangeEnd, /*Depth=*/0, Cmp1);
796 if (!Known.isNonNegative())
797 return nullptr;
799 if (Inverted)
800 NewPred = ICmpInst::getInversePredicate(NewPred);
802 return Builder.CreateICmp(NewPred, Input, RangeEnd);
805 static Value *
806 foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS,
807 bool JoinedByAnd,
808 InstCombiner::BuilderTy &Builder) {
809 Value *X = LHS->getOperand(0);
810 if (X != RHS->getOperand(0))
811 return nullptr;
813 const APInt *C1, *C2;
814 if (!match(LHS->getOperand(1), m_APInt(C1)) ||
815 !match(RHS->getOperand(1), m_APInt(C2)))
816 return nullptr;
818 // We only handle (X != C1 && X != C2) and (X == C1 || X == C2).
819 ICmpInst::Predicate Pred = LHS->getPredicate();
820 if (Pred != RHS->getPredicate())
821 return nullptr;
822 if (JoinedByAnd && Pred != ICmpInst::ICMP_NE)
823 return nullptr;
824 if (!JoinedByAnd && Pred != ICmpInst::ICMP_EQ)
825 return nullptr;
827 // The larger unsigned constant goes on the right.
828 if (C1->ugt(*C2))
829 std::swap(C1, C2);
831 APInt Xor = *C1 ^ *C2;
832 if (Xor.isPowerOf2()) {
833 // If LHSC and RHSC differ by only one bit, then set that bit in X and
834 // compare against the larger constant:
835 // (X == C1 || X == C2) --> (X | (C1 ^ C2)) == C2
836 // (X != C1 && X != C2) --> (X | (C1 ^ C2)) != C2
837 // We choose an 'or' with a Pow2 constant rather than the inverse mask with
838 // 'and' because that may lead to smaller codegen from a smaller constant.
839 Value *Or = Builder.CreateOr(X, ConstantInt::get(X->getType(), Xor));
840 return Builder.CreateICmp(Pred, Or, ConstantInt::get(X->getType(), *C2));
843 // Special case: get the ordering right when the values wrap around zero.
844 // Ie, we assumed the constants were unsigned when swapping earlier.
845 if (C1->isNullValue() && C2->isAllOnesValue())
846 std::swap(C1, C2);
848 if (*C1 == *C2 - 1) {
849 // (X == 13 || X == 14) --> X - 13 <=u 1
850 // (X != 13 && X != 14) --> X - 13 >u 1
851 // An 'add' is the canonical IR form, so favor that over a 'sub'.
852 Value *Add = Builder.CreateAdd(X, ConstantInt::get(X->getType(), -(*C1)));
853 auto NewPred = JoinedByAnd ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_ULE;
854 return Builder.CreateICmp(NewPred, Add, ConstantInt::get(X->getType(), 1));
857 return nullptr;
860 // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
861 // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
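// For example, with single-bit masks K1 == 8 and K2 == 2:
//   ((A & 8) == 0) | ((A & 2) == 0)  -->  (A & 10) != 10
//   ((A & 8) != 0) & ((A & 2) != 0)  -->  (A & 10) == 10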
862 Value *InstCombiner::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
863 bool JoinedByAnd,
864 Instruction &CxtI) {
865 ICmpInst::Predicate Pred = LHS->getPredicate();
866 if (Pred != RHS->getPredicate())
867 return nullptr;
868 if (JoinedByAnd && Pred != ICmpInst::ICMP_NE)
869 return nullptr;
870 if (!JoinedByAnd && Pred != ICmpInst::ICMP_EQ)
871 return nullptr;
873 // TODO support vector splats
874 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
875 ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
876 if (!LHSC || !RHSC || !LHSC->isZero() || !RHSC->isZero())
877 return nullptr;
879 Value *A, *B, *C, *D;
880 if (match(LHS->getOperand(0), m_And(m_Value(A), m_Value(B))) &&
881 match(RHS->getOperand(0), m_And(m_Value(C), m_Value(D)))) {
882 if (A == D || B == D)
883 std::swap(C, D);
884 if (B == C)
885 std::swap(A, B);
887 if (A == C &&
888 isKnownToBeAPowerOfTwo(B, false, 0, &CxtI) &&
889 isKnownToBeAPowerOfTwo(D, false, 0, &CxtI)) {
890 Value *Mask = Builder.CreateOr(B, D);
891 Value *Masked = Builder.CreateAnd(A, Mask);
892 auto NewPred = JoinedByAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
893 return Builder.CreateICmp(NewPred, Masked, Mask);
897 return nullptr;
900 /// General pattern:
901 /// X & Y
903 /// Where Y is checking that all the high bits (covered by a mask 4294967168)
904 /// are uniform, i.e. %arg & 4294967168 can be either 4294967168 or 0
905 /// Pattern can be one of:
906 /// %t = add i32 %arg, 128
907 /// %r = icmp ult i32 %t, 256
908 /// Or
909 /// %t0 = shl i32 %arg, 24
910 /// %t1 = ashr i32 %t0, 24
911 /// %r = icmp eq i32 %t1, %arg
912 /// Or
913 /// %t0 = trunc i32 %arg to i8
914 /// %t1 = sext i8 %t0 to i32
915 /// %r = icmp eq i32 %t1, %arg
916 /// This pattern is a signed truncation check.
918 /// And X is checking that some bit in that same mask is zero.
919 /// I.e. can be one of:
920 /// %r = icmp sgt i32 %arg, -1
921 /// Or
922 /// %t = and i32 %arg, 2147483648
923 /// %r = icmp eq i32 %t, 0
925 /// Since we are checking that all the bits in that mask are the same,
926 /// and a particular bit is zero, what we are really checking is that all the
927 /// masked bits are zero.
928 /// So this should be transformed to:
929 /// %r = icmp ult i32 %arg, 128
930 static Value *foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1,
931 Instruction &CxtI,
932 InstCombiner::BuilderTy &Builder) {
933 assert(CxtI.getOpcode() == Instruction::And);
935 // Match icmp ult (add %arg, C01), C1 (C1 == C01 << 1; powers of two)
936 auto tryToMatchSignedTruncationCheck = [](ICmpInst *ICmp, Value *&X,
937 APInt &SignBitMask) -> bool {
938 CmpInst::Predicate Pred;
939 const APInt *I01, *I1; // powers of two; I1 == I01 << 1
940 if (!(match(ICmp,
941 m_ICmp(Pred, m_Add(m_Value(X), m_Power2(I01)), m_Power2(I1))) &&
942 Pred == ICmpInst::ICMP_ULT && I1->ugt(*I01) && I01->shl(1) == *I1))
943 return false;
944 // Which bit is the new sign bit as per the 'signed truncation' pattern?
945 SignBitMask = *I01;
946 return true;
949 // One icmp needs to be 'signed truncation check'.
950 // We need to match this first, else we will mismatch commutative cases.
951 Value *X1;
952 APInt HighestBit;
953 ICmpInst *OtherICmp;
954 if (tryToMatchSignedTruncationCheck(ICmp1, X1, HighestBit))
955 OtherICmp = ICmp0;
956 else if (tryToMatchSignedTruncationCheck(ICmp0, X1, HighestBit))
957 OtherICmp = ICmp1;
958 else
959 return nullptr;
961 assert(HighestBit.isPowerOf2() && "expected to be power of two (non-zero)");
963 // Try to match/decompose into: icmp eq (X & Mask), 0
964 auto tryToDecompose = [](ICmpInst *ICmp, Value *&X,
965 APInt &UnsetBitsMask) -> bool {
966 CmpInst::Predicate Pred = ICmp->getPredicate();
967 // Can it be decomposed into icmp eq (X & Mask), 0 ?
968 if (llvm::decomposeBitTestICmp(ICmp->getOperand(0), ICmp->getOperand(1),
969 Pred, X, UnsetBitsMask,
970 /*LookThruTrunc=*/false) &&
971 Pred == ICmpInst::ICMP_EQ)
972 return true;
973 // Is it icmp eq (X & Mask), 0 already?
974 const APInt *Mask;
975 if (match(ICmp, m_ICmp(Pred, m_And(m_Value(X), m_APInt(Mask)), m_Zero())) &&
976 Pred == ICmpInst::ICMP_EQ) {
977 UnsetBitsMask = *Mask;
978 return true;
980 return false;
983 // And the other icmp needs to be decomposable into a bit test.
984 Value *X0;
985 APInt UnsetBitsMask;
986 if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask))
987 return nullptr;
989 assert(!UnsetBitsMask.isNullValue() && "empty mask makes no sense.");
991 // Are they working on the same value?
992 Value *X;
993 if (X1 == X0) {
994 // Ok as is.
995 X = X1;
996 } else if (match(X0, m_Trunc(m_Specific(X1)))) {
997 UnsetBitsMask = UnsetBitsMask.zext(X1->getType()->getScalarSizeInBits());
998 X = X1;
999 } else
1000 return nullptr;
1002 // So which bits should be uniform as per the 'signed truncation check'?
1003 // (all the bits starting with (i.e. including) HighestBit)
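// E.g. for a 32-bit value with HighestBit == 0x80, SignBitsMask == 0xFFFFFF80.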
1004 APInt SignBitsMask = ~(HighestBit - 1U);
1006 // UnsetBitsMask must have some common bits with SignBitsMask.
1007 if (!UnsetBitsMask.intersects(SignBitsMask))
1008 return nullptr;
1010 // Does UnsetBitsMask contain any bits outside of SignBitsMask?
1011 if (!UnsetBitsMask.isSubsetOf(SignBitsMask)) {
1012 APInt OtherHighestBit = (~UnsetBitsMask) + 1U;
1013 if (!OtherHighestBit.isPowerOf2())
1014 return nullptr;
1015 HighestBit = APIntOps::umin(HighestBit, OtherHighestBit);
1017 // Else, if it does not, then all is ok as-is.
1019 // %r = icmp ult %X, SignBit
1020 return Builder.CreateICmpULT(X, ConstantInt::get(X->getType(), HighestBit),
1021 CxtI.getName() + ".simplified");
1024 /// Fold (icmp)&(icmp) if possible.
1025 Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
1026 Instruction &CxtI) {
1027 // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
1028 // if K1 and K2 are a one-bit mask.
1029 if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, true, CxtI))
1030 return V;
1032 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1034 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
1035 if (predicatesFoldable(PredL, PredR)) {
1036 if (LHS->getOperand(0) == RHS->getOperand(1) &&
1037 LHS->getOperand(1) == RHS->getOperand(0))
1038 LHS->swapOperands();
1039 if (LHS->getOperand(0) == RHS->getOperand(0) &&
1040 LHS->getOperand(1) == RHS->getOperand(1)) {
1041 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
1042 unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
1043 bool IsSigned = LHS->isSigned() || RHS->isSigned();
1044 return getNewICmpValue(Code, IsSigned, Op0, Op1, Builder);
1048 // handle (roughly): (icmp eq (A & B), C) & (icmp eq (A & D), E)
1049 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, true, Builder))
1050 return V;
1052 // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
1053 if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/false))
1054 return V;
1056 // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
1057 if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/false))
1058 return V;
1060 if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, true, Builder))
1061 return V;
1063 if (Value *V = foldSignedTruncationCheck(LHS, RHS, CxtI, Builder))
1064 return V;
1066 // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
1067 Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
1068 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
1069 ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
1070 if (!LHSC || !RHSC)
1071 return nullptr;
1073 if (LHSC == RHSC && PredL == PredR) {
1074 // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
1075 // where C is a power of 2 or
1076 // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
1077 if ((PredL == ICmpInst::ICMP_ULT && LHSC->getValue().isPowerOf2()) ||
1078 (PredL == ICmpInst::ICMP_EQ && LHSC->isZero())) {
1079 Value *NewOr = Builder.CreateOr(LHS0, RHS0);
1080 return Builder.CreateICmp(PredL, NewOr, LHSC);
1084 // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
1085 // where CMAX is the all ones value for the truncated type,
1086 // iff the lower bits of C2 and CA are zero.
1087 if (PredL == ICmpInst::ICMP_EQ && PredL == PredR && LHS->hasOneUse() &&
1088 RHS->hasOneUse()) {
1089 Value *V;
1090 ConstantInt *AndC, *SmallC = nullptr, *BigC = nullptr;
1092 // (trunc x) == C1 & (and x, CA) == C2
1093 // (and x, CA) == C2 & (trunc x) == C1
1094 if (match(RHS0, m_Trunc(m_Value(V))) &&
1095 match(LHS0, m_And(m_Specific(V), m_ConstantInt(AndC)))) {
1096 SmallC = RHSC;
1097 BigC = LHSC;
1098 } else if (match(LHS0, m_Trunc(m_Value(V))) &&
1099 match(RHS0, m_And(m_Specific(V), m_ConstantInt(AndC)))) {
1100 SmallC = LHSC;
1101 BigC = RHSC;
1104 if (SmallC && BigC) {
1105 unsigned BigBitSize = BigC->getType()->getBitWidth();
1106 unsigned SmallBitSize = SmallC->getType()->getBitWidth();
1108 // Check that the low bits are zero.
1109 APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
1110 if ((Low & AndC->getValue()).isNullValue() &&
1111 (Low & BigC->getValue()).isNullValue()) {
1112 Value *NewAnd = Builder.CreateAnd(V, Low | AndC->getValue());
1113 APInt N = SmallC->getValue().zext(BigBitSize) | BigC->getValue();
1114 Value *NewVal = ConstantInt::get(AndC->getType()->getContext(), N);
1115 return Builder.CreateICmp(PredL, NewAnd, NewVal);
1120 // From here on, we only handle:
1121 // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
1122 if (LHS0 != RHS0)
1123 return nullptr;
1125 // ICMP_[US][GL]E X, C is folded to ICMP_[US][GL]T elsewhere.
1126 if (PredL == ICmpInst::ICMP_UGE || PredL == ICmpInst::ICMP_ULE ||
1127 PredR == ICmpInst::ICMP_UGE || PredR == ICmpInst::ICMP_ULE ||
1128 PredL == ICmpInst::ICMP_SGE || PredL == ICmpInst::ICMP_SLE ||
1129 PredR == ICmpInst::ICMP_SGE || PredR == ICmpInst::ICMP_SLE)
1130 return nullptr;
1132 // We can't fold (ugt x, C) & (sgt x, C2).
1133 if (!predicatesFoldable(PredL, PredR))
1134 return nullptr;
1136 // Ensure that the larger constant is on the RHS.
1137 bool ShouldSwap;
1138 if (CmpInst::isSigned(PredL) ||
1139 (ICmpInst::isEquality(PredL) && CmpInst::isSigned(PredR)))
1140 ShouldSwap = LHSC->getValue().sgt(RHSC->getValue());
1141 else
1142 ShouldSwap = LHSC->getValue().ugt(RHSC->getValue());
1144 if (ShouldSwap) {
1145 std::swap(LHS, RHS);
1146 std::swap(LHSC, RHSC);
1147 std::swap(PredL, PredR);
1150 // At this point, we know we have two icmp instructions
1151 // comparing a value against two constants and and'ing the result
1152 // together. Because of the above check, we know that we only have
1153 // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
1154 // (from the icmp folding check above) that the two constants are not
1155 // equal, and that the larger constant is on the RHS.
1156 assert(LHSC != RHSC && "Compares not folded above?");
1158 switch (PredL) {
1159 default:
1160 llvm_unreachable("Unknown integer condition code!");
1161 case ICmpInst::ICMP_NE:
1162 switch (PredR) {
1163 default:
1164 llvm_unreachable("Unknown integer condition code!");
1165 case ICmpInst::ICMP_ULT:
1166 if (LHSC == SubOne(RHSC)) // (X != 13 & X u< 14) -> X < 13
1167 return Builder.CreateICmpULT(LHS0, LHSC);
1168 if (LHSC->isZero()) // (X != 0 & X u< 14) -> X-1 u< 13
1169 return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
1170 false, true);
1171 break; // (X != 13 & X u< 15) -> no change
1172 case ICmpInst::ICMP_SLT:
1173 if (LHSC == SubOne(RHSC)) // (X != 13 & X s< 14) -> X < 13
1174 return Builder.CreateICmpSLT(LHS0, LHSC);
1175 break; // (X != 13 & X s< 15) -> no change
1176 case ICmpInst::ICMP_NE:
1177 // Potential folds for this case should already be handled.
1178 break;
1180 break;
1181 case ICmpInst::ICMP_UGT:
1182 switch (PredR) {
1183 default:
1184 llvm_unreachable("Unknown integer condition code!");
1185 case ICmpInst::ICMP_NE:
1186 if (RHSC == AddOne(LHSC)) // (X u> 13 & X != 14) -> X u> 14
1187 return Builder.CreateICmp(PredL, LHS0, RHSC);
1188 break; // (X u> 13 & X != 15) -> no change
1189 case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
1190 return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
1191 false, true);
1193 break;
1194 case ICmpInst::ICMP_SGT:
1195 switch (PredR) {
1196 default:
1197 llvm_unreachable("Unknown integer condition code!");
1198 case ICmpInst::ICMP_NE:
1199 if (RHSC == AddOne(LHSC)) // (X s> 13 & X != 14) -> X s> 14
1200 return Builder.CreateICmp(PredL, LHS0, RHSC);
1201 break; // (X s> 13 & X != 15) -> no change
1202 case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
1203 return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), true,
1204 true);
1206 break;
1209 return nullptr;
1212 Value *InstCombiner::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
1213 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1214 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1215 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1217 if (LHS0 == RHS1 && RHS0 == LHS1) {
1218 // Swap RHS operands to match LHS.
1219 PredR = FCmpInst::getSwappedPredicate(PredR);
1220 std::swap(RHS0, RHS1);
1223 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
1224 // Suppose the relation between x and y is R, where R is one of
1225 // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for
1226 // testing the desired relations.
1228 // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
1229 // bool(R & CC0) && bool(R & CC1)
1230 // = bool((R & CC0) & (R & CC1))
1231 // = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency
1233 // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
1234 // bool(R & CC0) || bool(R & CC1)
1235 // = bool((R & CC0) | (R & CC1))
1236 // = bool(R & (CC0 | CC1)) <= by reversed distribution (contribution? ;)
1237 if (LHS0 == RHS0 && LHS1 == RHS1) {
1238 unsigned FCmpCodeL = getFCmpCode(PredL);
1239 unsigned FCmpCodeR = getFCmpCode(PredR);
1240 unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;
1241 return getFCmpValue(NewPred, LHS0, LHS1, Builder);
1244 if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1245 (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
1246 if (LHS0->getType() != RHS0->getType())
1247 return nullptr;
1249 // FCmp canonicalization ensures that (fcmp ord/uno X, X) and
1250 // (fcmp ord/uno X, C) will be transformed to (fcmp X, +0.0).
1251 if (match(LHS1, m_PosZeroFP()) && match(RHS1, m_PosZeroFP()))
1252 // Ignore the constants because they are obviously not NANs:
1253 // (fcmp ord x, 0.0) & (fcmp ord y, 0.0) -> (fcmp ord x, y)
1254 // (fcmp uno x, 0.0) | (fcmp uno y, 0.0) -> (fcmp uno x, y)
1255 return Builder.CreateFCmp(PredL, LHS0, RHS0);
1258 return nullptr;
1261 /// Match De Morgan's Laws:
1262 /// (~A & ~B) == (~(A | B))
1263 /// (~A | ~B) == (~(A & B))
1264 static Instruction *matchDeMorgansLaws(BinaryOperator &I,
1265 InstCombiner::BuilderTy &Builder) {
1266 auto Opcode = I.getOpcode();
1267 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1268 "Trying to match De Morgan's Laws with something other than and/or");
1270 // Flip the logic operation.
1271 Opcode = (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
1273 Value *A, *B;
1274 if (match(I.getOperand(0), m_OneUse(m_Not(m_Value(A)))) &&
1275 match(I.getOperand(1), m_OneUse(m_Not(m_Value(B)))) &&
1276 !IsFreeToInvert(A, A->hasOneUse()) &&
1277 !IsFreeToInvert(B, B->hasOneUse())) {
1278 Value *AndOr = Builder.CreateBinOp(Opcode, A, B, I.getName() + ".demorgan");
1279 return BinaryOperator::CreateNot(AndOr);
1282 return nullptr;
1285 bool InstCombiner::shouldOptimizeCast(CastInst *CI) {
1286 Value *CastSrc = CI->getOperand(0);
1288 // Noop casts and casts of constants should be eliminated trivially.
1289 if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(CastSrc))
1290 return false;
1292 // If this cast is paired with another cast that can be eliminated, we prefer
1293 // to have it eliminated.
1294 if (const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc))
1295 if (isEliminableCastPair(PrecedingCI, CI))
1296 return false;
1298 return true;
1301 /// Fold {and,or,xor} (cast X), C.
1302 static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
1303 InstCombiner::BuilderTy &Builder) {
1304 Constant *C = dyn_cast<Constant>(Logic.getOperand(1));
1305 if (!C)
1306 return nullptr;
1308 auto LogicOpc = Logic.getOpcode();
1309 Type *DestTy = Logic.getType();
1310 Type *SrcTy = Cast->getSrcTy();
1312 // Move the logic operation ahead of a zext or sext if the constant is
1313 // unchanged in the smaller source type. Performing the logic in a smaller
1314 // type may provide more information to later folds, and the smaller logic
1315 // instruction may be cheaper (particularly in the case of vectors).
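// For example: and (zext i8 %x to i32), 15 --> zext (and i8 %x, 15) to i32,
// because the constant 15 survives the trunc/zext round-trip unchanged.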
1316 Value *X;
1317 if (match(Cast, m_OneUse(m_ZExt(m_Value(X))))) {
1318 Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
1319 Constant *ZextTruncC = ConstantExpr::getZExt(TruncC, DestTy);
1320 if (ZextTruncC == C) {
1321 // LogicOpc (zext X), C --> zext (LogicOpc X, C)
1322 Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
1323 return new ZExtInst(NewOp, DestTy);
1327 if (match(Cast, m_OneUse(m_SExt(m_Value(X))))) {
1328 Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
1329 Constant *SextTruncC = ConstantExpr::getSExt(TruncC, DestTy);
1330 if (SextTruncC == C) {
1331 // LogicOpc (sext X), C --> sext (LogicOpc X, C)
1332 Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
1333 return new SExtInst(NewOp, DestTy);
1337 return nullptr;
1340 /// Fold {and,or,xor} (cast X), Y.
1341 Instruction *InstCombiner::foldCastedBitwiseLogic(BinaryOperator &I) {
1342 auto LogicOpc = I.getOpcode();
1343 assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding");
1345 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1346 CastInst *Cast0 = dyn_cast<CastInst>(Op0);
1347 if (!Cast0)
1348 return nullptr;
1350 // This must be a cast from an integer or integer vector source type to allow
1351 // transformation of the logic operation to the source type.
1352 Type *DestTy = I.getType();
1353 Type *SrcTy = Cast0->getSrcTy();
1354 if (!SrcTy->isIntOrIntVectorTy())
1355 return nullptr;
1357 if (Instruction *Ret = foldLogicCastConstant(I, Cast0, Builder))
1358 return Ret;
1360 CastInst *Cast1 = dyn_cast<CastInst>(Op1);
1361 if (!Cast1)
1362 return nullptr;
1364 // Both operands of the logic operation are casts. The casts must be of the
1365 // same type for reduction.
1366 auto CastOpcode = Cast0->getOpcode();
1367 if (CastOpcode != Cast1->getOpcode() || SrcTy != Cast1->getSrcTy())
1368 return nullptr;
1370 Value *Cast0Src = Cast0->getOperand(0);
1371 Value *Cast1Src = Cast1->getOperand(0);
1373 // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
1374 if (shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
1375 Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
1376 I.getName());
1377 return CastInst::Create(CastOpcode, NewOp, DestTy);
1380 // For now, only 'and'/'or' have optimizations after this.
1381 if (LogicOpc == Instruction::Xor)
1382 return nullptr;
1384 // If this is logic(cast(icmp), cast(icmp)), try to fold this even if the
1385 // cast is otherwise not optimizable. This happens for vector sexts.
1386 ICmpInst *ICmp0 = dyn_cast<ICmpInst>(Cast0Src);
1387 ICmpInst *ICmp1 = dyn_cast<ICmpInst>(Cast1Src);
1388 if (ICmp0 && ICmp1) {
1389 Value *Res = LogicOpc == Instruction::And ? foldAndOfICmps(ICmp0, ICmp1, I)
1390 : foldOrOfICmps(ICmp0, ICmp1, I);
1391 if (Res)
1392 return CastInst::Create(CastOpcode, Res, DestTy);
1393 return nullptr;
1396 // If this is logic(cast(fcmp), cast(fcmp)), try to fold this even if the
1397 // cast is otherwise not optimizable. This happens for vector sexts.
1398 FCmpInst *FCmp0 = dyn_cast<FCmpInst>(Cast0Src);
1399 FCmpInst *FCmp1 = dyn_cast<FCmpInst>(Cast1Src);
1400 if (FCmp0 && FCmp1)
1401 if (Value *R = foldLogicOfFCmps(FCmp0, FCmp1, LogicOpc == Instruction::And))
1402 return CastInst::Create(CastOpcode, R, DestTy);
1404 return nullptr;
1407 static Instruction *foldAndToXor(BinaryOperator &I,
1408 InstCombiner::BuilderTy &Builder) {
1409 assert(I.getOpcode() == Instruction::And);
1410 Value *Op0 = I.getOperand(0);
1411 Value *Op1 = I.getOperand(1);
1412 Value *A, *B;
1414 // Operand complexity canonicalization guarantees that the 'or' is Op0.
1415 // (A | B) & ~(A & B) --> A ^ B
1416 // (A | B) & ~(B & A) --> A ^ B
1417 if (match(&I, m_BinOp(m_Or(m_Value(A), m_Value(B)),
1418 m_Not(m_c_And(m_Deferred(A), m_Deferred(B))))))
1419 return BinaryOperator::CreateXor(A, B);
1421 // (A | ~B) & (~A | B) --> ~(A ^ B)
1422 // (A | ~B) & (B | ~A) --> ~(A ^ B)
1423 // (~B | A) & (~A | B) --> ~(A ^ B)
1424 // (~B | A) & (B | ~A) --> ~(A ^ B)
1425 if (Op0->hasOneUse() || Op1->hasOneUse())
1426 if (match(&I, m_BinOp(m_c_Or(m_Value(A), m_Not(m_Value(B))),
1427 m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B)))))
1428 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
1430 return nullptr;
1433 static Instruction *foldOrToXor(BinaryOperator &I,
1434 InstCombiner::BuilderTy &Builder) {
1435 assert(I.getOpcode() == Instruction::Or);
1436 Value *Op0 = I.getOperand(0);
1437 Value *Op1 = I.getOperand(1);
1438 Value *A, *B;
1440 // Operand complexity canonicalization guarantees that the 'and' is Op0.
1441 // (A & B) | ~(A | B) --> ~(A ^ B)
1442 // (A & B) | ~(B | A) --> ~(A ^ B)
1443 if (Op0->hasOneUse() || Op1->hasOneUse())
1444 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1445 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
1446 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
1448 // (A & ~B) | (~A & B) --> A ^ B
1449 // (A & ~B) | (B & ~A) --> A ^ B
1450 // (~B & A) | (~A & B) --> A ^ B
1451 // (~B & A) | (B & ~A) --> A ^ B
1452 if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
1453 match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))
1454 return BinaryOperator::CreateXor(A, B);
1456 return nullptr;
1459 /// Return true if a constant shift amount is always less than the specified
1460 /// bit-width. If not, the shift could create poison in the narrower type.
1461 static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth) {
1462 if (auto *ScalarC = dyn_cast<ConstantInt>(C))
1463 return ScalarC->getZExtValue() < BitWidth;
1465 if (C->getType()->isVectorTy()) {
1466 // Check each element of a constant vector.
1467 unsigned NumElts = C->getType()->getVectorNumElements();
1468 for (unsigned i = 0; i != NumElts; ++i) {
1469 Constant *Elt = C->getAggregateElement(i);
1470 if (!Elt)
1471 return false;
1472 if (isa<UndefValue>(Elt))
1473 continue;
1474 auto *CI = dyn_cast<ConstantInt>(Elt);
1475 if (!CI || CI->getZExtValue() >= BitWidth)
1476 return false;
1478 return true;
1481 // The constant is a constant expression or unknown.
1482 return false;
1485 /// Try to use narrower ops (sink zext ops) for an 'and' with binop operand and
1486 /// a common zext operand: and (binop (zext X), C), (zext X).
1487 Instruction *InstCombiner::narrowMaskedBinOp(BinaryOperator &And) {
1488 // This transform could also apply to {or, and, xor}, but there are better
1489 // folds for those cases, so we don't expect those patterns here. AShr is not
1490 // handled because it should always be transformed to LShr in this sequence.
1491 // The subtract transform is different because it has a constant on the left.
1492 // Add/mul commute the constant to RHS; sub with constant RHS becomes add.
1493 Value *Op0 = And.getOperand(0), *Op1 = And.getOperand(1);
1494 Constant *C;
1495 if (!match(Op0, m_OneUse(m_Add(m_Specific(Op1), m_Constant(C)))) &&
1496 !match(Op0, m_OneUse(m_Mul(m_Specific(Op1), m_Constant(C)))) &&
1497 !match(Op0, m_OneUse(m_LShr(m_Specific(Op1), m_Constant(C)))) &&
1498 !match(Op0, m_OneUse(m_Shl(m_Specific(Op1), m_Constant(C)))) &&
1499 !match(Op0, m_OneUse(m_Sub(m_Constant(C), m_Specific(Op1)))))
1500 return nullptr;
1502 Value *X;
1503 if (!match(Op1, m_ZExt(m_Value(X))) || Op1->hasNUsesOrMore(3))
1504 return nullptr;
1506 Type *Ty = And.getType();
1507 if (!isa<VectorType>(Ty) && !shouldChangeType(Ty, X->getType()))
1508 return nullptr;
1510 // If we're narrowing a shift, the shift amount must be safe (less than the
1511 // width) in the narrower type. If the shift amount is greater, instsimplify
1512 // usually handles that case, but we can't guarantee/assert it.
1513 Instruction::BinaryOps Opc = cast<BinaryOperator>(Op0)->getOpcode();
1514 if (Opc == Instruction::LShr || Opc == Instruction::Shl)
1515 if (!canNarrowShiftAmt(C, X->getType()->getScalarSizeInBits()))
1516 return nullptr;
1518 // and (sub C, (zext X)), (zext X) --> zext (and (sub C', X), X)
1519 // and (binop (zext X), C), (zext X) --> zext (and (binop X, C'), X)
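// For instance (an illustrative sketch; the value name and constant are
// arbitrary), with an i8 value zero-extended to i32:
//   %z = zext i8 %x to i32
//   %b = add i32 %z, 44
//   %r = and i32 %b, %z
// becomes
//   %nb = add i8 %x, 44
//   %na = and i8 %nb, %x
//   %r  = zext i8 %na to i32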
1520 Value *NewC = ConstantExpr::getTrunc(C, X->getType());
1521 Value *NewBO = Opc == Instruction::Sub ? Builder.CreateBinOp(Opc, NewC, X)
1522 : Builder.CreateBinOp(Opc, X, NewC);
1523 return new ZExtInst(Builder.CreateAnd(NewBO, X), Ty);
1526 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
1527 // here. We should standardize that construct where it is needed or choose some
1528 // other way to ensure that commutated variants of patterns are not missed.
1529 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
1530 if (Value *V = SimplifyAndInst(I.getOperand(0), I.getOperand(1),
1531 SQ.getWithInstruction(&I)))
1532 return replaceInstUsesWith(I, V);
1534 if (SimplifyAssociativeOrCommutative(I))
1535 return &I;
1537 if (Instruction *X = foldVectorBinop(I))
1538 return X;
1540 // See if we can simplify any instructions used by the instruction whose sole
1541 // purpose is to compute bits we don't care about.
1542 if (SimplifyDemandedInstructionBits(I))
1543 return &I;
1545 // Do this before using distributive laws to catch simple and/or/not patterns.
1546 if (Instruction *Xor = foldAndToXor(I, Builder))
1547 return Xor;
1549 // (A|B)&(A|C) -> A|(B&C) etc
1550 if (Value *V = SimplifyUsingDistributiveLaws(I))
1551 return replaceInstUsesWith(I, V);
1553 if (Value *V = SimplifyBSwap(I, Builder))
1554 return replaceInstUsesWith(I, V);
1556 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1557 const APInt *C;
1558 if (match(Op1, m_APInt(C))) {
1559 Value *X, *Y;
1560 if (match(Op0, m_OneUse(m_LogicalShift(m_One(), m_Value(X)))) &&
1561 C->isOneValue()) {
1562 // (1 << X) & 1 --> zext(X == 0)
1563 // (1 >> X) & 1 --> zext(X == 0)
1564 Value *IsZero = Builder.CreateICmpEQ(X, ConstantInt::get(I.getType(), 0));
1565 return new ZExtInst(IsZero, I.getType());
1568 const APInt *XorC;
1569 if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_APInt(XorC))))) {
1570 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
1571 Constant *NewC = ConstantInt::get(I.getType(), *C & *XorC);
1572 Value *And = Builder.CreateAnd(X, Op1);
1573 And->takeName(Op0);
1574 return BinaryOperator::CreateXor(And, NewC);
1577 const APInt *OrC;
1578 if (match(Op0, m_OneUse(m_Or(m_Value(X), m_APInt(OrC))))) {
1579 // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2)
1580 // NOTE: This reduces the number of bits set in the & mask, which
1581 // can expose opportunities for store narrowing for scalars.
1582 // NOTE: SimplifyDemandedBits should have already removed bits from C1
1583 // that aren't set in C2. That means we could replace (C1&C2) with C1
1584 // above, but this feels safer.
1585 APInt Together = *C & *OrC;
1586 Value *And = Builder.CreateAnd(X, ConstantInt::get(I.getType(),
1587 Together ^ *C));
1588 And->takeName(Op0);
1589 return BinaryOperator::CreateOr(And, ConstantInt::get(I.getType(),
1590 Together));
1593 // If the mask is only needed on one incoming arm, push the 'and' op up.
1594 if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_Value(Y)))) ||
1595 match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
1596 APInt NotAndMask(~(*C));
1597 BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Op0)->getOpcode();
1598 if (MaskedValueIsZero(X, NotAndMask, 0, &I)) {
1599 // Not masking anything out for the LHS, move mask to RHS.
1600 // and ({x}or X, Y), C --> {x}or X, (and Y, C)
1601 Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked");
1602 return BinaryOperator::Create(BinOp, X, NewRHS);
1604 if (!isa<Constant>(Y) && MaskedValueIsZero(Y, NotAndMask, 0, &I)) {
1605 // Not masking anything out for the RHS, move mask to LHS.
1606 // and ({x}or X, Y), C --> {x}or (and X, C), Y
1607 Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked");
1608 return BinaryOperator::Create(BinOp, NewLHS, Y);
1614 if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
1615 const APInt &AndRHSMask = AndRHS->getValue();
1617 // Optimize a variety of ((val OP C1) & C2) combinations...
1618 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
1619 // ((C1 OP zext(X)) & C2) -> zext((C1 OP X) & C2) if C2 fits in the bitwidth
1620 // of X and OP behaves well when given trunc(C1) and X.
1621 switch (Op0I->getOpcode()) {
1622 default:
1623 break;
1624 case Instruction::Xor:
1625 case Instruction::Or:
1626 case Instruction::Mul:
1627 case Instruction::Add:
1628 case Instruction::Sub:
1629 Value *X;
1630 ConstantInt *C1;
1631 if (match(Op0I, m_c_BinOp(m_ZExt(m_Value(X)), m_ConstantInt(C1)))) {
1632 if (AndRHSMask.isIntN(X->getType()->getScalarSizeInBits())) {
1633 auto *TruncC1 = ConstantExpr::getTrunc(C1, X->getType());
1634 Value *BinOp;
1635 Value *Op0LHS = Op0I->getOperand(0);
1636 if (isa<ZExtInst>(Op0LHS))
1637 BinOp = Builder.CreateBinOp(Op0I->getOpcode(), X, TruncC1);
1638 else
1639 BinOp = Builder.CreateBinOp(Op0I->getOpcode(), TruncC1, X);
1640 auto *TruncC2 = ConstantExpr::getTrunc(AndRHS, X->getType());
1641 auto *And = Builder.CreateAnd(BinOp, TruncC2);
1642 return new ZExtInst(And, I.getType());
1647 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
1648 if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
1649 return Res;
1652 // If this is an integer truncation, and if the source is an 'and' with
1653 // immediate, transform it. This frequently occurs for bitfield accesses.
1655 Value *X = nullptr; ConstantInt *YC = nullptr;
1656 if (match(Op0, m_Trunc(m_And(m_Value(X), m_ConstantInt(YC))))) {
1657 // Change: and (trunc (and X, YC) to T), C2
1658 // into : and (trunc X to T), trunc(YC) & C2
1659 // This will fold the two constants together, which may allow
1660 // other simplifications.
1661 Value *NewCast = Builder.CreateTrunc(X, I.getType(), "and.shrunk");
1662 Constant *C3 = ConstantExpr::getTrunc(YC, I.getType());
1663 C3 = ConstantExpr::getAnd(C3, AndRHS);
1664 return BinaryOperator::CreateAnd(NewCast, C3);
1669 if (Instruction *Z = narrowMaskedBinOp(I))
1670 return Z;
1672 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
1673 return FoldedLogic;
1675 if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder))
1676 return DeMorgan;
1679 Value *A, *B, *C;
1680 // A & (A ^ B) --> A & ~B
1681 if (match(Op1, m_OneUse(m_c_Xor(m_Specific(Op0), m_Value(B)))))
1682 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(B));
1683 // (A ^ B) & A --> A & ~B
1684 if (match(Op0, m_OneUse(m_c_Xor(m_Specific(Op1), m_Value(B)))))
1685 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(B));
1687 // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
1688 if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
1689 if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
1690 if (Op1->hasOneUse() || IsFreeToInvert(C, C->hasOneUse()))
1691 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(C));
1693 // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
1694 if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
1695 if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
1696 if (Op0->hasOneUse() || IsFreeToInvert(C, C->hasOneUse()))
1697 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(C));
1699 // (A | B) & ((~A) ^ B) -> (A & B)
1700 // (A | B) & (B ^ (~A)) -> (A & B)
1701 // (B | A) & ((~A) ^ B) -> (A & B)
1702 // (B | A) & (B ^ (~A)) -> (A & B)
1703 if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
1704 match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
1705 return BinaryOperator::CreateAnd(A, B);
1707 // ((~A) ^ B) & (A | B) -> (A & B)
1708 // ((~A) ^ B) & (B | A) -> (A & B)
1709 // (B ^ (~A)) & (A | B) -> (A & B)
1710 // (B ^ (~A)) & (B | A) -> (A & B)
1711 if (match(Op0, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
1712 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
1713 return BinaryOperator::CreateAnd(A, B);
1717 ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
1718 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
1719 if (LHS && RHS)
1720 if (Value *Res = foldAndOfICmps(LHS, RHS, I))
1721 return replaceInstUsesWith(I, Res);
1723 // TODO: Make this recursive; it's a little tricky because an arbitrary
1724 // number of 'and' instructions might have to be created.
1725 Value *X, *Y;
1726 if (LHS && match(Op1, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
1727 if (auto *Cmp = dyn_cast<ICmpInst>(X))
1728 if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
1729 return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
1730 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
1731 if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
1732 return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
1734 if (RHS && match(Op0, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
1735 if (auto *Cmp = dyn_cast<ICmpInst>(X))
1736 if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
1737 return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
1738 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
1739 if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
1740 return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
1744 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
1745 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
1746 if (Value *Res = foldLogicOfFCmps(LHS, RHS, true))
1747 return replaceInstUsesWith(I, Res);
1749 if (Instruction *CastedAnd = foldCastedBitwiseLogic(I))
1750 return CastedAnd;
1752 // and(sext(A), B) / and(B, sext(A)) --> A ? B : 0, where A is i1 or <N x i1>.
1753 Value *A;
1754 if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) &&
1755 A->getType()->isIntOrIntVectorTy(1))
1756 return SelectInst::Create(A, Op1, Constant::getNullValue(I.getType()));
1757 if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) &&
1758 A->getType()->isIntOrIntVectorTy(1))
1759 return SelectInst::Create(A, Op0, Constant::getNullValue(I.getType()));
1761 return nullptr;
1764 Instruction *InstCombiner::matchBSwap(BinaryOperator &Or) {
1765 assert(Or.getOpcode() == Instruction::Or && "bswap requires an 'or'");
1766 Value *Op0 = Or.getOperand(0), *Op1 = Or.getOperand(1);
1768 // Look through zero extends.
1769 if (Instruction *Ext = dyn_cast<ZExtInst>(Op0))
1770 Op0 = Ext->getOperand(0);
1772 if (Instruction *Ext = dyn_cast<ZExtInst>(Op1))
1773 Op1 = Ext->getOperand(0);
1775 // (A | B) | C and A | (B | C) -> bswap if possible.
1776 bool OrOfOrs = match(Op0, m_Or(m_Value(), m_Value())) ||
1777 match(Op1, m_Or(m_Value(), m_Value()));
1779 // (A >> B) | (C << D) and (A << B) | (C >> D) -> bswap if possible.
1780 bool OrOfShifts = match(Op0, m_LogicalShift(m_Value(), m_Value())) &&
1781 match(Op1, m_LogicalShift(m_Value(), m_Value()));
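// For example, the simplest shift-only form of the idiom on i16 (value names
// are illustrative):
//   %hi = shl i16 %x, 8
//   %lo = lshr i16 %x, 8
//   %r  = or i16 %hi, %lo
// --> %r = call i16 @llvm.bswap.i16(i16 %x)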
1783 // (A & B) | (C & D) -> bswap if possible.
1784 bool OrOfAnds = match(Op0, m_And(m_Value(), m_Value())) &&
1785 match(Op1, m_And(m_Value(), m_Value()));
1787 // (A << B) | (C & D) -> bswap if possible.
1788 // The bigger pattern here is ((A & C1) << C2) | ((B >> C2) & C1), which is a
1789 // part of the bswap idiom for specific values of C1, C2 (e.g. C1 = 16711935,
1790 // C2 = 8 for i32).
1791 // This pattern can occur when the operands of the 'or' are not canonicalized
1792 // for some reason (not having only one use, for example).
1793 bool OrOfAndAndSh = (match(Op0, m_LogicalShift(m_Value(), m_Value())) &&
1794 match(Op1, m_And(m_Value(), m_Value()))) ||
1795 (match(Op0, m_And(m_Value(), m_Value())) &&
1796 match(Op1, m_LogicalShift(m_Value(), m_Value())));
1798 if (!OrOfOrs && !OrOfShifts && !OrOfAnds && !OrOfAndAndSh)
1799 return nullptr;
1801 SmallVector<Instruction*, 4> Insts;
1802 if (!recognizeBSwapOrBitReverseIdiom(&Or, true, false, Insts))
1803 return nullptr;
1804 Instruction *LastInst = Insts.pop_back_val();
1805 LastInst->removeFromParent();
1807 for (auto *Inst : Insts)
1808 Worklist.Add(Inst);
1809 return LastInst;
1812 /// Transform UB-safe variants of bitwise rotate to the funnel shift intrinsic.
1813 static Instruction *matchRotate(Instruction &Or) {
1814 // TODO: Can we reduce the code duplication between this and the related
1815 // rotate matching code under visitSelect and visitTrunc?
1816 unsigned Width = Or.getType()->getScalarSizeInBits();
1817 if (!isPowerOf2_32(Width))
1818 return nullptr;
1820 // First, find an or'd pair of opposite shifts with the same shifted operand:
1821 // or (lshr ShVal, ShAmt0), (shl ShVal, ShAmt1)
1822 BinaryOperator *Or0, *Or1;
1823 if (!match(Or.getOperand(0), m_BinOp(Or0)) ||
1824 !match(Or.getOperand(1), m_BinOp(Or1)))
1825 return nullptr;
1827 Value *ShVal, *ShAmt0, *ShAmt1;
1828 if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal), m_Value(ShAmt0)))) ||
1829 !match(Or1, m_OneUse(m_LogicalShift(m_Specific(ShVal), m_Value(ShAmt1)))))
1830 return nullptr;
1832 BinaryOperator::BinaryOps ShiftOpcode0 = Or0->getOpcode();
1833 BinaryOperator::BinaryOps ShiftOpcode1 = Or1->getOpcode();
1834 if (ShiftOpcode0 == ShiftOpcode1)
1835 return nullptr;
1837 // Match the shift amount operands for a rotate pattern. This always matches
1838 // a subtraction on the R operand.
1839 auto matchShiftAmount = [](Value *L, Value *R, unsigned Width) -> Value * {
1840 // The shift amount may be masked with negation:
1841 // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
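// For example, a rotate-left of an i32 value %v by a variable amount %s
// (an illustrative sketch):
//   %m   = and i32 %s, 31
//   %shl = shl i32 %v, %m
//   %n   = sub i32 0, %s
//   %mn  = and i32 %n, 31
//   %shr = lshr i32 %v, %mn
//   %or  = or i32 %shl, %shr
// --> %or = call i32 @llvm.fshl.i32(i32 %v, i32 %v, i32 %s)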
1842 Value *X;
1843 unsigned Mask = Width - 1;
1844 if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
1845 match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
1846 return X;
1848 return nullptr;
1851 Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width);
1852 bool SubIsOnLHS = false;
1853 if (!ShAmt) {
1854 ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width);
1855 SubIsOnLHS = true;
1857 if (!ShAmt)
1858 return nullptr;
1860 bool IsFshl = (!SubIsOnLHS && ShiftOpcode0 == BinaryOperator::Shl) ||
1861 (SubIsOnLHS && ShiftOpcode1 == BinaryOperator::Shl);
1862 Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
1863 Function *F = Intrinsic::getDeclaration(Or.getModule(), IID, Or.getType());
1864 return IntrinsicInst::Create(F, { ShVal, ShVal, ShAmt });
1867 /// If all elements of two constant vectors are 0/-1 and inverses, return true.
1868 static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
1869 unsigned NumElts = C1->getType()->getVectorNumElements();
1870 for (unsigned i = 0; i != NumElts; ++i) {
1871 Constant *EltC1 = C1->getAggregateElement(i);
1872 Constant *EltC2 = C2->getAggregateElement(i);
1873 if (!EltC1 || !EltC2)
1874 return false;
1876 // One element must be all ones, and the other must be all zeros.
1877 if (!((match(EltC1, m_Zero()) && match(EltC2, m_AllOnes())) ||
1878 (match(EltC2, m_Zero()) && match(EltC1, m_AllOnes()))))
1879 return false;
1881 return true;
1884 /// We have an expression of the form (A & C) | (B & D). If A is a scalar or
1885 /// vector composed of all-zeros or all-ones values and is the bitwise 'not' of
1886 /// B, it can be used as the condition operand of a select instruction.
1887 Value *InstCombiner::getSelectCondition(Value *A, Value *B) {
1888 // Step 1: We may have peeked through bitcasts in the caller.
1889 // Exit immediately if we don't have (vector) integer types.
1890 Type *Ty = A->getType();
1891 if (!Ty->isIntOrIntVectorTy() || !B->getType()->isIntOrIntVectorTy())
1892 return nullptr;
1894 // Step 2: We need 0 or all-1's bitmasks.
1895 if (ComputeNumSignBits(A) != Ty->getScalarSizeInBits())
1896 return nullptr;
1898 // Step 3: If B is the 'not' value of A, we have our answer.
1899 if (match(A, m_Not(m_Specific(B)))) {
1900 // If these are scalars or vectors of i1, A can be used directly.
1901 if (Ty->isIntOrIntVectorTy(1))
1902 return A;
1903 return Builder.CreateTrunc(A, CmpInst::makeCmpResultType(Ty));
1906 // If both operands are constants, see if the constants are inverse bitmasks.
1907 Constant *AConst, *BConst;
1908 if (match(A, m_Constant(AConst)) && match(B, m_Constant(BConst)))
1909 if (AConst == ConstantExpr::getNot(BConst))
1910 return Builder.CreateZExtOrTrunc(A, CmpInst::makeCmpResultType(Ty));
1912 // Look for more complex patterns. The 'not' op may be hidden behind various
1913 // casts. Look through sexts and bitcasts to find the booleans.
1914 Value *Cond;
1915 Value *NotB;
1916 if (match(A, m_SExt(m_Value(Cond))) &&
1917 Cond->getType()->isIntOrIntVectorTy(1) &&
1918 match(B, m_OneUse(m_Not(m_Value(NotB))))) {
1919 NotB = peekThroughBitcast(NotB, true);
1920 if (match(NotB, m_SExt(m_Specific(Cond))))
1921 return Cond;
1924 // All scalar (and most vector) possibilities should be handled now.
1925 // Try more matches that only apply to non-splat constant vectors.
1926 if (!Ty->isVectorTy())
1927 return nullptr;
1929 // If both operands are xor'd with constants using the same sexted boolean
1930 // operand, see if the constants are inverse bitmasks.
1931 // TODO: Use ConstantExpr::getNot()?
1932 if (match(A, (m_Xor(m_SExt(m_Value(Cond)), m_Constant(AConst)))) &&
1933 match(B, (m_Xor(m_SExt(m_Specific(Cond)), m_Constant(BConst)))) &&
1934 Cond->getType()->isIntOrIntVectorTy(1) &&
1935 areInverseVectorBitmasks(AConst, BConst)) {
1936 AConst = ConstantExpr::getTrunc(AConst, CmpInst::makeCmpResultType(Ty));
1937 return Builder.CreateXor(Cond, AConst);
1939 return nullptr;
1942 /// We have an expression of the form (A & C) | (B & D). Try to simplify this
1943 /// to "A' ? C : D", where A' is a boolean or vector of booleans.
1944 Value *InstCombiner::matchSelectFromAndOr(Value *A, Value *C, Value *B,
1945 Value *D) {
1946 // The potential condition of the select may be bitcasted. In that case, look
1947 // through its bitcast and the corresponding bitcast of the 'not' condition.
1948 Type *OrigType = A->getType();
1949 A = peekThroughBitcast(A, true);
1950 B = peekThroughBitcast(B, true);
1951 if (Value *Cond = getSelectCondition(A, B)) {
1952 // ((bc Cond) & C) | ((bc ~Cond) & D) --> bc (select Cond, (bc C), (bc D))
1953 // The bitcasts will either all exist or all not exist. The builder will
1954 // not create unnecessary casts if the types already match.
1955 Value *BitcastC = Builder.CreateBitCast(C, A->getType());
1956 Value *BitcastD = Builder.CreateBitCast(D, A->getType());
1957 Value *Select = Builder.CreateSelect(Cond, BitcastC, BitcastD);
1958 return Builder.CreateBitCast(Select, OrigType);
1961 return nullptr;
1964 /// Fold (icmp)|(icmp) if possible.
1965 Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
1966 Instruction &CxtI) {
1967 // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
1968 // if K1 and K2 are a one-bit mask.
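// For example: ((A & 1) == 0) | ((A & 8) == 0) --> (A & 9) != 9, i.e. the
// 'or' is true unless both bits are set.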
1969 if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, false, CxtI))
1970 return V;
1972 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1974 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
1975 ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
1977 // Fold (icmp ult/ule (A + C1), C3) | (icmp ult/ule (A + C2), C3)
1978 // --> (icmp ult/ule ((A & ~(C1 ^ C2)) + max(C1, C2)), C3)
1979 // The original condition actually refers to the following two ranges:
1980 // [MAX_UINT-C1+1, MAX_UINT-C1+1+C3] and [MAX_UINT-C2+1, MAX_UINT-C2+1+C3]
1981 // We can fold these two ranges if:
1982 // 1) C1 and C2 are unsigned greater than C3.
1983 // 2) The two ranges are separated.
1984 // 3) C1 ^ C2 is a one-bit mask.
1985 // 4) LowRange1 ^ LowRange2 and HighRange1 ^ HighRange2 are one-bit masks.
1986 // This implies all values in the two ranges differ by exactly one bit.
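// For example, on i8 with C1 = 4, C2 = 12, C3 = 3 (an illustrative sketch):
//   (icmp ult (add %a, 4), 3) | (icmp ult (add %a, 12), 3)
//   --> icmp ult (add (and %a, -9), 12), 3
// because the two ranges {-4,-3,-2} and {-12,-11,-10} differ only in bit 3,
// which the mask ~8 (i.e. -9) clears.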
1988 if ((PredL == ICmpInst::ICMP_ULT || PredL == ICmpInst::ICMP_ULE) &&
1989 PredL == PredR && LHSC && RHSC && LHS->hasOneUse() && RHS->hasOneUse() &&
1990 LHSC->getType() == RHSC->getType() &&
1991 LHSC->getValue() == (RHSC->getValue())) {
1993 Value *LAdd = LHS->getOperand(0);
1994 Value *RAdd = RHS->getOperand(0);
1996 Value *LAddOpnd, *RAddOpnd;
1997 ConstantInt *LAddC, *RAddC;
1998 if (match(LAdd, m_Add(m_Value(LAddOpnd), m_ConstantInt(LAddC))) &&
1999 match(RAdd, m_Add(m_Value(RAddOpnd), m_ConstantInt(RAddC))) &&
2000 LAddC->getValue().ugt(LHSC->getValue()) &&
2001 RAddC->getValue().ugt(LHSC->getValue())) {
2003 APInt DiffC = LAddC->getValue() ^ RAddC->getValue();
2004 if (LAddOpnd == RAddOpnd && DiffC.isPowerOf2()) {
2005 ConstantInt *MaxAddC = nullptr;
2006 if (LAddC->getValue().ult(RAddC->getValue()))
2007 MaxAddC = RAddC;
2008 else
2009 MaxAddC = LAddC;
2011 APInt RRangeLow = -RAddC->getValue();
2012 APInt RRangeHigh = RRangeLow + LHSC->getValue();
2013 APInt LRangeLow = -LAddC->getValue();
2014 APInt LRangeHigh = LRangeLow + LHSC->getValue();
2015 APInt LowRangeDiff = RRangeLow ^ LRangeLow;
2016 APInt HighRangeDiff = RRangeHigh ^ LRangeHigh;
2017 APInt RangeDiff = LRangeLow.sgt(RRangeLow) ? LRangeLow - RRangeLow
2018 : RRangeLow - LRangeLow;
2020 if (LowRangeDiff.isPowerOf2() && LowRangeDiff == HighRangeDiff &&
2021 RangeDiff.ugt(LHSC->getValue())) {
2022 Value *MaskC = ConstantInt::get(LAddC->getType(), ~DiffC);
2024 Value *NewAnd = Builder.CreateAnd(LAddOpnd, MaskC);
2025 Value *NewAdd = Builder.CreateAdd(NewAnd, MaxAddC);
2026 return Builder.CreateICmp(LHS->getPredicate(), NewAdd, LHSC);
2032 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
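// For example: (icmp slt A, B) | (icmp eq A, B) --> icmp sle A, B.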
2033 if (predicatesFoldable(PredL, PredR)) {
2034 if (LHS->getOperand(0) == RHS->getOperand(1) &&
2035 LHS->getOperand(1) == RHS->getOperand(0))
2036 LHS->swapOperands();
2037 if (LHS->getOperand(0) == RHS->getOperand(0) &&
2038 LHS->getOperand(1) == RHS->getOperand(1)) {
2039 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
2040 unsigned Code = getICmpCode(LHS) | getICmpCode(RHS);
2041 bool IsSigned = LHS->isSigned() || RHS->isSigned();
2042 return getNewICmpValue(Code, IsSigned, Op0, Op1, Builder);
2046 // handle (roughly):
2047 // (icmp ne (A & B), C) | (icmp ne (A & D), E)
2048 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, false, Builder))
2049 return V;
2051 Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
2052 if (LHS->hasOneUse() || RHS->hasOneUse()) {
2053 // (icmp eq B, 0) | (icmp ult A, B) -> (icmp ule A, B-1)
2054 // (icmp eq B, 0) | (icmp ugt B, A) -> (icmp ule A, B-1)
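// (This is valid because when B == 0, B-1 wraps to the all-ones value and the
// 'ule' is always true; otherwise A u< B is exactly A u<= B-1.)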
2055 Value *A = nullptr, *B = nullptr;
2056 if (PredL == ICmpInst::ICMP_EQ && LHSC && LHSC->isZero()) {
2057 B = LHS0;
2058 if (PredR == ICmpInst::ICMP_ULT && LHS0 == RHS->getOperand(1))
2059 A = RHS0;
2060 else if (PredR == ICmpInst::ICMP_UGT && LHS0 == RHS0)
2061 A = RHS->getOperand(1);
2063 // (icmp ult A, B) | (icmp eq B, 0) -> (icmp ule A, B-1)
2064 // (icmp ugt B, A) | (icmp eq B, 0) -> (icmp ule A, B-1)
2065 else if (PredR == ICmpInst::ICMP_EQ && RHSC && RHSC->isZero()) {
2066 B = RHS0;
2067 if (PredL == ICmpInst::ICMP_ULT && RHS0 == LHS->getOperand(1))
2068 A = LHS0;
2069 else if (PredL == ICmpInst::ICMP_UGT && LHS0 == RHS0)
2070 A = LHS->getOperand(1);
2072 if (A && B)
2073 return Builder.CreateICmp(
2074 ICmpInst::ICMP_UGE,
2075 Builder.CreateAdd(B, ConstantInt::getSigned(B->getType(), -1)), A);
2078 // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
2079 if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/true))
2080 return V;
2082 // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n
2083 if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/true))
2084 return V;
2086 if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, false, Builder))
2087 return V;
2089 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
2090 if (!LHSC || !RHSC)
2091 return nullptr;
2093 if (LHSC == RHSC && PredL == PredR) {
2094 // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
2095 if (PredL == ICmpInst::ICMP_NE && LHSC->isZero()) {
2096 Value *NewOr = Builder.CreateOr(LHS0, RHS0);
2097 return Builder.CreateICmp(PredL, NewOr, LHSC);
2101 // (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1)
2102 // iff C2 + CA == C1.
2103 if (PredL == ICmpInst::ICMP_ULT && PredR == ICmpInst::ICMP_EQ) {
2104 ConstantInt *AddC;
2105 if (match(LHS0, m_Add(m_Specific(RHS0), m_ConstantInt(AddC))))
2106 if (RHSC->getValue() + AddC->getValue() == LHSC->getValue())
2107 return Builder.CreateICmpULE(LHS0, LHSC);
2110 // From here on, we only handle:
2111 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
2112 if (LHS0 != RHS0)
2113 return nullptr;
2115 // ICMP_[US][GL]E X, C is folded to ICMP_[US][GL]T elsewhere.
2116 if (PredL == ICmpInst::ICMP_UGE || PredL == ICmpInst::ICMP_ULE ||
2117 PredR == ICmpInst::ICMP_UGE || PredR == ICmpInst::ICMP_ULE ||
2118 PredL == ICmpInst::ICMP_SGE || PredL == ICmpInst::ICMP_SLE ||
2119 PredR == ICmpInst::ICMP_SGE || PredR == ICmpInst::ICMP_SLE)
2120 return nullptr;
2122 // We can't fold (ugt x, C) | (sgt x, C2).
2123 if (!predicatesFoldable(PredL, PredR))
2124 return nullptr;
2126 // Ensure that the larger constant is on the RHS.
2127 bool ShouldSwap;
2128 if (CmpInst::isSigned(PredL) ||
2129 (ICmpInst::isEquality(PredL) && CmpInst::isSigned(PredR)))
2130 ShouldSwap = LHSC->getValue().sgt(RHSC->getValue());
2131 else
2132 ShouldSwap = LHSC->getValue().ugt(RHSC->getValue());
2134 if (ShouldSwap) {
2135 std::swap(LHS, RHS);
2136 std::swap(LHSC, RHSC);
2137 std::swap(PredL, PredR);
2140 // At this point, we know we have two icmp instructions
2141 // comparing a value against two constants and or'ing the result
2142 // together. Because of the above check, we know that we only have
2143 // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
2144 // icmp folding check above), that the two constants are not
2145 // equal.
2146 assert(LHSC != RHSC && "Compares not folded above?");
2148 switch (PredL) {
2149 default:
2150 llvm_unreachable("Unknown integer condition code!");
2151 case ICmpInst::ICMP_EQ:
2152 switch (PredR) {
2153 default:
2154 llvm_unreachable("Unknown integer condition code!");
2155 case ICmpInst::ICMP_EQ:
2156 // Potential folds for this case should already be handled.
2157 break;
2158 case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
2159 case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
2160 break;
2162 break;
2163 case ICmpInst::ICMP_ULT:
2164 switch (PredR) {
2165 default:
2166 llvm_unreachable("Unknown integer condition code!");
2167 case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
2168 break;
2169 case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
2170 assert(!RHSC->isMaxValue(false) && "Missed icmp simplification");
2171 return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1,
2172 false, false);
2174 break;
2175 case ICmpInst::ICMP_SLT:
2176 switch (PredR) {
2177 default:
2178 llvm_unreachable("Unknown integer condition code!");
2179 case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
2180 break;
2181 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
2182 assert(!RHSC->isMaxValue(true) && "Missed icmp simplification");
2183 return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1, true,
2184 false);
2186 break;
2188 return nullptr;
2191 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
2192 // here. We should standardize that construct where it is needed or choose some
2193 // other way to ensure that commutated variants of patterns are not missed.
2194 Instruction *InstCombiner::visitOr(BinaryOperator &I) {
2195 if (Value *V = SimplifyOrInst(I.getOperand(0), I.getOperand(1),
2196 SQ.getWithInstruction(&I)))
2197 return replaceInstUsesWith(I, V);
2199 if (SimplifyAssociativeOrCommutative(I))
2200 return &I;
2202 if (Instruction *X = foldVectorBinop(I))
2203 return X;
2205 // See if we can simplify any instructions used by the instruction whose sole
2206 // purpose is to compute bits we don't care about.
2207 if (SimplifyDemandedInstructionBits(I))
2208 return &I;
2210 // Do this before using distributive laws to catch simple and/or/not patterns.
2211 if (Instruction *Xor = foldOrToXor(I, Builder))
2212 return Xor;
2214 // (A&B)|(A&C) -> A&(B|C) etc
2215 if (Value *V = SimplifyUsingDistributiveLaws(I))
2216 return replaceInstUsesWith(I, V);
2218 if (Value *V = SimplifyBSwap(I, Builder))
2219 return replaceInstUsesWith(I, V);
2221 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
2222 return FoldedLogic;
2224 if (Instruction *BSwap = matchBSwap(I))
2225 return BSwap;
2227 if (Instruction *Rotate = matchRotate(I))
2228 return Rotate;
2230 Value *X, *Y;
2231 const APInt *CV;
2232 if (match(&I, m_c_Or(m_OneUse(m_Xor(m_Value(X), m_APInt(CV))), m_Value(Y))) &&
2233 !CV->isAllOnesValue() && MaskedValueIsZero(Y, *CV, 0, &I)) {
2234 // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0
2235 // The check for a 'not' op is for efficiency (if Y is known zero --> ~X).
2236 Value *Or = Builder.CreateOr(X, Y);
2237 return BinaryOperator::CreateXor(Or, ConstantInt::get(I.getType(), *CV));
2240 // (A & C)|(B & D)
2241 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2242 Value *A, *B, *C, *D;
2243 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
2244 match(Op1, m_And(m_Value(B), m_Value(D)))) {
2245 ConstantInt *C1 = dyn_cast<ConstantInt>(C);
2246 ConstantInt *C2 = dyn_cast<ConstantInt>(D);
2247 if (C1 && C2) { // (A & C1)|(B & C2)
2248 Value *V1 = nullptr, *V2 = nullptr;
2249 if ((C1->getValue() & C2->getValue()).isNullValue()) {
2250 // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
2251 // iff (C1&C2) == 0 and (N&~C1) == 0
2252 if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
2253 ((V1 == B &&
2254 MaskedValueIsZero(V2, ~C1->getValue(), 0, &I)) || // (V|N)
2255 (V2 == B &&
2256 MaskedValueIsZero(V1, ~C1->getValue(), 0, &I)))) // (N|V)
2257 return BinaryOperator::CreateAnd(A,
2258 Builder.getInt(C1->getValue()|C2->getValue()));
2259 // Or commutes, try both ways.
2260 if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
2261 ((V1 == A &&
2262 MaskedValueIsZero(V2, ~C2->getValue(), 0, &I)) || // (V|N)
2263 (V2 == A &&
2264 MaskedValueIsZero(V1, ~C2->getValue(), 0, &I)))) // (N|V)
2265 return BinaryOperator::CreateAnd(B,
2266 Builder.getInt(C1->getValue()|C2->getValue()));
2268 // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
2269 // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
2270 ConstantInt *C3 = nullptr, *C4 = nullptr;
2271 if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
2272 (C3->getValue() & ~C1->getValue()).isNullValue() &&
2273 match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
2274 (C4->getValue() & ~C2->getValue()).isNullValue()) {
2275 V2 = Builder.CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
2276 return BinaryOperator::CreateAnd(V2,
2277 Builder.getInt(C1->getValue()|C2->getValue()));
2281 if (C1->getValue() == ~C2->getValue()) {
2282 Value *X;
2284 // ((X|B)&C1)|(B&C2) -> (X&C1) | B iff C1 == ~C2
2285 if (match(A, m_c_Or(m_Value(X), m_Specific(B))))
2286 return BinaryOperator::CreateOr(Builder.CreateAnd(X, C1), B);
2287 // (A&C2)|((X|A)&C1) -> (X&C2) | A iff C1 == ~C2
2288 if (match(B, m_c_Or(m_Specific(A), m_Value(X))))
2289 return BinaryOperator::CreateOr(Builder.CreateAnd(X, C2), A);
2291 // ((X^B)&C1)|(B&C2) -> (X&C1) ^ B iff C1 == ~C2
2292 if (match(A, m_c_Xor(m_Value(X), m_Specific(B))))
2293 return BinaryOperator::CreateXor(Builder.CreateAnd(X, C1), B);
2294 // (A&C2)|((X^A)&C1) -> (X&C2) ^ A iff C1 == ~C2
2295 if (match(B, m_c_Xor(m_Specific(A), m_Value(X))))
2296 return BinaryOperator::CreateXor(Builder.CreateAnd(X, C2), A);
2300 // Don't try to form a select if it's unlikely that we'll get rid of at
2301 // least one of the operands. A select is generally more expensive than the
2302 // 'or' that it is replacing.
2303 if (Op0->hasOneUse() || Op1->hasOneUse()) {
2304 // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants.
2305 if (Value *V = matchSelectFromAndOr(A, C, B, D))
2306 return replaceInstUsesWith(I, V);
2307 if (Value *V = matchSelectFromAndOr(A, C, D, B))
2308 return replaceInstUsesWith(I, V);
2309 if (Value *V = matchSelectFromAndOr(C, A, B, D))
2310 return replaceInstUsesWith(I, V);
2311 if (Value *V = matchSelectFromAndOr(C, A, D, B))
2312 return replaceInstUsesWith(I, V);
2313 if (Value *V = matchSelectFromAndOr(B, D, A, C))
2314 return replaceInstUsesWith(I, V);
2315 if (Value *V = matchSelectFromAndOr(B, D, C, A))
2316 return replaceInstUsesWith(I, V);
2317 if (Value *V = matchSelectFromAndOr(D, B, A, C))
2318 return replaceInstUsesWith(I, V);
2319 if (Value *V = matchSelectFromAndOr(D, B, C, A))
2320 return replaceInstUsesWith(I, V);
2324 // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C
2325 if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
2326 if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
2327 return BinaryOperator::CreateOr(Op0, C);
2329 // ((A ^ C) ^ B) | (B ^ A) -> (B ^ A) | C
2330 if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
2331 if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
2332 return BinaryOperator::CreateOr(Op1, C);
2334 // ((B | C) & A) | B -> B | (A & C)
2335 if (match(Op0, m_And(m_Or(m_Specific(Op1), m_Value(C)), m_Value(A))))
2336 return BinaryOperator::CreateOr(Op1, Builder.CreateAnd(A, C));
2338 if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder))
2339 return DeMorgan;
2341 // Canonicalize xor to the RHS.
2342 bool SwappedForXor = false;
2343 if (match(Op0, m_Xor(m_Value(), m_Value()))) {
2344 std::swap(Op0, Op1);
2345 SwappedForXor = true;
2348 // A | ( A ^ B) -> A | B
2349 // A | (~A ^ B) -> A | ~B
2350 // (A & B) | (A ^ B) -> A | B
2351 if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
2352 if (Op0 == A || Op0 == B)
2353 return BinaryOperator::CreateOr(A, B);
2355 if (match(Op0, m_And(m_Specific(A), m_Specific(B))) ||
2356 match(Op0, m_And(m_Specific(B), m_Specific(A))))
2357 return BinaryOperator::CreateOr(A, B);
2359 if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) {
2360 Value *Not = Builder.CreateNot(B, B->getName() + ".not");
2361 return BinaryOperator::CreateOr(Not, Op0);
2363 if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) {
2364 Value *Not = Builder.CreateNot(A, A->getName() + ".not");
2365 return BinaryOperator::CreateOr(Not, Op0);
2369 // A | ~(A | B) -> A | ~B
2370 // A | ~(A ^ B) -> A | ~B
2371 if (match(Op1, m_Not(m_Value(A))))
2372 if (BinaryOperator *B = dyn_cast<BinaryOperator>(A))
2373 if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) &&
2374 Op1->hasOneUse() && (B->getOpcode() == Instruction::Or ||
2375 B->getOpcode() == Instruction::Xor)) {
2376 Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) :
2377 B->getOperand(0);
2378 Value *Not = Builder.CreateNot(NotOp, NotOp->getName() + ".not");
2379 return BinaryOperator::CreateOr(Not, Op0);
2382 if (SwappedForXor)
2383 std::swap(Op0, Op1);
2386 ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
2387 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
2388 if (LHS && RHS)
2389 if (Value *Res = foldOrOfICmps(LHS, RHS, I))
2390 return replaceInstUsesWith(I, Res);
2392 // TODO: Make this recursive; it's a little tricky because an arbitrary
2393 // number of 'or' instructions might have to be created.
2394 Value *X, *Y;
2395 if (LHS && match(Op1, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
2396 if (auto *Cmp = dyn_cast<ICmpInst>(X))
2397 if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
2398 return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
2399 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
2400 if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
2401 return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
2403 if (RHS && match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
2404 if (auto *Cmp = dyn_cast<ICmpInst>(X))
2405 if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
2406 return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
2407 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
2408 if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
2409 return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
2413 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
2414 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
2415 if (Value *Res = foldLogicOfFCmps(LHS, RHS, false))
2416 return replaceInstUsesWith(I, Res);
2418 if (Instruction *CastedOr = foldCastedBitwiseLogic(I))
2419 return CastedOr;
2421 // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>.
2422 if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) &&
2423 A->getType()->isIntOrIntVectorTy(1))
2424 return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op1);
2425 if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) &&
2426 A->getType()->isIntOrIntVectorTy(1))
2427 return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op0);
2429 // Note: If we've gotten to the point of visiting the outer OR, then the
2430 // inner one couldn't be simplified. If it was a constant, then it won't
2431 // be simplified by a later pass either, so we try swapping the inner/outer
2432 // ORs in the hopes that we'll be able to simplify it this way.
2433 // (X|C) | V --> (X|V) | C
2434 ConstantInt *CI;
2435 if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) &&
2436 match(Op0, m_Or(m_Value(A), m_ConstantInt(CI)))) {
2437 Value *Inner = Builder.CreateOr(A, Op1);
2438 Inner->takeName(Op0);
2439 return BinaryOperator::CreateOr(Inner, CI);
2442 // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D))
2443 // Since this OR statement hasn't been optimized further yet, we hope
2444 // that this transformation will allow the new ORs to be optimized.
2446 Value *X = nullptr, *Y = nullptr;
2447 if (Op0->hasOneUse() && Op1->hasOneUse() &&
2448 match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) &&
2449 match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) {
2450 Value *orTrue = Builder.CreateOr(A, C);
2451 Value *orFalse = Builder.CreateOr(B, D);
2452 return SelectInst::Create(X, orTrue, orFalse);
2456 return nullptr;
2459 /// A ^ B can be specified using other logic ops in a variety of patterns. We
2460 /// can fold these early and efficiently by morphing an existing instruction.
2461 static Instruction *foldXorToXor(BinaryOperator &I,
2462 InstCombiner::BuilderTy &Builder) {
2463 assert(I.getOpcode() == Instruction::Xor);
2464 Value *Op0 = I.getOperand(0);
2465 Value *Op1 = I.getOperand(1);
2466 Value *A, *B;
2468 // There are 4 commuted variants for each of the basic patterns.
2470 // (A & B) ^ (A | B) -> A ^ B
2471 // (A & B) ^ (B | A) -> A ^ B
2472 // (A | B) ^ (A & B) -> A ^ B
2473 // (A | B) ^ (B & A) -> A ^ B
2474 if (match(&I, m_c_Xor(m_And(m_Value(A), m_Value(B)),
2475 m_c_Or(m_Deferred(A), m_Deferred(B))))) {
2476 I.setOperand(0, A);
2477 I.setOperand(1, B);
2478 return &I;
2481 // (A | ~B) ^ (~A | B) -> A ^ B
2482 // (~B | A) ^ (~A | B) -> A ^ B
2483 // (~A | B) ^ (A | ~B) -> A ^ B
2484 // (B | ~A) ^ (A | ~B) -> A ^ B
2485 if (match(&I, m_Xor(m_c_Or(m_Value(A), m_Not(m_Value(B))),
2486 m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B))))) {
2487 I.setOperand(0, A);
2488 I.setOperand(1, B);
2489 return &I;
2492 // (A & ~B) ^ (~A & B) -> A ^ B
2493 // (~B & A) ^ (~A & B) -> A ^ B
2494 // (~A & B) ^ (A & ~B) -> A ^ B
2495 // (B & ~A) ^ (A & ~B) -> A ^ B
2496 if (match(&I, m_Xor(m_c_And(m_Value(A), m_Not(m_Value(B))),
2497 m_c_And(m_Not(m_Deferred(A)), m_Deferred(B))))) {
2498 I.setOperand(0, A);
2499 I.setOperand(1, B);
2500 return &I;
2503 // For the remaining cases we need to get rid of one of the operands.
2504 if (!Op0->hasOneUse() && !Op1->hasOneUse())
2505 return nullptr;
2507 // (A | B) ^ ~(A & B) -> ~(A ^ B)
2508 // (A | B) ^ ~(B & A) -> ~(A ^ B)
2509 // (A & B) ^ ~(A | B) -> ~(A ^ B)
2510 // (A & B) ^ ~(B | A) -> ~(A ^ B)
2511 // Complexity sorting ensures the not will be on the right side.
2512 if ((match(Op0, m_Or(m_Value(A), m_Value(B))) &&
2513 match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B))))) ||
2514 (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2515 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))))
2516 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
2518 return nullptr;
2521 Value *InstCombiner::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
2522 if (predicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) {
2523 if (LHS->getOperand(0) == RHS->getOperand(1) &&
2524 LHS->getOperand(1) == RHS->getOperand(0))
2525 LHS->swapOperands();
2526 if (LHS->getOperand(0) == RHS->getOperand(0) &&
2527 LHS->getOperand(1) == RHS->getOperand(1)) {
2528 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
2529 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
2530 unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS);
2531 bool IsSigned = LHS->isSigned() || RHS->isSigned();
2532 return getNewICmpValue(Code, IsSigned, Op0, Op1, Builder);
2536 // TODO: This can be generalized to compares of non-signbits using
2537 // decomposeBitTestICmp(). It could be enhanced more by using (something like)
2538 // foldLogOpOfMaskedICmps().
2539 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
2540 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
2541 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
2542 if ((LHS->hasOneUse() || RHS->hasOneUse()) &&
2543 LHS0->getType() == RHS0->getType() &&
2544 LHS0->getType()->isIntOrIntVectorTy()) {
2545 // (X > -1) ^ (Y > -1) --> (X ^ Y) < 0
2546 // (X < 0) ^ (Y < 0) --> (X ^ Y) < 0
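// (The xor of the two sign-bit tests is true exactly when the sign bits
// differ, which is exactly when X ^ Y is negative.)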
2547 if ((PredL == CmpInst::ICMP_SGT && match(LHS1, m_AllOnes()) &&
2548 PredR == CmpInst::ICMP_SGT && match(RHS1, m_AllOnes())) ||
2549 (PredL == CmpInst::ICMP_SLT && match(LHS1, m_Zero()) &&
2550 PredR == CmpInst::ICMP_SLT && match(RHS1, m_Zero()))) {
2551 Value *Zero = ConstantInt::getNullValue(LHS0->getType());
2552 return Builder.CreateICmpSLT(Builder.CreateXor(LHS0, RHS0), Zero);
2554 // (X > -1) ^ (Y < 0) --> (X ^ Y) > -1
2555 // (X < 0) ^ (Y > -1) --> (X ^ Y) > -1
2556 if ((PredL == CmpInst::ICMP_SGT && match(LHS1, m_AllOnes()) &&
2557 PredR == CmpInst::ICMP_SLT && match(RHS1, m_Zero())) ||
2558 (PredL == CmpInst::ICMP_SLT && match(LHS1, m_Zero()) &&
2559 PredR == CmpInst::ICMP_SGT && match(RHS1, m_AllOnes()))) {
2560 Value *MinusOne = ConstantInt::getAllOnesValue(LHS0->getType());
2561 return Builder.CreateICmpSGT(Builder.CreateXor(LHS0, RHS0), MinusOne);
2565 // Instead of trying to imitate the folds for and/or, decompose this 'xor'
2566 // into those logic ops. That is, try to turn this into an and-of-icmps
2567 // because we have many folds for that pattern.
2569 // This is based on a truth table definition of xor:
2570 // X ^ Y --> (X | Y) & !(X & Y)
2571 if (Value *OrICmp = SimplifyBinOp(Instruction::Or, LHS, RHS, SQ)) {
2572 // TODO: If OrICmp is true, then the definition of xor simplifies to !(X&Y).
2573 // TODO: If OrICmp is false, the whole thing is false (InstSimplify?).
2574 if (Value *AndICmp = SimplifyBinOp(Instruction::And, LHS, RHS, SQ)) {
2575 // TODO: Independently handle cases where the 'and' side is a constant.
2576 if (OrICmp == LHS && AndICmp == RHS && RHS->hasOneUse()) {
2577 // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS
2578 RHS->setPredicate(RHS->getInversePredicate());
2579 return Builder.CreateAnd(LHS, RHS);
2581 if (OrICmp == RHS && AndICmp == LHS && LHS->hasOneUse()) {
2582 // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS
2583 LHS->setPredicate(LHS->getInversePredicate());
2584 return Builder.CreateAnd(LHS, RHS);
2589 return nullptr;
2592 /// If we have a masked merge, in the canonical form of:
2593 /// (assuming that A only has one use.)
2594 ///      |     A     |  |B|
2595 ///      ((x ^ y) & M) ^ y
2596 ///       |  D  |
2597 /// * If M is inverted:
2598 ///       |  D  |
2599 ///      ((x ^ y) & ~M) ^ y
2600 /// We can canonicalize by swapping the final xor operand
2601 /// to eliminate the 'not' of the mask.
2602 /// ((x ^ y) & M) ^ x
2603 /// * If M is a constant, and D has one use, we transform to 'and' / 'or' ops
2604 /// because that shortens the dependency chain and improves analysis:
2605 /// (x & M) | (y & ~M)
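/// For example, with a constant mask (an illustrative sketch):
///   ((x ^ y) & 14) ^ y  -->  (x & 14) | (y & -15)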
2606 static Instruction *visitMaskedMerge(BinaryOperator &I,
2607 InstCombiner::BuilderTy &Builder) {
2608 Value *B, *X, *D;
2609 Value *M;
2610 if (!match(&I, m_c_Xor(m_Value(B),
2611 m_OneUse(m_c_And(
2612 m_CombineAnd(m_c_Xor(m_Deferred(B), m_Value(X)),
2613 m_Value(D)),
2614 m_Value(M))))))
2615 return nullptr;
2617 Value *NotM;
2618 if (match(M, m_Not(m_Value(NotM)))) {
2619 // De-invert the mask and swap the value in B part.
2620 Value *NewA = Builder.CreateAnd(D, NotM);
2621 return BinaryOperator::CreateXor(NewA, X);
2624 Constant *C;
2625 if (D->hasOneUse() && match(M, m_Constant(C))) {
2626 // Unfold.
2627 Value *LHS = Builder.CreateAnd(X, C);
2628 Value *NotC = Builder.CreateNot(C);
2629 Value *RHS = Builder.CreateAnd(B, NotC);
2630 return BinaryOperator::CreateOr(LHS, RHS);
2633 return nullptr;
2636 // Transform
2637 // ~(x ^ y)
2638 // into:
2639 // (~x) ^ y
2640 // or into
2641 // x ^ (~y)
2642 static Instruction *sinkNotIntoXor(BinaryOperator &I,
2643 InstCombiner::BuilderTy &Builder) {
2644 Value *X, *Y;
2645 // FIXME: one-use check is not needed in general, but currently we are unable
2646 // to fold 'not' into 'icmp', if that 'icmp' has multiple uses. (D35182)
2647 if (!match(&I, m_Not(m_OneUse(m_Xor(m_Value(X), m_Value(Y))))))
2648 return nullptr;
2650 // We only want to do the transform if it is free to do.
2651 if (IsFreeToInvert(X, X->hasOneUse())) {
2652 // Ok, good.
2653 } else if (IsFreeToInvert(Y, Y->hasOneUse())) {
2654 std::swap(X, Y);
2655 } else
2656 return nullptr;
2658 Value *NotX = Builder.CreateNot(X, X->getName() + ".not");
2659 return BinaryOperator::CreateXor(NotX, Y, I.getName() + ".demorgan");
2662 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
2663 // here. We should standardize that construct where it is needed or choose some
2664 // other way to ensure that commutated variants of patterns are not missed.
2665 Instruction *InstCombiner::visitXor(BinaryOperator &I) {
2666 if (Value *V = SimplifyXorInst(I.getOperand(0), I.getOperand(1),
2667 SQ.getWithInstruction(&I)))
2668 return replaceInstUsesWith(I, V);
2670 if (SimplifyAssociativeOrCommutative(I))
2671 return &I;
2673 if (Instruction *X = foldVectorBinop(I))
2674 return X;
2676 if (Instruction *NewXor = foldXorToXor(I, Builder))
2677 return NewXor;
2679 // (A&B)^(A&C) -> A&(B^C) etc
2680 if (Value *V = SimplifyUsingDistributiveLaws(I))
2681 return replaceInstUsesWith(I, V);
2683 // See if we can simplify any instructions used by the instruction whose sole
2684 // purpose is to compute bits we don't care about.
2685 if (SimplifyDemandedInstructionBits(I))
2686 return &I;
2688 if (Value *V = SimplifyBSwap(I, Builder))
2689 return replaceInstUsesWith(I, V);
2691 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2693 // Fold (X & M) ^ (Y & ~M) -> (X & M) | (Y & ~M)
2694 // This is a special case in haveNoCommonBitsSet, but the computeKnownBits
2695 // calls in there are unnecessary as SimplifyDemandedInstructionBits should
2696 // have already taken care of those cases.
2697 Value *M;
2698 if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(M)), m_Value()),
2699 m_c_And(m_Deferred(M), m_Value()))))
2700 return BinaryOperator::CreateOr(Op0, Op1);
2702 // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand.
2703 Value *X, *Y;
2705 // We must eliminate the and/or (one-use) for these transforms to not increase
2706 // the instruction count.
2707 // ~(~X & Y) --> (X | ~Y)
2708 // ~(Y & ~X) --> (X | ~Y)
2709 if (match(&I, m_Not(m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y)))))) {
2710 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
2711 return BinaryOperator::CreateOr(X, NotY);
2713 // ~(~X | Y) --> (X & ~Y)
2714 // ~(Y | ~X) --> (X & ~Y)
2715 if (match(&I, m_Not(m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y)))))) {
2716 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
2717 return BinaryOperator::CreateAnd(X, NotY);
2720 if (Instruction *Xor = visitMaskedMerge(I, Builder))
2721 return Xor;
2723 // Is this a 'not' (~) fed by a binary operator?
2724 BinaryOperator *NotVal;
2725 if (match(&I, m_Not(m_BinOp(NotVal)))) {
2726 if (NotVal->getOpcode() == Instruction::And ||
2727 NotVal->getOpcode() == Instruction::Or) {
2728 // Apply DeMorgan's Law when inverts are free:
2729 // ~(X & Y) --> (~X | ~Y)
2730 // ~(X | Y) --> (~X & ~Y)
2731 if (IsFreeToInvert(NotVal->getOperand(0),
2732 NotVal->getOperand(0)->hasOneUse()) &&
2733 IsFreeToInvert(NotVal->getOperand(1),
2734 NotVal->getOperand(1)->hasOneUse())) {
2735 Value *NotX = Builder.CreateNot(NotVal->getOperand(0), "notlhs");
2736 Value *NotY = Builder.CreateNot(NotVal->getOperand(1), "notrhs");
2737 if (NotVal->getOpcode() == Instruction::And)
2738 return BinaryOperator::CreateOr(NotX, NotY);
2739 return BinaryOperator::CreateAnd(NotX, NotY);
2743 // ~(X - Y) --> ~X + Y
2744 if (match(NotVal, m_Sub(m_Value(X), m_Value(Y))))
2745 if (isa<Constant>(X) || NotVal->hasOneUse())
2746 return BinaryOperator::CreateAdd(Builder.CreateNot(X), Y);
2748 // ~(~X >>s Y) --> (X >>s Y)
2749 if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y))))
2750 return BinaryOperator::CreateAShr(X, Y);
2752 // If we are inverting a right-shifted constant, we may be able to eliminate
2753 // the 'not' by inverting the constant and using the opposite shift type.
2754 // Canonicalization rules ensure that only a negative constant uses 'ashr',
2755 // but we must check that in case that transform has not fired yet.
2757 // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits)
2758 Constant *C;
2759 if (match(NotVal, m_AShr(m_Constant(C), m_Value(Y))) &&
2760 match(C, m_Negative()))
2761 return BinaryOperator::CreateLShr(ConstantExpr::getNot(C), Y);
2763 // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits)
2764 if (match(NotVal, m_LShr(m_Constant(C), m_Value(Y))) &&
2765 match(C, m_NonNegative()))
2766 return BinaryOperator::CreateAShr(ConstantExpr::getNot(C), Y);
2768 // ~(X + C) --> -(C + 1) - X
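// (Follows from ~Z == -Z - 1: ~(X + C) == -(X + C) - 1 == -(C + 1) - X.)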
2769 if (match(Op0, m_Add(m_Value(X), m_Constant(C))))
2770 return BinaryOperator::CreateSub(ConstantExpr::getNeg(AddOne(C)), X);
2773 // Use DeMorgan and reassociation to eliminate a 'not' op.
2774 Constant *C1;
2775 if (match(Op1, m_Constant(C1))) {
2776 Constant *C2;
2777 if (match(Op0, m_OneUse(m_Or(m_Not(m_Value(X)), m_Constant(C2))))) {
2778 // (~X | C2) ^ C1 --> ((X & ~C2) ^ -1) ^ C1 --> (X & ~C2) ^ ~C1
2779 Value *And = Builder.CreateAnd(X, ConstantExpr::getNot(C2));
2780 return BinaryOperator::CreateXor(And, ConstantExpr::getNot(C1));
2782 if (match(Op0, m_OneUse(m_And(m_Not(m_Value(X)), m_Constant(C2))))) {
2783 // (~X & C2) ^ C1 --> ((X | ~C2) ^ -1) ^ C1 --> (X | ~C2) ^ ~C1
2784 Value *Or = Builder.CreateOr(X, ConstantExpr::getNot(C2));
2785 return BinaryOperator::CreateXor(Or, ConstantExpr::getNot(C1));
2789 // not (cmp A, B) = !cmp A, B
2790 CmpInst::Predicate Pred;
2791 if (match(&I, m_Not(m_OneUse(m_Cmp(Pred, m_Value(), m_Value()))))) {
2792 cast<CmpInst>(Op0)->setPredicate(CmpInst::getInversePredicate(Pred));
2793 return replaceInstUsesWith(I, Op0);
2797 const APInt *RHSC;
2798 if (match(Op1, m_APInt(RHSC))) {
2799 Value *X;
2800 const APInt *C;
2801 if (RHSC->isSignMask() && match(Op0, m_Sub(m_APInt(C), m_Value(X)))) {
2802 // (C - X) ^ signmask -> (C + signmask - X)
2803 Constant *NewC = ConstantInt::get(I.getType(), *C + *RHSC);
2804 return BinaryOperator::CreateSub(NewC, X);
2806 if (RHSC->isSignMask() && match(Op0, m_Add(m_Value(X), m_APInt(C)))) {
2807 // (X + C) ^ signmask -> (X + C + signmask)
2808 Constant *NewC = ConstantInt::get(I.getType(), *C + *RHSC);
2809 return BinaryOperator::CreateAdd(X, NewC);
2812 // (X|C1)^C2 -> X^(C1^C2) iff X&C1 == 0
2813 if (match(Op0, m_Or(m_Value(X), m_APInt(C))) &&
2814 MaskedValueIsZero(X, *C, 0, &I)) {
2815 Constant *NewC = ConstantInt::get(I.getType(), *C ^ *RHSC);
2816 Worklist.Add(cast<Instruction>(Op0));
2817 I.setOperand(0, X);
2818 I.setOperand(1, NewC);
2819 return &I;
2824 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1)) {
2825 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
2826 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
2827 if (Op0I->getOpcode() == Instruction::LShr) {
2828 // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3)
2829 // E1 = "X ^ C1"
2830 BinaryOperator *E1;
2831 ConstantInt *C1;
2832 if (Op0I->hasOneUse() &&
2833 (E1 = dyn_cast<BinaryOperator>(Op0I->getOperand(0))) &&
2834 E1->getOpcode() == Instruction::Xor &&
2835 (C1 = dyn_cast<ConstantInt>(E1->getOperand(1)))) {
2836 // fold (C1 >> C2) ^ C3
2837 ConstantInt *C2 = Op0CI, *C3 = RHSC;
2838 APInt FoldConst = C1->getValue().lshr(C2->getValue());
2839 FoldConst ^= C3->getValue();
2840 // Prepare the two operands.
2841 Value *Opnd0 = Builder.CreateLShr(E1->getOperand(0), C2);
2842 Opnd0->takeName(Op0I);
2843 cast<Instruction>(Opnd0)->setDebugLoc(I.getDebugLoc());
2844 Value *FoldVal = ConstantInt::get(Opnd0->getType(), FoldConst);
2846 return BinaryOperator::CreateXor(Opnd0, FoldVal);
2853 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
2854 return FoldedLogic;
2856 // Y ^ (X | Y) --> X & ~Y
2857 // Y ^ (Y | X) --> X & ~Y
2858 if (match(Op1, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op0)))))
2859 return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op0));
2860 // (X | Y) ^ Y --> X & ~Y
2861 // (Y | X) ^ Y --> X & ~Y
2862 if (match(Op0, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op1)))))
2863 return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op1));
2865 // Y ^ (X & Y) --> ~X & Y
2866 // Y ^ (Y & X) --> ~X & Y
2867 if (match(Op1, m_OneUse(m_c_And(m_Value(X), m_Specific(Op0)))))
2868 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(X));
2869 // (X & Y) ^ Y --> ~X & Y
2870 // (Y & X) ^ Y --> ~X & Y
2871 // Canonical form is (X & C) ^ C; don't touch that.
2872 // TODO: A 'not' op is better for analysis and codegen, but demanded bits must
2873 // be fixed to prefer that (otherwise we get infinite looping).
2874 if (!match(Op1, m_Constant()) &&
2875 match(Op0, m_OneUse(m_c_And(m_Value(X), m_Specific(Op1)))))
2876 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(X));
2878 Value *A, *B, *C;
2879 // (A ^ B) ^ (A | C) --> (~A & C) ^ B -- There are 4 commuted variants.
2880 if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))),
2881 m_OneUse(m_c_Or(m_Deferred(A), m_Value(C))))))
2882 return BinaryOperator::CreateXor(
2883 Builder.CreateAnd(Builder.CreateNot(A), C), B);
2885 // (A ^ B) ^ (B | C) --> (~B & C) ^ A -- There are 4 commuted variants.
2886 if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))),
2887 m_OneUse(m_c_Or(m_Deferred(B), m_Value(C))))))
2888 return BinaryOperator::CreateXor(
2889 Builder.CreateAnd(Builder.CreateNot(B), C), A);
2891 // (A & B) ^ (A ^ B) -> (A | B)
2892 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2893 match(Op1, m_c_Xor(m_Specific(A), m_Specific(B))))
2894 return BinaryOperator::CreateOr(A, B);
2895 // (A ^ B) ^ (A & B) -> (A | B)
2896 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
2897 match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
2898 return BinaryOperator::CreateOr(A, B);
2900 // (A & ~B) ^ ~A -> ~(A & B)
2901 // (~B & A) ^ ~A -> ~(A & B)
2902 if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
2903 match(Op1, m_Not(m_Specific(A))))
2904 return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));
2906 if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
2907 if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
2908 if (Value *V = foldXorOfICmps(LHS, RHS))
2909 return replaceInstUsesWith(I, V);
2911 if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
2912 return CastedXor;
2914 // Canonicalize a shifty way to code absolute value to the common pattern.
2915 // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1.
2916 // We're relying on the fact that we only do this transform when the shift has
2917 // exactly 2 uses and the add has exactly 1 use (otherwise, we might increase
2918 // instructions).
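// For example (an illustrative i32 sketch):
//   %b = ashr i32 %a, 31
//   %s = add i32 %a, %b
//   %r = xor i32 %s, %b
// -->
//   %c = icmp slt i32 %a, 0
//   %n = sub i32 0, %a
//   %r = select i1 %c, i32 %n, i32 %a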
2919 if (Op0->hasNUses(2))
2920 std::swap(Op0, Op1);
2922 const APInt *ShAmt;
2923 Type *Ty = I.getType();
2924 if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
2925 Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
2926 match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) {
2927 // B = ashr i32 A, 31 ; smear the sign bit
2928 // xor (add A, B), B ; add -1 and flip bits if negative
2929 // --> (A < 0) ? -A : A
2930 Value *Cmp = Builder.CreateICmpSLT(A, ConstantInt::getNullValue(Ty));
2931 // Copy the nuw/nsw flags from the add to the negate.
2932 auto *Add = cast<BinaryOperator>(Op0);
2933 Value *Neg = Builder.CreateNeg(A, "", Add->hasNoUnsignedWrap(),
2934 Add->hasNoSignedWrap());
2935 return SelectInst::Create(Cmp, Neg, A);
2938 // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max:
2940 // %notx = xor i32 %x, -1
2941 // %cmp1 = icmp sgt i32 %notx, %y
2942 // %smax = select i1 %cmp1, i32 %notx, i32 %y
2943 // %res = xor i32 %smax, -1
2944 // =>
2945 // %noty = xor i32 %y, -1
2946 // %cmp2 = icmp slt %x, %noty
2947 // %res = select i1 %cmp2, i32 %x, i32 %noty
2949 // Same is applicable for smin/umax/umin.
2950 if (match(Op1, m_AllOnes()) && Op0->hasOneUse()) {
2951 Value *LHS, *RHS;
2952 SelectPatternFlavor SPF = matchSelectPattern(Op0, LHS, RHS).Flavor;
2953 if (SelectPatternResult::isMinOrMax(SPF)) {
2954 // It's possible we get here before the not has been simplified, so make
2955 // sure the input to the not isn't freely invertible.
2956 if (match(LHS, m_Not(m_Value(X))) && !IsFreeToInvert(X, X->hasOneUse())) {
2957 Value *NotY = Builder.CreateNot(RHS);
2958 return SelectInst::Create(
2959 Builder.CreateICmp(getInverseMinMaxPred(SPF), X, NotY), X, NotY);
2962 // It's possible we get here before the not has been simplified, so make
2963 // sure the input to the not isn't freely invertible.
2964 if (match(RHS, m_Not(m_Value(Y))) && !IsFreeToInvert(Y, Y->hasOneUse())) {
2965 Value *NotX = Builder.CreateNot(LHS);
2966 return SelectInst::Create(
2967 Builder.CreateICmp(getInverseMinMaxPred(SPF), NotX, Y), NotX, Y);
2970 // If both sides are freely invertible, then we can get rid of the xor
2971 // completely.
2972 if (IsFreeToInvert(LHS, !LHS->hasNUsesOrMore(3)) &&
2973 IsFreeToInvert(RHS, !RHS->hasNUsesOrMore(3))) {
2974 Value *NotLHS = Builder.CreateNot(LHS);
2975 Value *NotRHS = Builder.CreateNot(RHS);
2976 return SelectInst::Create(
2977 Builder.CreateICmp(getInverseMinMaxPred(SPF), NotLHS, NotRHS),
2978 NotLHS, NotRHS);
2983 if (Instruction *NewXor = sinkNotIntoXor(I, Builder))
2984 return NewXor;
2986 return nullptr;