//===- InstCombineAndOrXor.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitAnd, visitOr, and visitXor functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// This is the complement of getICmpCode, which turns an opcode and two
/// operands into either a constant true or false, or a brand new ICmp
/// instruction. The sign is passed in to determine which kind of predicate to
/// use in the new icmp instruction.
static Value *getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS,
                              InstCombiner::BuilderTy &Builder) {
  ICmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForICmpCode(Code, Sign, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateICmp(NewPred, LHS, RHS);
}

/// This is the complement of getFCmpCode, which turns an opcode and two
/// operands into either an FCmp instruction, or a true/false constant.
static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
                           InstCombiner::BuilderTy &Builder) {
  FCmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForFCmpCode(Code, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateFCmp(NewPred, LHS, RHS);
}

/// Transform BITWISE_OP(BSWAP(A), BSWAP(B)) or
/// BITWISE_OP(BSWAP(A), Constant) to BSWAP(BITWISE_OP(A, B))
/// \param I Binary operator to transform.
/// \return Pointer to node that must replace the original binary operator, or
///         null pointer if no transformation was made.
static Value *SimplifyBSwap(BinaryOperator &I,
                            InstCombiner::BuilderTy &Builder) {
  assert(I.isBitwiseLogicOp() && "Unexpected opcode for bswap simplifying");

  Value *OldLHS = I.getOperand(0);
  Value *OldRHS = I.getOperand(1);

  Value *NewLHS;
  if (!match(OldLHS, m_BSwap(m_Value(NewLHS))))
    return nullptr;

  Value *NewRHS;
  const APInt *C;

  if (match(OldRHS, m_BSwap(m_Value(NewRHS)))) {
    // OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
    if (!OldLHS->hasOneUse() && !OldRHS->hasOneUse())
      return nullptr;
    // NewRHS initialized by the matcher.
  } else if (match(OldRHS, m_APInt(C))) {
    // OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
    if (!OldLHS->hasOneUse())
      return nullptr;
    NewRHS = ConstantInt::get(I.getType(), C->byteSwap());
  } else
    return nullptr;

  Value *BinOp = Builder.CreateBinOp(I.getOpcode(), NewLHS, NewRHS);
  Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap,
                                          I.getType());
  return Builder.CreateCall(F, BinOp);
}

/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
/// (V < Lo || V >= Hi). This method expects that Lo < Hi. IsSigned indicates
/// whether to treat V, Lo, and Hi as signed or not.
Value *InstCombinerImpl::insertRangeTest(Value *V, const APInt &Lo,
                                         const APInt &Hi, bool isSigned,
                                         bool Inside) {
  assert((isSigned ? Lo.slt(Hi) : Lo.ult(Hi)) &&
         "Lo is not < Hi in range emission code!");

  Type *Ty = V->getType();

  // V >= Min && V <  Hi --> V <  Hi
  // V <  Min || V >= Hi --> V >= Hi
  ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
  if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
    Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
    return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
  }

  // V >= Lo && V <  Hi --> V - Lo u<  Hi - Lo
  // V <  Lo || V >= Hi --> V - Lo u>= Hi - Lo
  Value *VMinusLo =
      Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
  Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
  return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
}

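// Worked example (illustrative): for Lo = 5, Hi = 10, isSigned = false and
// Inside = true, the test "V >= 5 && V < 10" is emitted as a single unsigned
// compare of the offset value:
//   %v.off = sub i32 %v, 5
//   %r = icmp ult i32 %v.off, 5
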
/// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
/// that can be simplified.
/// One of A and B is considered the mask. The other is the value. This is
/// described as the "AMask" or "BMask" part of the enum. If the enum contains
/// only "Mask", then both A and B can be considered masks. If A is the mask,
/// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
/// If both A and C are constants, this proof is also easy.
/// For the following explanations, we assume that A is the mask.
///
/// "AllOnes" declares that the comparison is true only if (A & B) == A or all
/// bits of A are set in B.
/// Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
///
/// "AllZeros" declares that the comparison is true only if (A & B) == 0 or all
/// bits of A are cleared in B.
/// Example: (icmp eq (A & 3), 0) -> Mask_AllZeros
///
/// "Mixed" declares that (A & B) == C and C might or might not contain any
/// number of one bits and zero bits.
/// Example: (icmp eq (A & 3), 1) -> AMask_Mixed
///
/// "Not" means that in the above descriptions "==" should be replaced by "!=".
/// Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
///
/// If the mask A contains a single bit, then the following is equivalent:
///    (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
///    (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
enum MaskedICmpType {
  AMask_AllOnes           =     1,
  AMask_NotAllOnes        =     2,
  BMask_AllOnes           =     4,
  BMask_NotAllOnes        =     8,
  Mask_AllZeros           =    16,
  Mask_NotAllZeros        =    32,
  AMask_Mixed             =    64,
  AMask_NotMixed          =   128,
  BMask_Mixed             =   256,
  BMask_NotMixed          =   512
};

/// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
/// satisfies.
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
                                  ICmpInst::Predicate Pred) {
  const APInt *ConstA = nullptr, *ConstB = nullptr, *ConstC = nullptr;
  match(A, m_APInt(ConstA));
  match(B, m_APInt(ConstB));
  match(C, m_APInt(ConstC));
  bool IsEq = (Pred == ICmpInst::ICMP_EQ);
  bool IsAPow2 = ConstA && ConstA->isPowerOf2();
  bool IsBPow2 = ConstB && ConstB->isPowerOf2();
  unsigned MaskVal = 0;
  if (ConstC && ConstC->isZero()) {
    // if C is zero, then both A and B qualify as mask
    MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed)
                     : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed)
                       : (AMask_AllOnes | AMask_Mixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed)
                       : (BMask_AllOnes | BMask_Mixed));
    return MaskVal;
  }

  if (A == C) {
    MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed)
                     : (AMask_NotAllOnes | AMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed)
                       : (Mask_AllZeros | AMask_Mixed));
  } else if (ConstA && ConstC && ConstC->isSubsetOf(*ConstA)) {
    MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed);
  }

  if (B == C) {
    MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed)
                     : (BMask_NotAllOnes | BMask_NotMixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed)
                       : (Mask_AllZeros | BMask_Mixed));
  } else if (ConstB && ConstC && ConstC->isSubsetOf(*ConstB)) {
    MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed);
  }

  return MaskVal;
}

/// Convert an analysis of a masked ICmp into its equivalent if all boolean
/// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
/// is adjacent to the corresponding normal flag (recording ==), this just
/// involves swapping those bits over.
static unsigned conjugateICmpMask(unsigned Mask) {
  unsigned NewMask;
  NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
                     AMask_Mixed | BMask_Mixed))
            << 1;

  NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
                      AMask_NotMixed | BMask_NotMixed))
             >> 1;

  return NewMask;
}

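// For example (illustrative): conjugateICmpMask(AMask_AllOnes | BMask_Mixed)
// yields AMask_NotAllOnes | BMask_NotMixed. The "==" flags (values 1, 4, 16,
// 64, 256) shift left into their "!=" neighbors, and the "!=" flags shift
// right back.
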
// Adapts the external decomposeBitTestICmp for local use.
static bool decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate &Pred,
                                 Value *&X, Value *&Y, Value *&Z) {
  APInt Mask;
  if (!llvm::decomposeBitTestICmp(LHS, RHS, Pred, X, Mask))
    return false;

  Y = ConstantInt::get(X->getType(), Mask);
  Z = ConstantInt::get(X->getType(), 0);
  return true;
}

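// For example (illustrative): a bit test hidden in "icmp ult X, 8" decomposes
// to Pred = eq with mask Y = ~7 and Z = 0, i.e. the "(X & ~7) == 0" form that
// the masked-icmp folds below expect.
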
/// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
/// Return the pattern classes (from MaskedICmpType) for the left hand side and
/// the right hand side as a pair.
/// LHS and RHS are the left hand side and the right hand side ICmps and PredL
/// and PredR are their predicates, respectively.
static std::optional<std::pair<unsigned, unsigned>> getMaskedTypeForICmpPair(
    Value *&A, Value *&B, Value *&C, Value *&D, Value *&E, ICmpInst *LHS,
    ICmpInst *RHS, ICmpInst::Predicate &PredL, ICmpInst::Predicate &PredR) {
  // Don't allow pointers. Splat vectors are fine.
  if (!LHS->getOperand(0)->getType()->isIntOrIntVectorTy() ||
      !RHS->getOperand(0)->getType()->isIntOrIntVectorTy())
    return std::nullopt;

  // Here comes the tricky part:
  // LHS might be of the form L11 & L12 == X, X == L21 & L22,
  // and L11 & L12 == L21 & L22. The same goes for RHS.
  // Now we must find those components L** and R**, that are equal, so
  // that we can extract the parameters A, B, C, D, and E for the canonical
  // pattern.
  Value *L1 = LHS->getOperand(0);
  Value *L2 = LHS->getOperand(1);
  Value *L11, *L12, *L21, *L22;
  // Check whether the icmp can be decomposed into a bit test.
  if (decomposeBitTestICmp(L1, L2, PredL, L11, L12, L2)) {
    L21 = L22 = L1 = nullptr;
  } else {
    // Look for ANDs in the LHS icmp.
    if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
      // Any icmp can be viewed as being trivially masked; if it allows us to
      // remove one, it's worth it.
      L11 = L1;
      L12 = Constant::getAllOnesValue(L1->getType());
    }

    if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
      L21 = L2;
      L22 = Constant::getAllOnesValue(L2->getType());
    }
  }

  // Bail if LHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredL))
    return std::nullopt;

  Value *R1 = RHS->getOperand(0);
  Value *R2 = RHS->getOperand(1);
  Value *R11, *R12;
  bool Ok = false;
  if (decomposeBitTestICmp(R1, R2, PredR, R11, R12, R2)) {
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
    } else {
      return std::nullopt;
    }
    E = R2;
    R1 = nullptr;
    Ok = true;
  } else {
    if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
      // As before, model no mask as a trivial mask if it'll let us do an
      // optimization.
      R11 = R1;
      R12 = Constant::getAllOnesValue(R1->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R2;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R2;
      Ok = true;
    }
  }

  // Bail if RHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredR))
    return std::nullopt;

  // Look for ANDs on the right side of the RHS icmp.
  if (!Ok) {
    if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) {
      R11 = R2;
      R12 = Constant::getAllOnesValue(R2->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R1;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R1;
      Ok = true;
    } else {
      return std::nullopt;
    }

    assert(Ok && "Failed to find AND on the right side of the RHS icmp.");
  }

  if (L11 == A) {
    B = L12;
    C = L2;
  } else if (L12 == A) {
    B = L11;
    C = L2;
  } else if (L21 == A) {
    B = L22;
    C = L1;
  } else if (L22 == A) {
    B = L21;
    C = L1;
  }

  unsigned LeftType = getMaskedICmpType(A, B, C, PredL);
  unsigned RightType = getMaskedICmpType(A, D, E, PredR);
  return std::optional<std::pair<unsigned, unsigned>>(
      std::make_pair(LeftType, RightType));
}

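// Usage sketch (illustrative): for (icmp eq (X & 3), 0) & (icmp eq (X & 12), 8)
// this extracts A = X, B = 3, C = 0, D = 12, E = 8, classifying the left-hand
// side as Mask_AllZeros (among other bits) and the right-hand side as
// BMask_Mixed.
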
/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side is of type Mask_NotAllZeros
/// and the right hand side is of type BMask_Mixed. For example,
/// (icmp (A & 12) != 0) & (icmp (A & 15) == 8) -> (icmp (A & 15) == 8).
/// Also used for logical and/or, must be poison safe.
static Value *foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C,
    Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    InstCombiner::BuilderTy &Builder) {
  // We are given the canonical form:
  //   (icmp ne (A & B), 0) & (icmp eq (A & D), E).
  // where D & E == E.
  //
  // If IsAnd is false, we get it in negated form:
  //   (icmp eq (A & B), 0) | (icmp ne (A & D), E) ->
  //      !((icmp ne (A & B), 0) & (icmp eq (A & D), E)).
  //
  // We currently handle the case where B, C, D, and E are constant.
  //
  const APInt *BCst, *CCst, *DCst, *OrigECst;
  if (!match(B, m_APInt(BCst)) || !match(C, m_APInt(CCst)) ||
      !match(D, m_APInt(DCst)) || !match(E, m_APInt(OrigECst)))
    return nullptr;

  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;

  // Update E to the canonical form when D is a power of two and RHS is
  // canonicalized as,
  // (icmp ne (A & D), 0) -> (icmp eq (A & D), D) or
  // (icmp ne (A & D), D) -> (icmp eq (A & D), 0).
  APInt ECst = *OrigECst;
  if (PredR != NewCC)
    ECst ^= *DCst;

  // If B or D is zero, skip: LHS or RHS can be trivially folded by other
  // folding rules, and this pattern won't apply any more.
  if (*BCst == 0 || *DCst == 0)
    return nullptr;

  // If B and D don't intersect, i.e. (B & D) == 0, no folding because we can't
  // deduce anything from it.
  // For example,
  // (icmp ne (A & 12), 0) & (icmp eq (A & 3), 1) -> no folding.
  if ((*BCst & *DCst) == 0)
    return nullptr;

  // If the following two conditions are met:
  //
  // 1. mask B covers only a single bit that's not covered by mask D, that is,
  // (B & (B ^ D)) is a power of 2 (in other words, B minus the intersection of
  // B and D has only one bit set) and,
  //
  // 2. RHS (and E) indicates that the rest of B's bits are zero (in other
  // words, the intersection of B and D is zero), that is, ((B & D) & E) == 0
  //
  // then that single bit in B must be one and thus the whole expression can be
  // folded to
  //   (A & (B | D)) == (B & (B ^ D)) | E.
  //
  // For example,
  // (icmp ne (A & 12), 0) & (icmp eq (A & 7), 1) -> (icmp eq (A & 15), 9)
  // (icmp ne (A & 15), 0) & (icmp eq (A & 7), 0) -> (icmp eq (A & 15), 8)
  if ((((*BCst & *DCst) & ECst) == 0) &&
      (*BCst & (*BCst ^ *DCst)).isPowerOf2()) {
    APInt BorD = *BCst | *DCst;
    APInt BandBxorDorE = (*BCst & (*BCst ^ *DCst)) | ECst;
    Value *NewMask = ConstantInt::get(A->getType(), BorD);
    Value *NewMaskedValue = ConstantInt::get(A->getType(), BandBxorDorE);
    Value *NewAnd = Builder.CreateAnd(A, NewMask);
    return Builder.CreateICmp(NewCC, NewAnd, NewMaskedValue);
  }

  auto IsSubSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C1;
  };
  auto IsSuperSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C2;
  };

  // In the following, we consider only the cases where B is a superset of D, B
  // is a subset of D, or B == D because otherwise there's at least one bit
  // covered by B but not D, in which case we can't deduce much from it, so
  // no folding (aside from the single must-be-one bit case right above.)
  // For example,
  // (icmp ne (A & 14), 0) & (icmp eq (A & 3), 1) -> no folding.
  if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst))
    return nullptr;

  // At this point, either B is a superset of D, B is a subset of D or B == D.

  // If E is zero and B is a subset of (or equal to) D, LHS and RHS contradict
  // and the whole expression becomes false (or true if negated); otherwise, no
  // folding.
  // For example,
  // (icmp ne (A & 3), 0) & (icmp eq (A & 7), 0) -> false.
  // (icmp ne (A & 15), 0) & (icmp eq (A & 3), 0) -> no folding.
  if (ECst.isZero()) {
    if (IsSubSetOrEqual(BCst, DCst))
      return ConstantInt::get(LHS->getType(), !IsAnd);
    return nullptr;
  }

  // At this point, B, D, E aren't zero and (B & D) == B, (B & D) == D or B ==
  // D. If B is a superset of (or equal to) D, since E is not zero, LHS is
  // subsumed by RHS (RHS implies LHS.) So the whole expression becomes
  // RHS. For example,
  // (icmp ne (A & 255), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  // (icmp ne (A & 15), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  if (IsSuperSetOrEqual(BCst, DCst))
    return RHS;

  // Otherwise, B is a subset of D. If B and E have a common bit set,
  // i.e. (B & E) != 0, then LHS is subsumed by RHS. For example,
  // (icmp ne (A & 12), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  assert(IsSubSetOrEqual(BCst, DCst) && "Precondition due to above code");
  if ((*BCst & ECst) != 0)
    return RHS;

  // Otherwise, LHS and RHS contradict and the whole expression becomes false
  // (or true if negated.) For example,
  // (icmp ne (A & 7), 0) & (icmp eq (A & 15), 8) -> false.
  // (icmp ne (A & 6), 0) & (icmp eq (A & 15), 8) -> false.
  return ConstantInt::get(LHS->getType(), !IsAnd);
}

/// Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side and the right hand side
/// aren't of the common mask pattern type.
/// Also used for logical and/or, must be poison safe.
static Value *foldLogOpOfMaskedICmpsAsymmetric(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C,
    Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    unsigned LHSMask, unsigned RHSMask, InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  // Handle Mask_NotAllZeros-BMask_Mixed cases.
  // (icmp ne/eq (A & B), C) &/| (icmp eq/ne (A & D), E), or
  // (icmp eq/ne (A & B), C) &/| (icmp ne/eq (A & D), E)
  //    which gets swapped to
  //    (icmp ne/eq (A & D), E) &/| (icmp eq/ne (A & B), C).
  if (!IsAnd) {
    LHSMask = conjugateICmpMask(LHSMask);
    RHSMask = conjugateICmpMask(RHSMask);
  }
  if ((LHSMask & Mask_NotAllZeros) && (RHSMask & BMask_Mixed)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            LHS, RHS, IsAnd, A, B, C, D, E,
            PredL, PredR, Builder)) {
      return V;
    }
  } else if ((LHSMask & BMask_Mixed) && (RHSMask & Mask_NotAllZeros)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            RHS, LHS, IsAnd, A, D, E, B, C,
            PredR, PredL, Builder)) {
      return V;
    }
  }
  return nullptr;
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y).
static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                     bool IsLogical,
                                     InstCombiner::BuilderTy &Builder) {
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  std::optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR);
  if (!MaskPair)
    return nullptr;
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  unsigned LHSMask = MaskPair->first;
  unsigned RHSMask = MaskPair->second;
  unsigned Mask = LHSMask & RHSMask;
  if (Mask == 0) {
    // Even if the two sides don't share a common pattern, check if folding can
    // still happen.
    if (Value *V = foldLogOpOfMaskedICmpsAsymmetric(
            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask,
            Builder))
      return V;
    return nullptr;
  }

  // In full generality:
  //     (icmp (A & B) Op C) | (icmp (A & D) Op E)
  // ==  ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
  //
  // If the latter can be converted into (icmp (A & X) Op Y) then the former is
  // equivalent to (icmp (A & X) !Op Y).
  //
  // Therefore, we can pretend for the rest of this function that we're dealing
  // with the conjunction, provided we flip the sense of any comparisons (both
  // input and output).

  // In most cases we're going to produce an EQ for the "&&" case.
  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
  if (!IsAnd) {
    // Convert the masking analysis into its equivalent with negated
    // comparisons.
    Mask = conjugateICmpMask(Mask);
  }

  if (Mask & Mask_AllZeros) {
    // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
    // -> (icmp eq (A & (B|D)), 0)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    // We can't use C as zero because we might actually handle
    //   (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // with B and D, having a single bit set.
    Value *Zero = Constant::getNullValue(A->getType());
    return Builder.CreateICmp(NewCC, NewAnd, Zero);
  }
  if (Mask & BMask_AllOnes) {
    // (icmp eq (A & B), B) & (icmp eq (A & D), D)
    // -> (icmp eq (A & (B|D)), (B|D))
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr);
  }
  if (Mask & AMask_AllOnes) {
    // (icmp eq (A & B), A) & (icmp eq (A & D), A)
    // -> (icmp eq (A & (B&D)), A)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewAnd1 = Builder.CreateAnd(B, D);
    Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1);
    return Builder.CreateICmp(NewCC, NewAnd2, A);
  }

  // Remaining cases assume at least that B and D are constant, and depend on
  // their actual values. This isn't strictly necessary, just a "handle the
  // easy cases for now" decision.
  const APInt *ConstB, *ConstD;
  if (!match(B, m_APInt(ConstB)) || !match(D, m_APInt(ConstD)))
    return nullptr;

  if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) {
    // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
    // Only valid if one of the masks is a superset of the other (check "B&D" is
    // the same as either B or D).
    APInt NewMask = *ConstB & *ConstD;
    if (NewMask == *ConstB)
      return LHS;
    else if (NewMask == *ConstD)
      return RHS;
  }

  if (Mask & AMask_NotAllOnes) {
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
    // Only valid if one of the masks is a superset of the other (check "B|D" is
    // the same as either B or D).
    APInt NewMask = *ConstB | *ConstD;
    if (NewMask == *ConstB)
      return LHS;
    else if (NewMask == *ConstD)
      return RHS;
  }

  if (Mask & (BMask_Mixed | BMask_NotMixed)) {
    // Mixed:
    // (icmp eq (A & B), C) & (icmp eq (A & D), E)
    // We already know that B & C == C && D & E == E.
    // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
    // C and E, which are shared by both the mask B and the mask D, don't
    // contradict, then we can transform to
    // -> (icmp eq (A & (B|D)), (C|E))
    // Currently, we only handle the case of B, C, D, and E being constant.
    // We can't simply use C and E because we might actually handle
    //   (icmp ne (A & B), B) & (icmp eq (A & D), D)
    // with B and D, having a single bit set.

    // NotMixed:
    // (icmp ne (A & B), C) & (icmp ne (A & D), E)
    // -> (icmp ne (A & (B & D)), (C & E))
    // Check the intersection (B & D) for inequality.
    // Assume that (B & D) == B || (B & D) == D, i.e. B/D is a subset of D/B
    // and (B & D) & (C ^ E) == 0, bits of C and E, which are shared by both the
    // B and the D, don't contradict.
    // Note that we can assume (~B & C) == 0 && (~D & E) == 0; a previous
    // operation should have deleted these icmps if that weren't met.

    const APInt *OldConstC, *OldConstE;
    if (!match(C, m_APInt(OldConstC)) || !match(E, m_APInt(OldConstE)))
      return nullptr;

    auto FoldBMixed = [&](ICmpInst::Predicate CC, bool IsNot) -> Value * {
      CC = IsNot ? CmpInst::getInversePredicate(CC) : CC;
      const APInt ConstC = PredL != CC ? *ConstB ^ *OldConstC : *OldConstC;
      const APInt ConstE = PredR != CC ? *ConstD ^ *OldConstE : *OldConstE;

      if (((*ConstB & *ConstD) & (ConstC ^ ConstE)).getBoolValue())
        return IsNot ? nullptr : ConstantInt::get(LHS->getType(), !IsAnd);

      if (IsNot && !ConstB->isSubsetOf(*ConstD) && !ConstD->isSubsetOf(*ConstB))
        return nullptr;

      APInt BD, CE;
      if (IsNot) {
        BD = *ConstB & *ConstD;
        CE = ConstC & ConstE;
      } else {
        BD = *ConstB | *ConstD;
        CE = ConstC | ConstE;
      }
      Value *NewAnd = Builder.CreateAnd(A, BD);
      Value *CEVal = ConstantInt::get(A->getType(), CE);
      return Builder.CreateICmp(CC, CEVal, NewAnd);
    };

    if (Mask & BMask_Mixed)
      return FoldBMixed(NewCC, false);
    if (Mask & BMask_NotMixed) // can be else also
      return FoldBMixed(NewCC, true);
  }
  return nullptr;
}

/// Try to fold a signed range check with lower bound 0 to an unsigned icmp.
/// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
/// If \p Inverted is true then the check is for the inverted range, e.g.
/// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
Value *InstCombinerImpl::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                            bool Inverted) {
  // Check the lower range comparison, e.g. x >= 0
  // InstCombine already ensured that if there is a constant it's on the RHS.
  ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1));
  if (!RangeStart)
    return nullptr;

  ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate() :
                               Cmp0->getPredicate());

  // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
  if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
        (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
    return nullptr;

  ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate() :
                               Cmp1->getPredicate());

  Value *Input = Cmp0->getOperand(0);
  Value *RangeEnd;
  if (Cmp1->getOperand(0) == Input) {
    // For the upper range compare we have: icmp x, n
    RangeEnd = Cmp1->getOperand(1);
  } else if (Cmp1->getOperand(1) == Input) {
    // For the upper range compare we have: icmp n, x
    RangeEnd = Cmp1->getOperand(0);
    Pred1 = ICmpInst::getSwappedPredicate(Pred1);
  } else {
    return nullptr;
  }

  // Check the upper range comparison, e.g. x < n
  ICmpInst::Predicate NewPred;
  switch (Pred1) {
    case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
    case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
    default: return nullptr;
  }

  // This simplification is only valid if the upper range is not negative.
  KnownBits Known = computeKnownBits(RangeEnd, /*Depth=*/0, Cmp1);
  if (!Known.isNonNegative())
    return nullptr;

  if (Inverted)
    NewPred = ICmpInst::getInversePredicate(NewPred);

  return Builder.CreateICmp(NewPred, Input, RangeEnd);
}

// Fold (iszero(A & K1) | iszero(A & K2)) ->  (A & (K1 | K2)) != (K1 | K2)
// Fold (!iszero(A & K1) & !iszero(A & K2)) ->  (A & (K1 | K2)) == (K1 | K2)
Value *InstCombinerImpl::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS,
                                                       ICmpInst *RHS,
                                                       Instruction *CxtI,
                                                       bool IsAnd,
                                                       bool IsLogical) {
  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  if (LHS->getPredicate() != Pred || RHS->getPredicate() != Pred)
    return nullptr;

  if (!match(LHS->getOperand(1), m_Zero()) ||
      !match(RHS->getOperand(1), m_Zero()))
    return nullptr;

  Value *L1, *L2, *R1, *R2;
  if (match(LHS->getOperand(0), m_And(m_Value(L1), m_Value(L2))) &&
      match(RHS->getOperand(0), m_And(m_Value(R1), m_Value(R2)))) {
    if (L1 == R2 || L2 == R2)
      std::swap(R1, R2);
    if (L2 == R1)
      std::swap(L1, L2);

    if (L1 == R1 &&
        isKnownToBeAPowerOfTwo(L2, false, 0, CxtI) &&
        isKnownToBeAPowerOfTwo(R2, false, 0, CxtI)) {
      // If this is a logical and/or, then we must prevent propagation of a
      // poison value from the RHS by inserting freeze.
      if (IsLogical)
        R2 = Builder.CreateFreeze(R2);
      Value *Mask = Builder.CreateOr(L2, R2);
      Value *Masked = Builder.CreateAnd(L1, Mask);
      auto NewPred = IsAnd ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
      return Builder.CreateICmp(NewPred, Masked, Mask);
    }
  }

  return nullptr;
}

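// Worked example (illustrative): with K1 = 1 and K2 = 2 (both powers of two),
//   ((A & 1) == 0) | ((A & 2) == 0)  -->  (A & 3) != 3
// because the disjunction holds exactly when at least one of the two bits is
// clear, i.e. when the two bits are not simultaneously set.
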
/// General pattern:
///   X & Y
///
/// Where Y is checking that all the high bits (covered by a mask 4294967168)
/// are uniform, i.e. %arg & 4294967168 can be either 4294967168 or 0
/// Pattern can be one of:
///   %t = add        i32 %arg,    128
///   %r = icmp   ult i32 %t,      256
/// Or
///   %t0 = shl       i32 %arg,    24
///   %t1 = ashr      i32 %t0,     24
///   %r  = icmp  eq  i32 %t1,     %arg
/// Or
///   %t0 = trunc     i32 %arg  to i8
///   %t1 = sext      i8  %t0   to i32
///   %r  = icmp  eq  i32 %t1,     %arg
/// This pattern is a signed truncation check.
///
/// And X is checking that some bit in that same mask is zero.
/// I.e. can be one of:
///   %r = icmp sgt i32   %arg,    -1
/// Or
///   %t = and        i32 %arg,    2147483648
///   %r = icmp   eq  i32 %t,      0
///
/// Since we are checking that all the bits in that mask are the same,
/// and a particular bit is zero, what we are really checking is that all the
/// masked bits are zero.
/// So this should be transformed to:
///   %r = icmp ult i32 %arg, 128
static Value *foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1,
                                        Instruction &CxtI,
                                        InstCombiner::BuilderTy &Builder) {
  assert(CxtI.getOpcode() == Instruction::And);

  // Match  icmp ult (add %arg, C01), C1  (C1 == C01 << 1; powers of two)
  auto tryToMatchSignedTruncationCheck = [](ICmpInst *ICmp, Value *&X,
                                            APInt &SignBitMask) -> bool {
    CmpInst::Predicate Pred;
    const APInt *I01, *I1; // powers of two; I1 == I01 << 1
    if (!(match(ICmp,
                m_ICmp(Pred, m_Add(m_Value(X), m_Power2(I01)), m_Power2(I1))) &&
          Pred == ICmpInst::ICMP_ULT && I1->ugt(*I01) && I01->shl(1) == *I1))
      return false;
    // Which bit is the new sign bit as per the 'signed truncation' pattern?
    SignBitMask = *I01;
    return true;
  };

  // One icmp needs to be 'signed truncation check'.
  // We need to match this first, else we will mismatch commutative cases.
  Value *X1;
  APInt HighestBit;
  ICmpInst *OtherICmp;
  if (tryToMatchSignedTruncationCheck(ICmp1, X1, HighestBit))
    OtherICmp = ICmp0;
  else if (tryToMatchSignedTruncationCheck(ICmp0, X1, HighestBit))
    OtherICmp = ICmp1;
  else
    return nullptr;

  assert(HighestBit.isPowerOf2() && "expected to be power of two (non-zero)");

  // Try to match/decompose into:  icmp eq (X & Mask), 0
  auto tryToDecompose = [](ICmpInst *ICmp, Value *&X,
                           APInt &UnsetBitsMask) -> bool {
    CmpInst::Predicate Pred = ICmp->getPredicate();
    // Can it be decomposed into  icmp eq (X & Mask), 0  ?
    if (llvm::decomposeBitTestICmp(ICmp->getOperand(0), ICmp->getOperand(1),
                                   Pred, X, UnsetBitsMask,
                                   /*LookThroughTrunc=*/false) &&
        Pred == ICmpInst::ICMP_EQ)
      return true;
    // Is it  icmp eq (X & Mask), 0  already?
    const APInt *Mask;
    if (match(ICmp, m_ICmp(Pred, m_And(m_Value(X), m_APInt(Mask)), m_Zero())) &&
        Pred == ICmpInst::ICMP_EQ) {
      UnsetBitsMask = *Mask;
      return true;
    }
    return false;
  };

  // And the other icmp needs to be decomposable into a bit test.
  Value *X0;
  APInt UnsetBitsMask;
  if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask))
    return nullptr;

  assert(!UnsetBitsMask.isZero() && "empty mask makes no sense.");

  // Are they working on the same value?
  Value *X;
  if (X1 == X0) {
    // Ok as is.
    X = X1;
  } else if (match(X0, m_Trunc(m_Specific(X1)))) {
    UnsetBitsMask = UnsetBitsMask.zext(X1->getType()->getScalarSizeInBits());
    X = X1;
  } else
    return nullptr;

  // So which bits should be uniform as per the 'signed truncation check'?
  // (all the bits starting with (i.e. including) HighestBit)
  APInt SignBitsMask = ~(HighestBit - 1U);

  // UnsetBitsMask must have some common bits with SignBitsMask.
  if (!UnsetBitsMask.intersects(SignBitsMask))
    return nullptr;

  // Does UnsetBitsMask contain any bits outside of SignBitsMask?
  if (!UnsetBitsMask.isSubsetOf(SignBitsMask)) {
    APInt OtherHighestBit = (~UnsetBitsMask) + 1U;
    if (!OtherHighestBit.isPowerOf2())
      return nullptr;
    HighestBit = APIntOps::umin(HighestBit, OtherHighestBit);
  }
  // Else, if it does not, then all is ok as-is.

  //   %r = icmp ult %X, SignBit
  return Builder.CreateICmpULT(X, ConstantInt::get(X->getType(), HighestBit),
                               CxtI.getName() + ".simplified");
}

/// Fold (icmp eq ctpop(X) 1) | (icmp eq X 0) into (icmp ult ctpop(X) 2) and
/// fold (icmp ne ctpop(X) 1) & (icmp ne X 0) into (icmp ugt ctpop(X) 1).
/// Also used for logical and/or, must be poison safe.
static Value *foldIsPowerOf2OrZero(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd,
                                   InstCombiner::BuilderTy &Builder) {
  CmpInst::Predicate Pred0, Pred1;
  Value *X;
  if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
                          m_SpecificInt(1))) ||
      !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())))
    return nullptr;

  Value *CtPop = Cmp0->getOperand(0);
  if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_NE)
    return Builder.CreateICmpUGT(CtPop, ConstantInt::get(CtPop->getType(), 1));
  if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_EQ)
    return Builder.CreateICmpULT(CtPop, ConstantInt::get(CtPop->getType(), 2));

  return nullptr;
}

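// The reasoning behind the fold above (illustrative): ctpop(X) == 0 exactly
// when X == 0, and ctpop(X) == 1 exactly when X is a power of two, so "power
// of two or zero" is the single range check ctpop(X) u< 2, and its negation
// is ctpop(X) u> 1.
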
/// Reduce a pair of compares that check if a value has exactly 1 bit set.
/// Also used for logical and/or, must be poison safe.
static Value *foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd,
                             InstCombiner::BuilderTy &Builder) {
  // Handle 'and' / 'or' commutation: make the equality check the first operand.
  if (JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_NE)
    std::swap(Cmp0, Cmp1);
  else if (!JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_EQ)
    std::swap(Cmp0, Cmp1);

  // (X != 0) && (ctpop(X) u< 2) --> ctpop(X) == 1
  CmpInst::Predicate Pred0, Pred1;
  Value *X;
  if (JoinedByAnd && match(Cmp0, m_ICmp(Pred0, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_ICmp(Pred1, m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                         m_SpecificInt(2))) &&
      Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT) {
    Value *CtPop = Cmp1->getOperand(0);
    return Builder.CreateICmpEQ(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  // (X == 0) || (ctpop(X) u> 1) --> ctpop(X) != 1
  if (!JoinedByAnd && match(Cmp0, m_ICmp(Pred0, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_ICmp(Pred1, m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                         m_SpecificInt(1))) &&
      Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_UGT) {
    Value *CtPop = Cmp1->getOperand(0);
    return Builder.CreateICmpNE(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  return nullptr;
}

/// Try to fold (icmp(A & B) == 0) & (icmp(A & D) != E) into (icmp A u< D) iff
/// B is a contiguous set of ones starting from the most significant bit
/// (negative power of 2), D and E are equal, and D is a contiguous set of ones
/// starting at the most significant zero bit in B. Parameter B supports masking
/// using undef/poison in either scalar or vector values.
static Value *foldNegativePower2AndShiftedMask(
    Value *A, Value *B, Value *D, Value *E, ICmpInst::Predicate PredL,
    ICmpInst::Predicate PredR, InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  if (PredL != ICmpInst::ICMP_EQ || PredR != ICmpInst::ICMP_NE)
    return nullptr;

  if (!match(B, m_NegatedPower2()) || !match(D, m_ShiftedMask()) ||
      !match(E, m_ShiftedMask()))
    return nullptr;

  // Test scalar arguments for conversion. B has been validated earlier to be a
  // negative power of two and thus is guaranteed to have one or more contiguous
  // ones starting from the MSB followed by zero or more contiguous zeros. D has
  // been validated earlier to be a shifted set of one or more contiguous ones.
  // In order to match, B leading ones and D leading zeros should be equal. The
  // predicate that B be a negative power of 2 prevents the condition of there
  // ever being zero leading ones. Thus 0 == 0 cannot occur. The predicate that
  // D always be a shifted mask prevents the condition of D equaling 0. This
  // prevents matching the condition where B contains the maximum number of
  // leading one bits (-1) and D contains the maximum number of leading zero
  // bits (0).
  auto isReducible = [](const Value *B, const Value *D, const Value *E) {
    const APInt *BCst, *DCst, *ECst;
    return match(B, m_APIntAllowUndef(BCst)) && match(D, m_APInt(DCst)) &&
           match(E, m_APInt(ECst)) && *DCst == *ECst &&
           (isa<UndefValue>(B) ||
            (BCst->countLeadingOnes() == DCst->countLeadingZeros()));
  };

  // Test vector type arguments for conversion.
  if (const auto *BVTy = dyn_cast<VectorType>(B->getType())) {
    const auto *BFVTy = dyn_cast<FixedVectorType>(BVTy);
    const auto *BConst = dyn_cast<Constant>(B);
    const auto *DConst = dyn_cast<Constant>(D);
    const auto *EConst = dyn_cast<Constant>(E);

    if (!BFVTy || !BConst || !DConst || !EConst)
      return nullptr;

    for (unsigned I = 0; I != BFVTy->getNumElements(); ++I) {
      const auto *BElt = BConst->getAggregateElement(I);
      const auto *DElt = DConst->getAggregateElement(I);
      const auto *EElt = EConst->getAggregateElement(I);

      if (!BElt || !DElt || !EElt)
        return nullptr;
      if (!isReducible(BElt, DElt, EElt))
        return nullptr;
    }
  } else {
    // Test scalar type arguments for conversion.
    if (!isReducible(B, D, E))
      return nullptr;
  }

  return Builder.CreateICmp(ICmpInst::ICMP_ULT, A, D);
}

/// Try to fold ((icmp X u< P) & (icmp(X & M) != M)) or ((icmp X s> -1) &
/// (icmp(X & M) != M)) into (icmp X u< M). Where P is a power of 2, M < P, and
/// M is a contiguous shifted mask starting at the right most significant zero
/// bit in P. SGT is supported as when P is the largest representable power of
/// 2, an earlier optimization converts the expression into (icmp X s> -1).
/// Parameter P supports masking using undef/poison in either scalar or vector
/// values.
static Value *foldPowerOf2AndShiftedMask(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                         bool JoinedByAnd,
                                         InstCombiner::BuilderTy &Builder) {
  if (!JoinedByAnd)
    return nullptr;
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate CmpPred0 = Cmp0->getPredicate(),
                      CmpPred1 = Cmp1->getPredicate();
  // Assuming P is a 2^n, getMaskedTypeForICmpPair will normalize (icmp X u<
  // 2^n) into (icmp (X & ~(2^n-1)) == 0) and (icmp X s> -1) into (icmp (X &
  // SignMask) == 0).
  std::optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, Cmp0, Cmp1, CmpPred0, CmpPred1);
  if (!MaskPair)
    return nullptr;

  const auto compareBMask = BMask_NotMixed | BMask_NotAllOnes;
  unsigned CmpMask0 = MaskPair->first;
  unsigned CmpMask1 = MaskPair->second;
  if ((CmpMask0 & Mask_AllZeros) && (CmpMask1 == compareBMask)) {
    if (Value *V = foldNegativePower2AndShiftedMask(A, B, D, E, CmpPred0,
                                                    CmpPred1, Builder))
      return V;
  } else if ((CmpMask0 == compareBMask) && (CmpMask1 & Mask_AllZeros)) {
    if (Value *V = foldNegativePower2AndShiftedMask(A, D, B, C, CmpPred1,
                                                    CmpPred0, Builder))
      return V;
  }
  return nullptr;
}

/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd,
                                         const SimplifyQuery &Q,
                                         InstCombiner::BuilderTy &Builder) {
  Value *ZeroCmpOp;
  ICmpInst::Predicate EqPred;
  if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(ZeroCmpOp), m_Zero())) ||
      !ICmpInst::isEquality(EqPred))
    return nullptr;

  auto IsKnownNonZero = [&](Value *V) {
    return isKnownNonZero(V, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
  };

  ICmpInst::Predicate UnsignedPred;

  Value *A, *B;
  if (match(UnsignedICmp,
            m_c_ICmp(UnsignedPred, m_Specific(ZeroCmpOp), m_Value(A))) &&
      match(ZeroCmpOp, m_c_Add(m_Specific(A), m_Value(B))) &&
      (ZeroICmp->hasOneUse() || UnsignedICmp->hasOneUse())) {
    auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) {
      if (!IsKnownNonZero(NonZero))
        std::swap(NonZero, Other);
      return IsKnownNonZero(NonZero);
    };

    // Given  ZeroCmpOp = (A + B):
    //   ZeroCmpOp <  A && ZeroCmpOp != 0  -->  (0-X) <  Y
    //   ZeroCmpOp >= A || ZeroCmpOp == 0  -->  (0-X) >= Y
    // with X being the value (A/B) that is known to be non-zero,
    // and Y being the remaining value.
    if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE &&
        IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
    if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ &&
        !IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
  }

  Value *Base, *Offset;
  if (!match(ZeroCmpOp, m_Sub(m_Value(Base), m_Value(Offset))))
    return nullptr;

  if (!match(UnsignedICmp,
             m_c_ICmp(UnsignedPred, m_Specific(Base), m_Specific(Offset))) ||
      !ICmpInst::isUnsigned(UnsignedPred))
    return nullptr;

  // Base >=/> Offset && (Base - Offset) != 0  <-->  Base > Offset
  // (no overflow and not null)
  if ((UnsignedPred == ICmpInst::ICMP_UGE ||
       UnsignedPred == ICmpInst::ICMP_UGT) &&
      EqPred == ICmpInst::ICMP_NE && IsAnd)
    return Builder.CreateICmpUGT(Base, Offset);

  // Base <=/< Offset || (Base - Offset) == 0  <-->  Base <= Offset
  // (overflow or null)
  if ((UnsignedPred == ICmpInst::ICMP_ULE ||
       UnsignedPred == ICmpInst::ICMP_ULT) &&
      EqPred == ICmpInst::ICMP_EQ && !IsAnd)
    return Builder.CreateICmpULE(Base, Offset);

  // Base <= Offset && (Base - Offset) != 0  -->  Base < Offset
  if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
      IsAnd)
    return Builder.CreateICmpULT(Base, Offset);

  // Base > Offset || (Base - Offset) == 0  -->  Base >= Offset
  if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
      !IsAnd)
    return Builder.CreateICmpUGE(Base, Offset);

  return nullptr;
}

struct IntPart {
  Value *From;
  unsigned StartBit;
  unsigned NumBits;
};

/// Match an extraction of bits from an integer.
static std::optional<IntPart> matchIntPart(Value *V) {
  Value *X;
  if (!match(V, m_OneUse(m_Trunc(m_Value(X)))))
    return std::nullopt;

  unsigned NumOriginalBits = X->getType()->getScalarSizeInBits();
  unsigned NumExtractedBits = V->getType()->getScalarSizeInBits();
  Value *Y;
  const APInt *Shift;
  // For a trunc(lshr Y, Shift) pattern, make sure we're only extracting bits
  // from Y, not any shifted-in zeroes.
  if (match(X, m_OneUse(m_LShr(m_Value(Y), m_APInt(Shift)))) &&
      Shift->ule(NumOriginalBits - NumExtractedBits))
    return {{Y, (unsigned)Shift->getZExtValue(), NumExtractedBits}};
  return {{X, 0, NumExtractedBits}};
}

/// Materialize an extraction of bits from an integer in IR.
static Value *extractIntPart(const IntPart &P, IRBuilderBase &Builder) {
  Value *V = P.From;
  if (P.StartBit)
    V = Builder.CreateLShr(V, P.StartBit);
  Type *TruncTy = V->getType()->getWithNewBitWidth(P.NumBits);
  if (TruncTy != V->getType())
    V = Builder.CreateTrunc(V, TruncTy);
  return V;
}

/// (icmp eq X0, Y0) & (icmp eq X1, Y1) -> icmp eq X01, Y01
/// (icmp ne X0, Y0) | (icmp ne X1, Y1) -> icmp ne X01, Y01
/// where X0, X1 and Y0, Y1 are adjacent parts extracted from an integer.
Value *InstCombinerImpl::foldEqOfParts(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                       bool IsAnd) {
  if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
    return nullptr;

  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
  if (Cmp0->getPredicate() != Pred || Cmp1->getPredicate() != Pred)
    return nullptr;

  std::optional<IntPart> L0 = matchIntPart(Cmp0->getOperand(0));
  std::optional<IntPart> R0 = matchIntPart(Cmp0->getOperand(1));
  std::optional<IntPart> L1 = matchIntPart(Cmp1->getOperand(0));
  std::optional<IntPart> R1 = matchIntPart(Cmp1->getOperand(1));
  if (!L0 || !R0 || !L1 || !R1)
    return nullptr;

  // Make sure the LHS/RHS compare a part of the same value, possibly after
  // an operand swap.
  if (L0->From != L1->From || R0->From != R1->From) {
    if (L0->From != R1->From || R0->From != L1->From)
      return nullptr;
    std::swap(L1, R1);
  }

  // Make sure the extracted parts are adjacent, canonicalizing to L0/R0 being
  // the low part and L1/R1 being the high part.
  if (L0->StartBit + L0->NumBits != L1->StartBit ||
      R0->StartBit + R0->NumBits != R1->StartBit) {
    if (L1->StartBit + L1->NumBits != L0->StartBit ||
        R1->StartBit + R1->NumBits != R0->StartBit)
      return nullptr;
    std::swap(L0, L1);
    std::swap(R0, R1);
  }

  // We can simplify to a comparison of these larger parts of the integers.
  IntPart L = {L0->From, L0->StartBit, L0->NumBits + L1->NumBits};
  IntPart R = {R0->From, R0->StartBit, R0->NumBits + R1->NumBits};
  Value *LValue = extractIntPart(L, Builder);
  Value *RValue = extractIntPart(R, Builder);
  return Builder.CreateICmp(Pred, LValue, RValue);
}

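// Worked example (illustrative): if %x0 = trunc i32 %x to i8 and
// %x1 = trunc i32 (lshr i32 %x, 8) to i8 (and likewise %y0, %y1 from %y), then
//   (icmp eq %x0, %y0) & (icmp eq %x1, %y1)
// compares two adjacent 8-bit parts and folds to one 16-bit compare:
//   icmp eq (trunc i32 %x to i16), (trunc i32 %y to i16)
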
/// Reduce logic-of-compares with equality to a constant by substituting a
/// common operand with the constant. Callers are expected to call this with
/// Cmp0/Cmp1 switched to handle logic op commutativity.
static Value *foldAndOrOfICmpsWithConstEq(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                          bool IsAnd, bool IsLogical,
                                          InstCombiner::BuilderTy &Builder,
                                          const SimplifyQuery &Q) {
  // Match an equality compare with a non-poison constant as Cmp0.
  // Also, give up if the compare can be constant-folded to avoid looping.
  ICmpInst::Predicate Pred0;
  Value *X;
  Constant *C;
  if (!match(Cmp0, m_ICmp(Pred0, m_Value(X), m_Constant(C))) ||
      !isGuaranteedNotToBeUndefOrPoison(C) || isa<Constant>(X))
    return nullptr;
  if ((IsAnd && Pred0 != ICmpInst::ICMP_EQ) ||
      (!IsAnd && Pred0 != ICmpInst::ICMP_NE))
    return nullptr;

  // The other compare must include a common operand (X). Canonicalize the
  // common operand as operand 1 (Pred1 is swapped if the common operand was
  // operand 0).
  Value *Y;
  ICmpInst::Predicate Pred1;
  if (!match(Cmp1, m_c_ICmp(Pred1, m_Value(Y), m_Deferred(X))))
    return nullptr;

  // Replace variable with constant value equivalence to remove a variable use:
  // (X == C) && (Y Pred1 X) --> (X == C) && (Y Pred1 C)
  // (X != C) || (Y Pred1 X) --> (X != C) || (Y Pred1 C)
  // Can think of the 'or' substitution with the 'and' bool equivalent:
  // A || B --> A || (!A && B)
  Value *SubstituteCmp = simplifyICmpInst(Pred1, Y, C, Q);
  if (!SubstituteCmp) {
    // If we need to create a new instruction, require that the old compare can
    // be removed.
    if (!Cmp1->hasOneUse())
      return nullptr;
    SubstituteCmp = Builder.CreateICmp(Pred1, Y, C);
  }
  if (IsLogical)
    return IsAnd ? Builder.CreateLogicalAnd(Cmp0, SubstituteCmp)
                 : Builder.CreateLogicalOr(Cmp0, SubstituteCmp);
  return Builder.CreateBinOp(IsAnd ? Instruction::And : Instruction::Or, Cmp0,
                             SubstituteCmp);
}

/// Fold (icmp Pred1 V1, C1) & (icmp Pred2 V2, C2)
/// or   (icmp Pred1 V1, C1) | (icmp Pred2 V2, C2)
/// into a single comparison using range-based reasoning.
/// NOTE: This is also used for logical and/or, must be poison-safe!
Value *InstCombinerImpl::foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1,
                                                     ICmpInst *ICmp2,
                                                     bool IsAnd) {
  ICmpInst::Predicate Pred1, Pred2;
  Value *V1, *V2;
  const APInt *C1, *C2;
  if (!match(ICmp1, m_ICmp(Pred1, m_Value(V1), m_APInt(C1))) ||
      !match(ICmp2, m_ICmp(Pred2, m_Value(V2), m_APInt(C2))))
    return nullptr;

  // Look through add of a constant offset on V1, V2, or both operands. This
  // allows us to interpret the V + C' < C'' range idiom into a proper range.
  const APInt *Offset1 = nullptr, *Offset2 = nullptr;
  if (V1 != V2) {
    Value *X;
    if (match(V1, m_Add(m_Value(X), m_APInt(Offset1))))
      V1 = X;
    if (match(V2, m_Add(m_Value(X), m_APInt(Offset2))))
      V2 = X;
  }

  if (V1 != V2)
    return nullptr;

  ConstantRange CR1 = ConstantRange::makeExactICmpRegion(
      IsAnd ? ICmpInst::getInversePredicate(Pred1) : Pred1, *C1);
  if (Offset1)
    CR1 = CR1.subtract(*Offset1);

  ConstantRange CR2 = ConstantRange::makeExactICmpRegion(
      IsAnd ? ICmpInst::getInversePredicate(Pred2) : Pred2, *C2);
  if (Offset2)
    CR2 = CR2.subtract(*Offset2);

  Type *Ty = V1->getType();
  Value *NewV = V1;
  std::optional<ConstantRange> CR = CR1.exactUnionWith(CR2);
  if (!CR) {
    if (!(ICmp1->hasOneUse() && ICmp2->hasOneUse()) || CR1.isWrappedSet() ||
        CR2.isWrappedSet())
      return nullptr;

    // Check whether we have equal-size ranges that only differ by one bit.
    // In that case we can apply a mask to map one range onto the other.
    APInt LowerDiff = CR1.getLower() ^ CR2.getLower();
    APInt UpperDiff = (CR1.getUpper() - 1) ^ (CR2.getUpper() - 1);
    APInt CR1Size = CR1.getUpper() - CR1.getLower();
    if (!LowerDiff.isPowerOf2() || LowerDiff != UpperDiff ||
        CR1Size != CR2.getUpper() - CR2.getLower())
      return nullptr;

    CR = CR1.getLower().ult(CR2.getLower()) ? CR1 : CR2;
    NewV = Builder.CreateAnd(NewV, ConstantInt::get(Ty, ~LowerDiff));
  }

  if (IsAnd)
    CR = CR->inverse();

  CmpInst::Predicate NewPred;
  APInt NewC, Offset;
  CR->getEquivalentICmp(NewPred, NewC, Offset);

  if (Offset != 0)
    NewV = Builder.CreateAdd(NewV, ConstantInt::get(Ty, Offset));
  return Builder.CreateICmp(NewPred, NewV, ConstantInt::get(Ty, NewC));
}

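// Worked example (illustrative): "(icmp uge i32 %x, 3) & (icmp ult i32 %x, 5)"
// describes the range [3, 5); getEquivalentICmp may then express it through
// an offset, roughly as
//   %x.off = add i32 %x, -3
//   %r = icmp ult i32 %x.off, 2
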
/// Ignore all operations which only change the sign of a value, returning the
/// underlying magnitude value.
static Value *stripSignOnlyFPOps(Value *Val) {
  match(Val, m_FNeg(m_Value(Val)));
  match(Val, m_FAbs(m_Value(Val)));
  match(Val, m_CopySign(m_Value(Val), m_Value()));
  return Val;
}

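// For example (illustrative): given %v = fneg (fabs %x), the two match() calls
// strip the fneg and then the fabs, so stripSignOnlyFPOps(%v) returns %x.
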
/// Matches canonical form of isnan, fcmp ord x, 0
static bool matchIsNotNaN(FCmpInst::Predicate P, Value *LHS, Value *RHS) {
  return P == FCmpInst::FCMP_ORD && match(RHS, m_AnyZeroFP());
}

/// Matches fcmp u__ x, +/-inf
static bool matchUnorderedInfCompare(FCmpInst::Predicate P, Value *LHS,
                                     Value *RHS) {
  return FCmpInst::isUnordered(P) && match(RHS, m_Inf());
}

/// and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
///
/// Clang emits this pattern for doing an isfinite check in __builtin_isnormal.
static Value *matchIsFiniteTest(InstCombiner::BuilderTy &Builder, FCmpInst *LHS,
                                FCmpInst *RHS) {
  Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
  Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
  FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();

  if (!matchIsNotNaN(PredL, LHS0, LHS1) ||
      !matchUnorderedInfCompare(PredR, RHS0, RHS1))
    return nullptr;

  IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  FastMathFlags FMF = LHS->getFastMathFlags();
  FMF &= RHS->getFastMathFlags();
  Builder.setFastMathFlags(FMF);

  return Builder.CreateFCmp(FCmpInst::getOrderedPredicate(PredR), RHS0, RHS1);
}

Value *InstCombinerImpl::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS,
                                          bool IsAnd, bool IsLogicalSelect) {
  Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
  Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
  FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();

  if (LHS0 == RHS1 && RHS0 == LHS1) {
    // Swap RHS operands to match LHS.
    PredR = FCmpInst::getSwappedPredicate(PredR);
    std::swap(RHS0, RHS1);
  }

  // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
  // Suppose the relation between x and y is R, where R is one of
  // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for
  // testing the desired relations.
  //
  // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
  //    bool(R & CC0) && bool(R & CC1)
  //  = bool((R & CC0) & (R & CC1))
  //  = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency
  //
  // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
  //    bool(R & CC0) || bool(R & CC1)
  //  = bool((R & CC0) | (R & CC1))
  //  = bool(R & (CC0 | CC1)) <= by reversed distribution (contribution? ;)
  if (LHS0 == RHS0 && LHS1 == RHS1) {
    unsigned FCmpCodeL = getFCmpCode(PredL);
    unsigned FCmpCodeR = getFCmpCode(PredR);
    unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;

    // Intersect the fast math flags.
    // TODO: We can union the fast math flags unless this is a logical select.
    IRBuilder<>::FastMathFlagGuard FMFG(Builder);
    FastMathFlags FMF = LHS->getFastMathFlags();
    FMF &= RHS->getFastMathFlags();
    Builder.setFastMathFlags(FMF);

    return getFCmpValue(NewPred, LHS0, LHS1, Builder);
  }

  // This transform is not valid for a logical select.
  if (!IsLogicalSelect &&
      ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
       (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO &&
        !IsAnd))) {
    if (LHS0->getType() != RHS0->getType())
      return nullptr;

    // FCmp canonicalization ensures that (fcmp ord/uno X, X) and
    // (fcmp ord/uno X, C) will be transformed to (fcmp X, +0.0).
    if (match(LHS1, m_PosZeroFP()) && match(RHS1, m_PosZeroFP()))
      // Ignore the constants because they are obviously not NANs:
      // (fcmp ord x, 0.0) & (fcmp ord y, 0.0)  -> (fcmp ord x, y)
      // (fcmp uno x, 0.0) | (fcmp uno y, 0.0)  -> (fcmp uno x, y)
      return Builder.CreateFCmp(PredL, LHS0, RHS0);
  }

  if (IsAnd && stripSignOnlyFPOps(LHS0) == stripSignOnlyFPOps(RHS0)) {
    // and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
    // and (fcmp ord x, 0), (fcmp u* fabs(x), inf) -> fcmp o* x, inf
    if (Value *Left = matchIsFiniteTest(Builder, LHS, RHS))
      return Left;
    if (Value *Right = matchIsFiniteTest(Builder, RHS, LHS))
      return Right;
  }

  // Turn at least two fcmps with constants into llvm.is.fpclass.
  //
  // If we can represent a combined value test with one class call, we can
  // potentially eliminate 4-6 instructions. If we can represent a test with a
  // single fcmp with fneg and fabs, that's likely a better canonical form.
  if (LHS->hasOneUse() && RHS->hasOneUse()) {
    auto [ClassValRHS, ClassMaskRHS] =
        fcmpToClassTest(PredR, *RHS->getFunction(), RHS0, RHS1);
    if (ClassValRHS) {
      auto [ClassValLHS, ClassMaskLHS] =
          fcmpToClassTest(PredL, *LHS->getFunction(), LHS0, LHS1);
      if (ClassValLHS == ClassValRHS) {
        unsigned CombinedMask = IsAnd ? (ClassMaskLHS & ClassMaskRHS)
                                      : (ClassMaskLHS | ClassMaskRHS);
        return Builder.CreateIntrinsic(
            Intrinsic::is_fpclass, {ClassValLHS->getType()},
            {ClassValLHS, Builder.getInt32(CombinedMask)});
      }
    }
  }

  return nullptr;
}

/// Match an fcmp against a special value that performs a test possible by
/// llvm.is.fpclass.
static bool matchIsFPClassLikeFCmp(Value *Op, Value *&ClassVal,
                                   uint64_t &ClassMask) {
  auto *FCmp = dyn_cast<FCmpInst>(Op);
  if (!FCmp || !FCmp->hasOneUse())
    return false;

  std::tie(ClassVal, ClassMask) =
      fcmpToClassTest(FCmp->getPredicate(), *FCmp->getParent()->getParent(),
                      FCmp->getOperand(0), FCmp->getOperand(1));
  return ClassVal != nullptr;
}

/// or (is_fpclass x, mask0), (is_fpclass x, mask1)
///     -> is_fpclass x, (mask0 | mask1)
/// and (is_fpclass x, mask0), (is_fpclass x, mask1)
///     -> is_fpclass x, (mask0 & mask1)
/// xor (is_fpclass x, mask0), (is_fpclass x, mask1)
///     -> is_fpclass x, (mask0 ^ mask1)
Instruction *InstCombinerImpl::foldLogicOfIsFPClass(BinaryOperator &BO,
                                                    Value *Op0, Value *Op1) {
  Value *ClassVal0 = nullptr;
  Value *ClassVal1 = nullptr;
  uint64_t ClassMask0, ClassMask1;

  // Restrict to folding one fcmp into one is.fpclass for now, don't introduce a
  // new class.
  //
  // TODO: Support forming is.fpclass out of 2 separate fcmps when codegen is
  // better.
  bool IsLHSClass =
      match(Op0, m_OneUse(m_Intrinsic<Intrinsic::is_fpclass>(
                     m_Value(ClassVal0), m_ConstantInt(ClassMask0))));
  bool IsRHSClass =
      match(Op1, m_OneUse(m_Intrinsic<Intrinsic::is_fpclass>(
                     m_Value(ClassVal1), m_ConstantInt(ClassMask1))));
  if ((((IsLHSClass || matchIsFPClassLikeFCmp(Op0, ClassVal0, ClassMask0)) &&
        (IsRHSClass || matchIsFPClassLikeFCmp(Op1, ClassVal1, ClassMask1)))) &&
      ClassVal0 == ClassVal1) {
    unsigned NewClassMask;
    switch (BO.getOpcode()) {
    case Instruction::And:
      NewClassMask = ClassMask0 & ClassMask1;
      break;
    case Instruction::Or:
      NewClassMask = ClassMask0 | ClassMask1;
      break;
    case Instruction::Xor:
      NewClassMask = ClassMask0 ^ ClassMask1;
      break;
    default:
      llvm_unreachable("not a binary logic operator");
    }

    if (IsLHSClass) {
      auto *II = cast<IntrinsicInst>(Op0);
      II->setArgOperand(
          1, ConstantInt::get(II->getArgOperand(1)->getType(), NewClassMask));
      return replaceInstUsesWith(BO, II);
    }

    if (IsRHSClass) {
      auto *II = cast<IntrinsicInst>(Op1);
      II->setArgOperand(
          1, ConstantInt::get(II->getArgOperand(1)->getType(), NewClassMask));
      return replaceInstUsesWith(BO, II);
    }

    CallInst *NewClass =
        Builder.CreateIntrinsic(Intrinsic::is_fpclass, {ClassVal0->getType()},
                                {ClassVal0, Builder.getInt32(NewClassMask)});
    return replaceInstUsesWith(BO, NewClass);
  }

  return nullptr;
}

/// Look for the pattern that conditionally negates a value via math operations:
///   cond.splat = sext i1 cond
///   sub = add cond.splat, x
///   xor = xor sub, cond.splat
/// and rewrite it to do the same, but via logical operations:
///   value.neg = sub 0, value
///   res = select i1 cond, value.neg, value
Instruction *InstCombinerImpl::canonicalizeConditionalNegationViaMathToSelect(
    BinaryOperator &I) {
  assert(I.getOpcode() == BinaryOperator::Xor && "Only for xor!");
  Value *Cond, *X;
  // As per complexity ordering, `xor` is not commutative here.
  if (!match(&I, m_c_BinOp(m_OneUse(m_Value()), m_Value())) ||
      !match(I.getOperand(1), m_SExt(m_Value(Cond))) ||
      !Cond->getType()->isIntOrIntVectorTy(1) ||
      !match(I.getOperand(0), m_c_Add(m_SExt(m_Deferred(Cond)), m_Value(X))))
    return nullptr;
  return SelectInst::Create(Cond, Builder.CreateNeg(X, X->getName() + ".neg"),
                            X);
}


/// This is a limited reassociation for a special case (see above) where we are
/// checking if two values are either both NAN (unordered) or not-NAN (ordered).
/// This could be handled more generally in '-reassociation', but it seems like
/// an unlikely pattern for a large number of logic ops and fcmps.
static Instruction *reassociateFCmps(BinaryOperator &BO,
                                     InstCombiner::BuilderTy &Builder) {
  Instruction::BinaryOps Opcode = BO.getOpcode();
  assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
         "Expecting and/or op for fcmp transform");

  // There are 4 commuted variants of the pattern. Canonicalize operands of this
  // logic op so an fcmp is operand 0 and a matching logic op is operand 1.
  Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1), *X;
  FCmpInst::Predicate Pred;
  if (match(Op1, m_FCmp(Pred, m_Value(), m_AnyZeroFP())))
    std::swap(Op0, Op1);

  // Match inner binop and the predicate for combining 2 NAN checks into 1.
  Value *BO10, *BO11;
  FCmpInst::Predicate NanPred = Opcode == Instruction::And ? FCmpInst::FCMP_ORD
                                                           : FCmpInst::FCMP_UNO;
  if (!match(Op0, m_FCmp(Pred, m_Value(X), m_AnyZeroFP())) || Pred != NanPred ||
      !match(Op1, m_BinOp(Opcode, m_Value(BO10), m_Value(BO11))))
    return nullptr;

  // The inner logic op must have a matching fcmp operand.
  Value *Y;
  if (!match(BO10, m_FCmp(Pred, m_Value(Y), m_AnyZeroFP())) ||
      Pred != NanPred || X->getType() != Y->getType())
    std::swap(BO10, BO11);

  if (!match(BO10, m_FCmp(Pred, m_Value(Y), m_AnyZeroFP())) ||
      Pred != NanPred || X->getType() != Y->getType())
    return nullptr;

  // and (fcmp ord X, 0), (and (fcmp ord Y, 0), Z) --> and (fcmp ord X, Y), Z
  // or  (fcmp uno X, 0), (or  (fcmp uno Y, 0), Z) --> or  (fcmp uno X, Y), Z
  Value *NewFCmp = Builder.CreateFCmp(Pred, X, Y);
  if (auto *NewFCmpInst = dyn_cast<FCmpInst>(NewFCmp)) {
    // Intersect FMF from the 2 source fcmps.
    NewFCmpInst->copyIRFlags(Op0);
    NewFCmpInst->andIRFlags(BO10);
  }
  return BinaryOperator::Create(Opcode, NewFCmp, BO11);
}
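
// For example (illustrative): "fcmp ord %x, %y" is true iff neither operand
// is a NaN, so two zero-compares can share one fcmp:
//   and (fcmp ord float %x, 0.0), (and (fcmp ord float %y, 0.0), i1 %z)
//     --> and (fcmp ord float %x, %y), i1 %z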

/// Match variations of De Morgan's Laws:
///   (~A & ~B) == (~(A | B))
///   (~A | ~B) == (~(A & B))
static Instruction *matchDeMorgansLaws(BinaryOperator &I,
                                       InstCombiner::BuilderTy &Builder) {
  const Instruction::BinaryOps Opcode = I.getOpcode();
  assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
         "Trying to match De Morgan's Laws with something other than and/or");

  // Flip the logic operation.
  const Instruction::BinaryOps FlippedOpcode =
      (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *A, *B;
  if (match(Op0, m_OneUse(m_Not(m_Value(A)))) &&
      match(Op1, m_OneUse(m_Not(m_Value(B)))) &&
      !InstCombiner::isFreeToInvert(A, A->hasOneUse()) &&
      !InstCombiner::isFreeToInvert(B, B->hasOneUse())) {
    Value *AndOr =
        Builder.CreateBinOp(FlippedOpcode, A, B, I.getName() + ".demorgan");
    return BinaryOperator::CreateNot(AndOr);
  }

  // The 'not' ops may require reassociation.
  // (A & ~B) & ~C --> A & ~(B | C)
  // (~B & A) & ~C --> A & ~(B | C)
  // (A | ~B) | ~C --> A | ~(B & C)
  // (~B | A) | ~C --> A | ~(B & C)
  Value *C;
  if (match(Op0, m_OneUse(m_c_BinOp(Opcode, m_Value(A), m_Not(m_Value(B))))) &&
      match(Op1, m_Not(m_Value(C)))) {
    Value *FlippedBO = Builder.CreateBinOp(FlippedOpcode, B, C);
    return BinaryOperator::Create(Opcode, A, Builder.CreateNot(FlippedBO));
  }

  return nullptr;
}
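
// Reassociation example (illustrative), for (A & ~B) & ~C with A = %x:
//   %nb = xor i32 %b, -1
//   %t  = and i32 %x, %nb
//   %nc = xor i32 %c, -1
//   %r  = and i32 %t, %nc
// becomes:
//   %or = or i32 %b, %c
//   %r  = and i32 %x, (xor i32 %or, -1)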

bool InstCombinerImpl::shouldOptimizeCast(CastInst *CI) {
  Value *CastSrc = CI->getOperand(0);

  // Noop casts and casts of constants should be eliminated trivially.
  if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(CastSrc))
    return false;

  // If this cast is paired with another cast that can be eliminated, we prefer
  // to have it eliminated.
  if (const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc))
    if (isEliminableCastPair(PrecedingCI, CI))
      return false;

  return true;
}

/// Fold {and,or,xor} (cast X), C.
static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
                                          InstCombiner::BuilderTy &Builder) {
  Constant *C = dyn_cast<Constant>(Logic.getOperand(1));
  if (!C)
    return nullptr;

  auto LogicOpc = Logic.getOpcode();
  Type *DestTy = Logic.getType();
  Type *SrcTy = Cast->getSrcTy();

  // Move the logic operation ahead of a zext or sext if the constant is
  // unchanged in the smaller source type. Performing the logic in a smaller
  // type may provide more information to later folds, and the smaller logic
  // instruction may be cheaper (particularly in the case of vectors).
  Value *X;
  if (match(Cast, m_OneUse(m_ZExt(m_Value(X))))) {
    Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
    Constant *ZextTruncC = ConstantExpr::getZExt(TruncC, DestTy);
    if (ZextTruncC == C) {
      // LogicOpc (zext X), C --> zext (LogicOpc X, C)
      Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
      return new ZExtInst(NewOp, DestTy);
    }
  }

  if (match(Cast, m_OneUse(m_SExt(m_Value(X))))) {
    Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
    Constant *SextTruncC = ConstantExpr::getSExt(TruncC, DestTy);
    if (SextTruncC == C) {
      // LogicOpc (sext X), C --> sext (LogicOpc X, C)
      Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
      return new SExtInst(NewOp, DestTy);
    }
  }

  return nullptr;
}
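
// For example (illustrative):
//   %z = zext i8 %x to i32
//   %r = and i32 %z, 7
// The constant 7 survives the trunc-to-i8 + zext-to-i32 round trip, so the
// logic op can sink below the cast:
//   %n = and i8 %x, 7
//   %r = zext i8 %n to i32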

/// Fold {and,or,xor} (cast X), Y.
Instruction *InstCombinerImpl::foldCastedBitwiseLogic(BinaryOperator &I) {
  auto LogicOpc = I.getOpcode();
  assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding");

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // fold bitwise(A >> BW - 1, zext(icmp)) (BW is the scalar bits of the
  // type of A)
  // -> bitwise(zext(A < 0), zext(icmp))
  // -> zext(bitwise(A < 0, icmp))
  auto FoldBitwiseICmpZeroWithICmp = [&](Value *Op0,
                                         Value *Op1) -> Instruction * {
    ICmpInst::Predicate Pred;
    Value *A;
    bool IsMatched =
        match(Op0,
              m_OneUse(m_LShr(
                  m_Value(A),
                  m_SpecificInt(Op0->getType()->getScalarSizeInBits() - 1)))) &&
        match(Op1, m_OneUse(m_ZExt(m_ICmp(Pred, m_Value(), m_Value()))));

    if (!IsMatched)
      return nullptr;

    auto *ICmpL =
        Builder.CreateICmpSLT(A, Constant::getNullValue(A->getType()));
    auto *ICmpR = cast<ZExtInst>(Op1)->getOperand(0);
    auto *BitwiseOp = Builder.CreateBinOp(LogicOpc, ICmpL, ICmpR);

    return new ZExtInst(BitwiseOp, Op0->getType());
  };

  if (auto *Ret = FoldBitwiseICmpZeroWithICmp(Op0, Op1))
    return Ret;

  if (auto *Ret = FoldBitwiseICmpZeroWithICmp(Op1, Op0))
    return Ret;

  CastInst *Cast0 = dyn_cast<CastInst>(Op0);
  if (!Cast0)
    return nullptr;

  // This must be a cast from an integer or integer vector source type to allow
  // transformation of the logic operation to the source type.
  Type *DestTy = I.getType();
  Type *SrcTy = Cast0->getSrcTy();
  if (!SrcTy->isIntOrIntVectorTy())
    return nullptr;

  if (Instruction *Ret = foldLogicCastConstant(I, Cast0, Builder))
    return Ret;

  CastInst *Cast1 = dyn_cast<CastInst>(Op1);
  if (!Cast1)
    return nullptr;

  // Both operands of the logic operation are casts. The casts must be the
  // same kind for reduction.
  Instruction::CastOps CastOpcode = Cast0->getOpcode();
  if (CastOpcode != Cast1->getOpcode())
    return nullptr;

  // If the source types do not match, but the casts are matching extends, we
  // can still narrow the logic op.
  if (SrcTy != Cast1->getSrcTy()) {
    Value *X, *Y;
    if (match(Cast0, m_OneUse(m_ZExtOrSExt(m_Value(X)))) &&
        match(Cast1, m_OneUse(m_ZExtOrSExt(m_Value(Y))))) {
      // Cast the narrower source to the wider source type.
      unsigned XNumBits = X->getType()->getScalarSizeInBits();
      unsigned YNumBits = Y->getType()->getScalarSizeInBits();
      if (XNumBits < YNumBits)
        X = Builder.CreateCast(CastOpcode, X, Y->getType());
      else
        Y = Builder.CreateCast(CastOpcode, Y, X->getType());
      // Do the logic op in the intermediate width, then widen more.
      Value *NarrowLogic = Builder.CreateBinOp(LogicOpc, X, Y);
      return CastInst::Create(CastOpcode, NarrowLogic, DestTy);
    }

    // Give up for other cast opcodes.
    return nullptr;
  }

  Value *Cast0Src = Cast0->getOperand(0);
  Value *Cast1Src = Cast1->getOperand(0);

  // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
  if ((Cast0->hasOneUse() || Cast1->hasOneUse()) &&
      shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
    Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
                                       I.getName());
    return CastInst::Create(CastOpcode, NewOp, DestTy);
  }

  // For now, only 'and'/'or' have optimizations after this.
  if (LogicOpc == Instruction::Xor)
    return nullptr;

  // If this is logic(cast(icmp), cast(icmp)), try to fold this even if the
  // cast is otherwise not optimizable. This happens for vector sexts.
  ICmpInst *ICmp0 = dyn_cast<ICmpInst>(Cast0Src);
  ICmpInst *ICmp1 = dyn_cast<ICmpInst>(Cast1Src);
  if (ICmp0 && ICmp1) {
    if (Value *Res =
            foldAndOrOfICmps(ICmp0, ICmp1, I, LogicOpc == Instruction::And))
      return CastInst::Create(CastOpcode, Res, DestTy);
    return nullptr;
  }

  // If this is logic(cast(fcmp), cast(fcmp)), try to fold this even if the
  // cast is otherwise not optimizable. This happens for vector sexts.
  FCmpInst *FCmp0 = dyn_cast<FCmpInst>(Cast0Src);
  FCmpInst *FCmp1 = dyn_cast<FCmpInst>(Cast1Src);
  if (FCmp0 && FCmp1)
    if (Value *R = foldLogicOfFCmps(FCmp0, FCmp1, LogicOpc == Instruction::And))
      return CastInst::Create(CastOpcode, R, DestTy);

  return nullptr;
}

static Instruction *foldAndToXor(BinaryOperator &I,
                                 InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::And);
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *A, *B;

  // Operand complexity canonicalization guarantees that the 'or' is Op0.
  // (A | B) & ~(A & B) --> A ^ B
  // (A | B) & ~(B & A) --> A ^ B
  if (match(&I, m_BinOp(m_Or(m_Value(A), m_Value(B)),
                        m_Not(m_c_And(m_Deferred(A), m_Deferred(B))))))
    return BinaryOperator::CreateXor(A, B);

  // (A | ~B) & (~A | B) --> ~(A ^ B)
  // (A | ~B) & (B | ~A) --> ~(A ^ B)
  // (~B | A) & (~A | B) --> ~(A ^ B)
  // (~B | A) & (B | ~A) --> ~(A ^ B)
  if (Op0->hasOneUse() || Op1->hasOneUse())
    if (match(&I, m_BinOp(m_c_Or(m_Value(A), m_Not(m_Value(B))),
                          m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B)))))
      return BinaryOperator::CreateNot(Builder.CreateXor(A, B));

  return nullptr;
}

static Instruction *foldOrToXor(BinaryOperator &I,
                                InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::Or);
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *A, *B;

  // Operand complexity canonicalization guarantees that the 'and' is Op0.
  // (A & B) | ~(A | B) --> ~(A ^ B)
  // (A & B) | ~(B | A) --> ~(A ^ B)
  if (Op0->hasOneUse() || Op1->hasOneUse())
    if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
        match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return BinaryOperator::CreateNot(Builder.CreateXor(A, B));

  // Operand complexity canonicalization guarantees that the 'xor' is Op0.
  // (A ^ B) | ~(A | B) --> ~(A & B)
  // (A ^ B) | ~(B | A) --> ~(A & B)
  if (Op0->hasOneUse() || Op1->hasOneUse())
    if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
        match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));

  // (A & ~B) | (~A & B) --> A ^ B
  // (A & ~B) | (B & ~A) --> A ^ B
  // (~B & A) | (~A & B) --> A ^ B
  // (~B & A) | (B & ~A) --> A ^ B
  if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
      match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))
    return BinaryOperator::CreateXor(A, B);

  return nullptr;
}

/// Return true if a constant shift amount is always less than the specified
/// bit-width. If not, the shift could create poison in the narrower type.
static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth) {
  APInt Threshold(C->getType()->getScalarSizeInBits(), BitWidth);
  return match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold));
}

/// Try to use narrower ops (sink zext ops) for an 'and' with binop operand and
/// a common zext operand: and (binop (zext X), C), (zext X).
Instruction *InstCombinerImpl::narrowMaskedBinOp(BinaryOperator &And) {
  // This transform could also apply to {or, and, xor}, but there are better
  // folds for those cases, so we don't expect those patterns here. AShr is not
  // handled because it should always be transformed to LShr in this sequence.
  // The subtract transform is different because it has a constant on the left.
  // Add/mul commute the constant to RHS; sub with constant RHS becomes add.
  Value *Op0 = And.getOperand(0), *Op1 = And.getOperand(1);
  Constant *C;
  if (!match(Op0, m_OneUse(m_Add(m_Specific(Op1), m_Constant(C)))) &&
      !match(Op0, m_OneUse(m_Mul(m_Specific(Op1), m_Constant(C)))) &&
      !match(Op0, m_OneUse(m_LShr(m_Specific(Op1), m_Constant(C)))) &&
      !match(Op0, m_OneUse(m_Shl(m_Specific(Op1), m_Constant(C)))) &&
      !match(Op0, m_OneUse(m_Sub(m_Constant(C), m_Specific(Op1)))))
    return nullptr;

  Value *X;
  if (!match(Op1, m_ZExt(m_Value(X))) || Op1->hasNUsesOrMore(3))
    return nullptr;

  Type *Ty = And.getType();
  if (!isa<VectorType>(Ty) && !shouldChangeType(Ty, X->getType()))
    return nullptr;

  // If we're narrowing a shift, the shift amount must be safe (less than the
  // width) in the narrower type. If the shift amount is greater, instsimplify
  // usually handles that case, but we can't guarantee/assert it.
  Instruction::BinaryOps Opc = cast<BinaryOperator>(Op0)->getOpcode();
  if (Opc == Instruction::LShr || Opc == Instruction::Shl)
    if (!canNarrowShiftAmt(C, X->getType()->getScalarSizeInBits()))
      return nullptr;

  // and (sub C, (zext X)), (zext X) --> zext (and (sub C', X), X)
  // and (binop (zext X), C), (zext X) --> zext (and (binop X, C'), X)
  Value *NewC = ConstantExpr::getTrunc(C, X->getType());
  Value *NewBO = Opc == Instruction::Sub ? Builder.CreateBinOp(Opc, NewC, X)
                                         : Builder.CreateBinOp(Opc, X, NewC);
  return new ZExtInst(Builder.CreateAnd(NewBO, X), Ty);
}
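
// For example (illustrative), narrowing from i32 back to i8:
//   %z = zext i8 %x to i32
//   %a = add i32 %z, 44
//   %r = and i32 %a, %z
// becomes:
//   %a = add i8 %x, 44
//   %n = and i8 %a, %x
//   %r = zext i8 %n to i32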

/// Try folding relatively complex patterns for both And and Or operations
/// with all And and Or swapped.
static Instruction *foldComplexAndOrPatterns(BinaryOperator &I,
                                             InstCombiner::BuilderTy &Builder) {
  const Instruction::BinaryOps Opcode = I.getOpcode();
  assert(Opcode == Instruction::And || Opcode == Instruction::Or);

  // Flip the logic operation.
  const Instruction::BinaryOps FlippedOpcode =
      (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *A, *B, *C, *X, *Y, *Dummy;

  // Match following expressions:
  // (~(A | B) & C)
  // (~(A & B) | C)
  // Captures X = ~(A | B) or ~(A & B)
  const auto matchNotOrAnd =
      [Opcode, FlippedOpcode](Value *Op, auto m_A, auto m_B, auto m_C,
                              Value *&X, bool CountUses = false) -> bool {
    if (CountUses && !Op->hasOneUse())
      return false;

    if (match(Op, m_c_BinOp(FlippedOpcode,
                            m_CombineAnd(m_Value(X),
                                         m_Not(m_c_BinOp(Opcode, m_A, m_B))),
                            m_C)))
      return !CountUses || X->hasOneUse();

    return false;
  };

  // (~(A | B) & C) | ... --> ...
  // (~(A & B) | C) & ... --> ...
  // TODO: One use checks are conservative. We just need to check that a total
  //       number of multiple used values does not exceed reduction
  //       in operations.
  if (matchNotOrAnd(Op0, m_Value(A), m_Value(B), m_Value(C), X)) {
    // (~(A | B) & C) | (~(A | C) & B) --> (B ^ C) & ~A
    // (~(A & B) | C) & (~(A & C) | B) --> ~((B ^ C) & A)
    if (matchNotOrAnd(Op1, m_Specific(A), m_Specific(C), m_Specific(B), Dummy,
                      true)) {
      Value *Xor = Builder.CreateXor(B, C);
      return (Opcode == Instruction::Or)
                 ? BinaryOperator::CreateAnd(Xor, Builder.CreateNot(A))
                 : BinaryOperator::CreateNot(Builder.CreateAnd(Xor, A));
    }

    // (~(A | B) & C) | (~(B | C) & A) --> (A ^ C) & ~B
    // (~(A & B) | C) & (~(B & C) | A) --> ~((A ^ C) & B)
    if (matchNotOrAnd(Op1, m_Specific(B), m_Specific(C), m_Specific(A), Dummy,
                      true)) {
      Value *Xor = Builder.CreateXor(A, C);
      return (Opcode == Instruction::Or)
                 ? BinaryOperator::CreateAnd(Xor, Builder.CreateNot(B))
                 : BinaryOperator::CreateNot(Builder.CreateAnd(Xor, B));
    }

    // (~(A | B) & C) | ~(A | C) --> ~((B & C) | A)
    // (~(A & B) | C) & ~(A & C) --> ~((B | C) & A)
    if (match(Op1, m_OneUse(m_Not(m_OneUse(
                       m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)))))))
      return BinaryOperator::CreateNot(Builder.CreateBinOp(
          Opcode, Builder.CreateBinOp(FlippedOpcode, B, C), A));

    // (~(A | B) & C) | ~(B | C) --> ~((A & C) | B)
    // (~(A & B) | C) & ~(B & C) --> ~((A | C) & B)
    if (match(Op1, m_OneUse(m_Not(m_OneUse(
                       m_c_BinOp(Opcode, m_Specific(B), m_Specific(C)))))))
      return BinaryOperator::CreateNot(Builder.CreateBinOp(
          Opcode, Builder.CreateBinOp(FlippedOpcode, A, C), B));

    // (~(A | B) & C) | ~(C | (A ^ B)) --> ~((A | B) & (C | (A ^ B)))
    // Note, the pattern with swapped and/or is not handled because the
    // result is more undefined than a source:
    // (~(A & B) | C) & ~(C & (A ^ B)) --> (A ^ B ^ C) | ~(A | C) is invalid.
    if (Opcode == Instruction::Or && Op0->hasOneUse() &&
        match(Op1, m_OneUse(m_Not(m_CombineAnd(
                       m_Value(Y),
                       m_c_BinOp(Opcode, m_Specific(C),
                                 m_c_Xor(m_Specific(A), m_Specific(B)))))))) {
      // X = ~(A | B)
      // Y = (C | (A ^ B)
      Value *Or = cast<BinaryOperator>(X)->getOperand(0);
      return BinaryOperator::CreateNot(Builder.CreateAnd(Or, Y));
    }
  }

  // (~A & B & C) | ... --> ...
  // (~A | B | C) | ... --> ...
  // TODO: One use checks are conservative. We just need to check that a total
  //       number of multiple used values does not exceed reduction
  //       in operations.
  if (match(Op0,
            m_OneUse(m_c_BinOp(FlippedOpcode,
                               m_BinOp(FlippedOpcode, m_Value(B), m_Value(C)),
                               m_CombineAnd(m_Value(X), m_Not(m_Value(A)))))) ||
      match(Op0, m_OneUse(m_c_BinOp(
                     FlippedOpcode,
                     m_c_BinOp(FlippedOpcode, m_Value(C),
                               m_CombineAnd(m_Value(X), m_Not(m_Value(A)))),
                     m_Value(B))))) {
    // X = ~A
    // (~A & B & C) | ~(A | B | C) --> ~(A | (B ^ C))
    // (~A | B | C) & ~(A & B & C) --> (~A | (B ^ C))
    if (match(Op1, m_OneUse(m_Not(m_c_BinOp(
                       Opcode, m_c_BinOp(Opcode, m_Specific(A), m_Specific(B)),
                       m_Specific(C))))) ||
        match(Op1, m_OneUse(m_Not(m_c_BinOp(
                       Opcode, m_c_BinOp(Opcode, m_Specific(B), m_Specific(C)),
                       m_Specific(A))))) ||
        match(Op1, m_OneUse(m_Not(m_c_BinOp(
                       Opcode, m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)),
                       m_Specific(B)))))) {
      Value *Xor = Builder.CreateXor(B, C);
      return (Opcode == Instruction::Or)
                 ? BinaryOperator::CreateNot(Builder.CreateOr(Xor, A))
                 : BinaryOperator::CreateOr(Xor, X);
    }

    // (~A & B & C) | ~(A | B) --> (C | ~B) & ~A
    // (~A | B | C) & ~(A & B) --> (C & ~B) | ~A
    if (match(Op1, m_OneUse(m_Not(m_OneUse(
                       m_c_BinOp(Opcode, m_Specific(A), m_Specific(B)))))))
      return BinaryOperator::Create(
          FlippedOpcode, Builder.CreateBinOp(Opcode, C, Builder.CreateNot(B)),
          X);

    // (~A & B & C) | ~(A | C) --> (B | ~C) & ~A
    // (~A | B | C) & ~(A & C) --> (B & ~C) | ~A
    if (match(Op1, m_OneUse(m_Not(m_OneUse(
                       m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)))))))
      return BinaryOperator::Create(
          FlippedOpcode, Builder.CreateBinOp(Opcode, B, Builder.CreateNot(C)),
          X);
  }

  return nullptr;
}

/// Try to reassociate a pair of binops so that values with one use only are
/// part of the same instruction. This may enable folds that are limited with
/// multi-use restrictions and makes it more likely to match other patterns that
/// are looking for a common operand.
static Instruction *reassociateForUses(BinaryOperator &BO,
                                       InstCombinerImpl::BuilderTy &Builder) {
  Instruction::BinaryOps Opcode = BO.getOpcode();
  Value *X, *Y, *Z;
  if (match(&BO,
            m_c_BinOp(Opcode, m_OneUse(m_BinOp(Opcode, m_Value(X), m_Value(Y))),
                      m_OneUse(m_Value(Z))))) {
    if (!isa<Constant>(X) && !isa<Constant>(Y) && !isa<Constant>(Z)) {
      // (X op Y) op Z --> (Y op Z) op X
      if (!X->hasOneUse()) {
        Value *YZ = Builder.CreateBinOp(Opcode, Y, Z);
        return BinaryOperator::Create(Opcode, YZ, X);
      }
      // (X op Y) op Z --> (X op Z) op Y
      if (!Y->hasOneUse()) {
        Value *XZ = Builder.CreateBinOp(Opcode, X, Z);
        return BinaryOperator::Create(Opcode, XZ, Y);
      }
    }
  }

  return nullptr;
}

// Match
// (X + C2) | C
// (X + C2) ^ C
// (X + C2) & C
// and convert to do the bitwise logic first:
// (X | C) + C2
// (X ^ C) + C2
// (X & C) + C2
// iff bits affected by logic op are lower than last bit affected by math op
static Instruction *canonicalizeLogicFirst(BinaryOperator &I,
                                           InstCombiner::BuilderTy &Builder) {
  Type *Ty = I.getType();
  Instruction::BinaryOps OpC = I.getOpcode();
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *X;
  const APInt *C, *C2;

  if (!(match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C2)))) &&
        match(Op1, m_APInt(C))))
    return nullptr;

  unsigned Width = Ty->getScalarSizeInBits();
  unsigned LastOneMath = Width - C2->countr_zero();

  switch (OpC) {
  case Instruction::And:
    if (C->countl_one() < LastOneMath)
      return nullptr;
    break;
  case Instruction::Xor:
  case Instruction::Or:
    if (C->countl_zero() < LastOneMath)
      return nullptr;
    break;
  default:
    llvm_unreachable("Unexpected BinaryOp!");
  }

  Value *NewBinOp = Builder.CreateBinOp(OpC, X, ConstantInt::get(Ty, *C));
  return BinaryOperator::CreateWithCopiedFlags(Instruction::Add, NewBinOp,
                                               ConstantInt::get(Ty, *C2), Op0);
}
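
// Worked example (illustrative) for i8: with C2 = 16 (4 trailing zeros), the
// add can only change bits 4 and up. A mask C = 0xF8 keeps all of those bits
// (countl_one(0xF8) == 5 >= 8 - 4 == LastOneMath), so:
//   (add i8 %x, 16) & 0xF8  -->  (and i8 %x, 0xF8) + 16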

// binop(shift(ShiftedC1, ShAmt), shift(ShiftedC2, add(ShAmt, AddC))) ->
// shift(binop(ShiftedC1, shift(ShiftedC2, AddC)), ShAmt)
// where both shifts are the same and AddC is a valid shift amount.
Instruction *InstCombinerImpl::foldBinOpOfDisplacedShifts(BinaryOperator &I) {
  assert((I.isBitwiseLogicOp() || I.getOpcode() == Instruction::Add) &&
         "Unexpected opcode");

  Value *ShAmt;
  Constant *ShiftedC1, *ShiftedC2, *AddC;
  Type *Ty = I.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  if (!match(&I,
             m_c_BinOp(m_Shift(m_ImmConstant(ShiftedC1), m_Value(ShAmt)),
                       m_Shift(m_ImmConstant(ShiftedC2),
                               m_Add(m_Deferred(ShAmt), m_ImmConstant(AddC))))))
    return nullptr;

  // Make sure the add constant is a valid shift amount.
  if (!match(AddC,
             m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(BitWidth, BitWidth))))
    return nullptr;

  // Avoid constant expressions.
  auto *Op0Inst = dyn_cast<Instruction>(I.getOperand(0));
  auto *Op1Inst = dyn_cast<Instruction>(I.getOperand(1));
  if (!Op0Inst || !Op1Inst)
    return nullptr;

  // Both shifts must be the same.
  Instruction::BinaryOps ShiftOp =
      static_cast<Instruction::BinaryOps>(Op0Inst->getOpcode());
  if (ShiftOp != Op1Inst->getOpcode())
    return nullptr;

  // For adds, only left shifts are supported.
  if (I.getOpcode() == Instruction::Add && ShiftOp != Instruction::Shl)
    return nullptr;

  Value *NewC = Builder.CreateBinOp(
      I.getOpcode(), ShiftedC1, Builder.CreateBinOp(ShiftOp, ShiftedC2, AddC));
  return BinaryOperator::Create(ShiftOp, NewC, ShAmt);
}
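
// Worked example (illustrative) for i8:
//   %s1 = shl i8 16, %x
//   %a  = add i8 %x, 2
//   %s2 = shl i8 3, %a
//   %r  = or i8 %s1, %s2
// Here 16 | (3 << 2) == 28, so this becomes:
//   %r = shl i8 28, %x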

// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
  Type *Ty = I.getType();

  if (Value *V = simplifyAndInst(I.getOperand(0), I.getOperand(1),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // Do this before using distributive laws to catch simple and/or/not patterns.
  if (Instruction *Xor = foldAndToXor(I, Builder))
    return Xor;

  if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
    return X;

  // (A|B)&(A|C) -> A|(B&C) etc
  if (Value *V = foldUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyBSwap(I, Builder))
    return replaceInstUsesWith(I, V);

  if (Instruction *R = foldBinOpShiftWithShift(I))
    return R;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X, *Y;

  if (match(Op0, m_OneUse(m_LogicalShift(m_One(), m_Value(X)))) &&
      match(Op1, m_One())) {
    // (1 << X) & 1 --> zext(X == 0)
    // (1 >> X) & 1 --> zext(X == 0)
    Value *IsZero = Builder.CreateICmpEQ(X, ConstantInt::get(Ty, 0));
    return new ZExtInst(IsZero, Ty);
  }

  // (-(X & 1)) & Y --> (X & 1) == 0 ? 0 : Y
  Value *Neg;
  if (match(&I,
            m_c_And(m_CombineAnd(m_Value(Neg),
                                 m_OneUse(m_Neg(m_And(m_Value(), m_One())))),
                    m_Value(Y)))) {
    Value *Cmp = Builder.CreateIsNull(Neg);
    return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Y);
  }

  const APInt *C;
  if (match(Op1, m_APInt(C))) {
    const APInt *XorC;
    if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_APInt(XorC))))) {
      // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
      Constant *NewC = ConstantInt::get(Ty, *C & *XorC);
      Value *And = Builder.CreateAnd(X, Op1);
      And->takeName(Op0);
      return BinaryOperator::CreateXor(And, NewC);
    }

    const APInt *OrC;
    if (match(Op0, m_OneUse(m_Or(m_Value(X), m_APInt(OrC))))) {
      // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2)
      // NOTE: This reduces the number of bits set in the & mask, which
      // can expose opportunities for store narrowing for scalars.
      // NOTE: SimplifyDemandedBits should have already removed bits from C1
      // that aren't set in C2. Meaning we can replace (C1&C2) with C1 in
      // above, but this feels safer.
      APInt Together = *C & *OrC;
      Value *And = Builder.CreateAnd(X, ConstantInt::get(Ty, Together ^ *C));
      And->takeName(Op0);
      return BinaryOperator::CreateOr(And, ConstantInt::get(Ty, Together));
    }

    unsigned Width = Ty->getScalarSizeInBits();
    const APInt *ShiftC;
    if (match(Op0, m_OneUse(m_SExt(m_AShr(m_Value(X), m_APInt(ShiftC))))) &&
        ShiftC->ult(Width)) {
      if (*C == APInt::getLowBitsSet(Width, Width - ShiftC->getZExtValue())) {
        // We are clearing high bits that were potentially set by sext+ashr:
        // and (sext (ashr X, ShiftC)), C --> lshr (sext X), ShiftC
        Value *Sext = Builder.CreateSExt(X, Ty);
        Constant *ShAmtC = ConstantInt::get(Ty, ShiftC->zext(Width));
        return BinaryOperator::CreateLShr(Sext, ShAmtC);
      }
    }

    // If this 'and' clears the sign-bits added by ashr, replace with lshr:
    // and (ashr X, ShiftC), C --> lshr X, ShiftC
    if (match(Op0, m_AShr(m_Value(X), m_APInt(ShiftC))) && ShiftC->ult(Width) &&
        C->isMask(Width - ShiftC->getZExtValue()))
      return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, *ShiftC));

    const APInt *AddC;
    if (match(Op0, m_Add(m_Value(X), m_APInt(AddC)))) {
      // If we add zeros to every bit below a mask, the add has no effect:
      // (X + AddC) & LowMaskC --> X & LowMaskC
      unsigned Ctlz = C->countl_zero();
      APInt LowMask(APInt::getLowBitsSet(Width, Width - Ctlz));
      if ((*AddC & LowMask).isZero())
        return BinaryOperator::CreateAnd(X, Op1);

      // If we are masking the result of the add down to exactly one bit and
      // the constant we are adding has no bits set below that bit, then the
      // add is flipping a single bit. Example:
      // (X + 4) & 4 --> (X & 4) ^ 4
      if (Op0->hasOneUse() && C->isPowerOf2() && (*AddC & (*C - 1)) == 0) {
        assert((*C & *AddC) != 0 && "Expected common bit");
        Value *NewAnd = Builder.CreateAnd(X, Op1);
        return BinaryOperator::CreateXor(NewAnd, Op1);
      }
    }

    // ((C1 OP zext(X)) & C2) -> zext((C1 OP X) & C2) if C2 fits in the
    // bitwidth of X and OP behaves well when given trunc(C1) and X.
    auto isNarrowableBinOpcode = [](BinaryOperator *B) {
      switch (B->getOpcode()) {
      case Instruction::Xor:
      case Instruction::Or:
      case Instruction::Mul:
      case Instruction::Add:
      case Instruction::Sub:
        return true;
      default:
        return false;
      }
    };
    BinaryOperator *BO;
    if (match(Op0, m_OneUse(m_BinOp(BO))) && isNarrowableBinOpcode(BO)) {
      Instruction::BinaryOps BOpcode = BO->getOpcode();
      Value *X;
      const APInt *C1;
      // TODO: The one-use restrictions could be relaxed a little if the AND
      // is going to be removed.
      // Try to narrow the 'and' and a binop with constant operand:
      // and (bo (zext X), C1), C --> zext (and (bo X, TruncC1), TruncC)
      if (match(BO, m_c_BinOp(m_OneUse(m_ZExt(m_Value(X))), m_APInt(C1))) &&
          C->isIntN(X->getType()->getScalarSizeInBits())) {
        unsigned XWidth = X->getType()->getScalarSizeInBits();
        Constant *TruncC1 = ConstantInt::get(X->getType(), C1->trunc(XWidth));
        Value *BinOp = isa<ZExtInst>(BO->getOperand(0))
                           ? Builder.CreateBinOp(BOpcode, X, TruncC1)
                           : Builder.CreateBinOp(BOpcode, TruncC1, X);
        Constant *TruncC = ConstantInt::get(X->getType(), C->trunc(XWidth));
        Value *And = Builder.CreateAnd(BinOp, TruncC);
        return new ZExtInst(And, Ty);
      }

      // Similar to above: if the mask matches the zext input width, then the
      // 'and' can be eliminated, so we can truncate the other variable op:
      // and (bo (zext X), Y), C --> zext (bo X, (trunc Y))
      if (isa<Instruction>(BO->getOperand(0)) &&
          match(BO->getOperand(0), m_OneUse(m_ZExt(m_Value(X)))) &&
          C->isMask(X->getType()->getScalarSizeInBits())) {
        Y = BO->getOperand(1);
        Value *TrY = Builder.CreateTrunc(Y, X->getType(), Y->getName() + ".tr");
        Value *NewBO =
            Builder.CreateBinOp(BOpcode, X, TrY, BO->getName() + ".narrow");
        return new ZExtInst(NewBO, Ty);
      }
      // and (bo Y, (zext X)), C --> zext (bo (trunc Y), X)
      if (isa<Instruction>(BO->getOperand(1)) &&
          match(BO->getOperand(1), m_OneUse(m_ZExt(m_Value(X)))) &&
          C->isMask(X->getType()->getScalarSizeInBits())) {
        Y = BO->getOperand(0);
        Value *TrY = Builder.CreateTrunc(Y, X->getType(), Y->getName() + ".tr");
        Value *NewBO =
            Builder.CreateBinOp(BOpcode, TrY, X, BO->getName() + ".narrow");
        return new ZExtInst(NewBO, Ty);
      }
    }

    // This is intentionally placed after the narrowing transforms for
    // efficiency (transform directly to the narrow logic op if possible).
    // If the mask is only needed on one incoming arm, push the 'and' op up.
    if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_Value(Y)))) ||
        match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
      APInt NotAndMask(~(*C));
      BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Op0)->getOpcode();
      if (MaskedValueIsZero(X, NotAndMask, 0, &I)) {
        // Not masking anything out for the LHS, move mask to RHS.
        // and ({x}or X, Y), C --> {x}or X, (and Y, C)
        Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked");
        return BinaryOperator::Create(BinOp, X, NewRHS);
      }
      if (!isa<Constant>(Y) && MaskedValueIsZero(Y, NotAndMask, 0, &I)) {
        // Not masking anything out for the RHS, move mask to LHS.
        // and ({x}or X, Y), C --> {x}or (and X, C), Y
        Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked");
        return BinaryOperator::Create(BinOp, NewLHS, Y);
      }
    }

    // When the mask is a power-of-2 constant and op0 is a shifted-power-of-2
    // constant, test if the shift amount equals the offset bit index:
    // (ShiftC << X) & C --> X == (log2(C) - log2(ShiftC)) ? C : 0
    // (ShiftC >> X) & C --> X == (log2(ShiftC) - log2(C)) ? C : 0
    if (C->isPowerOf2() &&
        match(Op0, m_OneUse(m_LogicalShift(m_Power2(ShiftC), m_Value(X))))) {
      int Log2ShiftC = ShiftC->exactLogBase2();
      int Log2C = C->exactLogBase2();
      bool IsShiftLeft =
          cast<BinaryOperator>(Op0)->getOpcode() == Instruction::Shl;
      int BitNum = IsShiftLeft ? Log2C - Log2ShiftC : Log2ShiftC - Log2C;
      assert(BitNum >= 0 && "Expected demanded bits to handle impossible mask");
      Value *Cmp = Builder.CreateICmpEQ(X, ConstantInt::get(Ty, BitNum));
      return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C),
                                ConstantInt::getNullValue(Ty));
    }

    Constant *C1, *C2;
    const APInt *C3 = C;
    if (C3->isPowerOf2()) {
      Constant *Log2C3 = ConstantInt::get(Ty, C3->countr_zero());
      if (match(Op0, m_OneUse(m_LShr(m_Shl(m_ImmConstant(C1), m_Value(X)),
                                     m_ImmConstant(C2)))) &&
          match(C1, m_Power2())) {
        Constant *Log2C1 = ConstantExpr::getExactLogBase2(C1);
        Constant *LshrC = ConstantExpr::getAdd(C2, Log2C3);
        KnownBits KnownLShrc = computeKnownBits(LshrC, 0, nullptr);
        if (KnownLShrc.getMaxValue().ult(Width)) {
          // iff C1,C3 is pow2 and C2 + cttz(C3) < BitWidth:
          // ((C1 << X) >> C2) & C3 -> X == (cttz(C3)+C2-cttz(C1)) ? C3 : 0
          Constant *CmpC = ConstantExpr::getSub(LshrC, Log2C1);
          Value *Cmp = Builder.CreateICmpEQ(X, CmpC);
          return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C3),
                                    ConstantInt::getNullValue(Ty));
        }
      }

      if (match(Op0, m_OneUse(m_Shl(m_LShr(m_ImmConstant(C1), m_Value(X)),
                                    m_ImmConstant(C2)))) &&
          match(C1, m_Power2())) {
        Constant *Log2C1 = ConstantExpr::getExactLogBase2(C1);
        Constant *Cmp =
            ConstantExpr::getCompare(ICmpInst::ICMP_ULT, Log2C3, C2);
        if (Cmp->isZeroValue()) {
          // iff C1,C3 is pow2 and Log2(C3) >= C2:
          // ((C1 >> X) << C2) & C3 -> X == (cttz(C1)+C2-cttz(C3)) ? C3 : 0
          Constant *ShlC = ConstantExpr::getAdd(C2, Log2C1);
          Constant *CmpC = ConstantExpr::getSub(ShlC, Log2C3);
          Value *Cmp = Builder.CreateICmpEQ(X, CmpC);
          return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C3),
                                    ConstantInt::getNullValue(Ty));
        }
      }
    }
  }

  // If we are clearing the sign bit of a floating-point value, convert this to
  // fabs, then cast back to integer.
  //
  // This is a generous interpretation for noimplicitfloat, this is not a true
  // floating-point operation.
  //
  // Assumes any IEEE-represented type has the sign bit in the high bit.
  // TODO: Unify with APInt matcher. This version allows undef unlike m_APInt
  Value *CastOp;
  if (match(Op0, m_BitCast(m_Value(CastOp))) &&
      match(Op1, m_MaxSignedValue()) &&
      !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
          Attribute::NoImplicitFloat)) {
    Type *EltTy = CastOp->getType()->getScalarType();
    if (EltTy->isFloatingPointTy() && EltTy->isIEEE() &&
        EltTy->getPrimitiveSizeInBits() ==
            I.getType()->getScalarType()->getPrimitiveSizeInBits()) {
      Value *FAbs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, CastOp);
      return new BitCastInst(FAbs, I.getType());
    }
  }
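
  // For example (illustrative): for a 32-bit float bitcast to i32,
  //   %i = bitcast float %f to i32
  //   %r = and i32 %i, 2147483647   ; 0x7fffffff clears only the sign bit
  // becomes:
  //   %a = call float @llvm.fabs.f32(float %f)
  //   %r = bitcast float %a to i32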
2480 if (match(&I
, m_And(m_OneUse(m_Shl(m_ZExt(m_Value(X
)), m_Value(Y
))),
2482 match(Y
, m_SpecificInt_ICMP(
2483 ICmpInst::Predicate::ICMP_EQ
,
2484 APInt(Ty
->getScalarSizeInBits(),
2485 Ty
->getScalarSizeInBits() -
2486 X
->getType()->getScalarSizeInBits())))) {
2487 auto *SExt
= Builder
.CreateSExt(X
, Ty
, X
->getName() + ".signext");
2488 auto *SanitizedSignMask
= cast
<Constant
>(Op1
);
2489 // We must be careful with the undef elements of the sign bit mask, however:
2490 // the mask elt can be undef iff the shift amount for that lane was undef,
2491 // otherwise we need to sanitize undef masks to zero.
2492 SanitizedSignMask
= Constant::replaceUndefsWith(
2493 SanitizedSignMask
, ConstantInt::getNullValue(Ty
->getScalarType()));
2495 Constant::mergeUndefsWith(SanitizedSignMask
, cast
<Constant
>(Y
));
2496 return BinaryOperator::CreateAnd(SExt
, SanitizedSignMask
);
2499 if (Instruction
*Z
= narrowMaskedBinOp(I
))
2502 if (I
.getType()->isIntOrIntVectorTy(1)) {
2503 if (auto *SI0
= dyn_cast
<SelectInst
>(Op0
)) {
2505 foldAndOrOfSelectUsingImpliedCond(Op1
, *SI0
, /* IsAnd */ true))
2508 if (auto *SI1
= dyn_cast
<SelectInst
>(Op1
)) {
2510 foldAndOrOfSelectUsingImpliedCond(Op0
, *SI1
, /* IsAnd */ true))
2515 if (Instruction
*FoldedLogic
= foldBinOpIntoSelectOrPhi(I
))
2518 if (Instruction
*DeMorgan
= matchDeMorgansLaws(I
, Builder
))
2523 // A & (A ^ B) --> A & ~B
2524 if (match(Op1
, m_OneUse(m_c_Xor(m_Specific(Op0
), m_Value(B
)))))
2525 return BinaryOperator::CreateAnd(Op0
, Builder
.CreateNot(B
));
2526 // (A ^ B) & A --> A & ~B
2527 if (match(Op0
, m_OneUse(m_c_Xor(m_Specific(Op1
), m_Value(B
)))))
2528 return BinaryOperator::CreateAnd(Op1
, Builder
.CreateNot(B
));
2530 // A & ~(A ^ B) --> A & B
2531 if (match(Op1
, m_Not(m_c_Xor(m_Specific(Op0
), m_Value(B
)))))
2532 return BinaryOperator::CreateAnd(Op0
, B
);
2533 // ~(A ^ B) & A --> A & B
2534 if (match(Op0
, m_Not(m_c_Xor(m_Specific(Op1
), m_Value(B
)))))
2535 return BinaryOperator::CreateAnd(Op1
, B
);
2537 // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
2538 if (match(Op0
, m_Xor(m_Value(A
), m_Value(B
))))
2539 if (match(Op1
, m_Xor(m_Xor(m_Specific(B
), m_Value(C
)), m_Specific(A
))))
2540 if (Op1
->hasOneUse() || isFreeToInvert(C
, C
->hasOneUse()))
2541 return BinaryOperator::CreateAnd(Op0
, Builder
.CreateNot(C
));
2543 // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
2544 if (match(Op0
, m_Xor(m_Xor(m_Value(A
), m_Value(C
)), m_Value(B
))))
2545 if (match(Op1
, m_Xor(m_Specific(B
), m_Specific(A
))))
2546 if (Op0
->hasOneUse() || isFreeToInvert(C
, C
->hasOneUse()))
2547 return BinaryOperator::CreateAnd(Op1
, Builder
.CreateNot(C
));
2549 // (A | B) & (~A ^ B) -> A & B
2550 // (A | B) & (B ^ ~A) -> A & B
2551 // (B | A) & (~A ^ B) -> A & B
2552 // (B | A) & (B ^ ~A) -> A & B
2553 if (match(Op1
, m_c_Xor(m_Not(m_Value(A
)), m_Value(B
))) &&
2554 match(Op0
, m_c_Or(m_Specific(A
), m_Specific(B
))))
2555 return BinaryOperator::CreateAnd(A
, B
);
2557 // (~A ^ B) & (A | B) -> A & B
2558 // (~A ^ B) & (B | A) -> A & B
2559 // (B ^ ~A) & (A | B) -> A & B
2560 // (B ^ ~A) & (B | A) -> A & B
2561 if (match(Op0
, m_c_Xor(m_Not(m_Value(A
)), m_Value(B
))) &&
2562 match(Op1
, m_c_Or(m_Specific(A
), m_Specific(B
))))
2563 return BinaryOperator::CreateAnd(A
, B
);
2565 // (~A | B) & (A ^ B) -> ~A & B
2566 // (~A | B) & (B ^ A) -> ~A & B
2567 // (B | ~A) & (A ^ B) -> ~A & B
2568 // (B | ~A) & (B ^ A) -> ~A & B
2569 if (match(Op0
, m_c_Or(m_Not(m_Value(A
)), m_Value(B
))) &&
2570 match(Op1
, m_c_Xor(m_Specific(A
), m_Specific(B
))))
2571 return BinaryOperator::CreateAnd(Builder
.CreateNot(A
), B
);
2573 // (A ^ B) & (~A | B) -> ~A & B
2574 // (B ^ A) & (~A | B) -> ~A & B
2575 // (A ^ B) & (B | ~A) -> ~A & B
2576 // (B ^ A) & (B | ~A) -> ~A & B
2577 if (match(Op1
, m_c_Or(m_Not(m_Value(A
)), m_Value(B
))) &&
2578 match(Op0
, m_c_Xor(m_Specific(A
), m_Specific(B
))))
2579 return BinaryOperator::CreateAnd(Builder
.CreateNot(A
), B
);
2583 ICmpInst
*LHS
= dyn_cast
<ICmpInst
>(Op0
);
2584 ICmpInst
*RHS
= dyn_cast
<ICmpInst
>(Op1
);
2586 if (Value
*Res
= foldAndOrOfICmps(LHS
, RHS
, I
, /* IsAnd */ true))
2587 return replaceInstUsesWith(I
, Res
);
2589 // TODO: Make this recursive; it's a little tricky because an arbitrary
2590 // number of 'and' instructions might have to be created.
2591 if (LHS
&& match(Op1
, m_OneUse(m_LogicalAnd(m_Value(X
), m_Value(Y
))))) {
2592 bool IsLogical
= isa
<SelectInst
>(Op1
);
2593 // LHS & (X && Y) --> (LHS && X) && Y
2594 if (auto *Cmp
= dyn_cast
<ICmpInst
>(X
))
2596 foldAndOrOfICmps(LHS
, Cmp
, I
, /* IsAnd */ true, IsLogical
))
2597 return replaceInstUsesWith(I
, IsLogical
2598 ? Builder
.CreateLogicalAnd(Res
, Y
)
2599 : Builder
.CreateAnd(Res
, Y
));
2600 // LHS & (X && Y) --> X && (LHS & Y)
2601 if (auto *Cmp
= dyn_cast
<ICmpInst
>(Y
))
2602 if (Value
*Res
= foldAndOrOfICmps(LHS
, Cmp
, I
, /* IsAnd */ true,
2603 /* IsLogical */ false))
2604 return replaceInstUsesWith(I
, IsLogical
2605 ? Builder
.CreateLogicalAnd(X
, Res
)
2606 : Builder
.CreateAnd(X
, Res
));
2608 if (RHS
&& match(Op0
, m_OneUse(m_LogicalAnd(m_Value(X
), m_Value(Y
))))) {
2609 bool IsLogical
= isa
<SelectInst
>(Op0
);
2610 // (X && Y) & RHS --> (X && RHS) && Y
2611 if (auto *Cmp
= dyn_cast
<ICmpInst
>(X
))
2613 foldAndOrOfICmps(Cmp
, RHS
, I
, /* IsAnd */ true, IsLogical
))
2614 return replaceInstUsesWith(I
, IsLogical
2615 ? Builder
.CreateLogicalAnd(Res
, Y
)
2616 : Builder
.CreateAnd(Res
, Y
));
2617 // (X && Y) & RHS --> X && (Y & RHS)
2618 if (auto *Cmp
= dyn_cast
<ICmpInst
>(Y
))
2619 if (Value
*Res
= foldAndOrOfICmps(Cmp
, RHS
, I
, /* IsAnd */ true,
2620 /* IsLogical */ false))
2621 return replaceInstUsesWith(I
, IsLogical
2622 ? Builder
.CreateLogicalAnd(X
, Res
)
2623 : Builder
.CreateAnd(X
, Res
));
2627 if (FCmpInst
*LHS
= dyn_cast
<FCmpInst
>(I
.getOperand(0)))
2628 if (FCmpInst
*RHS
= dyn_cast
<FCmpInst
>(I
.getOperand(1)))
2629 if (Value
*Res
= foldLogicOfFCmps(LHS
, RHS
, /*IsAnd*/ true))
2630 return replaceInstUsesWith(I
, Res
);
2632 if (Instruction
*FoldedFCmps
= reassociateFCmps(I
, Builder
))
2635 if (Instruction
*CastedAnd
= foldCastedBitwiseLogic(I
))
2638 if (Instruction
*Sel
= foldBinopOfSextBoolToSelect(I
))
2641 // and(sext(A), B) / and(B, sext(A)) --> A ? B : 0, where A is i1 or <N x i1>.
2642 // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold
2643 // with binop identity constant. But creating a select with non-constant
2644 // arm may not be reversible due to poison semantics. Is that a good
2645 // canonicalization?
2647 if (match(&I
, m_c_And(m_OneUse(m_SExt(m_Value(A
))), m_Value(B
))) &&
2648 A
->getType()->isIntOrIntVectorTy(1))
2649 return SelectInst::Create(A
, B
, Constant::getNullValue(Ty
));
2651 // Similarly, a 'not' of the bool translates to a swap of the select arms:
2652 // ~sext(A) & B / B & ~sext(A) --> A ? 0 : B
2653 if (match(&I
, m_c_And(m_Not(m_SExt(m_Value(A
))), m_Value(B
))) &&
2654 A
->getType()->isIntOrIntVectorTy(1))
2655 return SelectInst::Create(A
, Constant::getNullValue(Ty
), B
);
2657 // (iN X s>> (N-1)) & Y --> (X s< 0) ? Y : 0 -- with optional sext
2658 if (match(&I
, m_c_And(m_OneUse(m_SExtOrSelf(
2659 m_AShr(m_Value(X
), m_APIntAllowUndef(C
)))),
2661 *C
== X
->getType()->getScalarSizeInBits() - 1) {
2662 Value
*IsNeg
= Builder
.CreateIsNeg(X
, "isneg");
2663 return SelectInst::Create(IsNeg
, Y
, ConstantInt::getNullValue(Ty
));
2665 // If there's a 'not' of the shifted value, swap the select operands:
2666 // ~(iN X s>> (N-1)) & Y --> (X s< 0) ? 0 : Y -- with optional sext
2667 if (match(&I
, m_c_And(m_OneUse(m_SExtOrSelf(
2668 m_Not(m_AShr(m_Value(X
), m_APIntAllowUndef(C
))))),
2670 *C
== X
->getType()->getScalarSizeInBits() - 1) {
2671 Value
*IsNeg
= Builder
.CreateIsNeg(X
, "isneg");
2672 return SelectInst::Create(IsNeg
, ConstantInt::getNullValue(Ty
), Y
);
2675 // (~x) & y --> ~(x | (~y)) iff that gets rid of inversions
2676 if (sinkNotIntoOtherHandOfLogicalOp(I
))
2679 // An and recurrence w/loop invariant step is equivelent to (and start, step)
2680 PHINode
*PN
= nullptr;
2681 Value
*Start
= nullptr, *Step
= nullptr;
2682 if (matchSimpleRecurrence(&I
, PN
, Start
, Step
) && DT
.dominates(Step
, PN
))
2683 return replaceInstUsesWith(I
, Builder
.CreateAnd(Start
, Step
));
2685 if (Instruction
*R
= reassociateForUses(I
, Builder
))
2688 if (Instruction
*Canonicalized
= canonicalizeLogicFirst(I
, Builder
))
2689 return Canonicalized
;
2691 if (Instruction
*Folded
= foldLogicOfIsFPClass(I
, Op0
, Op1
))
2694 if (Instruction
*Res
= foldBinOpOfDisplacedShifts(I
))

Instruction *InstCombinerImpl::matchBSwapOrBitReverse(Instruction &I,
                                                      bool MatchBSwaps,
                                                      bool MatchBitReversals) {
  SmallVector<Instruction *, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, MatchBSwaps, MatchBitReversals,
                                       Insts))
    return nullptr;
  Instruction *LastInst = Insts.pop_back_val();
  LastInst->removeFromParent();

  for (auto *Inst : Insts)
    Worklist.push(Inst);
  return LastInst;
}

/// Match UB-safe variants of the funnel shift intrinsic.
static Instruction *matchFunnelShift(Instruction &Or, InstCombinerImpl &IC) {
  // TODO: Can we reduce the code duplication between this and the related
  // rotate matching code under visitSelect and visitTrunc?
  unsigned Width = Or.getType()->getScalarSizeInBits();

  // First, find an or'd pair of opposite shifts:
  // or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1)
  BinaryOperator *Or0, *Or1;
  if (!match(Or.getOperand(0), m_BinOp(Or0)) ||
      !match(Or.getOperand(1), m_BinOp(Or1)))
    return nullptr;

  Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal0), m_Value(ShAmt0)))) ||
      !match(Or1, m_OneUse(m_LogicalShift(m_Value(ShVal1), m_Value(ShAmt1)))) ||
      Or0->getOpcode() == Or1->getOpcode())
    return nullptr;

  // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)).
  if (Or0->getOpcode() == BinaryOperator::LShr) {
    std::swap(Or0, Or1);
    std::swap(ShVal0, ShVal1);
    std::swap(ShAmt0, ShAmt1);
  }
  assert(Or0->getOpcode() == BinaryOperator::Shl &&
         Or1->getOpcode() == BinaryOperator::LShr &&
         "Illegal or(shift,shift) pair");

  // Match the shift amount operands for a funnel shift pattern. This always
  // matches a subtraction on the R operand.
  auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * {
    // Check for constant shift amounts that sum to the bitwidth.
    const APInt *LI, *RI;
    if (match(L, m_APIntAllowUndef(LI)) && match(R, m_APIntAllowUndef(RI)))
      if (LI->ult(Width) && RI->ult(Width) && (*LI + *RI) == Width)
        return ConstantInt::get(L->getType(), *LI);

    Constant *LC, *RC;
    if (match(L, m_Constant(LC)) && match(R, m_Constant(RC)) &&
        match(L, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(Width, Width))) &&
        match(R, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(Width, Width))) &&
        match(ConstantExpr::getAdd(LC, RC), m_SpecificIntAllowUndef(Width)))
      return ConstantExpr::mergeUndefsWith(LC, RC);

    // (shl ShVal, X) | (lshr ShVal, (Width - x)) iff X < Width.
    // We limit this to X < Width in case the backend re-expands the intrinsic,
    // and has to reintroduce a shift modulo operation (InstCombine might remove
    // it after this fold). This still doesn't guarantee that the final codegen
    // will match this original pattern.
    if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L))))) {
      KnownBits KnownL = IC.computeKnownBits(L, /*Depth*/ 0, &Or);
      return KnownL.getMaxValue().ult(Width) ? L : nullptr;
    }

    // For non-constant cases, the following patterns currently only work for
    // rotation patterns.
    // TODO: Add general funnel-shift compatible patterns.
    if (ShVal0 != ShVal1)
      return nullptr;

    // For non-constant cases we don't support non-pow2 shift masks.
    // TODO: Is it worth matching urem as well?
    if (!isPowerOf2_32(Width))
      return nullptr;

    // The shift amount may be masked with negation:
    // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
    Value *X;
    unsigned Mask = Width - 1;
    if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
        match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
      return X;

    // Similar to above, but the shift amount may be extended after masking,
    // so return the extended value as the parameter for the intrinsic.
    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
        match(R, m_And(m_Neg(m_ZExt(m_And(m_Specific(X), m_SpecificInt(Mask)))),
                       m_SpecificInt(Mask))))
      return L;

    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
        match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
      return L;

    return nullptr;
  };

  Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width);
  bool IsFshl = true; // Sub on LSHR.
  if (!ShAmt) {
    ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width);
    IsFshl = false; // Sub on SHL.
  }
  if (!ShAmt)
    return nullptr;

  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  Function *F = Intrinsic::getDeclaration(Or.getModule(), IID, Or.getType());
  return CallInst::Create(F, {ShVal0, ShVal1, ShAmt});
}
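
// Rotate example (illustrative), with %amt known to be less than 32:
//   %shl = shl i32 %x, %amt
//   %sub = sub i32 32, %amt
//   %shr = lshr i32 %x, %sub
//   %r   = or i32 %shl, %shr
// becomes:
//   %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %amt)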

/// Attempt to combine or(zext(x), shl(zext(y), bw/2)) concat packing patterns.
static Instruction *matchOrConcat(Instruction &Or,
                                  InstCombiner::BuilderTy &Builder) {
  assert(Or.getOpcode() == Instruction::Or && "bswap requires an 'or'");
  Value *Op0 = Or.getOperand(0), *Op1 = Or.getOperand(1);
  Type *Ty = Or.getType();

  unsigned Width = Ty->getScalarSizeInBits();
  if ((Width & 1) != 0)
    return nullptr;
  unsigned HalfWidth = Width / 2;

  // Canonicalize zext (lower half) to LHS.
  if (!isa<ZExtInst>(Op0))
    std::swap(Op0, Op1);

  // Find lower/upper half.
  Value *LowerSrc, *ShlVal, *UpperSrc;
  const APInt *C;
  if (!match(Op0, m_OneUse(m_ZExt(m_Value(LowerSrc)))) ||
      !match(Op1, m_OneUse(m_Shl(m_Value(ShlVal), m_APInt(C)))) ||
      !match(ShlVal, m_OneUse(m_ZExt(m_Value(UpperSrc)))))
    return nullptr;
  if (*C != HalfWidth || LowerSrc->getType() != UpperSrc->getType() ||
      LowerSrc->getType()->getScalarSizeInBits() != HalfWidth)
    return nullptr;

  auto ConcatIntrinsicCalls = [&](Intrinsic::ID id, Value *Lo, Value *Hi) {
    Value *NewLower = Builder.CreateZExt(Lo, Ty);
    Value *NewUpper = Builder.CreateZExt(Hi, Ty);
    NewUpper = Builder.CreateShl(NewUpper, HalfWidth);
    Value *BinOp = Builder.CreateOr(NewLower, NewUpper);
    Function *F = Intrinsic::getDeclaration(Or.getModule(), id, Ty);
    return Builder.CreateCall(F, BinOp);
  };

  // BSWAP: Push the concat down, swapping the lower/upper sources.
  // concat(bswap(x),bswap(y)) -> bswap(concat(x,y))
  Value *LowerBSwap, *UpperBSwap;
  if (match(LowerSrc, m_BSwap(m_Value(LowerBSwap))) &&
      match(UpperSrc, m_BSwap(m_Value(UpperBSwap))))
    return ConcatIntrinsicCalls(Intrinsic::bswap, UpperBSwap, LowerBSwap);

  // BITREVERSE: Push the concat down, swapping the lower/upper sources.
  // concat(bitreverse(x),bitreverse(y)) -> bitreverse(concat(x,y))
  Value *LowerBRev, *UpperBRev;
  if (match(LowerSrc, m_BitReverse(m_Value(LowerBRev))) &&
      match(UpperSrc, m_BitReverse(m_Value(UpperBRev))))
    return ConcatIntrinsicCalls(Intrinsic::bitreverse, UpperBRev, LowerBRev);

  return nullptr;
}
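
// For example (illustrative, schematic IR), packing two bswapped i16 halves
// into an i32: with %bx = bswap i16 %x and %by = bswap i16 %y,
//   %r = or i32 (zext i16 %bx), (shl i32 (zext i16 %by), 16)
// is rebuilt with the half sources swapped under one wide bswap:
//   %n = or i32 (zext i16 %y), (shl i32 (zext i16 %x), 16)
//   %r = call i32 @llvm.bswap.i32(i32 %n)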

/// If all elements of two constant vectors are 0/-1 and inverses, return true.
static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
  unsigned NumElts = cast<FixedVectorType>(C1->getType())->getNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *EltC1 = C1->getAggregateElement(i);
    Constant *EltC2 = C2->getAggregateElement(i);
    if (!EltC1 || !EltC2)
      return false;

    // One element must be all ones, and the other must be all zeros.
    if (!((match(EltC1, m_Zero()) && match(EltC2, m_AllOnes())) ||
          (match(EltC2, m_Zero()) && match(EltC1, m_AllOnes()))))
      return false;
  }
  return true;
}

/// We have an expression of the form (A & C) | (B & D). If A is a scalar or
/// vector composed of all-zeros or all-ones values and is the bitwise 'not' of
/// B, it can be used as the condition operand of a select instruction.
/// We will detect (A & C) | ~(B | D) when the flag ABIsTheSame is enabled.
Value *InstCombinerImpl::getSelectCondition(Value *A, Value *B,
                                            bool ABIsTheSame) {
  // We may have peeked through bitcasts in the caller.
  // Exit immediately if we don't have (vector) integer types.
  Type *Ty = A->getType();
  if (!Ty->isIntOrIntVectorTy() || !B->getType()->isIntOrIntVectorTy())
    return nullptr;

  // If A is the 'not' operand of B and has enough signbits, we have our answer.
  if (ABIsTheSame ? (A == B) : match(B, m_Not(m_Specific(A)))) {
    // If these are scalars or vectors of i1, A can be used directly.
    if (Ty->isIntOrIntVectorTy(1))
      return A;

    // If we look through a vector bitcast, the caller will bitcast the operands
    // to match the condition's number of bits (N x i1).
    // To make this poison-safe, disallow bitcast from wide element to narrow
    // element. That could allow poison in lanes where it was not present in the
    // original code.
    A = peekThroughBitcast(A);
    if (A->getType()->isIntOrIntVectorTy()) {
      unsigned NumSignBits = ComputeNumSignBits(A);
      if (NumSignBits == A->getType()->getScalarSizeInBits() &&
          NumSignBits <= Ty->getScalarSizeInBits())
        return Builder.CreateTrunc(A, CmpInst::makeCmpResultType(A->getType()));
    }
    return nullptr;
  }

  // TODO: add support for sext and constant case
  if (ABIsTheSame)
    return nullptr;

  // If both operands are constants, see if the constants are inverse bitmasks.
  Constant *AConst, *BConst;
  if (match(A, m_Constant(AConst)) && match(B, m_Constant(BConst)))
    if (AConst == ConstantExpr::getNot(BConst) &&
        ComputeNumSignBits(A) == Ty->getScalarSizeInBits())
      return Builder.CreateZExtOrTrunc(A, CmpInst::makeCmpResultType(Ty));

  // Look for more complex patterns. The 'not' op may be hidden behind various
  // casts. Look through sexts and bitcasts to find the booleans.
  Value *Cond;
  Value *NotB;
  if (match(A, m_SExt(m_Value(Cond))) &&
      Cond->getType()->isIntOrIntVectorTy(1)) {
    // A = sext i1 Cond; B = sext (not (i1 Cond))
    if (match(B, m_SExt(m_Not(m_Specific(Cond)))))
      return Cond;

    // A = sext i1 Cond; B = not ({bitcast} (sext (i1 Cond)))
    // TODO: The one-use checks are unnecessary or misplaced. If the caller
    //       checked for uses on logic ops/casts, that should be enough to
    //       make this transform worthwhile.
    if (match(B, m_OneUse(m_Not(m_Value(NotB))))) {
      NotB = peekThroughBitcast(NotB, true);
      if (match(NotB, m_SExt(m_Specific(Cond))))
        return Cond;
    }
  }

  // All scalar (and most vector) possibilities should be handled now.
  // Try more matches that only apply to non-splat constant vectors.
  if (!Ty->isVectorTy())
    return nullptr;

  // If both operands are xor'd with constants using the same sexted boolean
  // operand, see if the constants are inverse bitmasks.
  // TODO: Use ConstantExpr::getNot()?
  if (match(A, (m_Xor(m_SExt(m_Value(Cond)), m_Constant(AConst)))) &&
      match(B, (m_Xor(m_SExt(m_Specific(Cond)), m_Constant(BConst)))) &&
      Cond->getType()->isIntOrIntVectorTy(1) &&
      areInverseVectorBitmasks(AConst, BConst)) {
    AConst = ConstantExpr::getTrunc(AConst, CmpInst::makeCmpResultType(Ty));
    return Builder.CreateXor(Cond, AConst);
  }
  return nullptr;
}

/// We have an expression of the form (A & C) | (B & D). Try to simplify this
/// to "A' ? C : D", where A' is a boolean or vector of booleans.
/// When InvertFalseVal is set to true, we try to match the pattern
/// where we have peeked through a 'not' op and A and B are the same:
/// (A & C) | ~(A | D) --> (A & C) | (~A & ~D) --> A' ? C : ~D
Value *InstCombinerImpl::matchSelectFromAndOr(Value *A, Value *C, Value *B,
                                              Value *D, bool InvertFalseVal) {
  // The potential condition of the select may be bitcasted. In that case, look
  // through its bitcast and the corresponding bitcast of the 'not' condition.
  Type *OrigType = A->getType();
  A = peekThroughBitcast(A, true);
  B = peekThroughBitcast(B, true);
  if (Value *Cond = getSelectCondition(A, B, InvertFalseVal)) {
    // ((bc Cond) & C) | ((bc ~Cond) & D) --> bc (select Cond, (bc C), (bc D))
    // If this is a vector, we may need to cast to match the condition's length.
    // The bitcasts will either all exist or all not exist. The builder will
    // not create unnecessary casts if the types already match.
    Type *SelTy = A->getType();
    if (auto *VecTy = dyn_cast<VectorType>(Cond->getType())) {
      // For a fixed or scalable vector get N from <{vscale x} N x iM>
      unsigned Elts = VecTy->getElementCount().getKnownMinValue();
      // For a fixed or scalable vector, get the size in bits of N x iM; for a
      // scalar this is just M.
      unsigned SelEltSize = SelTy->getPrimitiveSizeInBits().getKnownMinValue();
      Type *EltTy = Builder.getIntNTy(SelEltSize / Elts);
      SelTy = VectorType::get(EltTy, VecTy->getElementCount());
    }
    Value *BitcastC = Builder.CreateBitCast(C, SelTy);
    if (InvertFalseVal)
      D = Builder.CreateNot(D);
    Value *BitcastD = Builder.CreateBitCast(D, SelTy);
    Value *Select = Builder.CreateSelect(Cond, BitcastC, BitcastD);
    return Builder.CreateBitCast(Select, OrigType);
  }

  return nullptr;
}

// (icmp eq X, C) | (icmp ult Other, (X - C)) -> (icmp ule Other, (X - (C + 1)))
// (icmp ne X, C) & (icmp uge Other, (X - C)) -> (icmp ugt Other, (X - (C + 1)))
static Value *foldAndOrOfICmpEqConstantAndICmp(ICmpInst *LHS, ICmpInst *RHS,
                                               bool IsAnd, bool IsLogical,
                                               IRBuilderBase &Builder) {
  Value *LHS0 = LHS->getOperand(0);
  Value *RHS0 = RHS->getOperand(0);
  Value *RHS1 = RHS->getOperand(1);

  ICmpInst::Predicate LPred =
      IsAnd ? LHS->getInversePredicate() : LHS->getPredicate();
  ICmpInst::Predicate RPred =
      IsAnd ? RHS->getInversePredicate() : RHS->getPredicate();

  const APInt *CInt;
  if (LPred != ICmpInst::ICMP_EQ ||
      !match(LHS->getOperand(1), m_APIntAllowUndef(CInt)) ||
      !LHS0->getType()->isIntOrIntVectorTy() ||
      !(LHS->hasOneUse() || RHS->hasOneUse()))
    return nullptr;

  auto MatchRHSOp = [LHS0, CInt](const Value *RHSOp) {
    return match(RHSOp,
                 m_Add(m_Specific(LHS0), m_SpecificIntAllowUndef(-*CInt))) ||
           (CInt->isZero() && RHSOp == LHS0);
  };

  Value *Other;
  if (RPred == ICmpInst::ICMP_ULT && MatchRHSOp(RHS1))
    Other = RHS0;
  else if (RPred == ICmpInst::ICMP_UGT && MatchRHSOp(RHS0))
    Other = RHS1;
  else
    return nullptr;

  if (IsLogical)
    Other = Builder.CreateFreeze(Other);

  return Builder.CreateICmp(
      IsAnd ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE,
      Builder.CreateSub(LHS0, ConstantInt::get(LHS0->getType(), *CInt + 1)),
      Other);
}

/// Fold (icmp)&(icmp) or (icmp)|(icmp) if possible.
/// If IsLogical is true, then the and/or is in select form and the transform
/// must be poison-safe.
Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
                                          Instruction &I, bool IsAnd,
                                          bool IsLogical) {
  const SimplifyQuery Q = SQ.getWithInstruction(&I);

  // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
  // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
  // if K1 and K2 are a one-bit mask.
  if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, &I, IsAnd, IsLogical))
    return V;

  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
  Value *LHS1 = LHS->getOperand(1), *RHS1 = RHS->getOperand(1);
  const APInt *LHSC = nullptr, *RHSC = nullptr;
  match(LHS1, m_APInt(LHSC));
  match(RHS1, m_APInt(RHSC));

  // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
  // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
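  // E.g. (icmp slt A, B) | (icmp eq A, B) --> icmp sle A, B: each predicate
  // is encoded as a bitmask of the lt/eq/gt outcomes, so or'ing (and'ing) the
  // codes yields the code of the combined predicate.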
  if (predicatesFoldable(PredL, PredR)) {
    if (LHS0 == RHS1 && LHS1 == RHS0) {
      PredL = ICmpInst::getSwappedPredicate(PredL);
      std::swap(LHS0, LHS1);
    }
    if (LHS0 == RHS0 && LHS1 == RHS1) {
      unsigned Code = IsAnd ? getICmpCode(PredL) & getICmpCode(PredR)
                            : getICmpCode(PredL) | getICmpCode(PredR);
      bool IsSigned = LHS->isSigned() || RHS->isSigned();
      return getNewICmpValue(Code, IsSigned, LHS0, LHS1, Builder);
    }
  }

  // handle (roughly):
  // (icmp ne (A & B), C) | (icmp ne (A & D), E)
  // (icmp eq (A & B), C) & (icmp eq (A & D), E)
  if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, IsAnd, IsLogical, Builder))
    return V;

  if (Value *V = foldAndOrOfICmpEqConstantAndICmp(LHS, RHS, IsAnd, IsLogical,
                                                  Builder))
    return V;
  // We can treat logical like bitwise here, because both operands are used on
  // the LHS, and as such poison from both will propagate.
  if (Value *V = foldAndOrOfICmpEqConstantAndICmp(RHS, LHS, IsAnd,
                                                  /*IsLogical*/ false, Builder))
    return V;

  if (Value *V =
          foldAndOrOfICmpsWithConstEq(LHS, RHS, IsAnd, IsLogical, Builder, Q))
    return V;
  // We can convert this case to bitwise and, because both operands are used
  // on the LHS, and as such poison from both will propagate.
  if (Value *V = foldAndOrOfICmpsWithConstEq(RHS, LHS, IsAnd,
                                             /*IsLogical*/ false, Builder, Q))
    return V;

  if (Value *V = foldIsPowerOf2OrZero(LHS, RHS, IsAnd, Builder))
    return V;
  if (Value *V = foldIsPowerOf2OrZero(RHS, LHS, IsAnd, Builder))
    return V;

  // TODO: One of these directions is fine with logical and/or, the other could
  // be supported by inserting freeze.
  if (!IsLogical) {
    // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
    // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
    if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/!IsAnd))
      return V;

    // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n
    // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
    if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/!IsAnd))
      return V;
  }

  // TODO: Add conjugated or fold, check whether it is safe for logical and/or.
  if (IsAnd && !IsLogical)
    if (Value *V = foldSignedTruncationCheck(LHS, RHS, I, Builder))
      return V;

  if (Value *V = foldIsPowerOf2(LHS, RHS, IsAnd, Builder))
    return V;

  if (Value *V = foldPowerOf2AndShiftedMask(LHS, RHS, IsAnd, Builder))
    return V;

  // TODO: Verify whether this is safe for logical and/or.
  if (!IsLogical) {
    if (Value *X = foldUnsignedUnderflowCheck(LHS, RHS, IsAnd, Q, Builder))
      return X;
    if (Value *X = foldUnsignedUnderflowCheck(RHS, LHS, IsAnd, Q, Builder))
      return X;
  }

  if (Value *X = foldEqOfParts(LHS, RHS, IsAnd))
    return X;

  // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
  // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
  // TODO: Remove this and below when foldLogOpOfMaskedICmps can handle undefs.
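  // E.g. (A != 0) | (B != 0) --> (A | B) != 0, since A | B is non-zero iff
  // at least one of A and B is non-zero.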
  if (!IsLogical && PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
      PredL == PredR && match(LHS1, m_ZeroInt()) && match(RHS1, m_ZeroInt()) &&
      LHS0->getType() == RHS0->getType()) {
    Value *NewOr = Builder.CreateOr(LHS0, RHS0);
    return Builder.CreateICmp(PredL, NewOr,
                              Constant::getNullValue(NewOr->getType()));
  }

  // (icmp ne A, -1) | (icmp ne B, -1) --> (icmp ne (A&B), -1)
  // (icmp eq A, -1) & (icmp eq B, -1) --> (icmp eq (A&B), -1)
  if (!IsLogical && PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
      PredL == PredR && match(LHS1, m_AllOnes()) && match(RHS1, m_AllOnes()) &&
      LHS0->getType() == RHS0->getType()) {
    Value *NewAnd = Builder.CreateAnd(LHS0, RHS0);
    return Builder.CreateICmp(PredL, NewAnd,
                              Constant::getAllOnesValue(LHS0->getType()));
  }

  // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
  if (!LHSC || !RHSC)
    return nullptr;

  // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
  // (trunc x) != C1 | (and x, CA) != C2 -> (and x, CA|CMAX) != C1|C2
  // where CMAX is the all-ones value for the truncated type,
  // iff the lower bits of C2 and CA are zero.
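  // E.g. (illustrative, x: i32 truncated to i8):
  //   (trunc x) == 7 & (x & 0xFF00) == 0x1200 --> (x & 0xFFFF) == 0x1207,
  // with CMAX = 0xFF covering the bits the trunc compares.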
  if (PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
      PredL == PredR && LHS->hasOneUse() && RHS->hasOneUse()) {
    Value *V;
    const APInt *AndC, *SmallC = nullptr, *BigC = nullptr;

    // (trunc x) == C1 & (and x, CA) == C2
    // (and x, CA) == C2 & (trunc x) == C1
    if (match(RHS0, m_Trunc(m_Value(V))) &&
        match(LHS0, m_And(m_Specific(V), m_APInt(AndC)))) {
      SmallC = RHSC;
      BigC = LHSC;
    } else if (match(LHS0, m_Trunc(m_Value(V))) &&
               match(RHS0, m_And(m_Specific(V), m_APInt(AndC)))) {
      SmallC = LHSC;
      BigC = RHSC;
    }

    if (SmallC && BigC) {
      unsigned BigBitSize = BigC->getBitWidth();
      unsigned SmallBitSize = SmallC->getBitWidth();

      // Check that the low bits are zero.
      APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
      if ((Low & *AndC).isZero() && (Low & *BigC).isZero()) {
        Value *NewAnd = Builder.CreateAnd(V, Low | *AndC);
        APInt N = SmallC->zext(BigBitSize) | *BigC;
        Value *NewVal = ConstantInt::get(NewAnd->getType(), N);
        return Builder.CreateICmp(PredL, NewAnd, NewVal);
      }
    }
  }

  // Match the naive pattern (and its inverted form) for checking whether two
  // values share the same sign. An example of the pattern:
  // (icmp slt (X & Y), 0) | (icmp sgt (X | Y), -1) -> (icmp sgt (X ^ Y), -1)
  // Inverted form (example):
  // (icmp slt (X | Y), 0) & (icmp sgt (X & Y), -1) -> (icmp slt (X ^ Y), 0)
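  // The fold is sound because "both negative or both non-negative" is exactly
  // sign(X) == sign(Y), i.e. (X ^ Y) s> -1; the inverted form is its negation.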
  bool TrueIfSignedL, TrueIfSignedR;
  if (isSignBitCheck(PredL, *LHSC, TrueIfSignedL) &&
      isSignBitCheck(PredR, *RHSC, TrueIfSignedR) &&
      (RHS->hasOneUse() || LHS->hasOneUse())) {
    Value *X, *Y;
    if (IsAnd) {
      if ((TrueIfSignedL && !TrueIfSignedR &&
           match(LHS0, m_Or(m_Value(X), m_Value(Y))) &&
           match(RHS0, m_c_And(m_Specific(X), m_Specific(Y)))) ||
          (!TrueIfSignedL && TrueIfSignedR &&
           match(LHS0, m_And(m_Value(X), m_Value(Y))) &&
           match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y))))) {
        Value *NewXor = Builder.CreateXor(X, Y);
        return Builder.CreateIsNeg(NewXor);
      }
    } else {
      if ((TrueIfSignedL && !TrueIfSignedR &&
           match(LHS0, m_And(m_Value(X), m_Value(Y))) &&
           match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y)))) ||
          (!TrueIfSignedL && TrueIfSignedR &&
           match(LHS0, m_Or(m_Value(X), m_Value(Y))) &&
           match(RHS0, m_c_And(m_Specific(X), m_Specific(Y))))) {
        Value *NewXor = Builder.CreateXor(X, Y);
        return Builder.CreateIsNotNeg(NewXor);
      }
    }
  }

  return foldAndOrOfICmpsUsingRanges(LHS, RHS, IsAnd);
}

// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
  if (Value *V = simplifyOrInst(I.getOperand(0), I.getOperand(1),
                                SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // Do this before using distributive laws to catch simple and/or/not patterns.
  if (Instruction *Xor = foldOrToXor(I, Builder))
    return Xor;

  if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
    return X;

  // (A & B) | (A & C) --> A & (B | C), etc.
  if (Value *V = foldUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyBSwap(I, Builder))
    return replaceInstUsesWith(I, V);

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  if (Ty->isIntOrIntVectorTy(1)) {
    if (auto *SI0 = dyn_cast<SelectInst>(Op0)) {
      if (auto *R =
              foldAndOrOfSelectUsingImpliedCond(Op1, *SI0, /* IsAnd */ false))
        return R;
    }
    if (auto *SI1 = dyn_cast<SelectInst>(Op1)) {
      if (auto *R =
              foldAndOrOfSelectUsingImpliedCond(Op0, *SI1, /* IsAnd */ false))
        return R;
    }
  }

  if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
    return FoldedLogic;

  if (Instruction *BitOp = matchBSwapOrBitReverse(I, /*MatchBSwaps*/ true,
                                                  /*MatchBitReversals*/ true))
    return BitOp;

  if (Instruction *Funnel = matchFunnelShift(I, *this))
    return Funnel;

  if (Instruction *Concat = matchOrConcat(I, Builder))
    return replaceInstUsesWith(I, Concat);

  if (Instruction *R = foldBinOpShiftWithShift(I))
    return R;

  Value *X, *Y;
  const APInt *CV;
  if (match(&I, m_c_Or(m_OneUse(m_Xor(m_Value(X), m_APInt(CV))), m_Value(Y))) &&
      !CV->isAllOnes() && MaskedValueIsZero(Y, *CV, 0, &I)) {
    // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0
    // The check for a 'not' op is for efficiency (if Y is known zero --> ~X).
    Value *Or = Builder.CreateOr(X, Y);
    return BinaryOperator::CreateXor(Or, ConstantInt::get(Ty, *CV));
  }

  // If the operands have no common bits set:
  // or (mul X, Y), X --> add (mul X, Y), X --> mul X, (Y + 1)
  if (match(&I,
            m_c_Or(m_OneUse(m_Mul(m_Value(X), m_Value(Y))), m_Deferred(X))) &&
      haveNoCommonBitsSet(Op0, Op1, DL)) {
    Value *IncrementY = Builder.CreateAdd(Y, ConstantInt::get(Ty, 1));
    return BinaryOperator::CreateMul(X, IncrementY);
  }

  // X | (X ^ Y) --> X | Y (4 commuted patterns)
  if (match(&I, m_c_Or(m_Value(X), m_c_Xor(m_Deferred(X), m_Value(Y)))))
    return BinaryOperator::CreateOr(X, Y);

  // (A & C) | (B & D)
  Value *A, *B, *C, *D;
  if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
      match(Op1, m_And(m_Value(B), m_Value(D)))) {

    // (A & C0) | (B & C1)
    const APInt *C0, *C1;
    if (match(C, m_APInt(C0)) && match(D, m_APInt(C1))) {
      Value *X;
      if (*C0 == ~*C1) {
        // ((X | B) & MaskC) | (B & ~MaskC) -> (X & MaskC) | B
        if (match(A, m_c_Or(m_Value(X), m_Specific(B))))
          return BinaryOperator::CreateOr(Builder.CreateAnd(X, *C0), B);
        // (A & MaskC) | ((X | A) & ~MaskC) -> (X & ~MaskC) | A
        if (match(B, m_c_Or(m_Specific(A), m_Value(X))))
          return BinaryOperator::CreateOr(Builder.CreateAnd(X, *C1), A);

        // ((X ^ B) & MaskC) | (B & ~MaskC) -> (X & MaskC) ^ B
        if (match(A, m_c_Xor(m_Value(X), m_Specific(B))))
          return BinaryOperator::CreateXor(Builder.CreateAnd(X, *C0), B);
        // (A & MaskC) | ((X ^ A) & ~MaskC) -> (X & ~MaskC) ^ A
        if (match(B, m_c_Xor(m_Specific(A), m_Value(X))))
          return BinaryOperator::CreateXor(Builder.CreateAnd(X, *C1), A);
      }

      if ((*C0 & *C1).isZero()) {
        // ((X | B) & C0) | (B & C1) --> (X | B) & (C0 | C1)
        // iff (C0 & C1) == 0 and (X & ~C0) == 0
        if (match(A, m_c_Or(m_Value(X), m_Specific(B))) &&
            MaskedValueIsZero(X, ~*C0, 0, &I)) {
          Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
          return BinaryOperator::CreateAnd(A, C01);
        }
        // (A & C0) | ((X | A) & C1) --> (X | A) & (C0 | C1)
        // iff (C0 & C1) == 0 and (X & ~C1) == 0
        if (match(B, m_c_Or(m_Value(X), m_Specific(A))) &&
            MaskedValueIsZero(X, ~*C1, 0, &I)) {
          Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
          return BinaryOperator::CreateAnd(B, C01);
        }

        // ((X | C2) & C0) | ((X | C3) & C1) --> (X | C2 | C3) & (C0 | C1)
        // iff (C0 & C1) == 0 and (C2 & ~C0) == 0 and (C3 & ~C1) == 0.
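        // E.g. (illustrative, i8): C0 = 0x0F, C1 = 0xF0, C2 = 0x03, C3 = 0x30:
        // ((X | 0x03) & 0x0F) | ((X | 0x30) & 0xF0) --> (X | 0x33) & 0xFF.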
        const APInt *C2, *C3;
        if (match(A, m_Or(m_Value(X), m_APInt(C2))) &&
            match(B, m_Or(m_Specific(X), m_APInt(C3))) &&
            (*C2 & ~*C0).isZero() && (*C3 & ~*C1).isZero()) {
          Value *Or = Builder.CreateOr(X, *C2 | *C3, "bitfield");
          Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
          return BinaryOperator::CreateAnd(Or, C01);
        }
      }
    }

    // Don't try to form a select if it's unlikely that we'll get rid of at
    // least one of the operands. A select is generally more expensive than the
    // 'or' that it is replacing.
    if (Op0->hasOneUse() || Op1->hasOneUse()) {
      // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants.
      if (Value *V = matchSelectFromAndOr(A, C, B, D))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(A, C, D, B))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(C, A, B, D))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(C, A, D, B))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(B, D, A, C))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(B, D, C, A))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(D, B, A, C))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(D, B, C, A))
        return replaceInstUsesWith(I, V);
    }
  }

  if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
      match(Op1, m_Not(m_Or(m_Value(B), m_Value(D)))) &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    // (Cond & C) | ~(Cond | D) -> Cond ? C : ~D
    if (Value *V = matchSelectFromAndOr(A, C, B, D, true))
      return replaceInstUsesWith(I, V);
    if (Value *V = matchSelectFromAndOr(A, C, D, B, true))
      return replaceInstUsesWith(I, V);
    if (Value *V = matchSelectFromAndOr(C, A, B, D, true))
      return replaceInstUsesWith(I, V);
    if (Value *V = matchSelectFromAndOr(C, A, D, B, true))
      return replaceInstUsesWith(I, V);
  }

  // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C
  if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
    if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
      return BinaryOperator::CreateOr(Op0, C);

  // ((A ^ C) ^ B) | (B ^ A) -> (B ^ A) | C
  if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
    if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
      return BinaryOperator::CreateOr(Op1, C);

  // ((A & B) ^ C) | B -> C | B
  if (match(Op0, m_c_Xor(m_c_And(m_Value(A), m_Specific(Op1)), m_Value(C))))
    return BinaryOperator::CreateOr(C, Op1);

  // B | ((A & B) ^ C) -> B | C
  if (match(Op1, m_c_Xor(m_c_And(m_Value(A), m_Specific(Op0)), m_Value(C))))
    return BinaryOperator::CreateOr(Op0, C);

  // ((B | C) & A) | B -> B | (A & C)
  if (match(Op0, m_And(m_Or(m_Specific(Op1), m_Value(C)), m_Value(A))))
    return BinaryOperator::CreateOr(Op1, Builder.CreateAnd(A, C));

  if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder))
    return DeMorgan;

  // Canonicalize xor to the RHS.
  bool SwappedForXor = false;
  if (match(Op0, m_Xor(m_Value(), m_Value()))) {
    std::swap(Op0, Op1);
    SwappedForXor = true;
  }

  if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
    // (A | ?) | (A ^ B) --> (A | ?) | B
    // (B | ?) | (A ^ B) --> (B | ?) | A
    if (match(Op0, m_c_Or(m_Specific(A), m_Value())))
      return BinaryOperator::CreateOr(Op0, B);
    if (match(Op0, m_c_Or(m_Specific(B), m_Value())))
      return BinaryOperator::CreateOr(Op0, A);

    // (A & B) | (A ^ B) --> A | B
    // (B & A) | (A ^ B) --> A | B
    if (match(Op0, m_And(m_Specific(A), m_Specific(B))) ||
        match(Op0, m_And(m_Specific(B), m_Specific(A))))
      return BinaryOperator::CreateOr(A, B);

    // ~A | (A ^ B) --> ~(A & B)
    // ~B | (A ^ B) --> ~(A & B)
    // The swap above should always make Op0 the 'not'.
    if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
        (match(Op0, m_Not(m_Specific(A))) || match(Op0, m_Not(m_Specific(B)))))
      return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));

    // Same as above, but peek through an 'and' to the common operand:
    // ~(A & ?) | (A ^ B) --> ~((A & ?) & B)
    // ~(B & ?) | (A ^ B) --> ~((B & ?) & A)
    Instruction *And;
    if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
        match(Op0, m_Not(m_CombineAnd(m_Instruction(And),
                                      m_c_And(m_Specific(A), m_Value())))))
      return BinaryOperator::CreateNot(Builder.CreateAnd(And, B));
    if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
        match(Op0, m_Not(m_CombineAnd(m_Instruction(And),
                                      m_c_And(m_Specific(B), m_Value())))))
      return BinaryOperator::CreateNot(Builder.CreateAnd(And, A));

    // (~A | C) | (A ^ B) --> ~(A & B) | C
    // (~B | C) | (A ^ B) --> ~(A & B) | C
    if (Op0->hasOneUse() && Op1->hasOneUse() &&
        (match(Op0, m_c_Or(m_Not(m_Specific(A)), m_Value(C))) ||
         match(Op0, m_c_Or(m_Not(m_Specific(B)), m_Value(C))))) {
      Value *Nand = Builder.CreateNot(Builder.CreateAnd(A, B), "nand");
      return BinaryOperator::CreateOr(Nand, C);
    }

    // A | (~A ^ B) --> ~B | A
    // B | (A ^ ~B) --> ~A | B
    if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) {
      Value *NotB = Builder.CreateNot(B, B->getName() + ".not");
      return BinaryOperator::CreateOr(NotB, Op0);
    }
    if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) {
      Value *NotA = Builder.CreateNot(A, A->getName() + ".not");
      return BinaryOperator::CreateOr(NotA, Op0);
    }
  }

  // A | ~(A | B) -> A | ~B
  // A | ~(A ^ B) -> A | ~B
  if (match(Op1, m_Not(m_Value(A))))
    if (BinaryOperator *B = dyn_cast<BinaryOperator>(A))
      if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) &&
          Op1->hasOneUse() && (B->getOpcode() == Instruction::Or ||
                               B->getOpcode() == Instruction::Xor)) {
        Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1)
                                               : B->getOperand(0);
        Value *Not = Builder.CreateNot(NotOp, NotOp->getName() + ".not");
        return BinaryOperator::CreateOr(Not, Op0);
      }

  if (SwappedForXor)
    std::swap(Op0, Op1);

  {
    ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
    ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
    if (LHS && RHS)
      if (Value *Res = foldAndOrOfICmps(LHS, RHS, I, /* IsAnd */ false))
        return replaceInstUsesWith(I, Res);

    // TODO: Make this recursive; it's a little tricky because an arbitrary
    // number of 'or' instructions might have to be created.
    Value *X, *Y;
    if (LHS && match(Op1, m_OneUse(m_LogicalOr(m_Value(X), m_Value(Y))))) {
      bool IsLogical = isa<SelectInst>(Op1);
      // LHS | (X || Y) --> (LHS || X) || Y
      if (auto *Cmp = dyn_cast<ICmpInst>(X))
        if (Value *Res =
                foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ false, IsLogical))
          return replaceInstUsesWith(I, IsLogical
                                            ? Builder.CreateLogicalOr(Res, Y)
                                            : Builder.CreateOr(Res, Y));
      // LHS | (X || Y) --> X || (LHS | Y)
      if (auto *Cmp = dyn_cast<ICmpInst>(Y))
        if (Value *Res = foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ false,
                                          /* IsLogical */ false))
          return replaceInstUsesWith(I, IsLogical
                                            ? Builder.CreateLogicalOr(X, Res)
                                            : Builder.CreateOr(X, Res));
    }
    if (RHS && match(Op0, m_OneUse(m_LogicalOr(m_Value(X), m_Value(Y))))) {
      bool IsLogical = isa<SelectInst>(Op0);
      // (X || Y) | RHS --> (X || RHS) || Y
      if (auto *Cmp = dyn_cast<ICmpInst>(X))
        if (Value *Res =
                foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ false, IsLogical))
          return replaceInstUsesWith(I, IsLogical
                                            ? Builder.CreateLogicalOr(Res, Y)
                                            : Builder.CreateOr(Res, Y));
      // (X || Y) | RHS --> X || (Y | RHS)
      if (auto *Cmp = dyn_cast<ICmpInst>(Y))
        if (Value *Res = foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ false,
                                          /* IsLogical */ false))
          return replaceInstUsesWith(I, IsLogical
                                            ? Builder.CreateLogicalOr(X, Res)
                                            : Builder.CreateOr(X, Res));
    }
  }

  if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
    if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
      if (Value *Res = foldLogicOfFCmps(LHS, RHS, /*IsAnd*/ false))
        return replaceInstUsesWith(I, Res);

  if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder))
    return FoldedFCmps;

  if (Instruction *CastedOr = foldCastedBitwiseLogic(I))
    return CastedOr;

  if (Instruction *Sel = foldBinopOfSextBoolToSelect(I))
    return Sel;

  // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>.
  // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold
  //       with binop identity constant. But creating a select with a
  //       non-constant arm may not be reversible due to poison semantics.
  //       Is that a good canonicalization?
  if (match(&I, m_c_Or(m_OneUse(m_SExt(m_Value(A))), m_Value(B))) &&
      A->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(A, ConstantInt::getAllOnesValue(Ty), B);

  // Note: If we've gotten to the point of visiting the outer OR, then the
  // inner one couldn't be simplified. If it was a constant, then it won't
  // be simplified by a later pass either, so we try swapping the inner/outer
  // ORs in the hopes that we'll be able to simplify it this way.
  // (X|C) | V --> (X|V) | C
  ConstantInt *CI;
  if (Op0->hasOneUse() && !match(Op1, m_ConstantInt()) &&
      match(Op0, m_Or(m_Value(A), m_ConstantInt(CI)))) {
    Value *Inner = Builder.CreateOr(A, Op1);
    Inner->takeName(Op0);
    return BinaryOperator::CreateOr(Inner, CI);
  }

  // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D))
  // Since this OR statement hasn't been optimized further yet, we hope
  // that this transformation will allow the new ORs to be optimized.
  {
    Value *X = nullptr, *Y = nullptr;
    if (Op0->hasOneUse() && Op1->hasOneUse() &&
        match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) &&
        match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) {
      Value *orTrue = Builder.CreateOr(A, C);
      Value *orFalse = Builder.CreateOr(B, D);
      return SelectInst::Create(X, orTrue, orFalse);
    }
  }

  // or(ashr(subNSW(Y, X), ScalarSizeInBits(Y) - 1), X) --> X s> Y ? -1 : X.
  {
    Value *X, *Y;
    if (match(&I, m_c_Or(m_OneUse(m_AShr(
                             m_NSWSub(m_Value(Y), m_Value(X)),
                             m_SpecificInt(Ty->getScalarSizeInBits() - 1))),
                         m_Deferred(X)))) {
      Value *NewICmpInst = Builder.CreateICmpSGT(X, Y);
      Value *AllOnes = ConstantInt::getAllOnesValue(Ty);
      return SelectInst::Create(NewICmpInst, AllOnes, X);
    }
  }

  // ((A & B) ^ A) | ((A & B) ^ B) -> A ^ B
  // (A ^ (A & B)) | (B ^ (A & B)) -> A ^ B
  // ((A & B) ^ B) | ((A & B) ^ A) -> A ^ B
  // (B ^ (A & B)) | (A ^ (A & B)) -> A ^ B
  const auto TryXorOpt = [&](Value *Lhs, Value *Rhs) -> Instruction * {
    if (match(Lhs, m_OneUse(m_c_Xor(m_And(m_Value(A), m_Value(B)),
                                    m_Deferred(A)))) &&
        match(Rhs, m_OneUse(m_c_Xor(m_And(m_Specific(A), m_Specific(B)),
                                    m_Specific(B))))) {
      return BinaryOperator::CreateXor(A, B);
    }
    return nullptr;
  };
  if (Instruction *Result = TryXorOpt(Op0, Op1))
    return Result;
  if (Instruction *Result = TryXorOpt(Op1, Op0))
    return Result;

  if (Instruction *V =
          canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
    return V;

  CmpInst::Predicate Pred;
  Value *Mul, *Ov, *MulIsNotZero, *UMulWithOv;
  // Check if the OR weakens the overflow condition for umul.with.overflow by
  // treating any non-zero result as overflow. In that case, the combined
  // condition holds iff both umul.with.overflow operands are != 0, because
  // with non-zero operands the result can only be 0 when the multiplication
  // overflows.
  if (match(&I,
            m_c_Or(m_CombineAnd(m_ExtractValue<1>(m_Value(UMulWithOv)),
                                m_Value(Ov)),
                   m_CombineAnd(m_ICmp(Pred,
                                       m_CombineAnd(m_ExtractValue<0>(
                                                        m_Deferred(UMulWithOv)),
                                                    m_Value(Mul)),
                                       m_ZeroInt()),
                                m_Value(MulIsNotZero)))) &&
      (Ov->hasOneUse() || (MulIsNotZero->hasOneUse() && Mul->hasOneUse())) &&
      Pred == CmpInst::ICMP_NE) {
    Value *A, *B;
    if (match(UMulWithOv, m_Intrinsic<Intrinsic::umul_with_overflow>(
                              m_Value(A), m_Value(B)))) {
      Value *NotNullA = Builder.CreateIsNotNull(A);
      Value *NotNullB = Builder.CreateIsNotNull(B);
      return BinaryOperator::CreateAnd(NotNullA, NotNullB);
    }
  }

  // (~x) | y --> ~(x & (~y)) iff that gets rid of inversions
  if (sinkNotIntoOtherHandOfLogicalOp(I))
    return &I;

  // Improve "get low bit mask up to and including bit X" pattern:
  // (1 << X) | ((1 << X) + -1) --> -1 l>> (bitwidth(x) - 1 - X)
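  // E.g. (illustrative, i8) with X = 3: (1 << 3) | 7 == 0x0F, and
  // 0xFF l>> (7 - 3) == 0x0F as well.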
  if (match(&I, m_c_Or(m_Add(m_Shl(m_One(), m_Value(X)), m_AllOnes()),
                       m_Shl(m_One(), m_Deferred(X)))) &&
      match(&I, m_c_Or(m_OneUse(m_Value()), m_Value()))) {
    Value *Sub = Builder.CreateSub(
        ConstantInt::get(Ty, Ty->getScalarSizeInBits() - 1), X);
    return BinaryOperator::CreateLShr(Constant::getAllOnesValue(Ty), Sub);
  }

  // An 'or' recurrence with a loop-invariant step is equivalent to
  // (or start, step).
  PHINode *PN = nullptr;
  Value *Start = nullptr, *Step = nullptr;
  if (matchSimpleRecurrence(&I, PN, Start, Step) && DT.dominates(Step, PN))
    return replaceInstUsesWith(I, Builder.CreateOr(Start, Step));

  // (A & B) | (C | D) or (C | D) | (A & B)
  // Can be combined if C or D is of type (A/B & X).
  if (match(&I, m_c_Or(m_OneUse(m_And(m_Value(A), m_Value(B))),
                       m_OneUse(m_Or(m_Value(C), m_Value(D)))))) {
    // (A & B) | (C | ?) -> C | (? | (A & B))
    // (C | ?) | (A & B) -> C | (? | (A & B))
    // (and the commuted variants of the inner 'and')
    if (match(D, m_OneUse(m_c_And(m_Specific(A), m_Value()))) ||
        match(D, m_OneUse(m_c_And(m_Specific(B), m_Value()))))
      return BinaryOperator::CreateOr(
          C, Builder.CreateOr(D, Builder.CreateAnd(A, B)));
    // (A & B) | (? | D) -> (? | (A & B)) | D
    // (? | D) | (A & B) -> (? | (A & B)) | D
    // (and the commuted variants of the inner 'and')
    if (match(C, m_OneUse(m_c_And(m_Specific(A), m_Value()))) ||
        match(C, m_OneUse(m_c_And(m_Specific(B), m_Value()))))
      return BinaryOperator::CreateOr(
          Builder.CreateOr(C, Builder.CreateAnd(A, B)), D);
  }

  if (Instruction *R = reassociateForUses(I, Builder))
    return R;

  if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
    return Canonicalized;

  if (Instruction *Folded = foldLogicOfIsFPClass(I, Op0, Op1))
    return Folded;

  if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
    return Res;

  // If we are setting the sign bit of a floating-point value, convert
  // this to fneg(fabs), then cast back to integer.
  //
  // If the result isn't immediately cast back to a float, this will increase
  // the number of instructions. This is still probably a better canonical form
  // as it enables FP value tracking.
  //
  // Assumes any IEEE-represented type has the sign bit in the high bit.
  //
  // This is a generous interpretation of noimplicitfloat; this is not a true
  // floating-point operation.
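  //
  // E.g. (illustrative, float): (bitcast f to i32) | 0x80000000 is the bit
  // pattern of -fabs(f), since the 'or' only forces the sign bit.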
  Value *CastOp;
  if (match(Op0, m_BitCast(m_Value(CastOp))) && match(Op1, m_SignMask()) &&
      !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
          Attribute::NoImplicitFloat)) {
    Type *EltTy = CastOp->getType()->getScalarType();
    if (EltTy->isFloatingPointTy() && EltTy->isIEEE() &&
        EltTy->getPrimitiveSizeInBits() ==
            I.getType()->getScalarType()->getPrimitiveSizeInBits()) {
      Value *FAbs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, CastOp);
      Value *FNegFAbs = Builder.CreateFNeg(FAbs);
      return new BitCastInst(FNegFAbs, I.getType());
    }
  }

  return nullptr;
}

/// A ^ B can be specified using other logic ops in a variety of patterns. We
/// can fold these early and efficiently by morphing an existing instruction.
static Instruction *foldXorToXor(BinaryOperator &I,
                                 InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::Xor);
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *A, *B;

  // There are 4 commuted variants for each of the basic patterns.

  // (A & B) ^ (A | B) -> A ^ B
  // (A & B) ^ (B | A) -> A ^ B
  // (A | B) ^ (A & B) -> A ^ B
  // (A | B) ^ (B & A) -> A ^ B
  if (match(&I, m_c_Xor(m_And(m_Value(A), m_Value(B)),
                        m_c_Or(m_Deferred(A), m_Deferred(B)))))
    return BinaryOperator::CreateXor(A, B);

  // (A | ~B) ^ (~A | B) -> A ^ B
  // (~B | A) ^ (~A | B) -> A ^ B
  // (~A | B) ^ (A | ~B) -> A ^ B
  // (B | ~A) ^ (A | ~B) -> A ^ B
  if (match(&I, m_Xor(m_c_Or(m_Value(A), m_Not(m_Value(B))),
                      m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B)))))
    return BinaryOperator::CreateXor(A, B);

  // (A & ~B) ^ (~A & B) -> A ^ B
  // (~B & A) ^ (~A & B) -> A ^ B
  // (~A & B) ^ (A & ~B) -> A ^ B
  // (B & ~A) ^ (A & ~B) -> A ^ B
  if (match(&I, m_Xor(m_c_And(m_Value(A), m_Not(m_Value(B))),
                      m_c_And(m_Not(m_Deferred(A)), m_Deferred(B)))))
    return BinaryOperator::CreateXor(A, B);

  // For the remaining cases we need to get rid of one of the operands.
  if (!Op0->hasOneUse() && !Op1->hasOneUse())
    return nullptr;

  // (A | B) ^ ~(A & B) -> ~(A ^ B)
  // (A | B) ^ ~(B & A) -> ~(A ^ B)
  // (A & B) ^ ~(A | B) -> ~(A ^ B)
  // (A & B) ^ ~(B | A) -> ~(A ^ B)
  // Complexity sorting ensures the not will be on the right side.
  if ((match(Op0, m_Or(m_Value(A), m_Value(B))) &&
       match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B))))) ||
      (match(Op0, m_And(m_Value(A), m_Value(B))) &&
       match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))))
    return BinaryOperator::CreateNot(Builder.CreateXor(A, B));

  return nullptr;
}

Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
                                        BinaryOperator &I) {
  assert(I.getOpcode() == Instruction::Xor && I.getOperand(0) == LHS &&
         I.getOperand(1) == RHS && "Should be 'xor' with these operands");

  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
  Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
  if (predicatesFoldable(PredL, PredR)) {
    if (LHS0 == RHS1 && LHS1 == RHS0) {
      std::swap(LHS0, LHS1);
      PredL = ICmpInst::getSwappedPredicate(PredL);
    }
    if (LHS0 == RHS0 && LHS1 == RHS1) {
      // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
      unsigned Code = getICmpCode(PredL) ^ getICmpCode(PredR);
      bool IsSigned = LHS->isSigned() || RHS->isSigned();
      return getNewICmpValue(Code, IsSigned, LHS0, LHS1, Builder);
    }
  }

  // TODO: This can be generalized to compares of non-signbits using
  // decomposeBitTestICmp(). It could be enhanced more by using (something like)
  // foldLogOpOfMaskedICmps().
  const APInt *LC, *RC;
  if (match(LHS1, m_APInt(LC)) && match(RHS1, m_APInt(RC)) &&
      LHS0->getType() == RHS0->getType() &&
      LHS0->getType()->isIntOrIntVectorTy() &&
      (LHS->hasOneUse() || RHS->hasOneUse())) {
    // Convert xor of signbit tests to signbit test of xor'd values:
    // (X > -1) ^ (Y > -1) --> (X ^ Y) < 0
    // (X <  0) ^ (Y <  0) --> (X ^ Y) < 0
    // (X > -1) ^ (Y <  0) --> (X ^ Y) > -1
    // (X <  0) ^ (Y > -1) --> (X ^ Y) > -1
    bool TrueIfSignedL, TrueIfSignedR;
    if (isSignBitCheck(PredL, *LC, TrueIfSignedL) &&
        isSignBitCheck(PredR, *RC, TrueIfSignedR)) {
      Value *XorLR = Builder.CreateXor(LHS0, RHS0);
      return TrueIfSignedL == TrueIfSignedR ? Builder.CreateIsNeg(XorLR)
                                            : Builder.CreateIsNotNeg(XorLR);
    }

    // (X > C) ^ (X < C + 2) --> X != C + 1
    // (X < C + 2) ^ (X > C) --> X != C + 1
    // For correctness, the fold must reject the case where C is non-negative
    // but C + 2 is negative (i.e. C + 1 would overflow); such compares are
    // handled by other patterns anyway.
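    // E.g. (illustrative, i8): C = 126 gives C + 2 == -128 after wrap, and
    // (X > 126) ^ (X < -128) is X == 127, not X != 127, so the fold would
    // be wrong there.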
    const APInt *C1, *C2;
    if ((PredL == CmpInst::ICMP_SGT && match(LHS1, m_APInt(C1)) &&
         PredR == CmpInst::ICMP_SLT && match(RHS1, m_APInt(C2))) ||
        (PredL == CmpInst::ICMP_SLT && match(LHS1, m_APInt(C2)) &&
         PredR == CmpInst::ICMP_SGT && match(RHS1, m_APInt(C1))))
      if (LHS0 == RHS0 && *C1 + 2 == *C2 &&
          (C1->isNegative() || C2->isNonNegative()))
        return Builder.CreateICmpNE(
            LHS0, ConstantInt::get(LHS0->getType(), *C1 + 1));
  }

  // Instead of trying to imitate the folds for and/or, decompose this 'xor'
  // into those logic ops. That is, try to turn this into an and-of-icmps
  // because we have many folds for that pattern.
  //
  // This is based on a truth table definition of xor:
  // X ^ Y --> (X | Y) & !(X & Y)
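  // E.g. (X < 5) ^ (X < 10) decomposes to (X < 10) & !(X < 5), i.e.
  // 5 <= X < 10, since (X < 5) | (X < 10) simplifies to X < 10 and
  // (X < 5) & (X < 10) simplifies to X < 5.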
  if (Value *OrICmp = simplifyBinOp(Instruction::Or, LHS, RHS, SQ)) {
    // TODO: If OrICmp is true, then the definition of xor simplifies to !(X&Y).
    // TODO: If OrICmp is false, the whole thing is false (InstSimplify?).
    if (Value *AndICmp = simplifyBinOp(Instruction::And, LHS, RHS, SQ)) {
      // TODO: Independently handle cases where the 'and' side is a constant.
      ICmpInst *X = nullptr, *Y = nullptr;
      if (OrICmp == LHS && AndICmp == RHS) {
        // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS --> X & !Y
        X = LHS;
        Y = RHS;
      }
      if (OrICmp == RHS && AndICmp == LHS) {
        // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS --> !Y & X
        X = RHS;
        Y = LHS;
      }

      if (X && Y && (Y->hasOneUse() || canFreelyInvertAllUsersOf(Y, &I))) {
        // Invert the predicate of 'Y', thus inverting its output.
        Y->setPredicate(Y->getInversePredicate());
        // So, are there other uses of Y?
        if (!Y->hasOneUse()) {
          // We need to adapt other uses of Y though. Get a value that matches
          // the original value of Y before inversion. While this increases
          // immediate instruction count, we have just ensured that all the
          // users are freely-invertible, so that 'not' *will* get folded away.
          BuilderTy::InsertPointGuard Guard(Builder);
          // Set insertion point to right after the Y.
          Builder.SetInsertPoint(Y->getParent(), ++(Y->getIterator()));
          Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
          // Replace all uses of Y (excluding the one in NotY!) with NotY.
          Worklist.pushUsersToWorkList(*Y);
          Y->replaceUsesWithIf(NotY,
                               [NotY](Use &U) { return U.getUser() != NotY; });
        }
        // All done.
        return Builder.CreateAnd(LHS, RHS);
      }
    }
  }

  return nullptr;
}

/// If we have a masked merge, in the canonical form of
/// (assuming that A only has one use):
///   ((x ^ y) & M) ^ y
/// with A = ((x ^ y) & M) and D = (x ^ y):
/// * If M is inverted:
///     ((x ^ y) & ~M) ^ y
///   we can canonicalize by swapping the final xor operand
///   to eliminate the 'not' of the mask:
///     ((x ^ y) & M) ^ x
/// * If M is a constant, and D has one use, we transform to 'and' / 'or' ops
///   because that shortens the dependency chain and improves analysis:
///     (x & M) | (y & ~M)
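/// E.g. (illustrative, i8) with M = 0xF0: ((x ^ y) & 0xF0) ^ y takes the high
/// nibble from x and the low nibble from y, which the unfolded form
/// (x & 0xF0) | (y & 0x0F) makes explicit.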
static Instruction *visitMaskedMerge(BinaryOperator &I,
                                     InstCombiner::BuilderTy &Builder) {
  Value *B, *X, *D;
  Value *M;
  if (!match(&I, m_c_Xor(m_Value(B),
                         m_OneUse(m_c_And(
                             m_CombineAnd(m_c_Xor(m_Deferred(B), m_Value(X)),
                                          m_Value(D)),
                             m_Value(M))))))
    return nullptr;

  Value *NotM;
  if (match(M, m_Not(m_Value(NotM)))) {
    // De-invert the mask and swap the value in B part.
    Value *NewA = Builder.CreateAnd(D, NotM);
    return BinaryOperator::CreateXor(NewA, X);
  }

  Constant *C;
  if (D->hasOneUse() && match(M, m_Constant(C))) {
    // Propagating undef is unsafe. Clamp undef elements to -1.
    Type *EltTy = C->getType()->getScalarType();
    C = Constant::replaceUndefsWith(C, ConstantInt::getAllOnesValue(EltTy));
    // Unfold.
    Value *LHS = Builder.CreateAnd(X, C);
    Value *NotC = Builder.CreateNot(C);
    Value *RHS = Builder.CreateAnd(B, NotC);
    return BinaryOperator::CreateOr(LHS, RHS);
  }

  return nullptr;
}

static Instruction *sinkNotIntoXor(BinaryOperator &I, Value *X, Value *Y,
                                   InstCombiner::BuilderTy &Builder) {
  // We only want to do the transform if it is free to do.
  if (InstCombiner::isFreeToInvert(X, X->hasOneUse())) {
    // Ok, good.
  } else if (InstCombiner::isFreeToInvert(Y, Y->hasOneUse())) {
    std::swap(X, Y);
  } else
    return nullptr;

  Value *NotX = Builder.CreateNot(X, X->getName() + ".not");
  return BinaryOperator::CreateXor(NotX, Y, I.getName() + ".demorgan");
}

static Instruction *foldNotXor(BinaryOperator &I,
                               InstCombiner::BuilderTy &Builder) {
  Value *X, *Y;
  // FIXME: one-use check is not needed in general, but currently we are unable
  // to fold 'not' into 'icmp', if that 'icmp' has multiple uses. (D35182)
  if (!match(&I, m_Not(m_OneUse(m_Xor(m_Value(X), m_Value(Y))))))
    return nullptr;

  if (Instruction *NewXor = sinkNotIntoXor(I, X, Y, Builder))
    return NewXor;

  auto hasCommonOperand = [](Value *A, Value *B, Value *C, Value *D) {
    return A == C || A == D || B == C || B == D;
  };

  Value *A, *B, *C, *D;
  // Canonicalize ~((A & B) ^ (A | ?)) -> (A & B) | ~(A | ?)
  // 4 commuted variants
  if (match(X, m_And(m_Value(A), m_Value(B))) &&
      match(Y, m_Or(m_Value(C), m_Value(D))) && hasCommonOperand(A, B, C, D)) {
    Value *NotY = Builder.CreateNot(Y);
    return BinaryOperator::CreateOr(X, NotY);
  }

  // Canonicalize ~((A | ?) ^ (A & B)) -> (A & B) | ~(A | ?)
  // 4 commuted variants
  if (match(Y, m_And(m_Value(A), m_Value(B))) &&
      match(X, m_Or(m_Value(C), m_Value(D))) && hasCommonOperand(A, B, C, D)) {
    Value *NotX = Builder.CreateNot(X);
    return BinaryOperator::CreateOr(Y, NotX);
  }

  return nullptr;
}

/// Canonicalize a shifty way to code absolute value to the more common pattern
/// that uses negation and select.
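/// E.g. (illustrative, i32): S = ashr A, 31 is 0 for non-negative A and -1
/// for negative A, so (A + S) ^ S is A when A >= 0 and ~(A - 1) == -A when
/// A < 0, i.e. abs(A).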
static Instruction *canonicalizeAbs(BinaryOperator &Xor,
                                    InstCombiner::BuilderTy &Builder) {
  assert(Xor.getOpcode() == Instruction::Xor && "Expected an xor instruction.");

  // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1.
  // We're relying on the fact that we only do this transform when the shift
  // has exactly 2 uses and the add has exactly 1 use (otherwise, we might
  // increase the instruction count).
  Value *Op0 = Xor.getOperand(0), *Op1 = Xor.getOperand(1);
  if (Op0->hasNUses(2))
    std::swap(Op0, Op1);

  Type *Ty = Xor.getType();
  Value *A;
  const APInt *ShAmt;
  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
      Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
      match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) {
    // Op1 = ashr i32 A, 31   ; smear the sign bit
    // xor (add A, Op1), Op1  ; add -1 and flip bits if negative
    // --> (A < 0) ? -A : A
    Value *IsNeg = Builder.CreateIsNeg(A);
    // Copy the nuw/nsw flags from the add to the negate.
    auto *Add = cast<BinaryOperator>(Op0);
    Value *NegA = Builder.CreateNeg(A, "", Add->hasNoUnsignedWrap(),
                                    Add->hasNoSignedWrap());
    return SelectInst::Create(IsNeg, NegA, A);
  }
  return nullptr;
}

static bool canFreelyInvert(InstCombiner &IC, Value *Op,
                            Instruction *IgnoredUser) {
  auto *I = dyn_cast<Instruction>(Op);
  return I && IC.isFreeToInvert(I, /*WillInvertAllUses=*/true) &&
         InstCombiner::canFreelyInvertAllUsersOf(I, IgnoredUser);
}

static Value *freelyInvert(InstCombinerImpl &IC, Value *Op,
                           Instruction *IgnoredUser) {
  auto *I = cast<Instruction>(Op);
  IC.Builder.SetInsertPoint(&*I->getInsertionPointAfterDef());
  Value *NotOp = IC.Builder.CreateNot(Op, Op->getName() + ".not");
  Op->replaceUsesWithIf(NotOp,
                        [NotOp](Use &U) { return U.getUser() != NotOp; });
  IC.freelyInvertAllUsersOf(NotOp, IgnoredUser);
  return NotOp;
}

// Transform
//   z = ~(x &/| y)
// into:
//   z = ((~x) |/& (~y))
// iff both x and y are free to invert and all uses of z can be freely updated.
bool InstCombinerImpl::sinkNotIntoLogicalOp(Instruction &I) {
  Value *Op0, *Op1;
  if (!match(&I, m_LogicalOp(m_Value(Op0), m_Value(Op1))))
    return false;

  // If this logic op has not been simplified yet, just bail out and let that
  // happen first. Otherwise, the code below may wrongly invert.
  if (Op0 == Op1)
    return false;

  Instruction::BinaryOps NewOpc =
      match(&I, m_LogicalAnd()) ? Instruction::Or : Instruction::And;
  bool IsBinaryOp = isa<BinaryOperator>(I);

  // Can our users be adapted?
  if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
    return false;

  // And can the operands be adapted?
  if (!canFreelyInvert(*this, Op0, &I) || !canFreelyInvert(*this, Op1, &I))
    return false;

  Op0 = freelyInvert(*this, Op0, &I);
  Op1 = freelyInvert(*this, Op1, &I);

  Builder.SetInsertPoint(I.getInsertionPointAfterDef());
  Value *NewLogicOp;
  if (IsBinaryOp)
    NewLogicOp = Builder.CreateBinOp(NewOpc, Op0, Op1, I.getName() + ".not");
  else
    NewLogicOp =
        Builder.CreateLogicalOp(NewOpc, Op0, Op1, I.getName() + ".not");

  replaceInstUsesWith(I, NewLogicOp);
  // We cannot just create an outer 'not': it would most likely be immediately
  // folded back, reconstructing our initial pattern and causing an infinite
  // combine loop, so immediately manually fold it away.
  freelyInvertAllUsersOf(NewLogicOp);
  return true;
}

// Transform
//   z = (~x) &/| y
// into:
//   z = ~(x |/& (~y))
// iff y is free to invert and all uses of z can be freely updated.
bool InstCombinerImpl::sinkNotIntoOtherHandOfLogicalOp(Instruction &I) {
  Value *Op0, *Op1;
  if (!match(&I, m_LogicalOp(m_Value(Op0), m_Value(Op1))))
    return false;
  Instruction::BinaryOps NewOpc =
      match(&I, m_LogicalAnd()) ? Instruction::Or : Instruction::And;
  bool IsBinaryOp = isa<BinaryOperator>(I);

  Value *NotOp0 = nullptr;
  Value *NotOp1 = nullptr;
  Value **OpToInvert = nullptr;
  if (match(Op0, m_Not(m_Value(NotOp0))) && canFreelyInvert(*this, Op1, &I)) {
    Op0 = NotOp0;
    OpToInvert = &Op1;
  } else if (match(Op1, m_Not(m_Value(NotOp1))) &&
             canFreelyInvert(*this, Op0, &I)) {
    Op1 = NotOp1;
    OpToInvert = &Op0;
  } else
    return false;

  // And can our users be adapted?
  if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
    return false;

  *OpToInvert = freelyInvert(*this, *OpToInvert, &I);

  Builder.SetInsertPoint(&*I.getInsertionPointAfterDef());
  Value *NewBinOp;
  if (IsBinaryOp)
    NewBinOp = Builder.CreateBinOp(NewOpc, Op0, Op1, I.getName() + ".not");
  else
    NewBinOp = Builder.CreateLogicalOp(NewOpc, Op0, Op1, I.getName() + ".not");
  replaceInstUsesWith(I, NewBinOp);
  // We cannot just create an outer 'not': it would most likely be immediately
  // folded back, reconstructing our initial pattern and causing an infinite
  // combine loop, so immediately manually fold it away.
  freelyInvertAllUsersOf(NewBinOp);
  return true;
}

Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
  Value *NotOp;
  if (!match(&I, m_Not(m_Value(NotOp))))
    return nullptr;

  // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand.
  // We must eliminate the and/or (one-use) for these transforms to not
  // increase the instruction count.
  //
  // ~(~X & Y) --> (X | ~Y)
  // ~(Y & ~X) --> (X | ~Y)
  //
  // Note: The logical matches do not check for the commuted patterns because
  //       those are handled via SimplifySelectsFeedingBinaryOp().
  Type *Ty = I.getType();
  Value *X, *Y;
  if (match(NotOp, m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y))))) {
    Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
    return BinaryOperator::CreateOr(X, NotY);
  }
  if (match(NotOp, m_OneUse(m_LogicalAnd(m_Not(m_Value(X)), m_Value(Y))))) {
    Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
    return SelectInst::Create(X, ConstantInt::getTrue(Ty), NotY);
  }

  // ~(~X | Y) --> (X & ~Y)
  // ~(Y | ~X) --> (X & ~Y)
  if (match(NotOp, m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y))))) {
    Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
    return BinaryOperator::CreateAnd(X, NotY);
  }
  if (match(NotOp, m_OneUse(m_LogicalOr(m_Not(m_Value(X)), m_Value(Y))))) {
    Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
    return SelectInst::Create(X, NotY, ConstantInt::getFalse(Ty));
  }

  // Is this a 'not' (~) fed by a binary operator?
  BinaryOperator *NotVal;
  if (match(NotOp, m_BinOp(NotVal))) {
    // ~((-X) | Y) --> (X - 1) & (~Y)
    if (match(NotVal,
              m_OneUse(m_c_Or(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))) {
      Value *DecX = Builder.CreateAdd(X, ConstantInt::getAllOnesValue(Ty));
      Value *NotY = Builder.CreateNot(Y);
      return BinaryOperator::CreateAnd(DecX, NotY);
    }

    // ~(~X >>s Y) --> (X >>s Y)
    if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y))))
      return BinaryOperator::CreateAShr(X, Y);

    // Bit-hack form of a signbit test for iN type:
    // ~(X >>s (N - 1)) --> sext i1 (X > -1) to iN
    unsigned FullShift = Ty->getScalarSizeInBits() - 1;
    if (match(NotVal, m_OneUse(m_AShr(m_Value(X), m_SpecificInt(FullShift))))) {
      Value *IsNotNeg = Builder.CreateIsNotNeg(X, "isnotneg");
      return new SExtInst(IsNotNeg, Ty);
    }

    // If we are inverting a right-shifted constant, we may be able to
    // eliminate the 'not' by inverting the constant and using the opposite
    // shift type. Canonicalization rules ensure that only a negative constant
    // uses 'ashr', but we must check that in case that transform has not
    // fired yet.

    // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits)
    Constant *C;
    if (match(NotVal, m_AShr(m_Constant(C), m_Value(Y))) &&
        match(C, m_Negative())) {
      // We matched a negative constant, so propagating undef is unsafe.
      // Clamp undef elements to -1.
      Type *EltTy = Ty->getScalarType();
      C = Constant::replaceUndefsWith(C, ConstantInt::getAllOnesValue(EltTy));
      return BinaryOperator::CreateLShr(ConstantExpr::getNot(C), Y);
    }

    // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits)
    if (match(NotVal, m_LShr(m_Constant(C), m_Value(Y))) &&
        match(C, m_NonNegative())) {
      // We matched a non-negative constant, so propagating undef is unsafe.
      // Clamp undef elements to 0.
      Type *EltTy = Ty->getScalarType();
      C = Constant::replaceUndefsWith(C, ConstantInt::getNullValue(EltTy));
      return BinaryOperator::CreateAShr(ConstantExpr::getNot(C), Y);
    }

    // ~(X + C) --> ~C - X
    if (match(NotVal, m_c_Add(m_Value(X), m_ImmConstant(C))))
      return BinaryOperator::CreateSub(ConstantExpr::getNot(C), X);

    // ~(X - Y) --> ~X + Y
    // FIXME: is it really beneficial to sink the `not` here?
    if (match(NotVal, m_Sub(m_Value(X), m_Value(Y))))
      if (isa<Constant>(X) || NotVal->hasOneUse())
        return BinaryOperator::CreateAdd(Builder.CreateNot(X), Y);

    // ~(~X + Y) --> X - Y
    if (match(NotVal, m_c_Add(m_Not(m_Value(X)), m_Value(Y))))
      return BinaryOperator::CreateWithCopiedFlags(Instruction::Sub, X, Y,
                                                   NotVal);
  }

  // not (cmp A, B) --> !cmp A, B
  CmpInst::Predicate Pred;
  if (match(NotOp, m_Cmp(Pred, m_Value(), m_Value())) &&
      (NotOp->hasOneUse() ||
       InstCombiner::canFreelyInvertAllUsersOf(cast<Instruction>(NotOp),
                                               /*IgnoredUser=*/nullptr))) {
    cast<CmpInst>(NotOp)->setPredicate(CmpInst::getInversePredicate(Pred));
    freelyInvertAllUsersOf(NotOp);
    return &I;
  }

  // Move a 'not' ahead of casts of a bool to enable logic reduction:
  // not (bitcast (sext i1 X)) --> bitcast (sext (not i1 X))
  if (match(NotOp, m_OneUse(m_BitCast(m_OneUse(m_SExt(m_Value(X)))))) &&
      X->getType()->isIntOrIntVectorTy(1)) {
    Type *SextTy = cast<BitCastOperator>(NotOp)->getSrcTy();
    Value *NotX = Builder.CreateNot(X);
    Value *Sext = Builder.CreateSExt(NotX, SextTy);
    return CastInst::CreateBitOrPointerCast(Sext, Ty);
  }

  if (auto *NotOpI = dyn_cast<Instruction>(NotOp))
    if (sinkNotIntoLogicalOp(*NotOpI))
      return &I;

  // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max:
  // ~min(~X, ~Y) --> max(X, Y)
  // ~max(~X, Y) --> min(X, ~Y)
  auto *II = dyn_cast<IntrinsicInst>(NotOp);
  if (II && II->hasOneUse()) {
    if (match(NotOp, m_MaxOrMin(m_Value(X), m_Value(Y))) &&
        isFreeToInvert(X, X->hasOneUse()) &&
        isFreeToInvert(Y, Y->hasOneUse())) {
      Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
      Value *NotX = Builder.CreateNot(X);
      Value *NotY = Builder.CreateNot(Y);
      Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, NotX, NotY);
      return replaceInstUsesWith(I, InvMaxMin);
    }

    if (match(NotOp, m_c_MaxOrMin(m_Not(m_Value(X)), m_Value(Y)))) {
      Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
      Value *NotY = Builder.CreateNot(Y);
      Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, NotY);
      return replaceInstUsesWith(I, InvMaxMin);
    }

    if (II->getIntrinsicID() == Intrinsic::is_fpclass) {
      ConstantInt *ClassMask = cast<ConstantInt>(II->getArgOperand(1));
      II->setArgOperand(
          1, ConstantInt::get(ClassMask->getType(),
                              ~ClassMask->getZExtValue() & fcAllFlags));
      return replaceInstUsesWith(I, II);
    }
  }

  if (NotOp->hasOneUse()) {
    // Pull 'not' into operands of select if both operands are one-use compares
    // or one is a one-use compare and the other one is a constant.
    // Inverting the predicates eliminates the 'not' operation.
    //
    // not (select ?, (cmp TPred, ?, ?), (cmp FPred, ?, ?)) -->
    //   select ?, (cmp InvTPred, ?, ?), (cmp InvFPred, ?, ?)
    // not (select ?, (cmp TPred, ?, ?), true) -->
    //   select ?, (cmp InvTPred, ?, ?), false
    if (auto *Sel = dyn_cast<SelectInst>(NotOp)) {
      Value *TV = Sel->getTrueValue();
      Value *FV = Sel->getFalseValue();
      auto *CmpT = dyn_cast<CmpInst>(TV);
      auto *CmpF = dyn_cast<CmpInst>(FV);
      bool InvertibleT = (CmpT && CmpT->hasOneUse()) || isa<Constant>(TV);
      bool InvertibleF = (CmpF && CmpF->hasOneUse()) || isa<Constant>(FV);
      if (InvertibleT && InvertibleF) {
        if (CmpT)
          CmpT->setPredicate(CmpT->getInversePredicate());
        else
          Sel->setTrueValue(ConstantExpr::getNot(cast<Constant>(TV)));
        if (CmpF)
          CmpF->setPredicate(CmpF->getInversePredicate());
        else
          Sel->setFalseValue(ConstantExpr::getNot(cast<Constant>(FV)));
        return replaceInstUsesWith(I, Sel);
      }
    }
  }

  if (Instruction *NewXor = foldNotXor(I, Builder))
    return NewXor;

  return nullptr;
}

// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
  if (Value *V = simplifyXorInst(I.getOperand(0), I.getOperand(1),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  if (Instruction *NewXor = foldXorToXor(I, Builder))
    return NewXor;

  // (A & B) ^ (A & C) --> A & (B ^ C), etc.
  if (Value *V = foldUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  if (Value *V = SimplifyBSwap(I, Builder))
    return replaceInstUsesWith(I, V);

  if (Instruction *R = foldNot(I))
    return R;

  if (Instruction *R = foldBinOpShiftWithShift(I))
    return R;

  // Fold (X & M) ^ (Y & ~M) -> (X & M) | (Y & ~M)
  // This is a special case of haveNoCommonBitsSet, but the computeKnownBits
  // calls in there are unnecessary as SimplifyDemandedInstructionBits should
  // have already taken care of those cases.
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *M;
  if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(M)), m_Value()),
                        m_c_And(m_Deferred(M), m_Value()))))
    return BinaryOperator::CreateOr(Op0, Op1);

  if (Instruction *Xor = visitMaskedMerge(I, Builder))
    return Xor;

  Value *X, *Y;
  Constant *C1;
  if (match(Op1, m_Constant(C1))) {
    Constant *C2;

    if (match(Op0, m_OneUse(m_Or(m_Value(X), m_ImmConstant(C2)))) &&
        match(C1, m_ImmConstant())) {
      // (X | C2) ^ C1 --> (X & ~C2) ^ (C1 ^ C2)
      C2 = Constant::replaceUndefsWith(
          C2, Constant::getAllOnesValue(C2->getType()->getScalarType()));
      Value *And = Builder.CreateAnd(
          X, Constant::mergeUndefsWith(ConstantExpr::getNot(C2), C1));
      return BinaryOperator::CreateXor(
          And, Constant::mergeUndefsWith(ConstantExpr::getXor(C1, C2), C1));
    }

    // Use DeMorgan and reassociation to eliminate a 'not' op.
    if (match(Op0, m_OneUse(m_Or(m_Not(m_Value(X)), m_Constant(C2))))) {
      // (~X | C2) ^ C1 --> ((X & ~C2) ^ -1) ^ C1 --> (X & ~C2) ^ ~C1
      Value *And = Builder.CreateAnd(X, ConstantExpr::getNot(C2));
      return BinaryOperator::CreateXor(And, ConstantExpr::getNot(C1));
    }
    if (match(Op0, m_OneUse(m_And(m_Not(m_Value(X)), m_Constant(C2))))) {
      // (~X & C2) ^ C1 --> ((X | ~C2) ^ -1) ^ C1 --> (X | ~C2) ^ ~C1
      Value *Or = Builder.CreateOr(X, ConstantExpr::getNot(C2));
      return BinaryOperator::CreateXor(Or, ConstantExpr::getNot(C1));
    }

    // Convert xor ([trunc] (ashr X, BW-1)), C =>
    //   select(X >s -1, C, ~C)
    // The ashr creates "all zeros or all ones" values, which then optionally
    // invert the constant depending on whether this input is less than 0.
    const APInt *CA;
    if (match(Op0, m_OneUse(m_TruncOrSelf(
                       m_AShr(m_Value(X), m_APIntAllowUndef(CA))))) &&
        *CA == X->getType()->getScalarSizeInBits() - 1 &&
        !match(C1, m_AllOnes())) {
      assert(!C1->isZeroValue() && "Unexpected xor with 0");
      Value *IsNotNeg = Builder.CreateIsNotNeg(X);
      return SelectInst::Create(IsNotNeg, Op1, Builder.CreateNot(Op1));
    }
  }

  Type *Ty = I.getType();
  {
    const APInt *RHSC;
    if (match(Op1, m_APInt(RHSC))) {
      Value *X;
      const APInt *C;
      // (C - X) ^ signmaskC --> (C + signmaskC) - X
      if (RHSC->isSignMask() && match(Op0, m_Sub(m_APInt(C), m_Value(X))))
        return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C + *RHSC), X);

      // (X + C) ^ signmaskC --> X + (C + signmaskC)
      if (RHSC->isSignMask() && match(Op0, m_Add(m_Value(X), m_APInt(C))))
        return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C + *RHSC));

      // (X | C) ^ RHSC --> X ^ (C ^ RHSC) iff X & C == 0
      if (match(Op0, m_Or(m_Value(X), m_APInt(C))) &&
          MaskedValueIsZero(X, *C, 0, &I))
        return BinaryOperator::CreateXor(X, ConstantInt::get(Ty, *C ^ *RHSC));

      // When X is a power-of-two or zero and zero input is poison:
      // ctlz(i32 X) ^ 31 --> cttz(X)
      // cttz(i32 X) ^ 31 --> ctlz(X)
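      // E.g. (illustrative): for X == 8 (i32), ctlz gives 28 and 28 ^ 31 == 3
      // == cttz(8); for X == 2^k, ctlz is 31 - k, and subtracting from the
      // all-ones value 31 never borrows, so (31 - k) ^ 31 == k.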
      auto *II = dyn_cast<IntrinsicInst>(Op0);
      if (II && II->hasOneUse() && *RHSC == Ty->getScalarSizeInBits() - 1) {
        Intrinsic::ID IID = II->getIntrinsicID();
        if ((IID == Intrinsic::ctlz || IID == Intrinsic::cttz) &&
            match(II->getArgOperand(1), m_One()) &&
            isKnownToBeAPowerOfTwo(II->getArgOperand(0), /*OrZero*/ true)) {
          IID = (IID == Intrinsic::ctlz) ? Intrinsic::cttz : Intrinsic::ctlz;
          Function *F = Intrinsic::getDeclaration(II->getModule(), IID, Ty);
          return CallInst::Create(F, {II->getArgOperand(0), Builder.getTrue()});
        }
      }

      // If RHSC is inverting the remaining bits of shifted X,
      // canonicalize to a 'not' before the shift to help SCEV and codegen:
      // (X << C) ^ RHSC --> ~X << C
      if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_APInt(C)))) &&
          *RHSC == APInt::getAllOnes(Ty->getScalarSizeInBits()).shl(*C)) {
        Value *NotX = Builder.CreateNot(X);
        return BinaryOperator::CreateShl(NotX, ConstantInt::get(Ty, *C));
      }
      // (X >>u C) ^ RHSC --> ~X >>u C
      if (match(Op0, m_OneUse(m_LShr(m_Value(X), m_APInt(C)))) &&
          *RHSC == APInt::getAllOnes(Ty->getScalarSizeInBits()).lshr(*C)) {
        Value *NotX = Builder.CreateNot(X);
        return BinaryOperator::CreateLShr(NotX, ConstantInt::get(Ty, *C));
      }
      // TODO: We could handle 'ashr' here as well. That would be matching
      //       a 'not' op and moving it before the shift. Doing that requires
      //       preventing the inverse fold in canShiftBinOpWithConstantRHS().
    }
  }

  // If we are XORing the sign bit of a floating-point value, convert
  // this to fneg, then cast back to integer.
  //
  // This is a generous interpretation of noimplicitfloat, as this is not a
  // true floating-point operation.
  //
  // Assumes any IEEE-represented type has the sign bit in the high bit.
  // TODO: Unify with APInt matcher. This version allows undef unlike m_APInt.
  Value *CastOp;
  if (match(Op0, m_BitCast(m_Value(CastOp))) && match(Op1, m_SignMask()) &&
      !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
          Attribute::NoImplicitFloat)) {
    Type *EltTy = CastOp->getType()->getScalarType();
    if (EltTy->isFloatingPointTy() && EltTy->isIEEE() &&
        EltTy->getPrimitiveSizeInBits() ==
            I.getType()->getScalarType()->getPrimitiveSizeInBits()) {
      Value *FNeg = Builder.CreateFNeg(CastOp);
      return new BitCastInst(FNeg, I.getType());
    }
  }
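  // Illustrative IR for the preceding fold (assuming a float operand):
  //   %i = bitcast float %f to i32
  //   %r = xor i32 %i, -2147483648    ; flips only the sign bit
  // becomes
  //   %n = fneg float %f
  //   %r = bitcast float %n to i32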

  // FIXME: This should not be limited to scalar (pull into APInt match above).
  {
    ConstantInt *C1, *C2, *C3;
    // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3)
    if (match(Op1, m_ConstantInt(C3)) &&
        match(Op0, m_LShr(m_Xor(m_Value(X), m_ConstantInt(C1)),
                          m_ConstantInt(C2))) &&
        Op0->hasOneUse()) {
      // fold (C1 >> C2) ^ C3
      APInt FoldConst = C1->getValue().lshr(C2->getValue());
      FoldConst ^= C3->getValue();
      // Prepare the two operands.
      auto *Opnd0 = Builder.CreateLShr(X, C2);
      Opnd0->takeName(Op0);
      return BinaryOperator::CreateXor(Opnd0, ConstantInt::get(Ty, FoldConst));
    }
  }
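  // Worked example (illustrative, i8): ((X ^ 0xF0) >> 4) ^ 0x0A
  //   --> (X >> 4) ^ ((0xF0 >> 4) ^ 0x0A) == (X >> 4) ^ 0x05,
  // because a logical shift distributes over xor, letting the two constants
  // fold together.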

  if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
    return FoldedLogic;

  // Y ^ (X | Y) --> X & ~Y
  // Y ^ (Y | X) --> X & ~Y
  if (match(Op1, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op0)))))
    return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op0));
  // (X | Y) ^ Y --> X & ~Y
  // (Y | X) ^ Y --> X & ~Y
  if (match(Op0, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op1)))))
    return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op1));
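  // Why: every bit set in Y is also set in X | Y, so xor'ing with Y clears
  // those bits and leaves only the bits X contributes outside of Y, which is
  // X & ~Y. E.g. (illustrative) X = 0b0110, Y = 0b0011: (X | Y) ^ Y == 0b0100.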

  // Y ^ (X & Y) --> ~X & Y
  // Y ^ (Y & X) --> ~X & Y
  if (match(Op1, m_OneUse(m_c_And(m_Value(X), m_Specific(Op0)))))
    return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(X));
  // (X & Y) ^ Y --> ~X & Y
  // (Y & X) ^ Y --> ~X & Y
  // Canonical form is (X & C) ^ C; don't touch that.
  // TODO: A 'not' op is better for analysis and codegen, but demanded bits must
  //       be fixed to prefer that (otherwise we get infinite looping).
  if (!match(Op1, m_Constant()) &&
      match(Op0, m_OneUse(m_c_And(m_Value(X), m_Specific(Op1)))))
    return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(X));

  Value *A, *B, *C;
  // (A ^ B) ^ (A | C) --> (~A & C) ^ B -- There are 4 commuted variants.
  if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))),
                        m_OneUse(m_c_Or(m_Deferred(A), m_Value(C))))))
    return BinaryOperator::CreateXor(
        Builder.CreateAnd(Builder.CreateNot(A), C), B);

  // (A ^ B) ^ (B | C) --> (~B & C) ^ A -- There are 4 commuted variants.
  if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))),
                        m_OneUse(m_c_Or(m_Deferred(B), m_Value(C))))))
    return BinaryOperator::CreateXor(
        Builder.CreateAnd(Builder.CreateNot(B), C), A);
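  // Why the first fold holds (illustrative per-bit case split): where a bit
  // of A is 1, (A ^ B) ^ (A | C) == ~B ^ 1 == B; where it is 0, it is B ^ C.
  // That is exactly B ^ (~A & C). The second fold is the same split on B.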

  // (A & B) ^ (A ^ B) -> (A | B)
  if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
      match(Op1, m_c_Xor(m_Specific(A), m_Specific(B))))
    return BinaryOperator::CreateOr(A, B);
  // (A ^ B) ^ (A & B) -> (A | B)
  if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
      match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
    return BinaryOperator::CreateOr(A, B);
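  // Truth-table check (illustrative): per bit, AND ^ XOR gives 1 for inputs
  // 01 and 10 (from the xor) and for 11 (from the and), and 0 for 00 --
  // which is exactly OR.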

  // (A & ~B) ^ ~A -> ~(A & B)
  // (~B & A) ^ ~A -> ~(A & B)
  if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
      match(Op1, m_Not(m_Specific(A))))
    return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));

  // (~A & B) ^ A --> A | B -- There are 4 commuted variants.
  if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(A)), m_Value(B)), m_Deferred(A))))
    return BinaryOperator::CreateOr(A, B);

  // (~A | B) ^ A --> ~(A & B)
  if (match(Op0, m_OneUse(m_c_Or(m_Not(m_Specific(Op1)), m_Value(B)))))
    return BinaryOperator::CreateNot(Builder.CreateAnd(Op1, B));

  // A ^ (~A | B) --> ~(A & B)
  if (match(Op1, m_OneUse(m_c_Or(m_Not(m_Specific(Op0)), m_Value(B)))))
    return BinaryOperator::CreateNot(Builder.CreateAnd(Op0, B));
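  // Why: where a bit of A is 1, (~A | B) ^ A == B ^ 1 == ~B; where it is 0,
  // the result is 1. That matches ~(A & B), which is ~B when A is 1 and 1
  // when A is 0 (illustrative case split).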

  // (A | B) ^ (A | C) --> (B ^ C) & ~A -- There are 4 commuted variants.
  // TODO: Loosen one-use restriction if common operand is a constant.
  Value *D;
  if (match(Op0, m_OneUse(m_Or(m_Value(A), m_Value(B)))) &&
      match(Op1, m_OneUse(m_Or(m_Value(C), m_Value(D))))) {
    if (B == C || B == D)
      std::swap(A, B);
    if (A == C)
      std::swap(C, D);
    if (A == D) {
      Value *NotA = Builder.CreateNot(A);
      return BinaryOperator::CreateAnd(Builder.CreateXor(B, C), NotA);
    }
  }
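  // Why: where a bit of A is 1, both 'or' results are 1 and the xor is 0;
  // where it is 0, the xor is B ^ C. Hence (B ^ C) & ~A (illustrative
  // per-bit case split; the swaps above canonicalize the shared operand).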

  // (A & B) ^ (A | C) --> A ? ~B : C -- There are 4 commuted variants.
  if (I.getType()->isIntOrIntVectorTy(1) &&
      match(Op0, m_OneUse(m_LogicalAnd(m_Value(A), m_Value(B)))) &&
      match(Op1, m_OneUse(m_LogicalOr(m_Value(C), m_Value(D))))) {
    bool NeedFreeze = isa<SelectInst>(Op0) && isa<SelectInst>(Op1) && B == D;
    if (B == C || B == D)
      std::swap(A, B);
    if (A == C)
      std::swap(C, D);
    if (A == D) {
      if (NeedFreeze)
        A = Builder.CreateFreeze(A);
      Value *NotB = Builder.CreateNot(B);
      return SelectInst::Create(A, NotB, C);
    }
  }
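  // Case split for the i1 fold above (illustrative): if A is true the result
  // is B ^ true == ~B; if A is false it is false ^ C == C, hence the select.
  // The freeze of A is a poison-safety guard for the select forms.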

  if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
    if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
      if (Value *V = foldXorOfICmps(LHS, RHS, I))
        return replaceInstUsesWith(I, V);

  if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
    return CastedXor;

  if (Instruction *Abs = canonicalizeAbs(I, Builder))
    return Abs;

  // Otherwise, if all else failed, try to hoist the xor-by-constant:
  //   (X ^ C) ^ Y --> (X ^ Y) ^ C
  // Just like we do in other places, we completely avoid the fold
  // for constantexprs, at least to avoid endless combine loop.
  if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_CombineAnd(m_Value(X),
                                                    m_Unless(m_ConstantExpr())),
                                       m_ImmConstant(C1))),
                        m_Value(Y))))
    return BinaryOperator::CreateXor(Builder.CreateXor(X, Y), C1);
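  // E.g. (illustrative): (X ^ 5) ^ Y --> (X ^ Y) ^ 5, so the constant ends
  // up outermost, where later constant folds can see it.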

  if (Instruction *R = reassociateForUses(I, Builder))
    return R;

  if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
    return Canonicalized;

  if (Instruction *Folded = foldLogicOfIsFPClass(I, Op0, Op1))
    return Folded;

  if (Instruction *Folded = canonicalizeConditionalNegationViaMathToSelect(I))
    return Folded;

  if (Instruction *Res = foldBinOpOfDisplacedShifts(I))