zpu: managed to compile program that writes constant to global variable
[llvm/zpu.git] / lib / Transforms / InstCombine / InstCombineAndOrXor.cpp
1 //===- InstCombineAndOrXor.cpp --------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the visitAnd, visitOr, and visitXor functions.
12 //===----------------------------------------------------------------------===//
14 #include "InstCombine.h"
15 #include "llvm/Intrinsics.h"
16 #include "llvm/Analysis/InstructionSimplify.h"
17 #include "llvm/Support/PatternMatch.h"
18 using namespace llvm;
19 using namespace PatternMatch;
22 /// AddOne - Add one to a ConstantInt.
23 static Constant *AddOne(Constant *C) {
24 return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
26 /// SubOne - Subtract one from a ConstantInt.
27 static Constant *SubOne(ConstantInt *C) {
28 return ConstantInt::get(C->getContext(), C->getValue()-1);
31 /// isFreeToInvert - Return true if the specified value is free to invert (apply
32 /// ~ to). This happens in cases where the ~ can be eliminated.
33 static inline bool isFreeToInvert(Value *V) {
34 // ~(~(X)) -> X.
35 if (BinaryOperator::isNot(V))
36 return true;
38 // Constants can be considered to be not'ed values.
39 if (isa<ConstantInt>(V))
40 return true;
42 // Compares can be inverted if they have a single use.
43 if (CmpInst *CI = dyn_cast<CmpInst>(V))
44 return CI->hasOneUse();
46 return false;
49 static inline Value *dyn_castNotVal(Value *V) {
50 // If this is not(not(x)) don't return that this is a not: we want the two
51 // not's to be folded first.
52 if (BinaryOperator::isNot(V)) {
53 Value *Operand = BinaryOperator::getNotArgument(V);
54 if (!isFreeToInvert(Operand))
55 return Operand;
58 // Constants can be considered to be not'ed values...
59 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
60 return ConstantInt::get(C->getType(), ~C->getValue());
61 return 0;
65 /// getICmpCode - Encode a icmp predicate into a three bit mask. These bits
66 /// are carefully arranged to allow folding of expressions such as:
67 ///
68 /// (A < B) | (A > B) --> (A != B)
69 ///
70 /// Note that this is only valid if the first and second predicates have the
71 /// same sign. It is illegal to do: (A u< B) | (A s> B)
72 ///
73 /// Three bits are used to represent the condition, as follows:
74 /// 0 A > B
75 /// 1 A == B
76 /// 2 A < B
77 ///
78 /// <=> Value Definition
79 /// 000 0 Always false
80 /// 001 1 A > B
81 /// 010 2 A == B
82 /// 011 3 A >= B
83 /// 100 4 A < B
84 /// 101 5 A != B
85 /// 110 6 A <= B
86 /// 111 7 Always true
87 ///
88 static unsigned getICmpCode(const ICmpInst *ICI) {
89 switch (ICI->getPredicate()) {
90 // False -> 0
91 case ICmpInst::ICMP_UGT: return 1; // 001
92 case ICmpInst::ICMP_SGT: return 1; // 001
93 case ICmpInst::ICMP_EQ: return 2; // 010
94 case ICmpInst::ICMP_UGE: return 3; // 011
95 case ICmpInst::ICMP_SGE: return 3; // 011
96 case ICmpInst::ICMP_ULT: return 4; // 100
97 case ICmpInst::ICMP_SLT: return 4; // 100
98 case ICmpInst::ICMP_NE: return 5; // 101
99 case ICmpInst::ICMP_ULE: return 6; // 110
100 case ICmpInst::ICMP_SLE: return 6; // 110
101 // True -> 7
102 default:
103 llvm_unreachable("Invalid ICmp predicate!");
104 return 0;
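// As a worked example of why this encoding folds nicely: the code for
// (A u< B) is 4 (100) and the code for (A u> B) is 1 (001).  OR-ing the
// codes gives 5 (101), which decodes to (A != B), and AND-ing them gives
// 0 (000), which decodes to 'always false'.  getICmpValue below performs
// that decoding step.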
108 /// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
109 /// predicate into a three bit mask. It also returns whether it is an ordered
110 /// predicate by reference.
111 static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
112 isOrdered = false;
113 switch (CC) {
114 case FCmpInst::FCMP_ORD: isOrdered = true; return 0; // 000
115 case FCmpInst::FCMP_UNO: return 0; // 000
116 case FCmpInst::FCMP_OGT: isOrdered = true; return 1; // 001
117 case FCmpInst::FCMP_UGT: return 1; // 001
118 case FCmpInst::FCMP_OEQ: isOrdered = true; return 2; // 010
119 case FCmpInst::FCMP_UEQ: return 2; // 010
120 case FCmpInst::FCMP_OGE: isOrdered = true; return 3; // 011
121 case FCmpInst::FCMP_UGE: return 3; // 011
122 case FCmpInst::FCMP_OLT: isOrdered = true; return 4; // 100
123 case FCmpInst::FCMP_ULT: return 4; // 100
124 case FCmpInst::FCMP_ONE: isOrdered = true; return 5; // 101
125 case FCmpInst::FCMP_UNE: return 5; // 101
126 case FCmpInst::FCMP_OLE: isOrdered = true; return 6; // 110
127 case FCmpInst::FCMP_ULE: return 6; // 110
128 // True -> 7
129 default:
130 // Not expecting FCMP_FALSE and FCMP_TRUE;
131 llvm_unreachable("Unexpected FCmp predicate!");
132 return 0;
136 /// getICmpValue - This is the complement of getICmpCode, which turns an
137 /// opcode and two operands into either a constant true or false, or a brand
138 /// new ICmp instruction. The sign is passed in to determine which kind
139 /// of predicate to use in the new icmp instruction.
140 static Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
141 InstCombiner::BuilderTy *Builder) {
142 CmpInst::Predicate Pred;
143 switch (Code) {
144 default: assert(0 && "Illegal ICmp code!");
145 case 0: // False.
146 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
147 case 1: Pred = Sign ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
148 case 2: Pred = ICmpInst::ICMP_EQ; break;
149 case 3: Pred = Sign ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
150 case 4: Pred = Sign ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
151 case 5: Pred = ICmpInst::ICMP_NE; break;
152 case 6: Pred = Sign ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
153 case 7: // True.
154 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
156 return Builder->CreateICmp(Pred, LHS, RHS);
159 /// getFCmpValue - This is the complement of getFCmpCode, which turns an
160 /// opcode and two operands into either a constant true or a new FCmp
161 /// instruction. isordered selects between ordered and unordered predicates.
162 static Value *getFCmpValue(bool isordered, unsigned code,
163 Value *LHS, Value *RHS,
164 InstCombiner::BuilderTy *Builder) {
165 CmpInst::Predicate Pred;
166 switch (code) {
167 default: assert(0 && "Illegal FCmp code!");
168 case 0: Pred = isordered ? FCmpInst::FCMP_ORD : FCmpInst::FCMP_UNO; break;
169 case 1: Pred = isordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT; break;
170 case 2: Pred = isordered ? FCmpInst::FCMP_OEQ : FCmpInst::FCMP_UEQ; break;
171 case 3: Pred = isordered ? FCmpInst::FCMP_OGE : FCmpInst::FCMP_UGE; break;
172 case 4: Pred = isordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT; break;
173 case 5: Pred = isordered ? FCmpInst::FCMP_ONE : FCmpInst::FCMP_UNE; break;
174 case 6: Pred = isordered ? FCmpInst::FCMP_OLE : FCmpInst::FCMP_ULE; break;
175 case 7: return ConstantInt::getTrue(LHS->getContext());
177 return Builder->CreateFCmp(Pred, LHS, RHS);
180 /// PredicatesFoldable - Return true if both predicates match sign or if at
181 /// least one of them is an equality comparison (which is signless).
182 static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
183 return (CmpInst::isSigned(p1) == CmpInst::isSigned(p2)) ||
184 (CmpInst::isSigned(p1) && ICmpInst::isEquality(p2)) ||
185 (CmpInst::isSigned(p2) && ICmpInst::isEquality(p1));
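// For example, (A s< B) and (A == B) are foldable because equality is
// signless, while (A u< B) and (A s> B) disagree on sign and are not.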
188 // OptAndOp - This handles expressions of the form ((val OP C1) & C2), where
189 // the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
190 // guaranteed to be a binary operator.
191 Instruction *InstCombiner::OptAndOp(Instruction *Op,
192 ConstantInt *OpRHS,
193 ConstantInt *AndRHS,
194 BinaryOperator &TheAnd) {
195 Value *X = Op->getOperand(0);
196 Constant *Together = 0;
197 if (!Op->isShift())
198 Together = ConstantExpr::getAnd(AndRHS, OpRHS);
200 switch (Op->getOpcode()) {
201 case Instruction::Xor:
202 if (Op->hasOneUse()) {
203 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
204 Value *And = Builder->CreateAnd(X, AndRHS);
205 And->takeName(Op);
206 return BinaryOperator::CreateXor(And, Together);
208 break;
209 case Instruction::Or:
210 if (Op->hasOneUse()){
211 if (Together != OpRHS) {
212 // (X | C1) & C2 --> (X | (C1&C2)) & C2
213 Value *Or = Builder->CreateOr(X, Together);
214 Or->takeName(Op);
215 return BinaryOperator::CreateAnd(Or, AndRHS);
218 ConstantInt *TogetherCI = dyn_cast<ConstantInt>(Together);
219 if (TogetherCI && !TogetherCI->isZero()){
220 // (X | C1) & C2 --> (X & (C2^(C1&C2))) | C1
221 // NOTE: This reduces the number of bits set in the & mask, which
222 // can expose opportunities for store narrowing.
223 Together = ConstantExpr::getXor(AndRHS, Together);
224 Value *And = Builder->CreateAnd(X, Together);
225 And->takeName(Op);
226 return BinaryOperator::CreateOr(And, OpRHS);
230 break;
231 case Instruction::Add:
232 if (Op->hasOneUse()) {
233 // Adding one to a single-bit bit-field should be turned into an XOR
234 // of the bit. The first thing to check is whether this AND is with a
235 // single-bit constant.
236 const APInt &AndRHSV = cast<ConstantInt>(AndRHS)->getValue();
238 // If there is only one bit set.
239 if (AndRHSV.isPowerOf2()) {
240 // Ok, at this point, we know that we are masking the result of the
241 // ADD down to exactly one bit. If the constant we are adding has
242 // no bits set below this bit, then we can eliminate the ADD.
243 const APInt& AddRHS = cast<ConstantInt>(OpRHS)->getValue();
245 // Check to see if any bits below the one bit set in AndRHSV are set.
246 if ((AddRHS & (AndRHSV-1)) == 0) {
247 // If not, the only thing that can affect the output of the AND is
248 // the bit specified by AndRHSV. If that bit is set, the effect of
249 // the XOR is to toggle the bit. If it is clear, then the ADD has
250 // no effect.
251 if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
252 TheAnd.setOperand(0, X);
253 return &TheAnd;
254 } else {
255 // Pull the XOR out of the AND.
256 Value *NewAnd = Builder->CreateAnd(X, AndRHS);
257 NewAnd->takeName(Op);
258 return BinaryOperator::CreateXor(NewAnd, AndRHS);
263 break;
265 case Instruction::Shl: {
266 // We know that the AND will not produce any of the bits shifted in, so if
267 // the anded constant includes them, clear them now!
269 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
270 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
271 APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
272 ConstantInt *CI = ConstantInt::get(AndRHS->getContext(),
273 AndRHS->getValue() & ShlMask);
275 if (CI->getValue() == ShlMask) {
276 // Masking out bits that the shift already masks
277 return ReplaceInstUsesWith(TheAnd, Op); // No need for the and.
278 } else if (CI != AndRHS) { // Reducing bits set in and.
279 TheAnd.setOperand(1, CI);
280 return &TheAnd;
282 break;
284 case Instruction::LShr: {
285 // We know that the AND will not produce any of the bits shifted in, so if
286 // the anded constant includes them, clear them now! This only applies to
287 // unsigned shifts, because a signed shr may bring in set bits!
289 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
290 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
291 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
292 ConstantInt *CI = ConstantInt::get(Op->getContext(),
293 AndRHS->getValue() & ShrMask);
295 if (CI->getValue() == ShrMask) {
296 // Masking out bits that the shift already masks.
297 return ReplaceInstUsesWith(TheAnd, Op);
298 } else if (CI != AndRHS) {
299 TheAnd.setOperand(1, CI); // Reduce bits set in and cst.
300 return &TheAnd;
302 break;
304 case Instruction::AShr:
305 // Signed shr.
306 // See if this is shifting in some sign extension, then masking it out
307 // with an and.
308 if (Op->hasOneUse()) {
309 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
310 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
311 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
312 Constant *C = ConstantInt::get(Op->getContext(),
313 AndRHS->getValue() & ShrMask);
314 if (C == AndRHS) { // Masking out bits shifted in.
315 // (Val ashr C1) & C2 -> (Val lshr C1) & C2
316 // Make the argument unsigned.
317 Value *ShVal = Op->getOperand(0);
318 ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
319 return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
322 break;
324 return 0;
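// A couple of concrete instances of the Add case above: ((X + 16) & 8)
// becomes (X & 8), because adding 16 cannot change bit 3, while
// ((X + 8) & 8) becomes ((X & 8) ^ 8), because adding 8 toggles exactly
// the bit being tested.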
328 /// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is
329 /// true, otherwise (V < Lo || V >= Hi). In practice, we emit the more efficient
330 /// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates
331 /// whether to treat V, Lo and Hi as signed or not. New instructions are
332 /// inserted with the Builder.
333 Value *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
334 bool isSigned, bool Inside) {
335 assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
336 ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
337 "Lo is not <= Hi in range emission code!");
339 if (Inside) {
340 if (Lo == Hi) // Trivially false.
341 return ConstantInt::getFalse(V->getContext());
343 // V >= Min && V < Hi --> V < Hi
344 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
345 ICmpInst::Predicate pred = (isSigned ?
346 ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
347 return Builder->CreateICmp(pred, V, Hi);
350 // Emit V-Lo <u Hi-Lo
351 Constant *NegLo = ConstantExpr::getNeg(Lo);
352 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
353 Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
354 return Builder->CreateICmpULT(Add, UpperBound);
357 if (Lo == Hi) // Trivially true.
358 return ConstantInt::getTrue(V->getContext());
360 // V < Min || V >= Hi -> V > Hi-1
361 Hi = SubOne(cast<ConstantInt>(Hi));
362 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
363 ICmpInst::Predicate pred = (isSigned ?
364 ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
365 return Builder->CreateICmp(pred, V, Hi);
368 // Emit V-Lo >u Hi-1-Lo
369 // Note that Hi has already had one subtracted from it, above.
370 ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
371 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
372 Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
373 return Builder->CreateICmpUGT(Add, LowerBound);
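// Worked example of the subtract trick used above: the inside-range test
// (V >= 5 && V < 10) is emitted as ((V - 5) u< 5).  For V == 7 this gives
// 2 u< 5 (true); for V == 3 the subtraction wraps to a huge unsigned
// value, so the comparison is false, exactly as required.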
376 // isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
377 // any number of 0s on either side. The 1s are allowed to wrap from LSB to
378 // MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
379 // not, since all 1s are not contiguous.
380 static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
381 const APInt& V = Val->getValue();
382 uint32_t BitWidth = Val->getType()->getBitWidth();
383 if (!APIntOps::isShiftedMask(BitWidth, V)) return false;
385 // look for the first zero bit after the run of ones
386 MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
387 // look for the first non-zero bit
388 ME = V.getActiveBits();
389 return true;
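// For example, for Val == 0x0FF0 (bits 4..11 set) this should return
// MB == 5 and ME == 12, i.e. the 1-based positions of the lowest and
// highest set bit of the run.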
392 /// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
393 /// where isSub determines whether the operator is a sub. If we can fold one of
394 /// the following xforms:
395 ///
396 /// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
397 /// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
398 /// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
400 /// return (A +/- B).
402 Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
403 ConstantInt *Mask, bool isSub,
404 Instruction &I) {
405 Instruction *LHSI = dyn_cast<Instruction>(LHS);
406 if (!LHSI || LHSI->getNumOperands() != 2 ||
407 !isa<ConstantInt>(LHSI->getOperand(1))) return 0;
409 ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));
411 switch (LHSI->getOpcode()) {
412 default: return 0;
413 case Instruction::And:
414 if (ConstantExpr::getAnd(N, Mask) == Mask) {
415 // If the AndRHS is a power of two minus one (0+1+), this is simple.
416 if ((Mask->getValue().countLeadingZeros() +
417 Mask->getValue().countPopulation()) ==
418 Mask->getValue().getBitWidth())
419 break;
421 // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
422 // part, we don't need any explicit masks to take them out of A. If that
423 // is all N is, ignore it.
424 uint32_t MB = 0, ME = 0;
425 if (isRunOfOnes(Mask, MB, ME)) { // begin/end bit of run, inclusive
426 uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
427 APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
428 if (MaskedValueIsZero(RHS, Mask))
429 break;
432 return 0;
433 case Instruction::Or:
434 case Instruction::Xor:
435 // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
436 if ((Mask->getValue().countLeadingZeros() +
437 Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
438 && ConstantExpr::getAnd(N, Mask)->isNullValue())
439 break;
440 return 0;
443 if (isSub)
444 return Builder->CreateSub(LHSI->getOperand(0), RHS, "fold");
445 return Builder->CreateAdd(LHSI->getOperand(0), RHS, "fold");
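// A simple instance of the first transform above: ((A & 255) + B) & 255
// folds to (A + B) & 255, since N == Mask == 255 and the high bits of A
// that were being masked off cannot influence the low 8 bits of the sum.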
448 /// enum for classifying (icmp eq (A & B), C) and (icmp ne (A & B), C)
449 /// One of A and B is considered the mask, the other the value. This is
450 /// described as the "AMask" or "BMask" part of the enum. If the enum
451 /// contains only "Mask", then both A and B can be considered masks.
452 /// If A is the mask, then it was proven that (A & C) == C. This
453 /// is trivial if C == A, or C == 0. If both A and C are constants, this
454 /// proof is also easy.
455 /// For the following explanations we assume that A is the mask.
456 /// The part "AllOnes" declares, that the comparison is true only
457 /// if (A & B) == A, or all bits of A are set in B.
458 /// Example: (icmp eq (A & 3), 3) -> FoldMskICmp_AMask_AllOnes
459 /// The part "AllZeroes" declares, that the comparison is true only
460 /// if (A & B) == 0, or all bits of A are cleared in B.
461 /// Example: (icmp eq (A & 3), 0) -> FoldMskICmp_Mask_AllZeroes
462 /// The part "Mixed" declares, that (A & B) == C and C might or might not
463 /// contain any number of one bits and zero bits.
464 /// Example: (icmp eq (A & 3), 1) -> FoldMskICmp_AMask_Mixed
465 /// The part "Not" means that in the above descriptions "==" should be replaced
466 /// by "!=".
467 /// Example: (icmp ne (A & 3), 3) -> FoldMskICmp_AMask_NotAllOnes
468 /// If the mask A contains a single bit, then the following is equivalent:
469 /// (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
470 /// (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
471 enum MaskedICmpType {
472 FoldMskICmp_AMask_AllOnes = 1,
473 FoldMskICmp_AMask_NotAllOnes = 2,
474 FoldMskICmp_BMask_AllOnes = 4,
475 FoldMskICmp_BMask_NotAllOnes = 8,
476 FoldMskICmp_Mask_AllZeroes = 16,
477 FoldMskICmp_Mask_NotAllZeroes = 32,
478 FoldMskICmp_AMask_Mixed = 64,
479 FoldMskICmp_AMask_NotMixed = 128,
480 FoldMskICmp_BMask_Mixed = 256,
481 FoldMskICmp_BMask_NotMixed = 512
484 /// return the set of pattern classes (from MaskedICmpType)
485 /// that (icmp SCC (A & B), C) satisfies
486 static unsigned getTypeOfMaskedICmp(Value* A, Value* B, Value* C,
487 ICmpInst::Predicate SCC)
489 ConstantInt *ACst = dyn_cast<ConstantInt>(A);
490 ConstantInt *BCst = dyn_cast<ConstantInt>(B);
491 ConstantInt *CCst = dyn_cast<ConstantInt>(C);
492 bool icmp_eq = (SCC == ICmpInst::ICMP_EQ);
493 bool icmp_abit = (ACst != 0 && !ACst->isZero() &&
494 ACst->getValue().isPowerOf2());
495 bool icmp_bbit = (BCst != 0 && !BCst->isZero() &&
496 BCst->getValue().isPowerOf2());
497 unsigned result = 0;
498 if (CCst != 0 && CCst->isZero()) {
499 // if C is zero, then both A and B qualify as mask
500 result |= (icmp_eq ? (FoldMskICmp_Mask_AllZeroes |
501 FoldMskICmp_Mask_AllZeroes |
502 FoldMskICmp_AMask_Mixed |
503 FoldMskICmp_BMask_Mixed)
504 : (FoldMskICmp_Mask_NotAllZeroes |
505 FoldMskICmp_Mask_NotAllZeroes |
506 FoldMskICmp_AMask_NotMixed |
507 FoldMskICmp_BMask_NotMixed));
508 if (icmp_abit)
509 result |= (icmp_eq ? (FoldMskICmp_AMask_NotAllOnes |
510 FoldMskICmp_AMask_NotMixed)
511 : (FoldMskICmp_AMask_AllOnes |
512 FoldMskICmp_AMask_Mixed));
513 if (icmp_bbit)
514 result |= (icmp_eq ? (FoldMskICmp_BMask_NotAllOnes |
515 FoldMskICmp_BMask_NotMixed)
516 : (FoldMskICmp_BMask_AllOnes |
517 FoldMskICmp_BMask_Mixed));
518 return result;
520 if (A == C) {
521 result |= (icmp_eq ? (FoldMskICmp_AMask_AllOnes |
522 FoldMskICmp_AMask_Mixed)
523 : (FoldMskICmp_AMask_NotAllOnes |
524 FoldMskICmp_AMask_NotMixed));
525 if (icmp_abit)
526 result |= (icmp_eq ? (FoldMskICmp_Mask_NotAllZeroes |
527 FoldMskICmp_AMask_NotMixed)
528 : (FoldMskICmp_Mask_AllZeroes |
529 FoldMskICmp_AMask_Mixed));
531 else if (ACst != 0 && CCst != 0 &&
532 ConstantExpr::getAnd(ACst, CCst) == CCst) {
533 result |= (icmp_eq ? FoldMskICmp_AMask_Mixed
534 : FoldMskICmp_AMask_NotMixed);
536 if (B == C)
538 result |= (icmp_eq ? (FoldMskICmp_BMask_AllOnes |
539 FoldMskICmp_BMask_Mixed)
540 : (FoldMskICmp_BMask_NotAllOnes |
541 FoldMskICmp_BMask_NotMixed));
542 if (icmp_bbit)
543 result |= (icmp_eq ? (FoldMskICmp_Mask_NotAllZeroes |
544 FoldMskICmp_BMask_NotMixed)
545 : (FoldMskICmp_Mask_AllZeroes |
546 FoldMskICmp_BMask_Mixed));
548 else if (BCst != 0 && CCst != 0 &&
549 ConstantExpr::getAnd(BCst, CCst) == CCst) {
550 result |= (icmp_eq ? FoldMskICmp_BMask_Mixed
551 : FoldMskICmp_BMask_NotMixed);
553 return result;
556 /// foldLogOpOfMaskedICmpsHelper:
557 /// handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
558 /// return the set of pattern classes (from MaskedICmpType)
559 /// that both LHS and RHS satisfy
560 static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
561 Value*& B, Value*& C,
562 Value*& D, Value*& E,
563 ICmpInst *LHS, ICmpInst *RHS) {
564 ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
565 if (LHSCC != ICmpInst::ICMP_EQ && LHSCC != ICmpInst::ICMP_NE) return 0;
566 if (RHSCC != ICmpInst::ICMP_EQ && RHSCC != ICmpInst::ICMP_NE) return 0;
567 if (LHS->getOperand(0)->getType() != RHS->getOperand(0)->getType()) return 0;
568 // vectors are not (yet?) supported
569 if (LHS->getOperand(0)->getType()->isVectorTy()) return 0;
571 // Here comes the tricky part:
572 // LHS might be of the form L11 & L12 == X, X == L21 & L22,
573 // and L11 & L12 == L21 & L22. The same goes for RHS.
574 // Now we must find those components L** and R** that are equal, so
575 // that we can extract the parameters A, B, C, D, and E for the canonical
576 // pattern above.
577 Value *L1 = LHS->getOperand(0);
578 Value *L2 = LHS->getOperand(1);
579 Value *L11,*L12,*L21,*L22;
580 if (match(L1, m_And(m_Value(L11), m_Value(L12)))) {
581 if (!match(L2, m_And(m_Value(L21), m_Value(L22))))
582 L21 = L22 = 0;
584 else {
585 if (!match(L2, m_And(m_Value(L11), m_Value(L12))))
586 return 0;
587 std::swap(L1, L2);
588 L21 = L22 = 0;
591 Value *R1 = RHS->getOperand(0);
592 Value *R2 = RHS->getOperand(1);
593 Value *R11,*R12;
594 bool ok = false;
595 if (match(R1, m_And(m_Value(R11), m_Value(R12)))) {
596 if (R11 != 0 && (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22)) {
597 A = R11; D = R12; E = R2; ok = true;
599 else
600 if (R12 != 0 && (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22)) {
601 A = R12; D = R11; E = R2; ok = true;
604 if (!ok && match(R2, m_And(m_Value(R11), m_Value(R12)))) {
605 if (R11 != 0 && (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22)) {
606 A = R11; D = R12; E = R1; ok = true;
608 else
609 if (R12 != 0 && (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22)) {
610 A = R12; D = R11; E = R1; ok = true;
612 else
613 return 0;
615 if (!ok)
616 return 0;
618 if (L11 == A) {
619 B = L12; C = L2;
621 else if (L12 == A) {
622 B = L11; C = L2;
624 else if (L21 == A) {
625 B = L22; C = L1;
627 else if (L22 == A) {
628 B = L21; C = L1;
631 unsigned left_type = getTypeOfMaskedICmp(A, B, C, LHSCC);
632 unsigned right_type = getTypeOfMaskedICmp(A, D, E, RHSCC);
633 return left_type & right_type;
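// For example, given (icmp eq (X & 4), 0) & (icmp eq (X & 8), 0), the
// extracted parameters are A == X, B == 4, C == 0, D == 8, E == 0, and
// both sides classify as FoldMskICmp_Mask_AllZeroes.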
635 /// foldLogOpOfMaskedICmps:
636 /// try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
637 /// into a single (icmp(A & X) ==/!= Y)
638 static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS,
639 ICmpInst::Predicate NEWCC,
640 llvm::InstCombiner::BuilderTy* Builder) {
641 Value *A = 0, *B = 0, *C = 0, *D = 0, *E = 0;
642 unsigned mask = foldLogOpOfMaskedICmpsHelper(A, B, C, D, E, LHS, RHS);
643 if (mask == 0) return 0;
645 if (NEWCC == ICmpInst::ICMP_NE)
646 mask >>= 1; // treat "Not"-states as normal states
648 if (mask & FoldMskICmp_Mask_AllZeroes) {
649 // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
650 // -> (icmp eq (A & (B|D)), 0)
651 Value* newOr = Builder->CreateOr(B, D);
652 Value* newAnd = Builder->CreateAnd(A, newOr);
653 // we can't use C as zero, because we might actually handle
654 // (icmp ne (A & B), B) & (icmp ne (A & D), D)
655 // with B and D having a single bit set
656 Value* zero = Constant::getNullValue(A->getType());
657 return Builder->CreateICmp(NEWCC, newAnd, zero);
659 else if (mask & FoldMskICmp_BMask_AllOnes) {
660 // (icmp eq (A & B), B) & (icmp eq (A & D), D)
661 // -> (icmp eq (A & (B|D)), (B|D))
662 Value* newOr = Builder->CreateOr(B, D);
663 Value* newAnd = Builder->CreateAnd(A, newOr);
664 return Builder->CreateICmp(NEWCC, newAnd, newOr);
666 else if (mask & FoldMskICmp_AMask_AllOnes) {
667 // (icmp eq (A & B), A) & (icmp eq (A & D), A)
668 // -> (icmp eq (A & (B&D)), A)
669 Value* newAnd1 = Builder->CreateAnd(B, D);
670 Value* newAnd = Builder->CreateAnd(A, newAnd1);
671 return Builder->CreateICmp(NEWCC, newAnd, A);
673 else if (mask & FoldMskICmp_BMask_Mixed) {
674 // (icmp eq (A & B), C) & (icmp eq (A & D), E)
675 // We already know that B & C == C && D & E == E.
676 // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
677 // C and E, which are shared by both the mask B and the mask D, don't
678 // contradict, then we can transform to
679 // -> (icmp eq (A & (B|D)), (C|E))
680 // Currently, we only handle the case of B, C, D, and E being constant.
681 ConstantInt *BCst = dyn_cast<ConstantInt>(B);
682 if (BCst == 0) return 0;
683 ConstantInt *DCst = dyn_cast<ConstantInt>(D);
684 if (DCst == 0) return 0;
685 // we can't simply use C and E, because we might actually handle
686 // (icmp ne (A & B), B) & (icmp eq (A & D), D)
687 // with B and D having a single bit set
689 ConstantInt *CCst = dyn_cast<ConstantInt>(C);
690 if (CCst == 0) return 0;
691 if (LHS->getPredicate() != NEWCC)
692 CCst = dyn_cast<ConstantInt>( ConstantExpr::getXor(BCst, CCst) );
693 ConstantInt *ECst = dyn_cast<ConstantInt>(E);
694 if (ECst == 0) return 0;
695 if (RHS->getPredicate() != NEWCC)
696 ECst = dyn_cast<ConstantInt>( ConstantExpr::getXor(DCst, ECst) );
697 ConstantInt* MCst = dyn_cast<ConstantInt>(
698 ConstantExpr::getAnd(ConstantExpr::getAnd(BCst, DCst),
699 ConstantExpr::getXor(CCst, ECst)) );
700 // if there is a conflict we should actually return a false for the
701 // whole construct
702 if (!MCst->isZero())
703 return 0;
704 Value* newOr1 = Builder->CreateOr(B, D);
705 Value* newOr2 = ConstantExpr::getOr(CCst, ECst);
706 Value* newAnd = Builder->CreateAnd(A, newOr1);
707 return Builder->CreateICmp(NEWCC, newAnd, newOr2);
709 return 0;
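// Continuing the example above, the Mask_AllZeroes case combines
// (icmp eq (X & 4), 0) & (icmp eq (X & 8), 0) into the single test
// (icmp eq (X & 12), 0).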
712 /// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
713 Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
714 ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
716 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
717 if (PredicatesFoldable(LHSCC, RHSCC)) {
718 if (LHS->getOperand(0) == RHS->getOperand(1) &&
719 LHS->getOperand(1) == RHS->getOperand(0))
720 LHS->swapOperands();
721 if (LHS->getOperand(0) == RHS->getOperand(0) &&
722 LHS->getOperand(1) == RHS->getOperand(1)) {
723 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
724 unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
725 bool isSigned = LHS->isSigned() || RHS->isSigned();
726 return getICmpValue(isSigned, Code, Op0, Op1, Builder);
731 // handle (roughly):
732 // (icmp eq (A & B), C) & (icmp eq (A & D), E)
733 Value* fold = foldLogOpOfMaskedICmps(LHS, RHS, ICmpInst::ICMP_EQ, Builder);
734 if (fold) return fold;
737 // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
738 Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
739 ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
740 ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
741 if (LHSCst == 0 || RHSCst == 0) return 0;
743 if (LHSCst == RHSCst && LHSCC == RHSCC) {
744 // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
745 // where C is a power of 2
746 if (LHSCC == ICmpInst::ICMP_ULT &&
747 LHSCst->getValue().isPowerOf2()) {
748 Value *NewOr = Builder->CreateOr(Val, Val2);
749 return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
752 // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
753 if (LHSCC == ICmpInst::ICMP_EQ && LHSCst->isZero()) {
754 Value *NewOr = Builder->CreateOr(Val, Val2);
755 return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
759 // From here on, we only handle:
760 // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
761 if (Val != Val2) return 0;
763 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
764 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
765 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
766 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
767 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
768 return 0;
770 // We can't fold (ugt x, C) & (sgt x, C2).
771 if (!PredicatesFoldable(LHSCC, RHSCC))
772 return 0;
774 // Ensure that the larger constant is on the RHS.
775 bool ShouldSwap;
776 if (CmpInst::isSigned(LHSCC) ||
777 (ICmpInst::isEquality(LHSCC) &&
778 CmpInst::isSigned(RHSCC)))
779 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
780 else
781 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
783 if (ShouldSwap) {
784 std::swap(LHS, RHS);
785 std::swap(LHSCst, RHSCst);
786 std::swap(LHSCC, RHSCC);
789 // At this point, we know we have two icmp instructions
790 // comparing a value against two constants and and'ing the result
791 // together. Because of the above check, we know that we only have
792 // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
793 // (from the icmp folding check above) that the two constants
794 // are not equal and that the larger constant is on the RHS.
795 assert(LHSCst != RHSCst && "Compares not folded above?");
797 switch (LHSCC) {
798 default: llvm_unreachable("Unknown integer condition code!");
799 case ICmpInst::ICMP_EQ:
800 switch (RHSCC) {
801 default: llvm_unreachable("Unknown integer condition code!");
802 case ICmpInst::ICMP_EQ: // (X == 13 & X == 15) -> false
803 case ICmpInst::ICMP_UGT: // (X == 13 & X > 15) -> false
804 case ICmpInst::ICMP_SGT: // (X == 13 & X > 15) -> false
805 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
806 case ICmpInst::ICMP_NE: // (X == 13 & X != 15) -> X == 13
807 case ICmpInst::ICMP_ULT: // (X == 13 & X < 15) -> X == 13
808 case ICmpInst::ICMP_SLT: // (X == 13 & X < 15) -> X == 13
809 return LHS;
811 case ICmpInst::ICMP_NE:
812 switch (RHSCC) {
813 default: llvm_unreachable("Unknown integer condition code!");
814 case ICmpInst::ICMP_ULT:
815 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13
816 return Builder->CreateICmpULT(Val, LHSCst);
817 break; // (X != 13 & X u< 15) -> no change
818 case ICmpInst::ICMP_SLT:
819 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13
820 return Builder->CreateICmpSLT(Val, LHSCst);
821 break; // (X != 13 & X s< 15) -> no change
822 case ICmpInst::ICMP_EQ: // (X != 13 & X == 15) -> X == 15
823 case ICmpInst::ICMP_UGT: // (X != 13 & X u> 15) -> X u> 15
824 case ICmpInst::ICMP_SGT: // (X != 13 & X s> 15) -> X s> 15
825 return RHS;
826 case ICmpInst::ICMP_NE:
827 if (LHSCst == SubOne(RHSCst)){// (X != 13 & X != 14) -> X-13 >u 1
828 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
829 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
830 return Builder->CreateICmpUGT(Add, ConstantInt::get(Add->getType(), 1));
832 break; // (X != 13 & X != 15) -> no change
834 break;
835 case ICmpInst::ICMP_ULT:
836 switch (RHSCC) {
837 default: llvm_unreachable("Unknown integer condition code!");
838 case ICmpInst::ICMP_EQ: // (X u< 13 & X == 15) -> false
839 case ICmpInst::ICMP_UGT: // (X u< 13 & X u> 15) -> false
840 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
841 case ICmpInst::ICMP_SGT: // (X u< 13 & X s> 15) -> no change
842 break;
843 case ICmpInst::ICMP_NE: // (X u< 13 & X != 15) -> X u< 13
844 case ICmpInst::ICMP_ULT: // (X u< 13 & X u< 15) -> X u< 13
845 return LHS;
846 case ICmpInst::ICMP_SLT: // (X u< 13 & X s< 15) -> no change
847 break;
849 break;
850 case ICmpInst::ICMP_SLT:
851 switch (RHSCC) {
852 default: llvm_unreachable("Unknown integer condition code!");
853 case ICmpInst::ICMP_EQ: // (X s< 13 & X == 15) -> false
854 case ICmpInst::ICMP_SGT: // (X s< 13 & X s> 15) -> false
855 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
856 case ICmpInst::ICMP_UGT: // (X s< 13 & X u> 15) -> no change
857 break;
858 case ICmpInst::ICMP_NE: // (X s< 13 & X != 15) -> X < 13
859 case ICmpInst::ICMP_SLT: // (X s< 13 & X s< 15) -> X < 13
860 return LHS;
861 case ICmpInst::ICMP_ULT: // (X s< 13 & X u< 15) -> no change
862 break;
864 break;
865 case ICmpInst::ICMP_UGT:
866 switch (RHSCC) {
867 default: llvm_unreachable("Unknown integer condition code!");
868 case ICmpInst::ICMP_EQ: // (X u> 13 & X == 15) -> X == 15
869 case ICmpInst::ICMP_UGT: // (X u> 13 & X u> 15) -> X u> 15
870 return RHS;
871 case ICmpInst::ICMP_SGT: // (X u> 13 & X s> 15) -> no change
872 break;
873 case ICmpInst::ICMP_NE:
874 if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14
875 return Builder->CreateICmp(LHSCC, Val, RHSCst);
876 break; // (X u> 13 & X != 15) -> no change
877 case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
878 return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, false, true);
879 case ICmpInst::ICMP_SLT: // (X u> 13 & X s< 15) -> no change
880 break;
882 break;
883 case ICmpInst::ICMP_SGT:
884 switch (RHSCC) {
885 default: llvm_unreachable("Unknown integer condition code!");
886 case ICmpInst::ICMP_EQ: // (X s> 13 & X == 15) -> X == 15
887 case ICmpInst::ICMP_SGT: // (X s> 13 & X s> 15) -> X s> 15
888 return RHS;
889 case ICmpInst::ICMP_UGT: // (X s> 13 & X u> 15) -> no change
890 break;
891 case ICmpInst::ICMP_NE:
892 if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14
893 return Builder->CreateICmp(LHSCC, Val, RHSCst);
894 break; // (X s> 13 & X != 15) -> no change
895 case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
896 return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, true, true);
897 case ICmpInst::ICMP_ULT: // (X s> 13 & X u< 15) -> no change
898 break;
900 break;
903 return 0;
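// As an end-to-end example of the constant handling above:
// (icmp ugt X, 13) & (icmp ult X, 20) reaches the ICMP_UGT/ICMP_ULT case
// and is rewritten via InsertRangeTest into ((X - 14) u< 6), i.e. a
// single check that X lies in [14, 19].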
906 /// FoldAndOfFCmps - Optimize (fcmp)&(fcmp). NOTE: Unlike the rest of
907 /// instcombine, this returns a Value which should already be inserted into the
908 /// function.
909 Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
910 if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
911 RHS->getPredicate() == FCmpInst::FCMP_ORD) {
912 // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
913 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
914 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
915 // If either of the constants is a NaN, then the whole thing returns
916 // false.
917 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
918 return ConstantInt::getFalse(LHS->getContext());
919 return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
922 // Handle vector zeros. This occurs because the canonical form of
923 // "fcmp ord x,x" is "fcmp ord x, 0".
924 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
925 isa<ConstantAggregateZero>(RHS->getOperand(1)))
926 return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
927 return 0;
930 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
931 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
932 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
935 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
936 // Swap RHS operands to match LHS.
937 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
938 std::swap(Op1LHS, Op1RHS);
941 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
942 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
943 if (Op0CC == Op1CC)
944 return Builder->CreateFCmp((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
945 if (Op0CC == FCmpInst::FCMP_FALSE || Op1CC == FCmpInst::FCMP_FALSE)
946 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
947 if (Op0CC == FCmpInst::FCMP_TRUE)
948 return RHS;
949 if (Op1CC == FCmpInst::FCMP_TRUE)
950 return LHS;
952 bool Op0Ordered;
953 bool Op1Ordered;
954 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
955 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
956 if (Op1Pred == 0) {
957 std::swap(LHS, RHS);
958 std::swap(Op0Pred, Op1Pred);
959 std::swap(Op0Ordered, Op1Ordered);
961 if (Op0Pred == 0) {
962 // uno && ueq -> uno && (uno || eq) -> ueq
963 // ord && olt -> ord && (ord && lt) -> olt
964 if (Op0Ordered == Op1Ordered)
965 return RHS;
967 // uno && oeq -> uno && (ord && eq) -> false
968 // uno && ord -> false
969 if (!Op0Ordered)
970 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
971 // ord && ueq -> ord && (uno || eq) -> oeq
972 return getFCmpValue(true, Op1Pred, Op0LHS, Op0RHS, Builder);
976 return 0;
980 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
981 bool Changed = SimplifyCommutative(I);
982 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
984 if (Value *V = SimplifyAndInst(Op0, Op1, TD))
985 return ReplaceInstUsesWith(I, V);
987 // See if we can simplify any instructions used by the instruction whose sole
988 // purpose is to compute bits we don't care about.
989 if (SimplifyDemandedInstructionBits(I))
990 return &I;
992 if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
993 const APInt &AndRHSMask = AndRHS->getValue();
994 APInt NotAndRHS(~AndRHSMask);
996 // Optimize a variety of ((val OP C1) & C2) combinations...
997 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
998 Value *Op0LHS = Op0I->getOperand(0);
999 Value *Op0RHS = Op0I->getOperand(1);
1000 switch (Op0I->getOpcode()) {
1001 default: break;
1002 case Instruction::Xor:
1003 case Instruction::Or:
1004 // If the mask is only needed on one incoming arm, push it up.
1005 if (!Op0I->hasOneUse()) break;
1007 if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
1008 // Not masking anything out for the LHS, move to RHS.
1009 Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
1010 Op0RHS->getName()+".masked");
1011 return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS);
1013 if (!isa<Constant>(Op0RHS) &&
1014 MaskedValueIsZero(Op0RHS, NotAndRHS)) {
1015 // Not masking anything out for the RHS, move to LHS.
1016 Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
1017 Op0LHS->getName()+".masked");
1018 return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS);
1021 break;
1022 case Instruction::Add:
1023 // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
1024 // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
1025 // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
1026 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
1027 return BinaryOperator::CreateAnd(V, AndRHS);
1028 if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
1029 return BinaryOperator::CreateAnd(V, AndRHS); // Add commutes
1030 break;
1032 case Instruction::Sub:
1033 // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
1034 // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
1035 // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
1036 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
1037 return BinaryOperator::CreateAnd(V, AndRHS);
1039 // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
1040 // has 1's for all bits that the subtraction with A might affect.
1041 if (Op0I->hasOneUse()) {
1042 uint32_t BitWidth = AndRHSMask.getBitWidth();
1043 uint32_t Zeros = AndRHSMask.countLeadingZeros();
1044 APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);
1046 ConstantInt *A = dyn_cast<ConstantInt>(Op0LHS);
1047 if (!(A && A->isZero()) && // avoid infinite recursion.
1048 MaskedValueIsZero(Op0LHS, Mask)) {
1049 Value *NewNeg = Builder->CreateNeg(Op0RHS);
1050 return BinaryOperator::CreateAnd(NewNeg, AndRHS);
1053 break;
1055 case Instruction::Shl:
1056 case Instruction::LShr:
1057 // (1 << x) & 1 --> zext(x == 0)
1058 // (1 >> x) & 1 --> zext(x == 0)
1059 if (AndRHSMask == 1 && Op0LHS == AndRHS) {
1060 Value *NewICmp =
1061 Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
1062 return new ZExtInst(NewICmp, I.getType());
1064 break;
1067 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
1068 if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
1069 return Res;
1070 } else if (CastInst *CI = dyn_cast<CastInst>(Op0)) {
1071 // If this is an integer truncation or change from signed-to-unsigned, and
1072 // if the source is an and/or with immediate, transform it. This
1073 // frequently occurs for bitfield accesses.
1074 if (Instruction *CastOp = dyn_cast<Instruction>(CI->getOperand(0))) {
1075 if ((isa<TruncInst>(CI) || isa<BitCastInst>(CI)) &&
1076 CastOp->getNumOperands() == 2)
1077 if (ConstantInt *AndCI =dyn_cast<ConstantInt>(CastOp->getOperand(1))){
1078 if (CastOp->getOpcode() == Instruction::And) {
1079 // Change: and (cast (and X, C1) to T), C2
1080 // into : and (cast X to T), trunc_or_bitcast(C1)&C2
1081 // This will fold the two constants together, which may allow
1082 // other simplifications.
1083 Value *NewCast = Builder->CreateTruncOrBitCast(
1084 CastOp->getOperand(0), I.getType(),
1085 CastOp->getName()+".shrunk");
1086 // trunc_or_bitcast(C1)&C2
1087 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
1088 C3 = ConstantExpr::getAnd(C3, AndRHS);
1089 return BinaryOperator::CreateAnd(NewCast, C3);
1090 } else if (CastOp->getOpcode() == Instruction::Or) {
1091 // Change: and (cast (or X, C1) to T), C2
1092 // into : trunc(C1)&C2 iff trunc(C1)&C2 == C2
1093 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
1094 if (ConstantExpr::getAnd(C3, AndRHS) == AndRHS)
1095 // trunc(C1)&C2
1096 return ReplaceInstUsesWith(I, AndRHS);
1102 // Try to fold constant and into select arguments.
1103 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
1104 if (Instruction *R = FoldOpIntoSelect(I, SI))
1105 return R;
1106 if (isa<PHINode>(Op0))
1107 if (Instruction *NV = FoldOpIntoPhi(I))
1108 return NV;
1112 // (~A & ~B) == (~(A | B)) - De Morgan's Law
1113 if (Value *Op0NotVal = dyn_castNotVal(Op0))
1114 if (Value *Op1NotVal = dyn_castNotVal(Op1))
1115 if (Op0->hasOneUse() && Op1->hasOneUse()) {
1116 Value *Or = Builder->CreateOr(Op0NotVal, Op1NotVal,
1117 I.getName()+".demorgan");
1118 return BinaryOperator::CreateNot(Or);
1122 Value *A = 0, *B = 0, *C = 0, *D = 0;
1123 // (A|B) & ~(A&B) -> A^B
1124 if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
1125 match(Op1, m_Not(m_And(m_Value(C), m_Value(D)))) &&
1126 ((A == C && B == D) || (A == D && B == C)))
1127 return BinaryOperator::CreateXor(A, B);
1129 // ~(A&B) & (A|B) -> A^B
1130 if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
1131 match(Op0, m_Not(m_And(m_Value(C), m_Value(D)))) &&
1132 ((A == C && B == D) || (A == D && B == C)))
1133 return BinaryOperator::CreateXor(A, B);
1135 if (Op0->hasOneUse() &&
1136 match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
1137 if (A == Op1) { // (A^B)&A -> A&(A^B)
1138 I.swapOperands(); // Simplify below
1139 std::swap(Op0, Op1);
1140 } else if (B == Op1) { // (A^B)&B -> B&(B^A)
1141 cast<BinaryOperator>(Op0)->swapOperands();
1142 I.swapOperands(); // Simplify below
1143 std::swap(Op0, Op1);
1147 if (Op1->hasOneUse() &&
1148 match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
1149 if (B == Op0) { // B&(A^B) -> B&(B^A)
1150 cast<BinaryOperator>(Op1)->swapOperands();
1151 std::swap(A, B);
1153 if (A == Op0) // A&(A^B) -> A & ~B
1154 return BinaryOperator::CreateAnd(A, Builder->CreateNot(B, "tmp"));
1157 // (A&((~A)|B)) -> A&B
1158 if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) ||
1159 match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1)))))
1160 return BinaryOperator::CreateAnd(A, Op1);
1161 if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) ||
1162 match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0)))))
1163 return BinaryOperator::CreateAnd(A, Op0);
1166 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1))
1167 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0))
1168 if (Value *Res = FoldAndOfICmps(LHS, RHS))
1169 return ReplaceInstUsesWith(I, Res);
1171 // If and'ing two fcmp, try combine them into one.
1172 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
1173 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
1174 if (Value *Res = FoldAndOfFCmps(LHS, RHS))
1175 return ReplaceInstUsesWith(I, Res);
1178 // fold (and (cast A), (cast B)) -> (cast (and A, B))
1179 if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
1180 if (CastInst *Op1C = dyn_cast<CastInst>(Op1)) {
1181 const Type *SrcTy = Op0C->getOperand(0)->getType();
1182 if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
1183 SrcTy == Op1C->getOperand(0)->getType() &&
1184 SrcTy->isIntOrIntVectorTy()) {
1185 Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
1187 // Only do this if the casts both really cause code to be generated.
1188 if (ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
1189 ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
1190 Value *NewOp = Builder->CreateAnd(Op0COp, Op1COp, I.getName());
1191 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
1194 // If this is and(cast(icmp), cast(icmp)), try to fold this even if the
1195 // cast is otherwise not optimizable. This happens for vector sexts.
1196 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
1197 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
1198 if (Value *Res = FoldAndOfICmps(LHS, RHS))
1199 return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
1201 // If this is and(cast(fcmp), cast(fcmp)), try to fold this even if the
1202 // cast is otherwise not optimizable. This happens for vector sexts.
1203 if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
1204 if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
1205 if (Value *Res = FoldAndOfFCmps(LHS, RHS))
1206 return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
1210 // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
1211 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
1212 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
1213 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
1214 SI0->getOperand(1) == SI1->getOperand(1) &&
1215 (SI0->hasOneUse() || SI1->hasOneUse())) {
1216 Value *NewOp =
1217 Builder->CreateAnd(SI0->getOperand(0), SI1->getOperand(0),
1218 SI0->getName());
1219 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
1220 SI1->getOperand(1));
1224 return Changed ? &I : 0;
1227 /// CollectBSwapParts - Analyze the specified subexpression and see if it is
1228 /// capable of providing pieces of a bswap. The subexpression provides pieces
1229 /// of a bswap if it is proven that each of the non-zero bytes in the output of
1230 /// the expression came from the corresponding "byte swapped" byte in some other
1231 /// value. For example, if the current subexpression is "(shl i32 %X, 24)" then
1232 /// we know that the expression deposits the low byte of %X into the high byte
1233 /// of the bswap result and that all other bytes are zero. This expression is
1234 /// accepted, the high byte of ByteValues is set to X to indicate a correct
1235 /// match.
1237 /// This function returns true if the match was unsuccessful and false if it succeeded.
1238 /// On entry to the function the "OverallLeftShift" is a signed integer value
1239 /// indicating the number of bytes that the subexpression is later shifted. For
1240 /// example, if the expression is later right shifted by 16 bits, the
1241 /// OverallLeftShift value would be -2 on entry. This is used to specify which
1242 /// byte of ByteValues is actually being set.
1244 /// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
1245 /// byte is masked to zero by a user. For example, in (X & 255), X will be
1246 /// processed with a bytemask of 1. Because bytemask is 32-bits, this limits
1247 /// this function to working on up to 32-byte (256 bit) values. ByteMask is
1248 /// always in the local (OverallLeftShift) coordinate space.
1250 static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
1251 SmallVector<Value*, 8> &ByteValues) {
1252 if (Instruction *I = dyn_cast<Instruction>(V)) {
1253 // If this is an or instruction, it may be an inner node of the bswap.
1254 if (I->getOpcode() == Instruction::Or) {
1255 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
1256 ByteValues) ||
1257 CollectBSwapParts(I->getOperand(1), OverallLeftShift, ByteMask,
1258 ByteValues);
1261 // If this is a logical shift by a constant multiple of 8, recurse with
1262 // OverallLeftShift and ByteMask adjusted.
1263 if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
1264 unsigned ShAmt =
1265 cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
1266 // Ensure the shift amount is defined and of a byte value.
1267 if ((ShAmt & 7) || (ShAmt > 8*ByteValues.size()))
1268 return true;
1270 unsigned ByteShift = ShAmt >> 3;
1271 if (I->getOpcode() == Instruction::Shl) {
1272 // X << 2 -> collect(X, +2)
1273 OverallLeftShift += ByteShift;
1274 ByteMask >>= ByteShift;
1275 } else {
1276 // X >>u 2 -> collect(X, -2)
1277 OverallLeftShift -= ByteShift;
1278 ByteMask <<= ByteShift;
1279 ByteMask &= (~0U >> (32-ByteValues.size()));
1282 if (OverallLeftShift >= (int)ByteValues.size()) return true;
1283 if (OverallLeftShift <= -(int)ByteValues.size()) return true;
1285 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
1286 ByteValues);
1289 // If this is a logical 'and' with a mask that clears bytes, clear the
1290 // corresponding bytes in ByteMask.
1291 if (I->getOpcode() == Instruction::And &&
1292 isa<ConstantInt>(I->getOperand(1))) {
1293 // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
1294 unsigned NumBytes = ByteValues.size();
1295 APInt Byte(I->getType()->getPrimitiveSizeInBits(), 255);
1296 const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
1298 for (unsigned i = 0; i != NumBytes; ++i, Byte <<= 8) {
1299 // If this byte is masked out by a later operation, we don't care what
1300 // the and mask is.
1301 if ((ByteMask & (1 << i)) == 0)
1302 continue;
1304 // If the AndMask is all zeros for this byte, clear the bit.
1305 APInt MaskB = AndMask & Byte;
1306 if (MaskB == 0) {
1307 ByteMask &= ~(1U << i);
1308 continue;
1311 // If the AndMask is not all ones for this byte, it's not a bytezap.
1312 if (MaskB != Byte)
1313 return true;
1315 // Otherwise, this byte is kept.
1318 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
1319 ByteValues);
1323 // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
1324 // the input value to the bswap. Some observations: 1) if more than one byte
1325 // is demanded from this input, then it could not be successfully assembled
1326 // into a byteswap. At least one of the two bytes would not be aligned with
1327 // its ultimate destination.
1328 if (!isPowerOf2_32(ByteMask)) return true;
1329 unsigned InputByteNo = CountTrailingZeros_32(ByteMask);
1331 // 2) The input and ultimate destinations must line up: if byte 3 of an i32
1332 // is demanded, it needs to go into byte 0 of the result. This means that the
1333 // byte needs to be shifted until it lands in the right byte bucket. The
1334 // shift amount depends on the position: if the byte is coming from the high
1335 // part of the value (e.g. byte 3) then it must be shifted right. If from the
1336 // low part, it must be shifted left.
1337 unsigned DestByteNo = InputByteNo + OverallLeftShift;
1338 if (InputByteNo < ByteValues.size()/2) {
1339 if (ByteValues.size()-1-DestByteNo != InputByteNo)
1340 return true;
1341 } else {
1342 if (ByteValues.size()-1-DestByteNo != InputByteNo)
1343 return true;
1346 // If the destination byte value is already defined, the values are or'd
1347 // together, which isn't a bswap (unless it's an or of the same bits).
1348 if (ByteValues[DestByteNo] && ByteValues[DestByteNo] != V)
1349 return true;
1350 ByteValues[DestByteNo] = V;
1351 return false;
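// As a concrete example, the classic i32 byte-swap idiom
//   (X << 24) | ((X << 8) & 0xFF0000) | ((X >> 8) & 0xFF00) | (X >> 24)
// walks through CollectBSwapParts so that every entry of ByteValues ends
// up holding X, and MatchBSwap below then replaces the whole or-tree with
// a single call to the llvm.bswap.i32 intrinsic.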
1354 /// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
1355 /// If so, insert the new bswap intrinsic and return it.
1356 Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
1357 const IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
1358 if (!ITy || ITy->getBitWidth() % 16 ||
1359 // ByteMask only allows up to 32-byte values.
1360 ITy->getBitWidth() > 32*8)
1361 return 0; // Can only bswap pairs of bytes. Can't do vectors.
1363 /// ByteValues - For each byte of the result, we keep track of which value
1364 /// defines each byte.
1365 SmallVector<Value*, 8> ByteValues;
1366 ByteValues.resize(ITy->getBitWidth()/8);
1368 // Try to find all the pieces corresponding to the bswap.
1369 uint32_t ByteMask = ~0U >> (32-ByteValues.size());
1370 if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
1371 return 0;
1373 // Check to see if all of the bytes come from the same value.
1374 Value *V = ByteValues[0];
1375 if (V == 0) return 0; // Didn't find a byte? Must be zero.
1377 // Check to make sure that all of the bytes come from the same value.
1378 for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
1379 if (ByteValues[i] != V)
1380 return 0;
1381 const Type *Tys[] = { ITy };
1382 Module *M = I.getParent()->getParent()->getParent();
1383 Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
1384 return CallInst::Create(F, V);
1387 /// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D). Check
1388 /// if A is (cond?-1:0) and either B or D is ~(cond?-1:0) or (cond?0:-1); then
1389 /// we can simplify this expression to "cond ? C : D" or "cond ? C : B".
1390 static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
1391 Value *C, Value *D) {
1392 // If A is not a select of -1/0, this cannot match.
1393 Value *Cond = 0;
1394 if (!match(A, m_SExt(m_Value(Cond))) ||
1395 !Cond->getType()->isIntegerTy(1))
1396 return 0;
1398 // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
1399 if (match(D, m_Not(m_SExt(m_Specific(Cond)))))
1400 return SelectInst::Create(Cond, C, B);
1401 if (match(D, m_SExt(m_Not(m_Specific(Cond)))))
1402 return SelectInst::Create(Cond, C, B);
1404 // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
1405 if (match(B, m_Not(m_SExt(m_Specific(Cond)))))
1406 return SelectInst::Create(Cond, C, D);
1407 if (match(B, m_SExt(m_Not(m_Specific(Cond)))))
1408 return SelectInst::Create(Cond, C, D);
1409 return 0;
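// For example, with A == (sext i1 cond), i.e. (cond ? -1 : 0), and
// D == (cond ? 0 : -1), the expression (A&C)|(B&D) evaluates to C when
// cond is true and to B when it is false, which is exactly
// select(cond, C, B).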
1412 /// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
1413 Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
1414 ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
1416 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
1417 if (PredicatesFoldable(LHSCC, RHSCC)) {
1418 if (LHS->getOperand(0) == RHS->getOperand(1) &&
1419 LHS->getOperand(1) == RHS->getOperand(0))
1420 LHS->swapOperands();
1421 if (LHS->getOperand(0) == RHS->getOperand(0) &&
1422 LHS->getOperand(1) == RHS->getOperand(1)) {
1423 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
1424 unsigned Code = getICmpCode(LHS) | getICmpCode(RHS);
1425 bool isSigned = LHS->isSigned() || RHS->isSigned();
1426 return getICmpValue(isSigned, Code, Op0, Op1, Builder);
1431 // handle (roughly):
1432 // (icmp ne (A & B), C) | (icmp ne (A & D), E)
1433 Value* fold = foldLogOpOfMaskedICmps(LHS, RHS, ICmpInst::ICMP_NE, Builder);
1434 if (fold) return fold;
1437 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
1438 Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
1439 ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
1440 ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
1441 if (LHSCst == 0 || RHSCst == 0) return 0;
1443 if (LHSCst == RHSCst && LHSCC == RHSCC) {
1444 // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
1445 if (LHSCC == ICmpInst::ICMP_NE && LHSCst->isZero()) {
1446 Value *NewOr = Builder->CreateOr(Val, Val2);
1447 return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
1451 // From here on, we only handle:
1452 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
1453 if (Val != Val2) return 0;
1455 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
1456 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
1457 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
1458 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
1459 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
1460 return 0;
1462 // We can't fold (ugt x, C) | (sgt x, C2).
1463 if (!PredicatesFoldable(LHSCC, RHSCC))
1464 return 0;
1466 // Ensure that the larger constant is on the RHS.
1467 bool ShouldSwap;
1468 if (CmpInst::isSigned(LHSCC) ||
1469 (ICmpInst::isEquality(LHSCC) &&
1470 CmpInst::isSigned(RHSCC)))
1471 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
1472 else
1473 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
1475 if (ShouldSwap) {
1476 std::swap(LHS, RHS);
1477 std::swap(LHSCst, RHSCst);
1478 std::swap(LHSCC, RHSCC);
1481 // At this point, we know we have two icmp instructions
1482 // comparing a value against two constants and or'ing the result
1483 // together. Because of the above check, we know that we only have
1484 // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
1485 // icmp folding check above), that the two constants are not
1486 // equal.
1487 assert(LHSCst != RHSCst && "Compares not folded above?");
1489 switch (LHSCC) {
1490 default: llvm_unreachable("Unknown integer condition code!");
1491 case ICmpInst::ICMP_EQ:
1492 switch (RHSCC) {
1493 default: llvm_unreachable("Unknown integer condition code!");
1494 case ICmpInst::ICMP_EQ:
1495 if (LHSCst == SubOne(RHSCst)) {
1496 // (X == 13 | X == 14) -> X-13 <u 2
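// Worked out: Add is X + (-13) and AddCST becomes (14+1) - 13 == 2, so the
// emitted test is (X - 13) u< 2, true exactly for X == 13 or X == 14.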
1497 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
1498 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
1499 AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst);
1500 return Builder->CreateICmpULT(Add, AddCST);
1502 break; // (X == 13 | X == 15) -> no change
1503 case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
1504 case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
1505 break;
1506 case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15
1507 case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15
1508 case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15
1509 return RHS;
1511 break;
1512 case ICmpInst::ICMP_NE:
1513 switch (RHSCC) {
1514 default: llvm_unreachable("Unknown integer condition code!");
1515 case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13
1516 case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13
1517 case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13
1518 return LHS;
1519 case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true
1520 case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true
1521 case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
1522 return ConstantInt::getTrue(LHS->getContext());
1524 break;
1525 case ICmpInst::ICMP_ULT:
1526 switch (RHSCC) {
1527 default: llvm_unreachable("Unknown integer condition code!");
1528 case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
1529 break;
1530 case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
1531 // If RHSCst is [us]MAXINT, it is always false. Not handling
1532 // this can cause overflow.
1533 if (RHSCst->isMaxValue(false))
1534 return LHS;
1535 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), false, false);
1536 case ICmpInst::ICMP_SGT: // (X u< 13 | X s> 15) -> no change
1537 break;
1538 case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15
1539 case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15
1540 return RHS;
1541 case ICmpInst::ICMP_SLT: // (X u< 13 | X s< 15) -> no change
1542 break;
1544 break;
1545 case ICmpInst::ICMP_SLT:
1546 switch (RHSCC) {
1547 default: llvm_unreachable("Unknown integer condition code!");
1548 case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
1549 break;
1550 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
1551 // If RHSCst is [us]MAXINT, it is always false. Not handling
1552 // this can cause overflow.
1553 if (RHSCst->isMaxValue(true))
1554 return LHS;
1555 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), true, false);
1556 case ICmpInst::ICMP_UGT: // (X s< 13 | X u> 15) -> no change
1557 break;
1558 case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15
1559 case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15
1560 return RHS;
1561 case ICmpInst::ICMP_ULT: // (X s< 13 | X u< 15) -> no change
1562 break;
1564 break;
1565 case ICmpInst::ICMP_UGT:
1566 switch (RHSCC) {
1567 default: llvm_unreachable("Unknown integer condition code!");
1568 case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13
1569 case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13
1570 return LHS;
1571 case ICmpInst::ICMP_SGT: // (X u> 13 | X s> 15) -> no change
1572 break;
1573 case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true
1574 case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true
1575 return ConstantInt::getTrue(LHS->getContext());
1576 case ICmpInst::ICMP_SLT: // (X u> 13 | X s< 15) -> no change
1577 break;
1579 break;
1580 case ICmpInst::ICMP_SGT:
1581 switch (RHSCC) {
1582 default: llvm_unreachable("Unknown integer condition code!");
1583 case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X s> 13
1584 case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X s> 13
1585 return LHS;
1586 case ICmpInst::ICMP_UGT: // (X s> 13 | X u> 15) -> no change
1587 break;
1588 case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true
1589 case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true
1590 return ConstantInt::getTrue(LHS->getContext());
1591 case ICmpInst::ICMP_ULT: // (X s> 13 | X u< 15) -> no change
1592 break;
1594 break;
1596 return 0;
1599 /// FoldOrOfFCmps - Optimize (fcmp)|(fcmp). NOTE: Unlike the rest of
1600 /// instcombine, this returns a Value which should already be inserted into the
1601 /// function.
1602 Value *InstCombiner::FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
1603 if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
1604 RHS->getPredicate() == FCmpInst::FCMP_UNO &&
1605 LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
1606 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
1607 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
1608 // If either of the constants are nans, then the whole thing returns
1609 // true.
1610 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
1611 return ConstantInt::getTrue(LHS->getContext());
1613 // Otherwise, no need to compare the two constants, compare the
1614 // rest.
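// ("fcmp uno x, C" with a non-NaN constant C is true exactly when x is NaN,
// so the two NaN checks collapse into one unordered compare of x and y.)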
1615 return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
1618 // Handle vector zeros. This occurs because the canonical form of
1619 // "fcmp uno x,x" is "fcmp uno x, 0".
1620 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
1621 isa<ConstantAggregateZero>(RHS->getOperand(1)))
1622 return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
1624 return 0;
1627 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
1628 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
1629 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
1631 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
1632 // Swap RHS operands to match LHS.
1633 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
1634 std::swap(Op1LHS, Op1RHS);
1636 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
1637 // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
1638 if (Op0CC == Op1CC)
1639 return Builder->CreateFCmp((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
1640 if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE)
1641 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
1642 if (Op0CC == FCmpInst::FCMP_FALSE)
1643 return RHS;
1644 if (Op1CC == FCmpInst::FCMP_FALSE)
1645 return LHS;
1646 bool Op0Ordered;
1647 bool Op1Ordered;
1648 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
1649 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
1650 if (Op0Ordered == Op1Ordered) {
1651 // If both are ordered or unordered, return a new fcmp with
1652 // or'ed predicates.
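// e.g. (fcmp olt x, y) | (fcmp ogt x, y) becomes fcmp one x, y.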
1653 return getFCmpValue(Op0Ordered, Op0Pred|Op1Pred, Op0LHS, Op0RHS, Builder);
1656 return 0;
1659 /// FoldOrWithConstants - This helper function folds:
1661 /// ((A | B) & C1) | (B & C2)
1663 /// into:
1664 ///
1665 /// (A & C1) | B
1667 /// when the XOR of the two constants is "all ones" (-1).
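/// For example, with C1 = 1 and C2 = -2:
///   ((A | B) & 1) | (B & -2)  -->  (A & 1) | B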
1668 Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
1669 Value *A, Value *B, Value *C) {
1670 ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
1671 if (!CI1) return 0;
1673 Value *V1 = 0;
1674 ConstantInt *CI2 = 0;
1675 if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0;
1677 APInt Xor = CI1->getValue() ^ CI2->getValue();
1678 if (!Xor.isAllOnesValue()) return 0;
1680 if (V1 == A || V1 == B) {
1681 Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
1682 return BinaryOperator::CreateOr(NewOp, V1);
1685 return 0;
1688 Instruction *InstCombiner::visitOr(BinaryOperator &I) {
1689 bool Changed = SimplifyCommutative(I);
1690 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1692 if (Value *V = SimplifyOrInst(Op0, Op1, TD))
1693 return ReplaceInstUsesWith(I, V);
1695 // See if we can simplify any instructions used by the instruction whose sole
1696 // purpose is to compute bits we don't care about.
1697 if (SimplifyDemandedInstructionBits(I))
1698 return &I;
1700 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
1701 ConstantInt *C1 = 0; Value *X = 0;
1702 // (X & C1) | C2 --> (X | C2) & (C1|C2)
1703 // (valid for any C1 and C2; only applied below when C1 and C2 share bits).
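// For example: (X & 0x0F) | 0x03 --> (X | 0x03) & 0x0F.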
1704 if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
1705 (RHS->getValue() & C1->getValue()) != 0 &&
1706 Op0->hasOneUse()) {
1707 Value *Or = Builder->CreateOr(X, RHS);
1708 Or->takeName(Op0);
1709 return BinaryOperator::CreateAnd(Or,
1710 ConstantInt::get(I.getContext(),
1711 RHS->getValue() | C1->getValue()));
1714 // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
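// For example: (X ^ 0x0C) | 0x0A --> (X | 0x0A) ^ 0x04, since 0x0C & ~0x0A == 0x04.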
1715 if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) &&
1716 Op0->hasOneUse()) {
1717 Value *Or = Builder->CreateOr(X, RHS);
1718 Or->takeName(Op0);
1719 return BinaryOperator::CreateXor(Or,
1720 ConstantInt::get(I.getContext(),
1721 C1->getValue() & ~RHS->getValue()));
1724 // Try to fold constant and into select arguments.
1725 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
1726 if (Instruction *R = FoldOpIntoSelect(I, SI))
1727 return R;
1729 if (isa<PHINode>(Op0))
1730 if (Instruction *NV = FoldOpIntoPhi(I))
1731 return NV;
1734 Value *A = 0, *B = 0;
1735 ConstantInt *C1 = 0, *C2 = 0;
1737 // (A | B) | C and A | (B | C) -> bswap if possible.
1738 // (A >> B) | (C << D) and (A << B) | (C >> D) -> bswap if possible.
1739 if (match(Op0, m_Or(m_Value(), m_Value())) ||
1740 match(Op1, m_Or(m_Value(), m_Value())) ||
1741 (match(Op0, m_Shift(m_Value(), m_Value())) &&
1742 match(Op1, m_Shift(m_Value(), m_Value())))) {
1743 if (Instruction *BSwap = MatchBSwap(I))
1744 return BSwap;
1747 // (X^C)|Y -> (X|Y)^C iff Y&C == 0
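// (Y is known to have no bits in common with C, so or'ing Y in before or
// after the xor gives the same result.)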
1748 if (Op0->hasOneUse() &&
1749 match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
1750 MaskedValueIsZero(Op1, C1->getValue())) {
1751 Value *NOr = Builder->CreateOr(A, Op1);
1752 NOr->takeName(Op0);
1753 return BinaryOperator::CreateXor(NOr, C1);
1756 // Y|(X^C) -> (X|Y)^C iff Y&C == 0
1757 if (Op1->hasOneUse() &&
1758 match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
1759 MaskedValueIsZero(Op0, C1->getValue())) {
1760 Value *NOr = Builder->CreateOr(A, Op0);
1761 NOr->takeName(Op0);
1762 return BinaryOperator::CreateXor(NOr, C1);
1765 // (A & C)|(B & D)
1766 Value *C = 0, *D = 0;
1767 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
1768 match(Op1, m_And(m_Value(B), m_Value(D)))) {
1769 Value *V1 = 0, *V2 = 0, *V3 = 0;
1770 C1 = dyn_cast<ConstantInt>(C);
1771 C2 = dyn_cast<ConstantInt>(D);
1772 if (C1 && C2) { // (A & C1)|(B & C2)
1773 // If we have: ((V + N) & C1) | (V & C2)
1774 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
1775 // replace with V+N.
1776 if (C1->getValue() == ~C2->getValue()) {
1777 if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
1778 match(A, m_Add(m_Value(V1), m_Value(V2)))) {
1779 // Add commutes, try both ways.
1780 if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
1781 return ReplaceInstUsesWith(I, A);
1782 if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
1783 return ReplaceInstUsesWith(I, A);
1785 // Or commutes, try both ways.
1786 if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
1787 match(B, m_Add(m_Value(V1), m_Value(V2)))) {
1788 // Add commutes, try both ways.
1789 if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
1790 return ReplaceInstUsesWith(I, B);
1791 if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
1792 return ReplaceInstUsesWith(I, B);
1796 if ((C1->getValue() & C2->getValue()) == 0) {
1797 // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
1798 // iff (C1&C2) == 0 and (N&~C1) == 0
1799 if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
1800 ((V1 == B && MaskedValueIsZero(V2, ~C1->getValue())) || // (V|N)
1801 (V2 == B && MaskedValueIsZero(V1, ~C1->getValue())))) // (N|V)
1802 return BinaryOperator::CreateAnd(A,
1803 ConstantInt::get(A->getContext(),
1804 C1->getValue()|C2->getValue()));
1805 // Or commutes, try both ways.
1806 if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
1807 ((V1 == A && MaskedValueIsZero(V2, ~C2->getValue())) || // (V|N)
1808 (V2 == A && MaskedValueIsZero(V1, ~C2->getValue())))) // (N|V)
1809 return BinaryOperator::CreateAnd(B,
1810 ConstantInt::get(B->getContext(),
1811 C1->getValue()|C2->getValue()));
1813 // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
1814 // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
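// For example, two adjacent bitfield inserts such as
//   ((V | 0x05) & 0x0F) | ((V | 0x30) & 0xF0)
// merge into (V | 0x35) & 0xFF.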
1815 ConstantInt *C3 = 0, *C4 = 0;
1816 if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
1817 (C3->getValue() & ~C1->getValue()) == 0 &&
1818 match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
1819 (C4->getValue() & ~C2->getValue()) == 0) {
1820 V2 = Builder->CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
1821 return BinaryOperator::CreateAnd(V2,
1822 ConstantInt::get(B->getContext(),
1823 C1->getValue()|C2->getValue()));
1828 // Check to see if we have any common things being and'ed. If so, find the
1829 // terms for V1 & (V2|V3).
1830 if (Op0->hasOneUse() || Op1->hasOneUse()) {
1831 V1 = 0;
1832 if (A == B) // (A & C)|(A & D) == A & (C|D)
1833 V1 = A, V2 = C, V3 = D;
1834 else if (A == D) // (A & C)|(B & A) == A & (B|C)
1835 V1 = A, V2 = B, V3 = C;
1836 else if (C == B) // (A & C)|(C & D) == C & (A|D)
1837 V1 = C, V2 = A, V3 = D;
1838 else if (C == D) // (A & C)|(B & C) == C & (A|B)
1839 V1 = C, V2 = A, V3 = B;
1841 if (V1) {
1842 Value *Or = Builder->CreateOr(V2, V3, "tmp");
1843 return BinaryOperator::CreateAnd(V1, Or);
1847 // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants.
1848 // Don't do this for vector select idioms, the code generator doesn't handle
1849 // them well yet.
1850 if (!I.getType()->isVectorTy()) {
1851 if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D))
1852 return Match;
1853 if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C))
1854 return Match;
1855 if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D))
1856 return Match;
1857 if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C))
1858 return Match;
1861 // ((A&~B)|(~A&B)) -> A^B
1862 if ((match(C, m_Not(m_Specific(D))) &&
1863 match(B, m_Not(m_Specific(A)))))
1864 return BinaryOperator::CreateXor(A, D);
1865 // ((~B&A)|(~A&B)) -> A^B
1866 if ((match(A, m_Not(m_Specific(D))) &&
1867 match(B, m_Not(m_Specific(C)))))
1868 return BinaryOperator::CreateXor(C, D);
1869 // ((A&~B)|(B&~A)) -> A^B
1870 if ((match(C, m_Not(m_Specific(B))) &&
1871 match(D, m_Not(m_Specific(A)))))
1872 return BinaryOperator::CreateXor(A, B);
1873 // ((~B&A)|(B&~A)) -> A^B
1874 if ((match(A, m_Not(m_Specific(B))) &&
1875 match(D, m_Not(m_Specific(C)))))
1876 return BinaryOperator::CreateXor(C, B);
1878 // ((A|B)&1)|(B&-2) -> (A&1) | B
1879 if (match(A, m_Or(m_Value(V1), m_Specific(B))) ||
1880 match(A, m_Or(m_Specific(B), m_Value(V1)))) {
1881 Instruction *Ret = FoldOrWithConstants(I, Op1, V1, B, C);
1882 if (Ret) return Ret;
1884 // (B&-2)|((A|B)&1) -> (A&1) | B
1885 if (match(B, m_Or(m_Specific(A), m_Value(V1))) ||
1886 match(B, m_Or(m_Value(V1), m_Specific(A)))) {
1887 Instruction *Ret = FoldOrWithConstants(I, Op0, A, V1, D);
1888 if (Ret) return Ret;
1892 // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
1893 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
1894 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
1895 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
1896 SI0->getOperand(1) == SI1->getOperand(1) &&
1897 (SI0->hasOneUse() || SI1->hasOneUse())) {
1898 Value *NewOp = Builder->CreateOr(SI0->getOperand(0), SI1->getOperand(0),
1899 SI0->getName());
1900 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
1901 SI1->getOperand(1));
1905 // (~A | ~B) == (~(A & B)) - De Morgan's Law
1906 if (Value *Op0NotVal = dyn_castNotVal(Op0))
1907 if (Value *Op1NotVal = dyn_castNotVal(Op1))
1908 if (Op0->hasOneUse() && Op1->hasOneUse()) {
1909 Value *And = Builder->CreateAnd(Op0NotVal, Op1NotVal,
1910 I.getName()+".demorgan");
1911 return BinaryOperator::CreateNot(And);
1914 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
1915 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
1916 if (Value *Res = FoldOrOfICmps(LHS, RHS))
1917 return ReplaceInstUsesWith(I, Res);
1919 // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y)
1920 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
1921 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
1922 if (Value *Res = FoldOrOfFCmps(LHS, RHS))
1923 return ReplaceInstUsesWith(I, Res);
1925 // fold (or (cast A), (cast B)) -> (cast (or A, B))
1926 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
1927 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
1928 if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
1929 const Type *SrcTy = Op0C->getOperand(0)->getType();
1930 if (SrcTy == Op1C->getOperand(0)->getType() &&
1931 SrcTy->isIntOrIntVectorTy()) {
1932 Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
1934 if ((!isa<ICmpInst>(Op0COp) || !isa<ICmpInst>(Op1COp)) &&
1935 // Only do this if the casts both really cause code to be
1936 // generated.
1937 ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
1938 ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
1939 Value *NewOp = Builder->CreateOr(Op0COp, Op1COp, I.getName());
1940 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
1943 // If this is or(cast(icmp), cast(icmp)), try to fold this even if the
1944 // cast is otherwise not optimizable. This happens for vector sexts.
1945 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
1946 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
1947 if (Value *Res = FoldOrOfICmps(LHS, RHS))
1948 return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
1950 // If this is or(cast(fcmp), cast(fcmp)), try to fold this even if the
1951 // cast is otherwise not optimizable. This happens for vector sexts.
1952 if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
1953 if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
1954 if (Value *Res = FoldOrOfFCmps(LHS, RHS))
1955 return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
1960 // Note: If we've gotten to the point of visiting the outer OR, then the
1961 // inner one couldn't be simplified. If it was a constant, then it won't
1962 // be simplified by a later pass either, so we try swapping the inner/outer
1963 // ORs in the hopes that we'll be able to simplify it this way.
1964 // (X|C) | V --> (X|V) | C
1965 if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) &&
1966 match(Op0, m_Or(m_Value(A), m_ConstantInt(C1)))) {
1967 Value *Inner = Builder->CreateOr(A, Op1);
1968 Inner->takeName(Op0);
1969 return BinaryOperator::CreateOr(Inner, C1);
1972 return Changed ? &I : 0;
1975 Instruction *InstCombiner::visitXor(BinaryOperator &I) {
1976 bool Changed = SimplifyCommutative(I);
1977 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1979 if (isa<UndefValue>(Op1)) {
1980 if (isa<UndefValue>(Op0))
1981 // Handle undef ^ undef -> 0 special case. This is a common
1982 // idiom (misuse).
1983 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
1984 return ReplaceInstUsesWith(I, Op1); // X ^ undef -> undef
1987 // xor X, X = 0
1988 if (Op0 == Op1)
1989 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
1991 // See if we can simplify any instructions used by the instruction whose sole
1992 // purpose is to compute bits we don't care about.
1993 if (SimplifyDemandedInstructionBits(I))
1994 return &I;
1995 if (I.getType()->isVectorTy())
1996 if (isa<ConstantAggregateZero>(Op1))
1997 return ReplaceInstUsesWith(I, Op0); // X ^ <0,0> -> X
1999 // Is this a ~ operation?
2000 if (Value *NotOp = dyn_castNotVal(&I)) {
2001 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
2002 if (Op0I->getOpcode() == Instruction::And ||
2003 Op0I->getOpcode() == Instruction::Or) {
2004 // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
2005 // ~(~X | Y) --> (X & ~Y) - De Morgan's Law
2006 if (dyn_castNotVal(Op0I->getOperand(1)))
2007 Op0I->swapOperands();
2008 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
2009 Value *NotY =
2010 Builder->CreateNot(Op0I->getOperand(1),
2011 Op0I->getOperand(1)->getName()+".not");
2012 if (Op0I->getOpcode() == Instruction::And)
2013 return BinaryOperator::CreateOr(Op0NotVal, NotY);
2014 return BinaryOperator::CreateAnd(Op0NotVal, NotY);
2017 // ~(X & Y) --> (~X | ~Y) - De Morgan's Law
2018 // ~(X | Y) --> (~X & ~Y) - De Morgan's Law
2019 if (isFreeToInvert(Op0I->getOperand(0)) &&
2020 isFreeToInvert(Op0I->getOperand(1))) {
2021 Value *NotX =
2022 Builder->CreateNot(Op0I->getOperand(0), "notlhs");
2023 Value *NotY =
2024 Builder->CreateNot(Op0I->getOperand(1), "notrhs");
2025 if (Op0I->getOpcode() == Instruction::And)
2026 return BinaryOperator::CreateOr(NotX, NotY);
2027 return BinaryOperator::CreateAnd(NotX, NotY);
2030 } else if (Op0I->getOpcode() == Instruction::AShr) {
2031 // ~(~X >>s Y) --> (X >>s Y)
2032 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0)))
2033 return BinaryOperator::CreateAShr(Op0NotVal, Op0I->getOperand(1));
2039 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
2040 if (RHS->isOne() && Op0->hasOneUse())
2041 // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
2042 if (CmpInst *CI = dyn_cast<CmpInst>(Op0))
2043 return CmpInst::Create(CI->getOpcode(),
2044 CI->getInversePredicate(),
2045 CI->getOperand(0), CI->getOperand(1));
2047 // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
2048 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
2049 if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
2050 if (CI->hasOneUse() && Op0C->hasOneUse()) {
2051 Instruction::CastOps Opcode = Op0C->getOpcode();
2052 if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
2053 (RHS == ConstantExpr::getCast(Opcode,
2054 ConstantInt::getTrue(I.getContext()),
2055 Op0C->getDestTy()))) {
2056 CI->setPredicate(CI->getInversePredicate());
2057 return CastInst::Create(Opcode, CI, Op0C->getType());
2063 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
2064 // ~(c-X) == X-c-1 == X+(-c-1)
2065 if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
2066 if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
2067 Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
2068 Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C,
2069 ConstantInt::get(I.getType(), 1));
2070 return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS);
2073 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
2074 if (Op0I->getOpcode() == Instruction::Add) {
2075 // ~(X+c) --> (-c-1)-X
2076 if (RHS->isAllOnesValue()) {
2077 Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
2078 return BinaryOperator::CreateSub(
2079 ConstantExpr::getSub(NegOp0CI,
2080 ConstantInt::get(I.getType(), 1)),
2081 Op0I->getOperand(0));
2082 } else if (RHS->getValue().isSignBit()) {
2083 // (X + C) ^ signbit -> (X + C + signbit)
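// (Flipping the sign bit is the same as adding it: there is no carry out of
// the top bit, so the two constants can simply be combined.)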
2084 Constant *C = ConstantInt::get(I.getContext(),
2085 RHS->getValue() + Op0CI->getValue());
2086 return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
2089 } else if (Op0I->getOpcode() == Instruction::Or) {
2090 // (X|C1)^C2 -> X^(C1|C2) iff X&~C1 == 0
2091 if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
2092 Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
2093 // Anything in both C1 and C2 is known to be zero, remove it from
2094 // NewRHS.
2095 Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS);
2096 NewRHS = ConstantExpr::getAnd(NewRHS,
2097 ConstantExpr::getNot(CommonBits));
2098 Worklist.Add(Op0I);
2099 I.setOperand(0, Op0I->getOperand(0));
2100 I.setOperand(1, NewRHS);
2101 return &I;
2107 // Try to fold constant and into select arguments.
2108 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
2109 if (Instruction *R = FoldOpIntoSelect(I, SI))
2110 return R;
2111 if (isa<PHINode>(Op0))
2112 if (Instruction *NV = FoldOpIntoPhi(I))
2113 return NV;
2116 if (Value *X = dyn_castNotVal(Op0)) // ~A ^ A == -1
2117 if (X == Op1)
2118 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
2120 if (Value *X = dyn_castNotVal(Op1)) // A ^ ~A == -1
2121 if (X == Op0)
2122 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
2125 BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
2126 if (Op1I) {
2127 Value *A, *B;
2128 if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) {
2129 if (A == Op0) { // B^(B|A) == (A|B)^B
2130 Op1I->swapOperands();
2131 I.swapOperands();
2132 std::swap(Op0, Op1);
2133 } else if (B == Op0) { // B^(A|B) == (A|B)^B
2134 I.swapOperands(); // Simplified below.
2135 std::swap(Op0, Op1);
2137 } else if (match(Op1I, m_Xor(m_Specific(Op0), m_Value(B)))) {
2138 return ReplaceInstUsesWith(I, B); // A^(A^B) == B
2139 } else if (match(Op1I, m_Xor(m_Value(A), m_Specific(Op0)))) {
2140 return ReplaceInstUsesWith(I, A); // A^(B^A) == B
2141 } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) &&
2142 Op1I->hasOneUse()){
2143 if (A == Op0) { // A^(A&B) -> A^(B&A)
2144 Op1I->swapOperands();
2145 std::swap(A, B);
2147 if (B == Op0) { // A^(B&A) -> (B&A)^A
2148 I.swapOperands(); // Simplified below.
2149 std::swap(Op0, Op1);
2154 BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0);
2155 if (Op0I) {
2156 Value *A, *B;
2157 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
2158 Op0I->hasOneUse()) {
2159 if (A == Op1) // (B|A)^B == (A|B)^B
2160 std::swap(A, B);
2161 if (B == Op1) // (A|B)^B == A & ~B
2162 return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1, "tmp"));
2163 } else if (match(Op0I, m_Xor(m_Specific(Op1), m_Value(B)))) {
2164 return ReplaceInstUsesWith(I, B); // (A^B)^A == B
2165 } else if (match(Op0I, m_Xor(m_Value(A), m_Specific(Op1)))) {
2166 return ReplaceInstUsesWith(I, A); // (B^A)^A == B
2167 } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
2168 Op0I->hasOneUse()){
2169 if (A == Op1) // (A&B)^A -> (B&A)^A
2170 std::swap(A, B);
2171 if (B == Op1 && // (B&A)^A == ~B & A
2172 !isa<ConstantInt>(Op1)) { // Canonical form is (B&C)^C
2173 return BinaryOperator::CreateAnd(Builder->CreateNot(A, "tmp"), Op1);
2178 // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
2179 if (Op0I && Op1I && Op0I->isShift() &&
2180 Op0I->getOpcode() == Op1I->getOpcode() &&
2181 Op0I->getOperand(1) == Op1I->getOperand(1) &&
2182 (Op0I->hasOneUse() || Op1I->hasOneUse())) {
2183 Value *NewOp =
2184 Builder->CreateXor(Op0I->getOperand(0), Op1I->getOperand(0),
2185 Op0I->getName());
2186 return BinaryOperator::Create(Op1I->getOpcode(), NewOp,
2187 Op1I->getOperand(1));
2190 if (Op0I && Op1I) {
2191 Value *A, *B, *C, *D;
2192 // (A & B)^(A | B) -> A ^ B
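// (Per bit: if A and B agree, AND and OR agree too and the xor is 0; if they
// differ, AND is 0 and OR is 1 and the xor is 1, which is exactly A ^ B.)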
2193 if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
2194 match(Op1I, m_Or(m_Value(C), m_Value(D)))) {
2195 if ((A == C && B == D) || (A == D && B == C))
2196 return BinaryOperator::CreateXor(A, B);
2198 // (A | B)^(A & B) -> A ^ B
2199 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
2200 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
2201 if ((A == C && B == D) || (A == D && B == C))
2202 return BinaryOperator::CreateXor(A, B);
2205 // (A & B)^(C & D)
2206 if ((Op0I->hasOneUse() || Op1I->hasOneUse()) &&
2207 match(Op0I, m_And(m_Value(A), m_Value(B))) &&
2208 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
2209 // (X & Y)^(X & Z) -> (Y^Z) & X
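// (AND distributes over XOR: (X & Y) ^ (X & Z) == X & (Y ^ Z).)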
2210 Value *X = 0, *Y = 0, *Z = 0;
2211 if (A == C)
2212 X = A, Y = B, Z = D;
2213 else if (A == D)
2214 X = A, Y = B, Z = C;
2215 else if (B == C)
2216 X = B, Y = A, Z = D;
2217 else if (B == D)
2218 X = B, Y = A, Z = C;
2220 if (X) {
2221 Value *NewOp = Builder->CreateXor(Y, Z, Op0->getName());
2222 return BinaryOperator::CreateAnd(NewOp, X);
2227 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
2228 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
2229 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
2230 if (PredicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) {
2231 if (LHS->getOperand(0) == RHS->getOperand(1) &&
2232 LHS->getOperand(1) == RHS->getOperand(0))
2233 LHS->swapOperands();
2234 if (LHS->getOperand(0) == RHS->getOperand(0) &&
2235 LHS->getOperand(1) == RHS->getOperand(1)) {
2236 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
2237 unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS);
2238 bool isSigned = LHS->isSigned() || RHS->isSigned();
2239 return ReplaceInstUsesWith(I,
2240 getICmpValue(isSigned, Code, Op0, Op1, Builder));
2244 // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
2245 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
2246 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
2247 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
2248 const Type *SrcTy = Op0C->getOperand(0)->getType();
2249 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isIntegerTy() &&
2250 // Only do this if the casts both really cause code to be generated.
2251 ShouldOptimizeCast(Op0C->getOpcode(), Op0C->getOperand(0),
2252 I.getType()) &&
2253 ShouldOptimizeCast(Op1C->getOpcode(), Op1C->getOperand(0),
2254 I.getType())) {
2255 Value *NewOp = Builder->CreateXor(Op0C->getOperand(0),
2256 Op1C->getOperand(0), I.getName());
2257 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
2262 return Changed ? &I : 0;