//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <, >, <=, >= to ==, != if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//
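//
// As a concrete illustration of canonicalizations #1 and #6 (editor's example,
// not part of the original header): an expression such as
//    %a = mul i32 8, %x
// is first rewritten with the constant on the RHS,
//    %a = mul i32 %x, 8
// and, since 8 is a power of two, is then turned into a shift:
//    %a = shl i32 %x, 3
//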
#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
namespace {
  /// InstCombineWorklist - This is the worklist management logic for
  /// InstCombine.
  class InstCombineWorklist {
    SmallVector<Instruction*, 256> Worklist;
    DenseMap<Instruction*, unsigned> WorklistMap;

    void operator=(const InstCombineWorklist &RHS);   // DO NOT IMPLEMENT
    InstCombineWorklist(const InstCombineWorklist&);  // DO NOT IMPLEMENT
  public:
    InstCombineWorklist() {}

    bool isEmpty() const { return Worklist.empty(); }

    /// Add - Add the specified instruction to the worklist if it isn't already
    /// in it.
    void Add(Instruction *I) {
      if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second)
        Worklist.push_back(I);
    }

    void AddValue(Value *V) {
      if (Instruction *I = dyn_cast<Instruction>(V))
        Add(I);
    }

    // Remove - remove I from the worklist if it exists.
    void Remove(Instruction *I) {
      DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
      if (It == WorklistMap.end()) return; // Not in worklist.

      // Don't bother moving everything down, just null out the slot.
      Worklist[It->second] = 0;

      WorklistMap.erase(It);
    }

    Instruction *RemoveOne() {
      Instruction *I = Worklist.back();
      Worklist.pop_back();
      WorklistMap.erase(I);
      return I;
    }

    /// AddUsersToWorkList - When an instruction is simplified, add all users of
    /// the instruction to the work lists because they might get more simplified
    /// now.
    void AddUsersToWorkList(Instruction &I) {
      for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
           UI != UE; ++UI)
        Add(cast<Instruction>(*UI));
    }

    /// Zap - check that the worklist is empty and nuke the backing store for
    /// the map if it is large.
    void Zap() {
      assert(WorklistMap.empty() && "Worklist empty, but map not?");

      // Do an explicit clear, this shrinks the map if needed.
      WorklistMap.clear();
    }
  };
} // end anonymous namespace.
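// Illustrative use of the worklist above (editor's sketch, not original code):
//
//   InstCombineWorklist WL;
//   WL.Add(SomeInst);             // queued once; a second Add is a no-op
//   WL.Remove(SomeInst);          // its slot is nulled rather than compacted
//   while (!WL.isEmpty())
//     if (Instruction *I = WL.RemoveOne())  // consumers must skip null slots
//       ; // ...visit I...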
namespace {
  /// InstCombineIRInserter - This is an IRBuilder insertion helper that works
  /// just like the normal insertion helper, but also adds any new instructions
  /// to the instcombine worklist.
  class InstCombineIRInserter : public IRBuilderDefaultInserter<true> {
    InstCombineWorklist &Worklist;
  public:
    InstCombineIRInserter(InstCombineWorklist &WL) : Worklist(WL) {}

    void InsertHelper(Instruction *I, const Twine &Name,
                      BasicBlock *BB, BasicBlock::iterator InsertPt) const {
      IRBuilderDefaultInserter<true>::InsertHelper(I, Name, BB, InsertPt);
      Worklist.Add(I);
    }
  };
} // end anonymous namespace
namespace {
  class InstCombiner : public FunctionPass,
                       public InstVisitor<InstCombiner, Instruction*> {
    TargetData *TD;
    bool MustPreserveLCSSA;
  public:
    /// Worklist - All of the instructions that need to be simplified.
    InstCombineWorklist Worklist;

    /// Builder - This is an IRBuilder that automatically inserts new
    /// instructions into the worklist when they are created.
    typedef IRBuilder<true, ConstantFolder, InstCombineIRInserter> BuilderTy;
    BuilderTy *Builder;

    static char ID; // Pass identification, replacement for typeid
    InstCombiner() : FunctionPass(&ID), TD(0), Builder(0) {}

    LLVMContext *Context;
    LLVMContext *getContext() const { return Context; }

  public:
    virtual bool runOnFunction(Function &F);

    bool DoOneIteration(Function &F, unsigned ItNum);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addPreservedID(LCSSAID);
      AU.setPreservesCFG();
    }

    TargetData *getTargetData() const { return TD; }

    // Visitation implementation - Implement instruction combining for different
    // instruction types.  The semantics are as follows:
    //
    //     null        - No change was made
    //     I           - Change was made, I is still valid, I may be dead though
    //     otherwise   - Change was made, replace I with returned instruction
    //
    Instruction *visitAdd(BinaryOperator &I);
    Instruction *visitFAdd(BinaryOperator &I);
    Instruction *visitSub(BinaryOperator &I);
    Instruction *visitFSub(BinaryOperator &I);
    Instruction *visitMul(BinaryOperator &I);
    Instruction *visitFMul(BinaryOperator &I);
    Instruction *visitURem(BinaryOperator &I);
    Instruction *visitSRem(BinaryOperator &I);
    Instruction *visitFRem(BinaryOperator &I);
    bool SimplifyDivRemOfSelect(BinaryOperator &I);
    Instruction *commonRemTransforms(BinaryOperator &I);
    Instruction *commonIRemTransforms(BinaryOperator &I);
    Instruction *commonDivTransforms(BinaryOperator &I);
    Instruction *commonIDivTransforms(BinaryOperator &I);
    Instruction *visitUDiv(BinaryOperator &I);
    Instruction *visitSDiv(BinaryOperator &I);
    Instruction *visitFDiv(BinaryOperator &I);
    Instruction *FoldAndOfICmps(Instruction &I, ICmpInst *LHS, ICmpInst *RHS);
    Instruction *FoldAndOfFCmps(Instruction &I, FCmpInst *LHS, FCmpInst *RHS);
    Instruction *visitAnd(BinaryOperator &I);
    Instruction *FoldOrOfICmps(Instruction &I, ICmpInst *LHS, ICmpInst *RHS);
    Instruction *FoldOrOfFCmps(Instruction &I, FCmpInst *LHS, FCmpInst *RHS);
    Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op,
                                     Value *A, Value *B, Value *C);
    Instruction *visitOr (BinaryOperator &I);
    Instruction *visitXor(BinaryOperator &I);
    Instruction *visitShl(BinaryOperator &I);
    Instruction *visitAShr(BinaryOperator &I);
    Instruction *visitLShr(BinaryOperator &I);
    Instruction *commonShiftTransforms(BinaryOperator &I);
    Instruction *FoldFCmp_IntToFP_Cst(FCmpInst &I, Instruction *LHSI,
                                      Constant *RHSC);
    Instruction *visitFCmpInst(FCmpInst &I);
    Instruction *visitICmpInst(ICmpInst &I);
    Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
    Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
                                                Instruction *LHS,
                                                ConstantInt *RHS);
    Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
                                ConstantInt *DivRHS);
    Instruction *FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                             ICmpInst::Predicate Cond, Instruction &I);
    Instruction *FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
                                     BinaryOperator &I);
    Instruction *commonCastTransforms(CastInst &CI);
    Instruction *commonIntCastTransforms(CastInst &CI);
    Instruction *commonPointerCastTransforms(CastInst &CI);
    Instruction *visitTrunc(TruncInst &CI);
    Instruction *visitZExt(ZExtInst &CI);
    Instruction *visitSExt(SExtInst &CI);
    Instruction *visitFPTrunc(FPTruncInst &CI);
    Instruction *visitFPExt(CastInst &CI);
    Instruction *visitFPToUI(FPToUIInst &FI);
    Instruction *visitFPToSI(FPToSIInst &FI);
    Instruction *visitUIToFP(CastInst &CI);
    Instruction *visitSIToFP(CastInst &CI);
    Instruction *visitPtrToInt(PtrToIntInst &CI);
    Instruction *visitIntToPtr(IntToPtrInst &CI);
    Instruction *visitBitCast(BitCastInst &CI);
    Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI,
                                Instruction *FI);
    Instruction *FoldSelectIntoOp(SelectInst &SI, Value*, Value*);
    Instruction *visitSelectInst(SelectInst &SI);
    Instruction *visitSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
    Instruction *visitCallInst(CallInst &CI);
    Instruction *visitInvokeInst(InvokeInst &II);
    Instruction *visitPHINode(PHINode &PN);
    Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
    Instruction *visitAllocationInst(AllocationInst &AI);
    Instruction *visitFreeInst(FreeInst &FI);
    Instruction *visitLoadInst(LoadInst &LI);
    Instruction *visitStoreInst(StoreInst &SI);
    Instruction *visitBranchInst(BranchInst &BI);
    Instruction *visitSwitchInst(SwitchInst &SI);
    Instruction *visitInsertElementInst(InsertElementInst &IE);
    Instruction *visitExtractElementInst(ExtractElementInst &EI);
    Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
    Instruction *visitExtractValueInst(ExtractValueInst &EV);
    // visitInstruction - Specify what to return for unhandled instructions...
    Instruction *visitInstruction(Instruction &I) { return 0; }

  private:
    Instruction *visitCallSite(CallSite CS);
    bool transformConstExprCastCall(CallSite CS);
    Instruction *transformCallThroughTrampoline(CallSite CS);
    Instruction *transformZExtICmp(ICmpInst *ICI, Instruction &CI,
                                   bool DoXform = true);
    bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS);
    DbgDeclareInst *hasOneUsePlusDeclare(Value *V);

  public:
    // InsertNewInstBefore - insert an instruction New before instruction Old
    // in the program.  Add the new instruction to the worklist.
    //
    Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
      assert(New && New->getParent() == 0 &&
             "New instruction already inserted into a basic block!");
      BasicBlock *BB = Old.getParent();
      BB->getInstList().insert(&Old, New);  // Insert inst
      Worklist.Add(New);
      return New;
    }

    // ReplaceInstUsesWith - This method is to be used when an instruction is
    // found to be dead, replaceable with another preexisting expression.  Here
    // we add all uses of I to the worklist, replace all uses of I with the new
    // value, then return I, so that the inst combiner will know that I was
    // modified.
    //
    Instruction *ReplaceInstUsesWith(Instruction &I, Value *V) {
      Worklist.AddUsersToWorkList(I);   // Add all modified instrs to worklist.

      // If we are replacing the instruction with itself, this must be in a
      // segment of unreachable code, so just clobber the instruction.
      if (&I == V)
        V = UndefValue::get(I.getType());

      I.replaceAllUsesWith(V);
      return &I;
    }

    // EraseInstFromFunction - When dealing with an instruction that has side
    // effects or produces a void value, we can't rely on DCE to delete the
    // instruction.  Instead, visit methods should return the value returned by
    // this function.
    Instruction *EraseInstFromFunction(Instruction &I) {
      DEBUG(errs() << "IC: erase " << I);

      assert(I.use_empty() && "Cannot erase instruction that is used!");
      // Make sure that we reprocess all operands now that we reduced their
      // use counts.
      if (I.getNumOperands() < 8) {
        for (User::op_iterator i = I.op_begin(), e = I.op_end(); i != e; ++i)
          if (Instruction *Op = dyn_cast<Instruction>(*i))
            Worklist.Add(Op);
      }
      Worklist.Remove(&I);
      I.eraseFromParent();
      return 0;  // Don't do anything with FI
    }
    void ComputeMaskedBits(Value *V, const APInt &Mask, APInt &KnownZero,
                           APInt &KnownOne, unsigned Depth = 0) const {
      return llvm::ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
    }

    bool MaskedValueIsZero(Value *V, const APInt &Mask,
                           unsigned Depth = 0) const {
      return llvm::MaskedValueIsZero(V, Mask, TD, Depth);
    }
    unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0) const {
      return llvm::ComputeNumSignBits(Op, TD, Depth);
    }

  private:
    /// SimplifyCommutative - This performs a few simplifications for
    /// commutative operators.
    bool SimplifyCommutative(BinaryOperator &I);

    /// SimplifyCompare - This reorders the operands of a CmpInst to get them in
    /// most-complex to least-complex order.
    bool SimplifyCompare(CmpInst &I);

    /// SimplifyDemandedUseBits - Attempts to replace V with a simpler value
    /// based on the demanded bits.
    Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                   APInt &KnownZero, APInt &KnownOne,
                                   unsigned Depth);
    bool SimplifyDemandedBits(Use &U, APInt DemandedMask,
                              APInt &KnownZero, APInt &KnownOne,
                              unsigned Depth = 0);

    /// SimplifyDemandedInstructionBits - Inst is an integer instruction that
    /// SimplifyDemandedBits knows about.  See if the instruction has any
    /// properties that allow us to simplify its operands.
    bool SimplifyDemandedInstructionBits(Instruction &Inst);

    Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                      APInt &UndefElts, unsigned Depth = 0);

    // FoldOpIntoPhi - Given a binary operator or cast instruction which has a
    // PHI node as operand #0, see if we can fold the instruction into the PHI
    // (which is only possible if all operands to the PHI are constants).
    Instruction *FoldOpIntoPhi(Instruction &I);

    // FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
    // operator and they all are only used by the PHI, PHI together their
    // inputs, and do the operation once, to the result of the PHI.
    Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
    Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
    Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);

    Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
                          ConstantInt *AndRHS, BinaryOperator &TheAnd);

    Value *FoldLogicalPlusAnd(Value *LHS, Value *RHS, ConstantInt *Mask,
                              bool isSub, Instruction &I);
    Instruction *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
                                 bool isSigned, bool Inside, Instruction &IB);
    Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocationInst &AI);
    Instruction *MatchBSwap(BinaryOperator &I);
    bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
    Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
    Instruction *SimplifyMemSet(MemSetInst *MI);

    Value *EvaluateInDifferentType(Value *V, const Type *Ty, bool isSigned);

    bool CanEvaluateInDifferentType(Value *V, const Type *Ty,
                                    unsigned CastOpc, int &NumCastsRemoved);
    unsigned GetOrEnforceKnownAlignment(Value *V,
                                        unsigned PrefAlign = 0);
  };
} // end anonymous namespace
char InstCombiner::ID = 0;
static RegisterPass<InstCombiner>
X("instcombine", "Combine redundant instructions");
// getComplexity:  Assign a complexity or rank value to LLVM Values...
//   0 -> undef, 1 -> Const, 2 -> Other, 3 -> Arg, 3 -> Unary, 4 -> OtherInst
static unsigned getComplexity(Value *V) {
  if (isa<Instruction>(V)) {
    if (BinaryOperator::isNeg(V) ||
        BinaryOperator::isFNeg(V) ||
        BinaryOperator::isNot(V))
      return 3;
    return 4;
  }
  if (isa<Argument>(V)) return 3;
  return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
}
// isOnlyUse - Return true if this instruction will be deleted if we stop using
// it.
static bool isOnlyUse(Value *V) {
  return V->hasOneUse() || isa<Constant>(V);
}

// getPromotedType - Return the specified type promoted as it would be to pass
// through a va_arg area...
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// getBitCastOperand - If the specified operand is a CastInst, a constant
/// expression bitcast, or a GetElementPtrInst with all zero indices, return
/// the operand value, otherwise return null.
static Value *getBitCastOperand(Value *V) {
  if (Operator *O = dyn_cast<Operator>(V)) {
    if (O->getOpcode() == Instruction::BitCast)
      return O->getOperand(0);
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      if (GEP->hasAllZeroIndices())
        return GEP->getPointerOperand();
  }
  return 0;
}
/// This function is a wrapper around CastInst::isEliminableCastPair. It
/// simply extracts arguments and returns what that function returns.
static Instruction::CastOps
isEliminableCastPair(
  const CastInst *CI, ///< The first cast instruction
  unsigned opcode,    ///< The opcode of the second cast instruction
  const Type *DstTy,  ///< The target type for the second cast instruction
  TargetData *TD      ///< The target data for pointer size
) {
  const Type *SrcTy = CI->getOperand(0)->getType();   // A from above
  const Type *MidTy = CI->getType();                   // B from above

  // Get the opcodes of the two Cast instructions
  Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
  Instruction::CastOps secondOp = Instruction::CastOps(opcode);

  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy,
                                  TD ? TD->getIntPtrType(CI->getContext()) : 0);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr &&
       (!TD || SrcTy != TD->getIntPtrType(CI->getContext()))) ||
      (Res == Instruction::PtrToInt &&
       (!TD || DstTy != TD->getIntPtrType(CI->getContext()))))
    Res = 0;

  return Instruction::CastOps(Res);
}
/// ValueRequiresCast - Return true if the cast from "V to Ty" actually results
/// in any code being generated.  It does not require codegen if V is simple
/// enough or if the cast can be folded into other casts.
static bool ValueRequiresCast(Instruction::CastOps opcode, const Value *V,
                              const Type *Ty, TargetData *TD) {
  if (V->getType() == Ty || isa<Constant>(V)) return false;

  // If this is another cast that can be eliminated, it isn't codegen either.
  if (const CastInst *CI = dyn_cast<CastInst>(V))
    if (isEliminableCastPair(CI, opcode, Ty, TD))
      return false;
  return true;
}
// SimplifyCommutative - This performs a few simplifications for commutative
// operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex).  This puts constants before unary operators before
//     binary operators.
//
//  2. Transform: (op (op V, C1), C2) ==> (op V, (op C1, C2))
//  3. Transform: (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
//
bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
  bool Changed = false;
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1)))
    Changed = !I.swapOperands();

  if (!I.isAssociative()) return Changed;
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (BinaryOperator *Op = dyn_cast<BinaryOperator>(I.getOperand(0)))
    if (Op->getOpcode() == Opcode && isa<Constant>(Op->getOperand(1))) {
      if (isa<Constant>(I.getOperand(1))) {
        Constant *Folded = ConstantExpr::get(I.getOpcode(),
                                             cast<Constant>(I.getOperand(1)),
                                             cast<Constant>(Op->getOperand(1)));
        I.setOperand(0, Op->getOperand(0));
        I.setOperand(1, Folded);
        return true;
      } else if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1)))
        if (Op1->getOpcode() == Opcode && isa<Constant>(Op1->getOperand(1)) &&
            isOnlyUse(Op) && isOnlyUse(Op1)) {
          Constant *C1 = cast<Constant>(Op->getOperand(1));
          Constant *C2 = cast<Constant>(Op1->getOperand(1));

          // Fold (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
          Constant *Folded = ConstantExpr::get(I.getOpcode(), C1, C2);
          Instruction *New = BinaryOperator::Create(Opcode, Op->getOperand(0),
                                                    Op1->getOperand(0),
                                                    I.getName());
          InsertNewInstBefore(New, I);
          I.setOperand(0, New);
          I.setOperand(1, Folded);
          Changed = true;
        }
    }
  return Changed;
}
/// SimplifyCompare - For a CmpInst this function just orders the operands
/// so that they are listed from right (least complex) to left (most complex).
/// This puts constants before unary operators before binary operators.
bool InstCombiner::SimplifyCompare(CmpInst &I) {
  if (getComplexity(I.getOperand(0)) >= getComplexity(I.getOperand(1)))
    return false;
  I.swapOperands();
  // Compare instructions are not associative so there's nothing else we can do.
  return true;
}
// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
// if the LHS is a constant zero (which is the 'negate' form).
//
static inline Value *dyn_castNegVal(Value *V) {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isInteger())
      return ConstantExpr::getNeg(C);

  return 0;
}

// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
//
static inline Value *dyn_castFNegVal(Value *V) {
  if (BinaryOperator::isFNeg(V))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isFloatingPoint())
      return ConstantExpr::getFNeg(C);

  return 0;
}

static inline Value *dyn_castNotVal(Value *V) {
  if (BinaryOperator::isNot(V))
    return BinaryOperator::getNotArgument(V);

  // Constants can be considered to be not'ed values...
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantInt::get(C->getType(), ~C->getValue());
  return 0;
}
// dyn_castFoldableMul - If this value is a multiply that can be folded into
// other computations (because it has a constant operand), return the
// non-constant operand of the multiply, and set CST to point to the multiplier.
// Otherwise, return null.
//
static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) {
  if (V->hasOneUse() && V->getType()->isInteger())
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      if (I->getOpcode() == Instruction::Mul)
        if ((CST = dyn_cast<ConstantInt>(I->getOperand(1))))
          return I->getOperand(0);
      if (I->getOpcode() == Instruction::Shl)
        if ((CST = dyn_cast<ConstantInt>(I->getOperand(1)))) {
          // The multiplier is really 1 << CST.
          uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
          uint32_t CSTVal = CST->getLimitedValue(BitWidth);
          CST = ConstantInt::get(V->getType()->getContext(),
                                 APInt(BitWidth, 1).shl(CSTVal));
          return I->getOperand(0);
        }
    }
  return 0;
}
/// AddOne - Add one to a ConstantInt
static Constant *AddOne(Constant *C) {
  return ConstantExpr::getAdd(C,
                              ConstantInt::get(C->getType(), 1));
}
/// SubOne - Subtract one from a ConstantInt
static Constant *SubOne(ConstantInt *C) {
  return ConstantExpr::getSub(C,
                              ConstantInt::get(C->getType(), 1));
}
/// MultiplyOverflows - True if the multiply cannot be expressed in an int
/// this size.
static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign) {
  uint32_t W = C1->getBitWidth();
  APInt LHSExt = C1->getValue(), RHSExt = C2->getValue();
  if (sign) {
    LHSExt.sext(W * 2);
    RHSExt.sext(W * 2);
  } else {
    LHSExt.zext(W * 2);
    RHSExt.zext(W * 2);
  }

  APInt MulExt = LHSExt * RHSExt;

  if (sign) {
    APInt Min = APInt::getSignedMinValue(W).sext(W * 2);
    APInt Max = APInt::getSignedMaxValue(W).sext(W * 2);
    return MulExt.slt(Min) || MulExt.sgt(Max);
  }
  return MulExt.ugt(APInt::getLowBitsSet(W * 2, W));
}
/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer.  If so, check to see if there
/// are any bits set in the constant that are not demanded.  If so, shrink the
/// constant and return true.
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
                                   APInt Demanded) {
  assert(I && "No instruction?");
  assert(OpNo < I->getNumOperands() && "Operand index too large");

  // If the operand is not a constant integer, nothing to do.
  ConstantInt *OpC = dyn_cast<ConstantInt>(I->getOperand(OpNo));
  if (!OpC) return false;

  // If there are no bits set that aren't demanded, nothing to do.
  Demanded.zextOrTrunc(OpC->getValue().getBitWidth());
  if ((~Demanded & OpC->getValue()) == 0)
    return false;

  // This instruction is producing bits that are not demanded. Shrink the RHS.
  Demanded &= OpC->getValue();
  I->setOperand(OpNo, ConstantInt::get(OpC->getType(), Demanded));
  return true;
}
// ComputeSignedMinMaxValuesFromKnownBits - Given a signed integer type and a
// set of known zero and one bits, compute the maximum and minimum values that
// could have the specified known zero and known one bits, returning them in
// Min and Max.
static void ComputeSignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
                                                   const APInt &KnownOne,
                                                   APInt &Min, APInt &Max) {
  assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
         KnownZero.getBitWidth() == Min.getBitWidth() &&
         KnownZero.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(KnownZero|KnownOne);

  // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
  // bit if it is unknown.
  Min = KnownOne;
  Max = KnownOne|UnknownBits;

  if (UnknownBits.isNegative()) { // Sign bit is unknown
    Min.set(Min.getBitWidth()-1);
    Max.clear(Max.getBitWidth()-1);
  }
}
// ComputeUnsignedMinMaxValuesFromKnownBits - Given an unsigned integer type and
// a set of known zero and one bits, compute the maximum and minimum values that
// could have the specified known zero and known one bits, returning them in
// Min and Max.
static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
                                                     const APInt &KnownOne,
                                                     APInt &Min, APInt &Max) {
  assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
         KnownZero.getBitWidth() == Min.getBitWidth() &&
         KnownZero.getBitWidth() == Max.getBitWidth() &&
         "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(KnownZero|KnownOne);

  // The minimum value is when the unknown bits are all zeros.
  Min = KnownOne;
  // The maximum value is when the unknown bits are all ones.
  Max = KnownOne|UnknownBits;
}
/// SimplifyDemandedInstructionBits - Inst is an integer instruction that
/// SimplifyDemandedBits knows about.  See if the instruction has any
/// properties that allow us to simplify its operands.
bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
  unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  APInt DemandedMask(APInt::getAllOnesValue(BitWidth));

  Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask,
                                     KnownZero, KnownOne, 0);
  if (V == 0) return false;
  if (V == &Inst) return true;
  ReplaceInstUsesWith(Inst, V);
  return true;
}

/// SimplifyDemandedBits - This form of SimplifyDemandedBits simplifies the
/// specified instruction operand if possible, updating it in place.  It
/// returns true if it made any change and false otherwise.
bool InstCombiner::SimplifyDemandedBits(Use &U, APInt DemandedMask,
                                        APInt &KnownZero, APInt &KnownOne,
                                        unsigned Depth) {
  Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask,
                                          KnownZero, KnownOne, Depth);
  if (NewVal == 0) return false;
  U = NewVal;
  return true;
}
/// SimplifyDemandedUseBits - This function attempts to replace V with a simpler
/// value based on the demanded bits.  When this function is called, it is known
/// that only the bits set in DemandedMask of the result of V are ever used
/// downstream.  Consequently, depending on the mask and V, it may be possible
/// to replace V with a constant or one of its operands.  In such cases, this
/// function does the replacement and returns true.  In all other cases, it
/// returns false after analyzing the expression and setting KnownOne to the
/// bits that are known to be one in the expression.  KnownZero contains all the
/// bits that are known to be zero in the expression.  These are provided to
/// potentially allow the caller (which might recursively be
/// SimplifyDemandedBits itself) to simplify the expression.  KnownOne and
/// KnownZero always follow the invariant that KnownOne & KnownZero == 0.  That
/// is, a bit can't be both 1 and 0.  Note that the bits in KnownOne and
/// KnownZero may only be accurate for those bits set in DemandedMask.  Note
/// also that the bitwidth of V, DemandedMask, KnownZero and KnownOne must all
/// be the same.
///
/// This returns null if it did not change anything and it permits no
/// simplification.  This returns V itself if it did some simplification of V's
/// operands based on the information about what bits are demanded.  This
/// returns some other non-null value if it found out that V is equal to another
/// value in the context where the specified bits are demanded, but not for all
/// users.
Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                             APInt &KnownZero, APInt &KnownOne,
                                             unsigned Depth) {
  assert(V != 0 && "Null pointer of Value???");
  assert(Depth <= 6 && "Limit Search Depth");
  uint32_t BitWidth = DemandedMask.getBitWidth();
  const Type *VTy = V->getType();
  assert((TD || !isa<PointerType>(VTy)) &&
         "SimplifyDemandedBits needs to know bit widths!");
  assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
         (!VTy->isIntOrIntVector() ||
          VTy->getScalarSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "Value *V, DemandedMask, KnownZero and KnownOne "
         "must have same BitWidth");
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // We know all of the bits for a constant!
    KnownOne = CI->getValue() & DemandedMask;
    KnownZero = ~KnownOne & DemandedMask;
    return 0;
  }
  if (isa<ConstantPointerNull>(V)) {
    // We know all of the bits for a constant!
    KnownOne.clear();
    KnownZero = DemandedMask;
    return 0;
  }

  KnownZero.clear();
  KnownOne.clear();
  if (DemandedMask == 0) {   // Not demanding any bits from V.
    if (isa<UndefValue>(V))
      return 0;
    return UndefValue::get(VTy);
  }

  if (Depth == 6)        // Limit search depth.
    return 0;

  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  APInt &RHSKnownZero = KnownZero, &RHSKnownOne = KnownOne;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) {
    ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
    return 0;        // Only analyze instructions.
  }
  // If there are multiple uses of this value and we aren't at the root, then
  // we can't do any simplifications of the operands, because DemandedMask
  // only reflects the bits demanded by *one* of the users.
  if (Depth != 0 && !I->hasOneUse()) {
    // Despite the fact that we can't simplify this instruction in all User's
    // context, we can at least compute the knownzero/knownone bits, and we can
    // do simplifications that apply to *just* the one user if we know that
    // this instruction has a simpler value in that context.
    if (I->getOpcode() == Instruction::And) {
      // If either the LHS or the RHS are Zero, the result is zero.
      ComputeMaskedBits(I->getOperand(1), DemandedMask,
                        RHSKnownZero, RHSKnownOne, Depth+1);
      ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownZero,
                        LHSKnownZero, LHSKnownOne, Depth+1);

      // If all of the demanded bits are known 1 on one side, return the other.
      // These bits cannot contribute to the result of the 'and' in this
      // context.
      if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
          (DemandedMask & ~LHSKnownZero))
        return I->getOperand(0);
      if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
          (DemandedMask & ~RHSKnownZero))
        return I->getOperand(1);

      // If all of the demanded bits in the inputs are known zeros, return zero.
      if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
        return Constant::getNullValue(VTy);

    } else if (I->getOpcode() == Instruction::Or) {
      // We can simplify (X|Y) -> X or Y in the user's context if we know that
      // only bits from X or Y are demanded.

      // If either the LHS or the RHS are One, the result is One.
      ComputeMaskedBits(I->getOperand(1), DemandedMask,
                        RHSKnownZero, RHSKnownOne, Depth+1);
      ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownOne,
                        LHSKnownZero, LHSKnownOne, Depth+1);

      // If all of the demanded bits are known zero on one side, return the
      // other.  These bits cannot contribute to the result of the 'or' in this
      // context.
      if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
          (DemandedMask & ~LHSKnownOne))
        return I->getOperand(0);
      if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
          (DemandedMask & ~RHSKnownOne))
        return I->getOperand(1);

      // If all of the potentially set bits on one side are known to be set on
      // the other side, just use the 'other' side.
      if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
          (DemandedMask & (~RHSKnownZero)))
        return I->getOperand(0);
      if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
          (DemandedMask & (~LHSKnownZero)))
        return I->getOperand(1);
    }

    // Compute the KnownZero/KnownOne bits to simplify things downstream.
    ComputeMaskedBits(I, DemandedMask, KnownZero, KnownOne, Depth);
    return 0;
  }

  // If this is the root being simplified, allow it to have multiple uses,
  // just set the DemandedMask to all bits so that we can try to simplify the
  // operands.  This allows visitTruncInst (for example) to simplify the
  // operand of a trunc without duplicating all the logic below.
  if (Depth == 0 && !V->hasOneUse())
    DemandedMask = APInt::getAllOnesValue(BitWidth);
  switch (I->getOpcode()) {
  default:
    ComputeMaskedBits(I, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
    break;
  case Instruction::And:
    // If either the LHS or the RHS are Zero, the result is zero.
    if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1) ||
        SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownZero,
                             LHSKnownZero, LHSKnownOne, Depth+1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
        (DemandedMask & ~LHSKnownZero))
      return I->getOperand(0);
    if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
        (DemandedMask & ~RHSKnownZero))
      return I->getOperand(1);

    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
      return Constant::getNullValue(VTy);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnownZero))
      return I;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    RHSKnownOne &= LHSKnownOne;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    RHSKnownZero |= LHSKnownZero;
    break;
  case Instruction::Or:
    // If either the LHS or the RHS are One, the result is One.
    if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1) ||
        SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownOne,
                             LHSKnownZero, LHSKnownOne, Depth+1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
        (DemandedMask & ~LHSKnownOne))
      return I->getOperand(0);
    if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
        (DemandedMask & ~RHSKnownOne))
      return I->getOperand(1);

    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
        (DemandedMask & (~RHSKnownZero)))
      return I->getOperand(0);
    if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
        (DemandedMask & (~LHSKnownZero)))
      return I->getOperand(1);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    RHSKnownZero &= LHSKnownZero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    RHSKnownOne |= LHSKnownOne;
    break;
  case Instruction::Xor: {
    if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1) ||
        SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
                             LHSKnownZero, LHSKnownOne, Depth+1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if ((DemandedMask & RHSKnownZero) == DemandedMask)
      return I->getOperand(0);
    if ((DemandedMask & LHSKnownZero) == DemandedMask)
      return I->getOperand(1);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (RHSKnownZero & LHSKnownZero) |
                         (RHSKnownOne & LHSKnownOne);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    APInt KnownOneOut = (RHSKnownZero & LHSKnownOne) |
                        (RHSKnownOne & LHSKnownZero);

    // If all of the demanded bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if ((DemandedMask & ~RHSKnownZero & ~LHSKnownZero) == 0) {
      Instruction *Or =
        BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
                                 I->getName());
      return InsertNewInstBefore(Or, *I);
    }

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) {
      // all known
      if ((RHSKnownOne & LHSKnownOne) == RHSKnownOne) {
        Constant *AndC = Constant::getIntegerValue(VTy,
                                                   ~RHSKnownOne & DemandedMask);
        Instruction *And =
          BinaryOperator::CreateAnd(I->getOperand(0), AndC, "tmp");
        return InsertNewInstBefore(And, *I);
      }
    }

    // If the RHS is a constant, see if we can simplify it.
    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    RHSKnownZero = KnownZeroOut;
    RHSKnownOne  = KnownOneOut;
    break;
  }
  case Instruction::Select:
    if (SimplifyDemandedBits(I->getOperandUse(2), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1) ||
        SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
                             LHSKnownZero, LHSKnownOne, Depth+1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(I, 1, DemandedMask) ||
        ShrinkDemandedConstant(I, 2, DemandedMask))
      return I;

    // Only known if known in both the LHS and RHS.
    RHSKnownOne &= LHSKnownOne;
    RHSKnownZero &= LHSKnownZero;
    break;
  case Instruction::Trunc: {
    unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
    DemandedMask.zext(truncBf);
    RHSKnownZero.zext(truncBf);
    RHSKnownOne.zext(truncBf);
    if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1))
      return I;
    DemandedMask.trunc(BitWidth);
    RHSKnownZero.trunc(BitWidth);
    RHSKnownOne.trunc(BitWidth);
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    break;
  }
  case Instruction::BitCast:
    if (!I->getOperand(0)->getType()->isIntOrIntVector())
      return false;  // vector->int or fp->int?

    if (const VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
      if (const VectorType *SrcVTy =
            dyn_cast<VectorType>(I->getOperand(0)->getType())) {
        if (DstVTy->getNumElements() != SrcVTy->getNumElements())
          // Don't touch a bitcast between vectors of different element counts.
          return false;
      } else
        // Don't touch a scalar-to-vector bitcast.
        return false;
    } else if (isa<VectorType>(I->getOperand(0)->getType()))
      // Don't touch a vector-to-scalar bitcast.
      return false;

    if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    break;
  case Instruction::ZExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    DemandedMask.trunc(SrcBitWidth);
    RHSKnownZero.trunc(SrcBitWidth);
    RHSKnownOne.trunc(SrcBitWidth);
    if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1))
      return I;
    DemandedMask.zext(BitWidth);
    RHSKnownZero.zext(BitWidth);
    RHSKnownOne.zext(BitWidth);
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    // The top bits are known to be zero.
    RHSKnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    APInt InputDemandedBits = DemandedMask &
                              APInt::getLowBitsSet(BitWidth, SrcBitWidth);

    APInt NewBits(APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth));
    // If any of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if ((NewBits & DemandedMask) != 0)
      InputDemandedBits.set(SrcBitWidth-1);

    InputDemandedBits.trunc(SrcBitWidth);
    RHSKnownZero.trunc(SrcBitWidth);
    RHSKnownOne.trunc(SrcBitWidth);
    if (SimplifyDemandedBits(I->getOperandUse(0), InputDemandedBits,
                             RHSKnownZero, RHSKnownOne, Depth+1))
      return I;
    InputDemandedBits.zext(BitWidth);
    RHSKnownZero.zext(BitWidth);
    RHSKnownOne.zext(BitWidth);
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, or if the NewBits are not demanded
    // convert this into a zero extension.
    if (RHSKnownZero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits) {
      // Convert to ZExt cast
      CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
      return InsertNewInstBefore(NewCast, *I);
    } else if (RHSKnownOne[SrcBitWidth-1]) {    // Input sign bit known set
      RHSKnownOne |= NewBits;
    }
    break;
  }
  case Instruction::Add: {
    // Figure out what the input bits are.  If the top bits of the and result
    // are not demanded, then the add doesn't demand them from its input
    // either.
    unsigned NLZ = DemandedMask.countLeadingZeros();

    // If there is a constant on the RHS, there are a variety of xformations
    // that we can do.
    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // If null, this should be simplified elsewhere.  Some of the xforms here
      // won't work if the RHS is zero.
      if (RHS->isZero())
        break;

      // If the top bit of the output is demanded, demand everything from the
      // input.  Otherwise, we demand all the input bits except NLZ top bits.
      APInt InDemandedBits(APInt::getLowBitsSet(BitWidth, BitWidth - NLZ));

      // Find information about known zero/one bits in the input.
      if (SimplifyDemandedBits(I->getOperandUse(0), InDemandedBits,
                               LHSKnownZero, LHSKnownOne, Depth+1))
        return I;

      // If the RHS of the add has bits set that can't affect the input, reduce
      // the constant.
      if (ShrinkDemandedConstant(I, 1, InDemandedBits))
        return I;

      // Avoid excess work.
      if (LHSKnownZero == 0 && LHSKnownOne == 0)
        break;

      // Turn it into OR if input bits are zero.
      if ((LHSKnownZero & RHS->getValue()) == RHS->getValue()) {
        Instruction *Or =
          BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
                                   I->getName());
        return InsertNewInstBefore(Or, *I);
      }

      // We can say something about the output known-zero and known-one bits,
      // depending on potential carries from the input constant and the
      // unknowns.  For example if the LHS is known to have at most the 0x0F0F0
      // bits set and the RHS constant is 0x01001, then we know we have a known
      // one mask of 0x00001 and a known zero mask of 0xE0F0E.

      // To compute this, we first compute the potential carry bits.  These are
      // the bits which may be modified.  I'm not aware of a better way to do
      // this scan.
      const APInt &RHSVal = RHS->getValue();
      APInt CarryBits((~LHSKnownZero + RHSVal) ^ (~LHSKnownZero ^ RHSVal));

      // Now that we know which bits have carries, compute the known-1/0 sets.

      // Bits are known one if they are known zero in one operand and one in the
      // other, and there is no input carry.
      RHSKnownOne = ((LHSKnownZero & RHSVal) |
                     (LHSKnownOne & ~RHSVal)) & ~CarryBits;

      // Bits are known zero if they are known zero in both operands and there
      // is no input carry.
      RHSKnownZero = LHSKnownZero & ~RHSVal & ~CarryBits;
    } else {
      // If the high-bits of this ADD are not demanded, then it does not demand
      // the high bits of its LHS or RHS.
      if (DemandedMask[BitWidth-1] == 0) {
        // Right fill the mask of bits for this ADD to demand the most
        // significant bit and all those below it.
        APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
        if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
                                 LHSKnownZero, LHSKnownOne, Depth+1) ||
            SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
                                 LHSKnownZero, LHSKnownOne, Depth+1))
          return I;
      }
    }
    break;
  }
  case Instruction::Sub:
    // If the high-bits of this SUB are not demanded, then it does not demand
    // the high bits of its LHS or RHS.
    if (DemandedMask[BitWidth-1] == 0) {
      // Right fill the mask of bits for this SUB to demand the most
      // significant bit and all those below it.
      uint32_t NLZ = DemandedMask.countLeadingZeros();
      APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
      if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
                               LHSKnownZero, LHSKnownOne, Depth+1) ||
          SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
                               LHSKnownZero, LHSKnownOne, Depth+1))
        return I;
    }
    // Otherwise just hand the sub off to ComputeMaskedBits to fill in
    // the known zeros and ones.
    ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
    break;
  case Instruction::Shl:
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
      APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
      if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
                               RHSKnownZero, RHSKnownOne, Depth+1))
        return I;
      assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
      RHSKnownZero <<= ShiftAmt;
      RHSKnownOne  <<= ShiftAmt;
      // low bits known zero.
      if (ShiftAmt)
        RHSKnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
    }
    break;
  case Instruction::LShr:
    // For a logical shift right
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Unsigned shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
      if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
                               RHSKnownZero, RHSKnownOne, Depth+1))
        return I;
      assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
      RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
      RHSKnownOne  = APIntOps::lshr(RHSKnownOne, ShiftAmt);
      if (ShiftAmt) {
        // Compute the new bits that are at the top now.
        APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
        RHSKnownZero |= HighBits;  // high bits known zero.
      }
    }
    break;
  case Instruction::AShr:
    // If this is an arithmetic shift right and only the low-bit is set, we can
    // always convert this into a logical shr, even if the shift amount is
    // variable.  The low bit of the shift cannot be an input sign bit unless
    // the shift amount is >= the size of the datatype, which is undefined.
    if (DemandedMask == 1) {
      // Perform the logical shift right.
      Instruction *NewVal = BinaryOperator::CreateLShr(
                        I->getOperand(0), I->getOperand(1), I->getName());
      return InsertNewInstBefore(NewVal, *I);
    }

    // If the sign bit is the only bit demanded by this ashr, then there is no
    // need to do it, the shift doesn't change the high bit.
    if (DemandedMask.isSignBit())
      return I->getOperand(0);

    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint32_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Signed shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
      // If any of the "high bits" are demanded, we should set the sign bit as
      // demanded.
      if (DemandedMask.countLeadingZeros() <= ShiftAmt)
        DemandedMaskIn.set(BitWidth-1);
      if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
                               RHSKnownZero, RHSKnownOne, Depth+1))
        return I;
      assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
      // Compute the new bits that are at the top now.
      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
      RHSKnownOne  = APIntOps::lshr(RHSKnownOne, ShiftAmt);

      // Handle the sign bits.
      APInt SignBit(APInt::getSignBit(BitWidth));
      // Adjust to where it is now in the mask.
      SignBit = APIntOps::lshr(SignBit, ShiftAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (BitWidth <= ShiftAmt || RHSKnownZero[BitWidth-ShiftAmt-1] ||
          (HighBits & ~DemandedMask) == HighBits) {
        // Perform the logical shift right.
        Instruction *NewVal = BinaryOperator::CreateLShr(
                          I->getOperand(0), SA, I->getName());
        return InsertNewInstBefore(NewVal, *I);
      } else if ((RHSKnownOne & SignBit) != 0) { // New bits are known one.
        RHSKnownOne |= HighBits;
      }
    }
    break;
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        if (DemandedMask.ult(RA))    // srem won't affect demanded bits
          return I->getOperand(0);

        APInt LowBits = RA - 1;
        APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
        if (SimplifyDemandedBits(I->getOperandUse(0), Mask2,
                                 LHSKnownZero, LHSKnownOne, Depth+1))
          return I;

        if (LHSKnownZero[BitWidth-1] || ((LHSKnownZero & LowBits) == LowBits))
          LHSKnownZero |= ~LowBits;

        KnownZero |= LHSKnownZero & DemandedMask;

        assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
      }
    }
    break;
  case Instruction::URem: {
    APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    if (SimplifyDemandedBits(I->getOperandUse(0), AllOnes,
                             KnownZero2, KnownOne2, Depth+1) ||
        SimplifyDemandedBits(I->getOperandUse(1), AllOnes,
                             KnownZero2, KnownOne2, Depth+1))
      return I;

    unsigned Leaders = KnownZero2.countLeadingOnes();
    Leaders = std::max(Leaders,
                       KnownZero2.countLeadingOnes());
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bswap: {
        // If the only bits demanded come from one byte of the bswap result,
        // just shift the input byte into position to eliminate the bswap.
        unsigned NLZ = DemandedMask.countLeadingZeros();
        unsigned NTZ = DemandedMask.countTrailingZeros();

        // Round NTZ down to the next byte.  If we have 11 trailing zeros, then
        // we need all the bits down to bit 8.  Likewise, round NLZ.  If we
        // have 14 leading zeros, round to 8.
        NLZ &= ~7;
        NTZ &= ~7;
        // If we need exactly one byte, we can do this transformation.
        if (BitWidth-NLZ-NTZ == 8) {
          unsigned ResultBit = NTZ;
          unsigned InputBit = BitWidth-NTZ-8;

          // Replace this with either a left or right shift to get the byte into
          // the right place.
          Instruction *NewVal;
          if (InputBit > ResultBit)
            NewVal = BinaryOperator::CreateLShr(I->getOperand(1),
                    ConstantInt::get(I->getType(), InputBit-ResultBit));
          else
            NewVal = BinaryOperator::CreateShl(I->getOperand(1),
                    ConstantInt::get(I->getType(), ResultBit-InputBit));
          NewVal->takeName(I);
          return InsertNewInstBefore(NewVal, *I);
        }

        // TODO: Could compute known zero/one bits based on the input.
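        // Illustrative example (editor's note): for an i32 bswap where only the
        // low byte of the result is demanded (NLZ = 24, NTZ = 0), we have
        // BitWidth-NLZ-NTZ == 8 with ResultBit = 0 and InputBit = 24, so the
        // bswap is replaced by an lshr of 24 that moves the demanded byte into
        // place without reversing the whole word.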
        break;
      }
      }
    }
    ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
    break;
  }

  // If the client is only demanding bits that we know, return the known
  // constant.
  if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask)
    return Constant::getIntegerValue(VTy, RHSKnownOne);
  return 0;
}
/// SimplifyDemandedVectorElts - The specified value produces a vector with
/// any number of elements. DemandedElts contains the set of elements that are
/// actually used by the caller.  This method analyzes which elements of the
/// operand are undef and returns that information in UndefElts.
///
/// If the information about demanded elements can be used to simplify the
/// operation, the operation is simplified, then the resultant value is
/// returned.  This returns null if no change was made.
Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                                APInt &UndefElts,
                                                unsigned Depth) {
  unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
  APInt EltMask(APInt::getAllOnesValue(VWidth));
  assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");

  if (isa<UndefValue>(V)) {
    // If the entire vector is undefined, just return this info.
    UndefElts = EltMask;
    return 0;
  } else if (DemandedElts == 0) { // If nothing is demanded, provide undef.
    UndefElts = EltMask;
    return UndefValue::get(V->getType());
  }

  UndefElts = 0;
  if (ConstantVector *CP = dyn_cast<ConstantVector>(V)) {
    const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
    Constant *Undef = UndefValue::get(EltTy);

    std::vector<Constant*> Elts;
    for (unsigned i = 0; i != VWidth; ++i)
      if (!DemandedElts[i]) {   // If not demanded, set to undef.
        Elts.push_back(Undef);
        UndefElts.set(i);
      } else if (isa<UndefValue>(CP->getOperand(i))) {   // Already undef.
        Elts.push_back(Undef);
        UndefElts.set(i);
      } else {                               // Otherwise, defined.
        Elts.push_back(CP->getOperand(i));
      }

    // If we changed the constant, return it.
    Constant *NewCP = ConstantVector::get(Elts);
    return NewCP != CP ? NewCP : 0;
  } else if (isa<ConstantAggregateZero>(V)) {
    // Simplify the CAZ to a ConstantVector where the non-demanded elements are
    // set to undef.

    // Check if this is identity. If so, return 0 since we are not simplifying
    // anything.
    if (DemandedElts == ((1ULL << VWidth) -1))
      return 0;

    const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
    Constant *Zero = Constant::getNullValue(EltTy);
    Constant *Undef = UndefValue::get(EltTy);
    std::vector<Constant*> Elts;
    for (unsigned i = 0; i != VWidth; ++i) {
      Constant *Elt = DemandedElts[i] ? Zero : Undef;
      Elts.push_back(Elt);
    }
    UndefElts = DemandedElts ^ EltMask;
    return ConstantVector::get(Elts);
  }
  // Limit search depth.
  if (Depth == 10)
    return 0;

  // If multiple users are using the root value, proceed with
  // simplification conservatively assuming that all elements
  // are needed.
  if (!V->hasOneUse()) {
    // Quit if we find multiple users of a non-root value though.
    // They'll be handled when it's their turn to be visited by
    // the main instcombine process.
    if (Depth != 0)
      // TODO: Just compute the UndefElts information recursively.
      return 0;

    // Conservatively assume that all elements are needed.
    DemandedElts = EltMask;
  }

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return 0;        // Only analyze instructions.

  bool MadeChange = false;
  APInt UndefElts2(VWidth, 0);
  Value *TmpV;
  switch (I->getOpcode()) {
  default: break;
  case Instruction::InsertElement: {
    // If this is a variable index, we don't know which element it overwrites.
    // demand exactly the same input as we produce.
    ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
    if (Idx == 0) {
      // Note that we can't propagate undef elt info, because we don't know
      // which elt is getting updated.
      TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
                                        UndefElts2, Depth+1);
      if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
      break;
    }

    // If this is inserting an element that isn't demanded, remove this
    // insertelement.
    unsigned IdxNo = Idx->getZExtValue();
    if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
      Worklist.Add(I);
      return I->getOperand(0);
    }

    // Otherwise, the element inserted overwrites whatever was there, so the
    // input demanded set is simpler than the output set.
    APInt DemandedElts2 = DemandedElts;
    DemandedElts2.clear(IdxNo);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts2,
                                      UndefElts, Depth+1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }

    // The inserted element is defined.
    UndefElts.clear(IdxNo);
    break;
  }
  case Instruction::ShuffleVector: {
    ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
    uint64_t LHSVWidth =
      cast<VectorType>(Shuffle->getOperand(0)->getType())->getNumElements();
    APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
    for (unsigned i = 0; i < VWidth; i++) {
      if (DemandedElts[i]) {
        unsigned MaskVal = Shuffle->getMaskValue(i);
        if (MaskVal != -1u) {
          assert(MaskVal < LHSVWidth * 2 &&
                 "shufflevector mask index out of range!");
          if (MaskVal < LHSVWidth)
            LeftDemanded.set(MaskVal);
          else
            RightDemanded.set(MaskVal - LHSVWidth);
        }
      }
    }

    APInt UndefElts4(LHSVWidth, 0);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), LeftDemanded,
                                      UndefElts4, Depth+1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }

    APInt UndefElts3(LHSVWidth, 0);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), RightDemanded,
                                      UndefElts3, Depth+1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    bool NewUndefElts = false;
    for (unsigned i = 0; i < VWidth; i++) {
      unsigned MaskVal = Shuffle->getMaskValue(i);
      if (MaskVal == -1u) {
        UndefElts.set(i);
      } else if (MaskVal < LHSVWidth) {
        if (UndefElts4[MaskVal]) {
          NewUndefElts = true;
          UndefElts.set(i);
        }
      } else {
        if (UndefElts3[MaskVal - LHSVWidth]) {
          NewUndefElts = true;
          UndefElts.set(i);
        }
      }
    }

    if (NewUndefElts) {
      // Add additional discovered undefs.
      std::vector<Constant*> Elts;
      for (unsigned i = 0; i < VWidth; ++i) {
        if (UndefElts[i])
          Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
        else
          Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context),
                                          Shuffle->getMaskValue(i)));
      }
      I->setOperand(2, ConstantVector::get(Elts));
      MadeChange = true;
    }
    break;
  }
  case Instruction::BitCast: {
    // Vector->vector casts only.
    const VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
    if (!VTy) break;
    unsigned InVWidth = VTy->getNumElements();
    APInt InputDemandedElts(InVWidth, 0);
    unsigned Ratio;

    if (VWidth == InVWidth) {
      // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
      // elements as are demanded of us.
      Ratio = 1;
      InputDemandedElts = DemandedElts;
    } else if (VWidth > InVWidth) {
      // Untested so far.
      break;

      // If there are more elements in the result than there are in the source,
      // then an input element is live if any of the corresponding output
      // elements are live.
      Ratio = VWidth/InVWidth;
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
        if (DemandedElts[OutIdx])
          InputDemandedElts.set(OutIdx/Ratio);
      }
    } else {
      // Untested so far.
      break;

      // If there are more elements in the source than there are in the result,
      // then an input element is live if the corresponding output element is
      // live.
      Ratio = InVWidth/VWidth;
      for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
        if (DemandedElts[InIdx/Ratio])
          InputDemandedElts.set(InIdx);
    }

    // div/rem demand all inputs, because they don't want divide by zero.
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
                                      UndefElts2, Depth+1);
    if (TmpV) {
      I->setOperand(0, TmpV);
      MadeChange = true;
    }

    UndefElts = UndefElts2;
    if (VWidth > InVWidth) {
      llvm_unreachable("Unimp");
      // If there are more elements in the result than there are in the source,
      // then an output element is undef if the corresponding input element is
      // undef.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (UndefElts2[OutIdx/Ratio])
          UndefElts.set(OutIdx);
    } else if (VWidth < InVWidth) {
      llvm_unreachable("Unimp");
      // If there are more elements in the source than there are in the result,
      // then a result element is undef if all of the corresponding input
      // elements are undef.
      UndefElts = ~0ULL >> (64-VWidth);  // Start out all undef.
      for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
        if (!UndefElts2[InIdx])            // Not undef?
          UndefElts.clear(InIdx/Ratio);    // Clear undef bit.
    }
    break;
  }
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // div/rem demand all inputs, because they don't want divide by zero.
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
                                      UndefElts, Depth+1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
                                      UndefElts2, Depth+1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    // Output elements are undefined if both are undefined.  Consider things
    // like undef&0.  The result is known zero, not undef.
    UndefElts &= UndefElts2;
    break;
  case Instruction::Call: {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
    if (!II) break;
    switch (II->getIntrinsicID()) {
    default: break;

    // Binary vector operations that work column-wise.  A dest element is a
    // function of the corresponding input elements from the two inputs.
    case Intrinsic::x86_sse_sub_ss:
    case Intrinsic::x86_sse_mul_ss:
    case Intrinsic::x86_sse_min_ss:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse2_sub_sd:
    case Intrinsic::x86_sse2_mul_sd:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse2_max_sd:
      TmpV = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
                                        UndefElts, Depth+1);
      if (TmpV) { II->setOperand(1, TmpV); MadeChange = true; }
      TmpV = SimplifyDemandedVectorElts(II->getOperand(2), DemandedElts,
                                        UndefElts2, Depth+1);
      if (TmpV) { II->setOperand(2, TmpV); MadeChange = true; }

      // If only the low elt is demanded and this is a scalarizable intrinsic,
      // scalarize it now.
      if (DemandedElts == 1) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::x86_sse_sub_ss:
        case Intrinsic::x86_sse_mul_ss:
        case Intrinsic::x86_sse2_sub_sd:
        case Intrinsic::x86_sse2_mul_sd:
          // TODO: Lower MIN/MAX/ABS/etc
          Value *LHS = II->getOperand(1);
          Value *RHS = II->getOperand(2);
          // Extract the element as scalars.
          LHS = InsertNewInstBefore(ExtractElementInst::Create(LHS,
            ConstantInt::get(Type::getInt32Ty(*Context), 0U, false), "tmp"), *II);
          RHS = InsertNewInstBefore(ExtractElementInst::Create(RHS,
            ConstantInt::get(Type::getInt32Ty(*Context), 0U, false), "tmp"), *II);

          switch (II->getIntrinsicID()) {
          default: llvm_unreachable("Case stmts out of sync!");
          case Intrinsic::x86_sse_sub_ss:
          case Intrinsic::x86_sse2_sub_sd:
            TmpV = InsertNewInstBefore(BinaryOperator::CreateFSub(LHS, RHS,
                                                        II->getName()), *II);
            break;
          case Intrinsic::x86_sse_mul_ss:
          case Intrinsic::x86_sse2_mul_sd:
            TmpV = InsertNewInstBefore(BinaryOperator::CreateFMul(LHS, RHS,
                                                         II->getName()), *II);
            break;
          }

          Instruction *New =
            InsertElementInst::Create(
              UndefValue::get(II->getType()), TmpV,
              ConstantInt::get(Type::getInt32Ty(*Context), 0U, false),
              II->getName());
          InsertNewInstBefore(New, *II);
          return New;
        }
      }

      // Output elements are undefined if both are undefined.  Consider things
      // like undef&0.  The result is known zero, not undef.
      UndefElts &= UndefElts2;
      break;
    }
    break;
  }
  }
  return MadeChange ? I : 0;
}
/// AssociativeOpt - Perform an optimization on an associative operator.  This
/// function is designed to check a chain of associative operators for a
/// potential to apply a certain optimization.  Since the optimization may be
/// applicable if the expression was reassociated, this checks the chain, then
/// reassociates the expression as necessary to expose the optimization
/// opportunity.  This makes use of a special Functor, which must define
/// 'shouldApply' and 'apply' methods.
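///
/// For example (illustrative): with the AddRHS functor below, the X+X
/// optimization can fire on ((Y + X) + X) because the chain is first
/// reassociated to (Y + (X + X)) so that the functor sees a matching right
/// operand.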
template<typename Functor>
static Instruction *AssociativeOpt(BinaryOperator &Root, const Functor &F) {
  unsigned Opcode = Root.getOpcode();
  Value *LHS = Root.getOperand(0);

  // Quick check, see if the immediate LHS matches...
  if (F.shouldApply(LHS))
    return F.apply(Root);

  // Otherwise, if the LHS is not of the same opcode as the root, return.
  Instruction *LHSI = dyn_cast<Instruction>(LHS);
  while (LHSI && LHSI->getOpcode() == Opcode && LHSI->hasOneUse()) {
    // Should we apply this transform to the RHS?
    bool ShouldApply = F.shouldApply(LHSI->getOperand(1));

    // If not to the RHS, check to see if we should apply to the LHS...
    if (!ShouldApply && F.shouldApply(LHSI->getOperand(0))) {
      cast<BinaryOperator>(LHSI)->swapOperands();   // Make the LHS the RHS
      ShouldApply = true;
    }

    // If the functor wants to apply the optimization to the RHS of LHSI,
    // reassociate the expression from ((? op A) op B) to (? op (A op B))
    if (ShouldApply) {
      // Now all of the instructions are in the current basic block, go ahead
      // and perform the reassociation.
      Instruction *TmpLHSI = cast<Instruction>(Root.getOperand(0));

      // First move the selected RHS to the LHS of the root...
      Root.setOperand(0, LHSI->getOperand(1));

      // Make what used to be the LHS of the root be the user of the root...
      Value *ExtraOperand = TmpLHSI->getOperand(1);
      if (&Root == TmpLHSI) {
        Root.replaceAllUsesWith(Constant::getNullValue(TmpLHSI->getType()));
        return 0;
      }
      Root.replaceAllUsesWith(TmpLHSI);          // Users now use TmpLHSI
      TmpLHSI->setOperand(1, &Root);             // TmpLHSI now uses the root
      BasicBlock::iterator ARI = &Root; ++ARI;
      TmpLHSI->moveBefore(ARI);                  // Move TmpLHSI to after Root
      ARI = TmpLHSI;

      // Now propagate the ExtraOperand down the chain of instructions until we
      // get to LHSI.
      while (TmpLHSI != LHSI) {
        Instruction *NextLHSI = cast<Instruction>(TmpLHSI->getOperand(0));
        // Move the instruction to immediately before the chain we are
        // constructing to avoid breaking dominance properties.
        NextLHSI->moveBefore(ARI);
        ARI = NextLHSI;

        Value *NextOp = NextLHSI->getOperand(1);
        NextLHSI->setOperand(1, ExtraOperand);
        TmpLHSI = NextLHSI;
        ExtraOperand = NextOp;
      }

      // Now that the instructions are reassociated, have the functor perform
      // the transformation...
      return F.apply(Root);
    }

    LHSI = dyn_cast<Instruction>(LHSI->getOperand(0));
  }
  return 0;
}
// AddRHS - Implements: X + X --> X << 1
struct AddRHS {
  Value *RHS;
  explicit AddRHS(Value *rhs) : RHS(rhs) {}
  bool shouldApply(Value *LHS) const { return LHS == RHS; }
  Instruction *apply(BinaryOperator &Add) const {
    return BinaryOperator::CreateShl(Add.getOperand(0),
                                     ConstantInt::get(Add.getType(), 1));
  }
};

// AddMaskingAnd - Implements (A & C1)+(B & C2) --> (A & C1)|(B & C2)
//                 iff C1&C2 == 0
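//                 e.g. (illustrative) (A & 0xF0) + (B & 0x0F): the masks are
//                 disjoint, so no bit position ever adds two one-bits, no
//                 carry is generated, and the add is equivalent to an or.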
struct AddMaskingAnd {
  Constant *C2;
  explicit AddMaskingAnd(Constant *c) : C2(c) {}
  bool shouldApply(Value *LHS) const {
    ConstantInt *C1;
    return match(LHS, m_And(m_Value(), m_ConstantInt(C1))) &&
           ConstantExpr::getAnd(C1, C2)->isNullValue();
  }
  Instruction *apply(BinaryOperator &Add) const {
    return BinaryOperator::CreateOr(Add.getOperand(0), Add.getOperand(1));
  }
};
static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner *IC) {
  if (CastInst *CI = dyn_cast<CastInst>(&I))
    return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
    return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
                                    SO->getName()+".op");
  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
    return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  llvm_unreachable("Unknown binary instruction type!");
}
// FoldOpIntoSelect - Given an instruction with a select as one operand and a
// constant as the other operand, try to fold the binary operator into the
// select arguments.  This also works for Cast instructions, which obviously do
// not have a second operand.
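//
// For example (illustrative): 'add (select %c, 4, 8), 1' can become
// 'select %c, 5, 9', folding the add into both arms as constants.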
static Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
                                     InstCombiner *IC) {
  // Don't modify shared select instructions
  if (!SI->hasOneUse()) return 0;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType() == Type::getInt1Ty(*IC->getContext())) return 0;

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, IC);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, IC);

    return SelectInst::Create(SI->getCondition(), SelectTrueVal,
                              SelectFalseVal);
  }
  return 0;
}
/// FoldOpIntoPhi - Given a binary operator or cast instruction which has a PHI
/// node as operand #0, see if we can fold the instruction into the PHI (which
/// is only possible if all operands to the PHI are constants).
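///
/// For example (illustrative): if %p = phi i32 [ 1, %bb1 ], [ 2, %bb2 ] and
/// its only use is 'add i32 %p, 3', the add can be folded into
/// phi i32 [ 4, %bb1 ], [ 5, %bb2 ].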
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (!PN->hasOneUse() || NumPHIValues == 0) return 0;

  // Check to see if all of the operands of the PHI are constants.  If there is
  // one non-constant value, remember the BB it is.  If there is more than one
  // or if *it* is a PHI, bail out.
  BasicBlock *NonConstBB = 0;
  for (unsigned i = 0; i != NumPHIValues; ++i)
    if (!isa<Constant>(PN->getIncomingValue(i))) {
      if (NonConstBB) return 0;  // More than one non-const value.
      if (isa<PHINode>(PN->getIncomingValue(i))) return 0;  // Itself a phi.
      NonConstBB = PN->getIncomingBlock(i);

      // If the incoming non-constant value is in I's block, we have an infinite
      // loop.
      if (NonConstBB == I.getParent())
        return 0;
    }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return 0;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), "");
  NewPN->reserveOperandSpace(PN->getNumOperands()/2);
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // Next, add all of the operands to the PHI.
  if (I.getNumOperands() == 2) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
        if (CmpInst *CI = dyn_cast<CmpInst>(&I))
          InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
        else
          InV = ConstantExpr::get(I.getOpcode(), InC, C);
      } else {
        assert(PN->getIncomingBlock(i) == NonConstBB);
        if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
          InV = BinaryOperator::Create(BO->getOpcode(),
                                       PN->getIncomingValue(i), C, "phitmp",
                                       NonConstBB->getTerminator());
        else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
          InV = CmpInst::Create(CI->getOpcode(),
                                CI->getPredicate(),
                                PN->getIncomingValue(i), C, "phitmp",
                                NonConstBB->getTerminator());
        else
          llvm_unreachable("Unknown binop!");

        Worklist.Add(cast<Instruction>(InV));
      }
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    CastInst *CI = cast<CastInst>(&I);
    const Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      } else {
        assert(PN->getIncomingBlock(i) == NonConstBB);
        InV = CastInst::Create(CI->getOpcode(), PN->getIncomingValue(i),
                               I.getType(), "phitmp",
                               NonConstBB->getTerminator());
        Worklist.Add(cast<Instruction>(InV));
      }
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }
  return ReplaceInstUsesWith(I, NewPN);
}
/// WillNotOverflowSignedAdd - Return true if we can prove that:
///    (sext (add LHS, RHS))  === (add (sext LHS), (sext RHS))
/// This basically requires proving that the add in the original type would not
/// overflow to change the sign bit or have a carry out.
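///
/// For example (illustrative): i8 values with at least two sign bits lie in
/// [-64, 63], so their sum lies in [-128, 126] and cannot overflow i8.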
bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
  // There are different heuristics we can use for this.  Here are some simple
  // ones.

  // Add has the property that adding any two 2's complement numbers can only
  // have one carry bit which can change a sign.  As such, if LHS and RHS each
  // have at least two sign bits, we know that the addition of the two values
  // will sign extend fine.
  if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1)
    return true;

  // If one of the operands only has one non-zero bit, and if the other operand
  // has a known-zero bit in a more significant place than it (not including the
  // sign bit) the ripple may go up to and fill the zero, but won't change the
  // sign.  For example, (X & ~4) + 1.

  // TODO: Implement.

  return false;
}
Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
  bool Changed = SimplifyCommutative(I);
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
    // X + undef -> undef
    if (isa<UndefValue>(RHS))
      return ReplaceInstUsesWith(I, RHS);

    // X + 0 --> X
    if (RHSC->isNullValue())
      return ReplaceInstUsesWith(I, LHS);

    if (ConstantInt *CI = dyn_cast<ConstantInt>(RHSC)) {
      // X + (signbit) --> X ^ signbit
      const APInt& Val = CI->getValue();
      uint32_t BitWidth = Val.getBitWidth();
      if (Val == APInt::getSignBit(BitWidth))
        return BinaryOperator::CreateXor(LHS, RHS);

      // See if SimplifyDemandedBits can simplify this.  This handles stuff like
      // (X & 254)+1 -> (X&254)|1
      if (SimplifyDemandedInstructionBits(I))
        return &I;

      // zext(bool) + C -> bool ? C + 1 : C
      if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
        if (ZI->getSrcTy() == Type::getInt1Ty(*Context))
          return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);
    }

    if (isa<PHINode>(LHS))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;

    ConstantInt *XorRHS = 0;
    Value *XorLHS = 0;
    if (isa<ConstantInt>(RHSC) &&
        match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
      uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
      const APInt& RHSVal = cast<ConstantInt>(RHSC)->getValue();

      uint32_t Size = TySizeBits / 2;
      APInt C0080Val(APInt(TySizeBits, 1ULL).shl(Size - 1));
      APInt CFF80Val(-C0080Val);
      do {
        if (TySizeBits > Size) {
          // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
          // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
          if ((RHSVal == CFF80Val && XorRHS->getValue() == C0080Val) ||
              (RHSVal == C0080Val && XorRHS->getValue() == CFF80Val)) {
            // This is a sign extend if the top bits are known zero.
            if (!MaskedValueIsZero(XorLHS,
                   APInt::getHighBitsSet(TySizeBits, TySizeBits - Size)))
              Size = 0;  // Not a sign ext, but can't be any others either.
            break;
          }
        }
        Size >>= 1;
        C0080Val = APIntOps::lshr(C0080Val, Size);
        CFF80Val = APIntOps::ashr(CFF80Val, Size);
      } while (Size >= 1);

      // FIXME: This shouldn't be necessary. When the backends can handle types
      // with funny bit widths then this switch statement should be removed. It
      // is just here to get the size of the "middle" type back up to something
      // that the back ends can handle.
      const Type *MiddleType = 0;
      switch (Size) {
      default: break;
      case 32: MiddleType = Type::getInt32Ty(*Context); break;
      case 16: MiddleType = Type::getInt16Ty(*Context); break;
      case  8: MiddleType = Type::getInt8Ty(*Context); break;
      }
      if (MiddleType) {
        Value *NewTrunc = Builder->CreateTrunc(XorLHS, MiddleType, "sext");
        return new SExtInst(NewTrunc, I.getType(), I.getName());
      }
    }
  }

  if (I.getType() == Type::getInt1Ty(*Context))
    return BinaryOperator::CreateXor(LHS, RHS);

  if (I.getType()->isInteger()) {
    if (Instruction *Result = AssociativeOpt(I, AddRHS(RHS)))
      return Result;

    if (Instruction *RHSI = dyn_cast<Instruction>(RHS)) {
      if (RHSI->getOpcode() == Instruction::Sub)
        if (LHS == RHSI->getOperand(1))                   // A + (B - A) --> B
          return ReplaceInstUsesWith(I, RHSI->getOperand(0));
    }
    if (Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
      if (LHSI->getOpcode() == Instruction::Sub)
        if (RHS == LHSI->getOperand(1))                   // (B - A) + A --> B
          return ReplaceInstUsesWith(I, LHSI->getOperand(0));
    }
  }

  // -A + B  -->  B - A
  // -A + -B  -->  -(A + B)
  if (Value *LHSV = dyn_castNegVal(LHS)) {
    if (LHS->getType()->isIntOrIntVector()) {
      if (Value *RHSV = dyn_castNegVal(RHS)) {
        Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
        return BinaryOperator::CreateNeg(NewAdd);
      }
    }

    return BinaryOperator::CreateSub(RHS, LHSV);
  }

  // A + -B  -->  A - B
  if (!isa<Constant>(RHS))
    if (Value *V = dyn_castNegVal(RHS))
      return BinaryOperator::CreateSub(LHS, V);

  ConstantInt *C2;
  if (Value *X = dyn_castFoldableMul(LHS, C2)) {
    if (X == RHS)   // X*C + X --> X * (C+1)
      return BinaryOperator::CreateMul(RHS, AddOne(C2));

    // X*C1 + X*C2 --> X * (C1+C2)
    ConstantInt *C1;
    if (X == dyn_castFoldableMul(RHS, C1))
      return BinaryOperator::CreateMul(X, ConstantExpr::getAdd(C1, C2));
  }

  // X + X*C --> X * (C+1)
  if (dyn_castFoldableMul(RHS, C2) == LHS)
    return BinaryOperator::CreateMul(LHS, AddOne(C2));

  // X + ~X --> -1   since   ~X = -X-1
  if (dyn_castNotVal(LHS) == RHS ||
      dyn_castNotVal(RHS) == LHS)
    return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));

  // (A & C1)+(B & C2) --> (A & C1)|(B & C2) iff C1&C2 == 0
  if (match(RHS, m_And(m_Value(), m_ConstantInt(C2))))
    if (Instruction *R = AssociativeOpt(I, AddMaskingAnd(C2)))
      return R;

  // A+B --> A|B iff A and B have no bits set in common.
  if (const IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
    APInt Mask = APInt::getAllOnesValue(IT->getBitWidth());
    APInt LHSKnownOne(IT->getBitWidth(), 0);
    APInt LHSKnownZero(IT->getBitWidth(), 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    if (LHSKnownZero != 0) {
      APInt RHSKnownOne(IT->getBitWidth(), 0);
      APInt RHSKnownZero(IT->getBitWidth(), 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);

      // No bits in common -> bitwise or.
      if ((LHSKnownZero|RHSKnownZero).isAllOnesValue())
        return BinaryOperator::CreateOr(LHS, RHS);
    }
  }

  // W*X + Y*Z --> W * (X+Z)  iff W == Y
  if (I.getType()->isIntOrIntVector()) {
    Value *W, *X, *Y, *Z;
    if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
        match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
      if (W != Y) {
        if (W == Z) {
          std::swap(Y, Z);
        } else if (Y == X) {
          std::swap(W, X);
        } else if (X == Z) {
          std::swap(Y, Z);
          std::swap(W, X);
        }
      }

      if (W == Y) {
        Value *NewAdd = Builder->CreateAdd(X, Z, LHS->getName());
        return BinaryOperator::CreateMul(W, NewAdd);
      }
    }
  }

  if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
    Value *X = 0;
    if (match(LHS, m_Not(m_Value(X))))    // ~X + C --> (C-1) - X
      return BinaryOperator::CreateSub(SubOne(CRHS), X);

    // (X & FF00) + xx00  -> (X+xx00) & FF00
    if (LHS->hasOneUse() &&
        match(LHS, m_And(m_Value(X), m_ConstantInt(C2)))) {
      Constant *Anded = ConstantExpr::getAnd(CRHS, C2);
      if (Anded == CRHS) {
        // See if all bits from the first bit set in the Add RHS up are included
        // in the mask.  First, get the rightmost bit.
        const APInt& AddRHSV = CRHS->getValue();

        // Form a mask of all bits from the lowest bit added through the top.
        APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));

        // See if the and mask includes all of these bits.
        APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());

        if (AddRHSHighBits == AddRHSHighBitsAnd) {
          // Okay, the xform is safe.  Insert the new add pronto.
          Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName());
          return BinaryOperator::CreateAnd(NewAdd, C2);
        }
      }
    }

    // Try to fold constant add into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(LHS))
      if (Instruction *R = FoldOpIntoSelect(I, SI, this))
        return R;
  }

  // add (select X 0 (sub n A)) A  -->  select X A n
  {
    SelectInst *SI = dyn_cast<SelectInst>(LHS);
    Value *A = RHS;
    if (!SI) {
      SI = dyn_cast<SelectInst>(RHS);
      A = LHS;
    }
    if (SI && SI->hasOneUse()) {
      Value *TV = SI->getTrueValue();
      Value *FV = SI->getFalseValue();
      Value *N;

      // Can we fold the add into the argument of the select?
      // We check both true and false select arguments for a matching subtract.
      if (match(FV, m_Zero()) &&
          match(TV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the true select value.
        return SelectInst::Create(SI->getCondition(), N, A);
      if (match(TV, m_Zero()) &&
          match(FV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the false select value.
        return SelectInst::Create(SI->getCondition(), A, N);
    }
  }

  // Check for (add (sext x), y), see if we can merge this into an
  // integer add followed by a sext.
  if (SExtInst *LHSConv = dyn_cast<SExtInst>(LHS)) {
    // (add (sext x), cst) --> (sext (add x, cst'))
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
      Constant *CI =
        ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
      if (LHSConv->hasOneUse() &&
          ConstantExpr::getSExt(CI, I.getType()) == RHSC &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
        // Insert the new, smaller add.
        Value *NewAdd = Builder->CreateAdd(LHSConv->getOperand(0),
                                           CI, "addconv");
        return new SExtInst(NewAdd, I.getType());
      }
    }

    // (add (sext x), (sext y)) --> (sext (add int x, y))
    if (SExtInst *RHSConv = dyn_cast<SExtInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of sexts), and if the
      // integer add will not overflow.
      if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0),
                                   RHSConv->getOperand(0))) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateAdd(LHSConv->getOperand(0),
                                           RHSConv->getOperand(0), "addconv");
        return new SExtInst(NewAdd, I.getType());
      }
    }
  }

  return Changed ? &I : 0;
}
Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
  bool Changed = SimplifyCommutative(I);
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
    // X + 0 --> X
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) {
      if (CFP->isExactlyValue(ConstantFP::getNegativeZero
                              (I.getType())->getValueAPF()))
        return ReplaceInstUsesWith(I, LHS);
    }

    if (isa<PHINode>(LHS))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  // -A + B  -->  B - A
  // -A + -B  -->  -(A + B)
  if (Value *LHSV = dyn_castFNegVal(LHS))
    return BinaryOperator::CreateFSub(RHS, LHSV);

  // A + -B  -->  A - B
  if (!isa<Constant>(RHS))
    if (Value *V = dyn_castFNegVal(RHS))
      return BinaryOperator::CreateFSub(LHS, V);

  // Check for X+0.0.  Simplify it to X if we know X is not -0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
    if (CFP->getValueAPF().isPosZero() && CannotBeNegativeZero(LHS))
      return ReplaceInstUsesWith(I, LHS);

  // Check for (add double (sitofp x), y), see if we can merge this into an
  // integer add followed by a promotion.
  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
    // (add double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
    // ... if the constant fits in the integer value.  This is useful for things
    // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
    // requires a constant pool load, and generally allows the add to be better
    // instcombined.
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
      Constant *CI =
        ConstantExpr::getFPToSI(CFP, LHSConv->getOperand(0)->getType());
      if (LHSConv->hasOneUse() &&
          ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateAdd(LHSConv->getOperand(0),
                                           CI, "addconv");
        return new SIToFPInst(NewAdd, I.getType());
      }
    }

    // (add double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
    if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of int->fp conversions),
      // and if the integer add will not overflow.
      if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0),
                                   RHSConv->getOperand(0))) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateAdd(LHSConv->getOperand(0),
                                           RHSConv->getOperand(0), "addconv");
        return new SIToFPInst(NewAdd, I.getType());
      }
    }
  }

  return Changed ? &I : 0;
}
Instruction *InstCombiner::visitSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Op0 == Op1)                        // sub X, X  -> 0
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

  // If this is a 'B = x-(-A)', change to B = x+A...
  if (Value *V = dyn_castNegVal(Op1))
    return BinaryOperator::CreateAdd(Op0, V);

  if (isa<UndefValue>(Op0))
    return ReplaceInstUsesWith(I, Op0);    // undef - X -> undef
  if (isa<UndefValue>(Op1))
    return ReplaceInstUsesWith(I, Op1);    // X - undef -> undef

  if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
    // Replace (-1 - A) with (~A)...
    if (C->isAllOnesValue())
      return BinaryOperator::CreateNot(Op1);

    // C - ~X == X + (1+C)
    Value *X = 0;
    if (match(Op1, m_Not(m_Value(X))))
      return BinaryOperator::CreateAdd(X, AddOne(C));

    // -(X >>u 31) -> (X >>s 31)
    // -(X >>s 31) -> (X >>u 31)
    if (C->isZero()) {
      if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op1)) {
        if (SI->getOpcode() == Instruction::LShr) {
          if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
            // Check to see if we are shifting out everything but the sign bit.
            if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
                SI->getType()->getPrimitiveSizeInBits()-1) {
              // Ok, the transformation is safe.  Insert AShr.
              return BinaryOperator::Create(Instruction::AShr,
                                          SI->getOperand(0), CU, SI->getName());
            }
          }
        }
        else if (SI->getOpcode() == Instruction::AShr) {
          if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
            // Check to see if we are shifting out everything but the sign bit.
            if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
                SI->getType()->getPrimitiveSizeInBits()-1) {
              // Ok, the transformation is safe.  Insert LShr.
              return BinaryOperator::CreateLShr(
                                          SI->getOperand(0), CU, SI->getName());
            }
          }
        }
      }
    }

    // Try to fold constant sub into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI, this))
        return R;

    // C - zext(bool) -> bool ? C - 1 : C
    if (ZExtInst *ZI = dyn_cast<ZExtInst>(Op1))
      if (ZI->getSrcTy() == Type::getInt1Ty(*Context))
        return SelectInst::Create(ZI->getOperand(0), SubOne(C), C);
  }

  if (I.getType() == Type::getInt1Ty(*Context))
    return BinaryOperator::CreateXor(Op0, Op1);

  if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
    if (Op1I->getOpcode() == Instruction::Add) {
      if (Op1I->getOperand(0) == Op0)              // X-(X+Y) == -Y
        return BinaryOperator::CreateNeg(Op1I->getOperand(1),
                                         I.getName());
      else if (Op1I->getOperand(1) == Op0)         // X-(Y+X) == -Y
        return BinaryOperator::CreateNeg(Op1I->getOperand(0),
                                         I.getName());
      else if (ConstantInt *CI1 = dyn_cast<ConstantInt>(I.getOperand(0))) {
        if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Op1I->getOperand(1)))
          // C1-(X+C2) --> (C1-C2)-X
          return BinaryOperator::CreateSub(
            ConstantExpr::getSub(CI1, CI2), Op1I->getOperand(0));
      }
    }

    if (Op1I->hasOneUse()) {
      // Replace (x - (y - z)) with (x + (z - y)) if the (y - z) subexpression
      // is not used by anyone else...
      //
      if (Op1I->getOpcode() == Instruction::Sub) {
        // Swap the two operands of the subexpr...
        Value *IIOp0 = Op1I->getOperand(0), *IIOp1 = Op1I->getOperand(1);
        Op1I->setOperand(0, IIOp1);
        Op1I->setOperand(1, IIOp0);

        // Create the new top level add instruction...
        return BinaryOperator::CreateAdd(Op0, Op1);
      }

      // Replace (A - (A & B)) with (A & ~B) if this is the only use of (A&B)...
      //
      if (Op1I->getOpcode() == Instruction::And &&
          (Op1I->getOperand(0) == Op0 || Op1I->getOperand(1) == Op0)) {
        Value *OtherOp = Op1I->getOperand(Op1I->getOperand(0) == Op0);

        Value *NewNot = Builder->CreateNot(OtherOp, "B.not");
        return BinaryOperator::CreateAnd(Op0, NewNot);
      }

      // 0 - (X sdiv C)  -> (X sdiv -C)
      if (Op1I->getOpcode() == Instruction::SDiv)
        if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
          if (CSI->isZero())
            if (Constant *DivRHS = dyn_cast<Constant>(Op1I->getOperand(1)))
              return BinaryOperator::CreateSDiv(Op1I->getOperand(0),
                                                ConstantExpr::getNeg(DivRHS));

      // X - X*C --> X * (1-C)
      ConstantInt *C2 = 0;
      if (dyn_castFoldableMul(Op1I, C2) == Op0) {
        Constant *CP1 =
          ConstantExpr::getSub(ConstantInt::get(I.getType(), 1),
                               C2);
        return BinaryOperator::CreateMul(Op0, CP1);
      }
    }
  }

  if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
    if (Op0I->getOpcode() == Instruction::Add) {
      if (Op0I->getOperand(0) == Op1)             // (Y+X)-Y == X
        return ReplaceInstUsesWith(I, Op0I->getOperand(1));
      else if (Op0I->getOperand(1) == Op1)        // (X+Y)-Y == X
        return ReplaceInstUsesWith(I, Op0I->getOperand(0));
    } else if (Op0I->getOpcode() == Instruction::Sub) {
      if (Op0I->getOperand(0) == Op1)             // (X-Y)-X == -Y
        return BinaryOperator::CreateNeg(Op0I->getOperand(1),
                                         I.getName());
    }
  }

  ConstantInt *C1;
  if (Value *X = dyn_castFoldableMul(Op0, C1)) {
    if (X == Op1)  // X*C - X --> X * (C-1)
      return BinaryOperator::CreateMul(Op1, SubOne(C1));

    ConstantInt *C2;   // X*C1 - X*C2 -> X * (C1-C2)
    if (X == dyn_castFoldableMul(Op1, C2))
      return BinaryOperator::CreateMul(X, ConstantExpr::getSub(C1, C2));
  }
  return 0;
}
Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // If this is a 'B = x-(-A)', change to B = x+A...
  if (Value *V = dyn_castFNegVal(Op1))
    return BinaryOperator::CreateFAdd(Op0, V);

  if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
    if (Op1I->getOpcode() == Instruction::FAdd) {
      if (Op1I->getOperand(0) == Op0)              // X-(X+Y) == -Y
        return BinaryOperator::CreateFNeg(Op1I->getOperand(1),
                                          I.getName());
      else if (Op1I->getOperand(1) == Op0)         // X-(Y+X) == -Y
        return BinaryOperator::CreateFNeg(Op1I->getOperand(0),
                                          I.getName());
    }
  }

  return 0;
}
/// isSignBitCheck - Given an exploded icmp instruction, return true if the
/// comparison only checks the sign bit.  If it only checks the sign bit, set
/// TrueIfSigned if the result of the comparison is true when the input value
/// is signed.
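///
/// For example (illustrative): for i8, 'icmp slt X, 0', 'icmp ugt X, 127' and
/// 'icmp uge X, 128' all test exactly the sign bit of X, with TrueIfSigned
/// set to true.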
static bool isSignBitCheck(ICmpInst::Predicate pred, ConstantInt *RHS,
                           bool &TrueIfSigned) {
  switch (pred) {
  case ICmpInst::ICMP_SLT:   // True if LHS s< 0
    TrueIfSigned = true;
    return RHS->isZero();
  case ICmpInst::ICMP_SLE:   // True if LHS s<= RHS and RHS == -1
    TrueIfSigned = true;
    return RHS->isAllOnesValue();
  case ICmpInst::ICMP_SGT:   // True if LHS s> -1
    TrueIfSigned = false;
    return RHS->isAllOnesValue();
  case ICmpInst::ICMP_UGT:
    // True if LHS u> RHS and RHS == high-bit-mask - 1
    TrueIfSigned = true;
    return RHS->getValue() ==
      APInt::getSignedMaxValue(RHS->getType()->getPrimitiveSizeInBits());
  case ICmpInst::ICMP_UGE:
    // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
    TrueIfSigned = true;
    return RHS->getValue().isSignBit();
  default:
    return false;
  }
}
Instruction *InstCombiner::visitMul(BinaryOperator &I) {
  bool Changed = SimplifyCommutative(I);
  Value *Op0 = I.getOperand(0);

  if (isa<UndefValue>(I.getOperand(1)))              // undef * X -> 0
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

  // Simplify mul instructions with a constant RHS...
  if (Constant *Op1 = dyn_cast<Constant>(I.getOperand(1))) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {

      // ((X << C1)*C2) == (X * (C2 << C1))
      if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op0))
        if (SI->getOpcode() == Instruction::Shl)
          if (Constant *ShOp = dyn_cast<Constant>(SI->getOperand(1)))
            return BinaryOperator::CreateMul(SI->getOperand(0),
                                             ConstantExpr::getShl(CI, ShOp));

      if (CI->isZero())
        return ReplaceInstUsesWith(I, Op1);  // X * 0  == 0
      if (CI->equalsInt(1))                  // X * 1  == X
        return ReplaceInstUsesWith(I, Op0);
      if (CI->isAllOnesValue())              // X * -1 == 0 - X
        return BinaryOperator::CreateNeg(Op0, I.getName());

      const APInt& Val = cast<ConstantInt>(CI)->getValue();
      if (Val.isPowerOf2()) {          // Replace X*(2^C) with X << C
        return BinaryOperator::CreateShl(Op0,
                 ConstantInt::get(Op0->getType(), Val.logBase2()));
      }
    } else if (isa<VectorType>(Op1->getType())) {
      if (Op1->isNullValue())
        return ReplaceInstUsesWith(I, Op1);

      if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
        if (Op1V->isAllOnesValue())              // X * -1 == 0 - X
          return BinaryOperator::CreateNeg(Op0, I.getName());

        // As above, vector X*splat(1.0) -> X in all defined cases.
        if (Constant *Splat = Op1V->getSplatValue()) {
          if (ConstantInt *CI = dyn_cast<ConstantInt>(Splat))
            if (CI->equalsInt(1))
              return ReplaceInstUsesWith(I, Op0);
        }
      }
    }

    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0))
      if (Op0I->getOpcode() == Instruction::Add && Op0I->hasOneUse() &&
          isa<ConstantInt>(Op0I->getOperand(1)) && isa<ConstantInt>(Op1)) {
        // Canonicalize (X+C1)*C2 -> X*C2+C1*C2.
        Value *Add = Builder->CreateMul(Op0I->getOperand(0), Op1, "tmp");
        Value *C1C2 = Builder->CreateMul(Op1, Op0I->getOperand(1));
        return BinaryOperator::CreateAdd(Add, C1C2);
      }

    // Try to fold constant mul into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI, this))
        return R;

    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  if (Value *Op0v = dyn_castNegVal(Op0))     // -X * -Y = X*Y
    if (Value *Op1v = dyn_castNegVal(I.getOperand(1)))
      return BinaryOperator::CreateMul(Op0v, Op1v);

  // (X / Y) *  Y = X - (X % Y)
  // (X / Y) * -Y = (X % Y) - X
  {
    Value *Op1 = I.getOperand(1);
    BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0);
    if (!BO ||
        (BO->getOpcode() != Instruction::UDiv &&
         BO->getOpcode() != Instruction::SDiv)) {
      Op1 = Op0;
      BO = dyn_cast<BinaryOperator>(I.getOperand(1));
    }
    Value *Neg = dyn_castNegVal(Op1);
    if (BO && BO->hasOneUse() &&
        (BO->getOperand(1) == Op1 || BO->getOperand(1) == Neg) &&
        (BO->getOpcode() == Instruction::UDiv ||
         BO->getOpcode() == Instruction::SDiv)) {
      Value *Op0BO = BO->getOperand(0), *Op1BO = BO->getOperand(1);

      // If the division is exact, X % Y is zero.
      if (SDivOperator *SDiv = dyn_cast<SDivOperator>(BO))
        if (SDiv->isExact()) {
          if (Op1BO == Op1)
            return ReplaceInstUsesWith(I, Op0BO);
          return BinaryOperator::CreateNeg(Op0BO);
        }

      Value *Rem;
      if (BO->getOpcode() == Instruction::UDiv)
        Rem = Builder->CreateURem(Op0BO, Op1BO);
      else
        Rem = Builder->CreateSRem(Op0BO, Op1BO);
      Rem->takeName(BO);

      if (Op1BO == Op1)
        return BinaryOperator::CreateSub(Op0BO, Rem);
      return BinaryOperator::CreateSub(Rem, Op0BO);
    }
  }

  if (I.getType() == Type::getInt1Ty(*Context))
    return BinaryOperator::CreateAnd(Op0, I.getOperand(1));

  // If one of the operands of the multiply is a cast from a boolean value, then
  // we know the bool is either zero or one, so this is a 'masking' multiply.
  // See if we can simplify things based on how the boolean was originally
  // formed.
  CastInst *BoolCast = 0;
  if (ZExtInst *CI = dyn_cast<ZExtInst>(Op0))
    if (CI->getOperand(0)->getType() == Type::getInt1Ty(*Context))
      BoolCast = CI;
  if (!BoolCast)
    if (ZExtInst *CI = dyn_cast<ZExtInst>(I.getOperand(1)))
      if (CI->getOperand(0)->getType() == Type::getInt1Ty(*Context))
        BoolCast = CI;
  if (BoolCast) {
    if (ICmpInst *SCI = dyn_cast<ICmpInst>(BoolCast->getOperand(0))) {
      Value *SCIOp0 = SCI->getOperand(0), *SCIOp1 = SCI->getOperand(1);
      const Type *SCOpTy = SCIOp0->getType();
      bool TIS = false;

      // If the icmp is true iff the sign bit of X is set, then convert this
      // multiply into a shift/and combination.
      if (isa<ConstantInt>(SCIOp1) &&
          isSignBitCheck(SCI->getPredicate(), cast<ConstantInt>(SCIOp1), TIS) &&
          TIS) {
        // Shift the X value right to turn it into "all signbits".
        Constant *Amt = ConstantInt::get(SCIOp0->getType(),
                                         SCOpTy->getPrimitiveSizeInBits()-1);
        Value *V = Builder->CreateAShr(SCIOp0, Amt,
                                    BoolCast->getOperand(0)->getName()+".mask");

        // If the multiply type is not the same as the source type, sign extend
        // or truncate to the multiply type.
        if (I.getType() != V->getType())
          V = Builder->CreateIntCast(V, I.getType(), true);

        Value *OtherOp = Op0 == BoolCast ? I.getOperand(1) : Op0;
        return BinaryOperator::CreateAnd(V, OtherOp);
      }
    }
  }

  return Changed ? &I : 0;
}
Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
  bool Changed = SimplifyCommutative(I);
  Value *Op0 = I.getOperand(0);

  // Simplify mul instructions with a constant RHS...
  if (Constant *Op1 = dyn_cast<Constant>(I.getOperand(1))) {
    if (ConstantFP *Op1F = dyn_cast<ConstantFP>(Op1)) {
      // "In IEEE floating point, x*1 is not equivalent to x for nans.  However,
      // ANSI says we can drop signals, so we can do this anyway." (from GCC)
      if (Op1F->isExactlyValue(1.0))
        return ReplaceInstUsesWith(I, Op0);  // Eliminate 'mul double %X, 1.0'
    } else if (isa<VectorType>(Op1->getType())) {
      if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
        // As above, vector X*splat(1.0) -> X in all defined cases.
        if (Constant *Splat = Op1V->getSplatValue()) {
          if (ConstantFP *F = dyn_cast<ConstantFP>(Splat))
            if (F->isExactlyValue(1.0))
              return ReplaceInstUsesWith(I, Op0);
        }
      }
    }

    // Try to fold constant mul into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI, this))
        return R;

    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  if (Value *Op0v = dyn_castFNegVal(Op0))     // -X * -Y = X*Y
    if (Value *Op1v = dyn_castFNegVal(I.getOperand(1)))
      return BinaryOperator::CreateFMul(Op0v, Op1v);

  return Changed ? &I : 0;
}
/// SimplifyDivRemOfSelect - Try to fold a divide or remainder of a select
/// instruction.
bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) {
  SelectInst *SI = cast<SelectInst>(I.getOperand(1));

  // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
  int NonNullOperand = -1;
  if (Constant *ST = dyn_cast<Constant>(SI->getOperand(1)))
    if (ST->isNullValue())
      NonNullOperand = 2;
  // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
  if (Constant *ST = dyn_cast<Constant>(SI->getOperand(2)))
    if (ST->isNullValue())
      NonNullOperand = 1;

  if (NonNullOperand == -1)
    return false;

  Value *SelectCond = SI->getOperand(0);

  // Change the div/rem to use 'Y' instead of the select.
  I.setOperand(1, SI->getOperand(NonNullOperand));

  // Okay, we know we replace the operand of the div/rem with 'Y' with no
  // problem.  However, the select, or the condition of the select may have
  // multiple uses.  Based on our knowledge that the operand must be non-zero,
  // propagate the known value for the select into other uses of it, and
  // propagate a known value of the condition into its other users.

  // If the select and condition only have a single use, don't bother with this,
  // early exit.
  if (SI->use_empty() && SelectCond->hasOneUse())
    return true;

  // Scan the current block backward, looking for other uses of SI.
  BasicBlock::iterator BBI = &I, BBFront = I.getParent()->begin();

  while (BBI != BBFront) {
    --BBI;
    // If we found a call to a function, we can't assume it will return, so
    // information from below it cannot be propagated above it.
    if (isa<CallInst>(BBI) && !isa<IntrinsicInst>(BBI))
      break;

    // Replace uses of the select or its condition with the known values.
    for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
         I != E; ++I) {
      if (*I == SI) {
        *I = SI->getOperand(NonNullOperand);
        Worklist.Add(BBI);
      } else if (*I == SelectCond) {
        *I = NonNullOperand == 1 ? ConstantInt::getTrue(*Context) :
                                   ConstantInt::getFalse(*Context);
        Worklist.Add(BBI);
      }
    }

    // If we passed the instruction, quit looking for it.
    if (&*BBI == SI)
      SI = 0;
    if (&*BBI == SelectCond)
      SelectCond = 0;

    // If we ran out of things to eliminate, break out of the loop.
    if (SelectCond == 0 && SI == 0)
      break;
  }
  return true;
}
/// This function implements the transforms on div instructions that work
/// regardless of the kind of div instruction it is (udiv, sdiv, or fdiv). It
/// is used by the visitors to those instructions.
/// @brief Transforms common to all three div instructions
Instruction *InstCombiner::commonDivTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // undef / X -> 0        for integer.
  // undef / X -> undef    for FP (the undef could be a snan).
  if (isa<UndefValue>(Op0)) {
    if (Op0->getType()->isFPOrFPVector())
      return ReplaceInstUsesWith(I, Op0);
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
  }

  // X / undef -> undef
  if (isa<UndefValue>(Op1))
    return ReplaceInstUsesWith(I, Op1);

  return 0;
}
/// This function implements the transforms common to both integer division
/// instructions (udiv and sdiv). It is called by the visitors to those integer
/// division instructions.
/// @brief Common integer divide transforms
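///
/// For example (illustrative): any div by 1 is replaced by X, and
/// (X udiv 3) udiv 5 becomes X udiv 15 when the combined divisor does not
/// overflow.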
Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // (sdiv X, X) --> 1     (udiv X, X) --> 1
  if (Op0 == Op1) {
    if (const VectorType *Ty = dyn_cast<VectorType>(I.getType())) {
      Constant *CI = ConstantInt::get(Ty->getElementType(), 1);
      std::vector<Constant*> Elts(Ty->getNumElements(), CI);
      return ReplaceInstUsesWith(I, ConstantVector::get(Elts));
    }

    Constant *CI = ConstantInt::get(I.getType(), 1);
    return ReplaceInstUsesWith(I, CI);
  }

  if (Instruction *Common = commonDivTransforms(I))
    return Common;

  // Handle cases involving: [su]div X, (select Cond, Y, Z)
  // This does not apply for fdiv.
  if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
    return &I;

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    // div X, 1 == X
    if (RHS->equalsInt(1))
      return ReplaceInstUsesWith(I, Op0);

    // (X / C1) / C2  -> X / (C1*C2)
    if (Instruction *LHS = dyn_cast<Instruction>(Op0))
      if (Instruction::BinaryOps(LHS->getOpcode()) == I.getOpcode())
        if (ConstantInt *LHSRHS = dyn_cast<ConstantInt>(LHS->getOperand(1))) {
          if (MultiplyOverflows(RHS, LHSRHS,
                                I.getOpcode()==Instruction::SDiv))
            return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
          else
            return BinaryOperator::Create(I.getOpcode(), LHS->getOperand(0),
                                          ConstantExpr::getMul(RHS, LHSRHS));
        }

    if (!RHS->isZero()) { // avoid X udiv 0
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
        if (Instruction *R = FoldOpIntoSelect(I, SI, this))
          return R;
      if (isa<PHINode>(Op0))
        if (Instruction *NV = FoldOpIntoPhi(I))
          return NV;
    }
  }

  // 0 / X == 0, we don't need to preserve faults!
  if (ConstantInt *LHS = dyn_cast<ConstantInt>(Op0))
    if (LHS->equalsInt(0))
      return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

  // It can't be division by zero, hence it must be division by one.
  if (I.getType() == Type::getInt1Ty(*Context))
    return ReplaceInstUsesWith(I, Op0);

  if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
    if (ConstantInt *X = cast_or_null<ConstantInt>(Op1V->getSplatValue()))
      // div X, 1 == X
      if (X->isOne())
        return ReplaceInstUsesWith(I, Op0);
  }

  return 0;
}
Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  if (ConstantInt *C = dyn_cast<ConstantInt>(Op1)) {
    // X udiv C^2 -> X >> C
    // Check to see if this is an unsigned division with an exact power of 2,
    // if so, convert to a right shift.
    if (C->getValue().isPowerOf2())  // 0 not included in isPowerOf2
      return BinaryOperator::CreateLShr(Op0,
               ConstantInt::get(Op0->getType(), C->getValue().logBase2()));

    // X udiv C, where C >= signbit
    if (C->getValue().isNegative()) {
      Value *IC = Builder->CreateICmpULT( Op0, C);
      return SelectInst::Create(IC, Constant::getNullValue(I.getType()),
                                ConstantInt::get(I.getType(), 1));
    }
  }

  // X udiv (C1 << N), where C1 is "1<<C2"  -->  X >> (N+C2)
  if (BinaryOperator *RHSI = dyn_cast<BinaryOperator>(I.getOperand(1))) {
    if (RHSI->getOpcode() == Instruction::Shl &&
        isa<ConstantInt>(RHSI->getOperand(0))) {
      const APInt& C1 = cast<ConstantInt>(RHSI->getOperand(0))->getValue();
      if (C1.isPowerOf2()) {
        Value *N = RHSI->getOperand(1);
        const Type *NTy = N->getType();
        if (uint32_t C2 = C1.logBase2())
          N = Builder->CreateAdd(N, ConstantInt::get(NTy, C2), "tmp");
        return BinaryOperator::CreateLShr(Op0, N);
      }
    }
  }

  // udiv X, (Select Cond, C1, C2) --> Select Cond, (shr X, C1), (shr X, C2)
  // where C1&C2 are powers of two.
  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
    if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
      if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
        const APInt &TVA = STO->getValue(), &FVA = SFO->getValue();
        if (TVA.isPowerOf2() && FVA.isPowerOf2()) {
          // Compute the shift amounts
          uint32_t TSA = TVA.logBase2(), FSA = FVA.logBase2();
          // Construct the "on true" case of the select
          Constant *TC = ConstantInt::get(Op0->getType(), TSA);
          Value *TSI = Builder->CreateLShr(Op0, TC, SI->getName()+".t");

          // Construct the "on false" case of the select
          Constant *FC = ConstantInt::get(Op0->getType(), FSA);
          Value *FSI = Builder->CreateLShr(Op0, FC, SI->getName()+".f");

          // construct the select instruction and return it.
          return SelectInst::Create(SI->getOperand(0), TSI, FSI, SI->getName());
        }
      }

  return 0;
}
Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    // sdiv X, -1 == -X
    if (RHS->isAllOnesValue())
      return BinaryOperator::CreateNeg(Op0);

    // sdiv X, C  -->  ashr X, log2(C)
    if (cast<SDivOperator>(&I)->isExact() &&
        RHS->getValue().isNonNegative() &&
        RHS->getValue().isPowerOf2()) {
      Value *ShAmt = llvm::ConstantInt::get(RHS->getType(),
                                            RHS->getValue().exactLogBase2());
      return BinaryOperator::CreateAShr(Op0, ShAmt, I.getName());
    }

    // -X/C  -->  X/-C  provided the negation doesn't overflow.
    if (SubOperator *Sub = dyn_cast<SubOperator>(Op0))
      if (isa<Constant>(Sub->getOperand(0)) &&
          cast<Constant>(Sub->getOperand(0))->isNullValue() &&
          Sub->hasNoSignedWrap())
        return BinaryOperator::CreateSDiv(Sub->getOperand(1),
                                          ConstantExpr::getNeg(RHS));
  }

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a udiv.
  if (I.getType()->isInteger()) {
    APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
    if (MaskedValueIsZero(Op0, Mask)) {
      if (MaskedValueIsZero(Op1, Mask)) {
        // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
        return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      }
      ConstantInt *ShiftedInt;
      if (match(Op1, m_Shl(m_ConstantInt(ShiftedInt), m_Value())) &&
          ShiftedInt->getValue().isPowerOf2()) {
        // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
        // Safe because the only negative value (1 << Y) can take on is
        // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
        // the sign bit set.
        return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      }
    }
  }

  return 0;
}
Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
  return commonDivTransforms(I);
}
/// This function implements the transforms on rem instructions that work
/// regardless of the kind of rem instruction it is (urem, srem, or frem). It
/// is used by the visitors to those instructions.
/// @brief Transforms common to all three rem instructions
Instruction *InstCombiner::commonRemTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (isa<UndefValue>(Op0)) {             // undef % X -> 0
    if (I.getType()->isFPOrFPVector())
      return ReplaceInstUsesWith(I, Op0);  // X % undef -> undef (could be SNaN)
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
  }
  if (isa<UndefValue>(Op1))
    return ReplaceInstUsesWith(I, Op1);  // X % undef -> undef

  // Handle cases involving: rem X, (select Cond, Y, Z)
  if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
    return &I;

  return 0;
}
/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those integer
/// remainder instructions.
/// @brief Common integer remainder transforms
Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Instruction *common = commonRemTransforms(I))
    return common;

  // 0 % X == 0 for integer, we don't need to preserve faults!
  if (Constant *LHS = dyn_cast<Constant>(Op0))
    if (LHS->isNullValue())
      return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    // X % 0 == undef, we don't need to preserve faults!
    if (RHS->equalsInt(0))
      return ReplaceInstUsesWith(I, UndefValue::get(I.getType()));

    if (RHS->equalsInt(1))  // X % 1 == 0
      return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

    if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
        if (Instruction *R = FoldOpIntoSelect(I, SI, this))
          return R;
      } else if (isa<PHINode>(Op0I)) {
        if (Instruction *NV = FoldOpIntoPhi(I))
          return NV;
      }

      // See if we can fold away this rem instruction.
      if (SimplifyDemandedInstructionBits(I))
        return &I;
    }
  }

  return 0;
}
Instruction *InstCombiner::visitURem(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Instruction *common = commonIRemTransforms(I))
    return common;

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    // X urem 2^k --> X and 2^k-1
    // Check to see if this is an unsigned remainder with an exact power of 2,
    // if so, convert to a bitwise and.
    if (ConstantInt *C = dyn_cast<ConstantInt>(RHS))
      if (C->getValue().isPowerOf2())
        return BinaryOperator::CreateAnd(Op0, SubOne(C));
  }

  if (Instruction *RHSI = dyn_cast<Instruction>(I.getOperand(1))) {
    // Turn A % (C << N), where C is 2^k, into A & ((C << N)-1)
    if (RHSI->getOpcode() == Instruction::Shl &&
        isa<ConstantInt>(RHSI->getOperand(0))) {
      if (cast<ConstantInt>(RHSI->getOperand(0))->getValue().isPowerOf2()) {
        Constant *N1 = Constant::getAllOnesValue(I.getType());
        Value *Add = Builder->CreateAdd(RHSI, N1, "tmp");
        return BinaryOperator::CreateAnd(Op0, Add);
      }
    }
  }

  // urem X, (select Cond, 2^C1, 2^C2) --> select Cond, (and X, 2^C1-1), (and X, 2^C2-1)
  // where C1&C2 are powers of two.
  if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) {
    if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
      if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
        // STO == 0 and SFO == 0 handled above.
        if ((STO->getValue().isPowerOf2()) &&
            (SFO->getValue().isPowerOf2())) {
          Value *TrueAnd = Builder->CreateAnd(Op0, SubOne(STO),
                                              SI->getName()+".t");
          Value *FalseAnd = Builder->CreateAnd(Op0, SubOne(SFO),
                                               SI->getName()+".f");
          return SelectInst::Create(SI->getOperand(0), TrueAnd, FalseAnd);
        }
      }
  }

  return 0;
}
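// A worked example of the select case above (invented IR names):
//   %r = urem i32 %x, (select i1 %c, i32 8, i32 2)
// turns into two masks and a select:
//   %t = and i32 %x, 7
//   %f = and i32 %x, 1
//   %r = select i1 %c, i32 %t, i32 %f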
Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // Handle the integer rem common cases
  if (Instruction *Common = commonIRemTransforms(I))
    return Common;

  if (Value *RHSNeg = dyn_castNegVal(Op1))
    if (!isa<Constant>(RHSNeg) ||
        (isa<ConstantInt>(RHSNeg) &&
         cast<ConstantInt>(RHSNeg)->getValue().isStrictlyPositive())) {
      // X % -Y -> X % Y
      Worklist.AddValue(I.getOperand(1));
      I.setOperand(1, RHSNeg);
      return &I;
    }

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a urem.
  if (I.getType()->isInteger()) {
    APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
    if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
      // X srem Y -> X urem Y, iff X and Y don't have sign bit set
      return BinaryOperator::CreateURem(Op0, Op1, I.getName());
    }
  }

  // If it's a constant vector, flip any negative values positive.
  if (ConstantVector *RHSV = dyn_cast<ConstantVector>(Op1)) {
    unsigned VWidth = RHSV->getNumOperands();

    bool hasNegative = false;
    for (unsigned i = 0; !hasNegative && i != VWidth; ++i)
      if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i)))
        if (RHS->getValue().isNegative())
          hasNegative = true;

    if (hasNegative) {
      std::vector<Constant *> Elts(VWidth);
      for (unsigned i = 0; i != VWidth; ++i) {
        if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i))) {
          if (RHS->getValue().isNegative())
            Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
          else
            Elts[i] = RHS;
        }
      }

      Constant *NewRHSV = ConstantVector::get(Elts);
      if (NewRHSV != RHSV) {
        Worklist.AddValue(I.getOperand(1));
        I.setOperand(1, NewRHSV);
        return &I;
      }
    }
  }

  return 0;
}
Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
  return commonRemTransforms(I);
}
// isOneBitSet - Return true if there is exactly one bit set in the specified
// constant.
static bool isOneBitSet(const ConstantInt *CI) {
  return CI->getValue().isPowerOf2();
}

// isHighOnes - Return true if the constant is of the form 1+0+.
// This is the same as lowones(~X).
static bool isHighOnes(const ConstantInt *CI) {
  return (~CI->getValue() + 1).isPowerOf2();
}
/// getICmpCode - Encode an icmp predicate into a three bit mask. These bits
/// are carefully arranged to allow folding of expressions such as:
///
///      (A < B) | (A > B) --> (A != B)
///
/// Note that this is only valid if the first and second predicates have the
/// same sign. It is illegal to do: (A u< B) | (A s> B)
///
/// Three bits are used to represent the condition, as follows:
///   0  A > B
///   1  A == B
///   2  A < B
///
/// <=>  Value  Definition
/// 000     0   Always false
/// 001     1   A >  B
/// 010     2   A == B
/// 011     3   A >= B
/// 100     4   A <  B
/// 101     5   A != B
/// 110     6   A <= B
/// 111     7   Always true
///
static unsigned getICmpCode(const ICmpInst *ICI) {
  switch (ICI->getPredicate()) {
    // False -> 0
  case ICmpInst::ICMP_UGT: return 1;  // 001
  case ICmpInst::ICMP_SGT: return 1;  // 001
  case ICmpInst::ICMP_EQ:  return 2;  // 010
  case ICmpInst::ICMP_UGE: return 3;  // 011
  case ICmpInst::ICMP_SGE: return 3;  // 011
  case ICmpInst::ICMP_ULT: return 4;  // 100
  case ICmpInst::ICMP_SLT: return 4;  // 100
  case ICmpInst::ICMP_NE:  return 5;  // 101
  case ICmpInst::ICMP_ULE: return 6;  // 110
  case ICmpInst::ICMP_SLE: return 6;  // 110
    // True -> 7
  default:
    llvm_unreachable("Invalid ICmp predicate!");
    return 0;
  }
}
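// Sketch of how the encoding composes (numbers follow the table above):
//   (A < B) | (A > B)  ->  code 4 | code 1 = 5  ->  A != B
//   (A < B) & (A <= B) ->  code 4 & code 6 = 4  ->  A <  B
// FoldICmpLogical below relies on exactly this bitwise combination.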
/// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
/// predicate into a three bit mask. It also returns whether it is an ordered
/// predicate by reference.
static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
  isOrdered = false;
  switch (CC) {
  case FCmpInst::FCMP_ORD: isOrdered = true; return 0;  // 000
  case FCmpInst::FCMP_UNO:                   return 0;  // 000
  case FCmpInst::FCMP_OGT: isOrdered = true; return 1;  // 001
  case FCmpInst::FCMP_UGT:                   return 1;  // 001
  case FCmpInst::FCMP_OEQ: isOrdered = true; return 2;  // 010
  case FCmpInst::FCMP_UEQ:                   return 2;  // 010
  case FCmpInst::FCMP_OGE: isOrdered = true; return 3;  // 011
  case FCmpInst::FCMP_UGE:                   return 3;  // 011
  case FCmpInst::FCMP_OLT: isOrdered = true; return 4;  // 100
  case FCmpInst::FCMP_ULT:                   return 4;  // 100
  case FCmpInst::FCMP_ONE: isOrdered = true; return 5;  // 101
  case FCmpInst::FCMP_UNE:                   return 5;  // 101
  case FCmpInst::FCMP_OLE: isOrdered = true; return 6;  // 110
  case FCmpInst::FCMP_ULE:                   return 6;  // 110
  default:
    // Not expecting FCMP_FALSE and FCMP_TRUE;
    llvm_unreachable("Unexpected FCmp predicate!");
    return 0;
  }
}
/// getICmpValue - This is the complement of getICmpCode, which turns an
/// opcode and two operands into either a constant true or false, or a brand
/// new ICmp instruction. The sign is passed in to determine which kind
/// of predicate to use in the new icmp instruction.
static Value *getICmpValue(bool sign, unsigned code, Value *LHS, Value *RHS,
                           LLVMContext *Context) {
  switch (code) {
  default: llvm_unreachable("Illegal ICmp code!");
  case 0: return ConstantInt::getFalse(*Context);
  case 1:
    if (sign)
      return new ICmpInst(ICmpInst::ICMP_SGT, LHS, RHS);
    else
      return new ICmpInst(ICmpInst::ICMP_UGT, LHS, RHS);
  case 2: return new ICmpInst(ICmpInst::ICMP_EQ,  LHS, RHS);
  case 3:
    if (sign)
      return new ICmpInst(ICmpInst::ICMP_SGE, LHS, RHS);
    else
      return new ICmpInst(ICmpInst::ICMP_UGE, LHS, RHS);
  case 4:
    if (sign)
      return new ICmpInst(ICmpInst::ICMP_SLT, LHS, RHS);
    else
      return new ICmpInst(ICmpInst::ICMP_ULT, LHS, RHS);
  case 5: return new ICmpInst(ICmpInst::ICMP_NE,  LHS, RHS);
  case 6:
    if (sign)
      return new ICmpInst(ICmpInst::ICMP_SLE, LHS, RHS);
    else
      return new ICmpInst(ICmpInst::ICMP_ULE, LHS, RHS);
  case 7: return ConstantInt::getTrue(*Context);
  }
}
/// getFCmpValue - This is the complement of getFCmpCode, which turns an
/// opcode and two operands into a new FCmp instruction. isordered is passed
/// in to determine which kind of predicate to use in the new fcmp instruction.
static Value *getFCmpValue(bool isordered, unsigned code,
                           Value *LHS, Value *RHS, LLVMContext *Context) {
  switch (code) {
  default: llvm_unreachable("Illegal FCmp code!");
  case 0:
    if (isordered)
      return new FCmpInst(FCmpInst::FCMP_ORD, LHS, RHS);
    else
      return new FCmpInst(FCmpInst::FCMP_UNO, LHS, RHS);
  case 1:
    if (isordered)
      return new FCmpInst(FCmpInst::FCMP_OGT, LHS, RHS);
    else
      return new FCmpInst(FCmpInst::FCMP_UGT, LHS, RHS);
  case 2:
    if (isordered)
      return new FCmpInst(FCmpInst::FCMP_OEQ, LHS, RHS);
    else
      return new FCmpInst(FCmpInst::FCMP_UEQ, LHS, RHS);
  case 3:
    if (isordered)
      return new FCmpInst(FCmpInst::FCMP_OGE, LHS, RHS);
    else
      return new FCmpInst(FCmpInst::FCMP_UGE, LHS, RHS);
  case 4:
    if (isordered)
      return new FCmpInst(FCmpInst::FCMP_OLT, LHS, RHS);
    else
      return new FCmpInst(FCmpInst::FCMP_ULT, LHS, RHS);
  case 5:
    if (isordered)
      return new FCmpInst(FCmpInst::FCMP_ONE, LHS, RHS);
    else
      return new FCmpInst(FCmpInst::FCMP_UNE, LHS, RHS);
  case 6:
    if (isordered)
      return new FCmpInst(FCmpInst::FCMP_OLE, LHS, RHS);
    else
      return new FCmpInst(FCmpInst::FCMP_ULE, LHS, RHS);
  case 7: return ConstantInt::getTrue(*Context);
  }
}
/// PredicatesFoldable - Return true if both predicates match sign or if at
/// least one of them is an equality comparison (which is signless).
static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
  return (ICmpInst::isSignedPredicate(p1) == ICmpInst::isSignedPredicate(p2)) ||
         (ICmpInst::isSignedPredicate(p1) && ICmpInst::isEquality(p2)) ||
         (ICmpInst::isSignedPredicate(p2) && ICmpInst::isEquality(p1));
}
namespace {

// FoldICmpLogical - Implements (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
struct FoldICmpLogical {
  InstCombiner &IC;
  Value *LHS, *RHS;
  ICmpInst::Predicate pred;
  FoldICmpLogical(InstCombiner &ic, ICmpInst *ICI)
    : IC(ic), LHS(ICI->getOperand(0)), RHS(ICI->getOperand(1)),
      pred(ICI->getPredicate()) {}
  bool shouldApply(Value *V) const {
    if (ICmpInst *ICI = dyn_cast<ICmpInst>(V))
      if (PredicatesFoldable(pred, ICI->getPredicate()))
        return ((ICI->getOperand(0) == LHS && ICI->getOperand(1) == RHS) ||
                (ICI->getOperand(0) == RHS && ICI->getOperand(1) == LHS));
    return false;
  }
  Instruction *apply(Instruction &Log) const {
    ICmpInst *ICI = cast<ICmpInst>(Log.getOperand(0));
    if (ICI->getOperand(0) != LHS) {
      assert(ICI->getOperand(1) == LHS);
      ICI->swapOperands();  // Swap the LHS and RHS of the ICmp
    }

    ICmpInst *RHSICI = cast<ICmpInst>(Log.getOperand(1));
    unsigned LHSCode = getICmpCode(ICI);
    unsigned RHSCode = getICmpCode(RHSICI);
    unsigned Code;
    switch (Log.getOpcode()) {
    case Instruction::And: Code = LHSCode & RHSCode; break;
    case Instruction::Or:  Code = LHSCode | RHSCode; break;
    case Instruction::Xor: Code = LHSCode ^ RHSCode; break;
    default: llvm_unreachable("Illegal logical opcode!"); return 0;
    }

    bool isSigned = ICmpInst::isSignedPredicate(RHSICI->getPredicate()) ||
                    ICmpInst::isSignedPredicate(ICI->getPredicate());

    Value *RV = getICmpValue(isSigned, Code, LHS, RHS, IC.getContext());
    if (Instruction *I = dyn_cast<Instruction>(RV))
      return I;
    // Otherwise, it's a constant boolean value...
    return IC.ReplaceInstUsesWith(Log, RV);
  }
};
} // end anonymous namespace
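// Example of the helper in action (illustrative, operand names invented): for
//   %t = or i1 (icmp slt i32 %x, %y), (icmp sgt i32 %x, %y)
// shouldApply matches the second icmp against the cached operands, apply()
// or's the codes 4|1 = 5, and getICmpValue rebuilds "icmp ne i32 %x, %y".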
// OptAndOp - This handles expressions of the form ((val OP C1) & C2).  Where
// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'.  Op is
// guaranteed to be a binary operator.
Instruction *InstCombiner::OptAndOp(Instruction *Op,
                                    ConstantInt *OpRHS,
                                    ConstantInt *AndRHS,
                                    BinaryOperator &TheAnd) {
  Value *X = Op->getOperand(0);
  Constant *Together = 0;
  if (!Op->isShift())
    Together = ConstantExpr::getAnd(AndRHS, OpRHS);

  switch (Op->getOpcode()) {
  case Instruction::Xor:
    if (Op->hasOneUse()) {
      // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
      Value *And = Builder->CreateAnd(X, AndRHS);
      And->takeName(Op);
      return BinaryOperator::CreateXor(And, Together);
    }
    break;
  case Instruction::Or:
    if (Together == AndRHS) // (X | C) & C --> C
      return ReplaceInstUsesWith(TheAnd, AndRHS);

    if (Op->hasOneUse() && Together != OpRHS) {
      // (X | C1) & C2 --> (X | (C1&C2)) & C2
      Value *Or = Builder->CreateOr(X, Together);
      Or->takeName(Op);
      return BinaryOperator::CreateAnd(Or, AndRHS);
    }
    break;
  case Instruction::Add:
    if (Op->hasOneUse()) {
      // Adding a one to a single bit bit-field should be turned into an XOR
      // of the bit.  First thing to check is to see if this AND is with a
      // single bit constant.
      const APInt &AndRHSV = cast<ConstantInt>(AndRHS)->getValue();

      // If there is only one bit set...
      if (isOneBitSet(cast<ConstantInt>(AndRHS))) {
        // Ok, at this point, we know that we are masking the result of the
        // ADD down to exactly one bit.  If the constant we are adding has
        // no bits set below this bit, then we can eliminate the ADD.
        const APInt &AddRHS = cast<ConstantInt>(OpRHS)->getValue();

        // Check to see if any bits below the one bit set in AndRHSV are set.
        if ((AddRHS & (AndRHSV-1)) == 0) {
          // If not, the only thing that can effect the output of the AND is
          // the bit specified by AndRHSV.  If that bit is set, the effect of
          // the XOR is to toggle the bit.  If it is clear, then the ADD has
          // no effect.
          if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
            TheAnd.setOperand(0, X);
            return &TheAnd;
          } else {
            // Pull the XOR out of the AND.
            Value *NewAnd = Builder->CreateAnd(X, AndRHS);
            NewAnd->takeName(Op);
            return BinaryOperator::CreateXor(NewAnd, AndRHS);
          }
        }
      }
    }
    break;

  case Instruction::Shl: {
    // We know that the AND will not produce any of the bits shifted in, so if
    // the anded constant includes them, clear them now!
    //
    uint32_t BitWidth = AndRHS->getType()->getBitWidth();
    uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
    APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
    ConstantInt *CI = ConstantInt::get(*Context, AndRHS->getValue() & ShlMask);

    if (CI->getValue() == ShlMask) {
      // Masking out bits that the shift already masks
      return ReplaceInstUsesWith(TheAnd, Op);   // No need for the and.
    } else if (CI != AndRHS) {                  // Reducing bits set in and.
      TheAnd.setOperand(1, CI);
      return &TheAnd;
    }
    break;
  }
  case Instruction::LShr: {
    // We know that the AND will not produce any of the bits shifted in, so if
    // the anded constant includes them, clear them now!  This only applies to
    // unsigned shifts, because a signed shr may bring in set bits!
    //
    uint32_t BitWidth = AndRHS->getType()->getBitWidth();
    uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
    APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
    ConstantInt *CI = ConstantInt::get(*Context, AndRHS->getValue() & ShrMask);

    if (CI->getValue() == ShrMask) {
      // Masking out bits that the shift already masks.
      return ReplaceInstUsesWith(TheAnd, Op);
    } else if (CI != AndRHS) {
      TheAnd.setOperand(1, CI);  // Reduce bits set in and cst.
      return &TheAnd;
    }
    break;
  }
  case Instruction::AShr:
    // Signed shr.
    // See if this is shifting in some sign extension, then masking it out
    // with an and.
    if (Op->hasOneUse()) {
      uint32_t BitWidth = AndRHS->getType()->getBitWidth();
      uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
      APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
      Constant *C = ConstantInt::get(*Context, AndRHS->getValue() & ShrMask);
      if (C == AndRHS) {          // Masking out bits shifted in.
        // (Val ashr C1) & C2 -> (Val lshr C1) & C2
        // Make the argument unsigned.
        Value *ShVal = Op->getOperand(0);
        ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
        return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
      }
    }
    break;
  }
  return 0;
}
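// A small worked example for the shift cases above (constants invented):
// on i8, ((X << 3) & 0xF8) already has its low three bits cleared by the
// shift, so the 'and' is dropped and the shl is used directly, while
// ((X << 3) & 0xFF) has its mask narrowed to ((X << 3) & 0xF8) instead.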
/// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is
/// true, otherwise (V < Lo || V >= Hi). In practice, we emit the more efficient
/// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates
/// whether to treat V, Lo and Hi as signed or not. IB is the location to
/// insert new instructions.
Instruction *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
                                           bool isSigned, bool Inside,
                                           Instruction &IB) {
  assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
            ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
         "Lo is not <= Hi in range emission code!");

  if (Inside) {
    if (Lo == Hi)  // Trivially false.
      return new ICmpInst(ICmpInst::ICMP_NE, V, V);

    // V >= Min && V < Hi --> V < Hi
    if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
      ICmpInst::Predicate pred = (isSigned ?
        ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
      return new ICmpInst(pred, V, Hi);
    }

    // Emit V-Lo <u Hi-Lo
    Constant *NegLo = ConstantExpr::getNeg(Lo);
    Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
    Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
    return new ICmpInst(ICmpInst::ICMP_ULT, Add, UpperBound);
  }

  if (Lo == Hi)  // Trivially true.
    return new ICmpInst(ICmpInst::ICMP_EQ, V, V);

  // V < Min || V >= Hi -> V > Hi-1
  Hi = SubOne(cast<ConstantInt>(Hi));
  if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
    ICmpInst::Predicate pred = (isSigned ?
      ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
    return new ICmpInst(pred, V, Hi);
  }

  // Emit V-Lo >u Hi-1-Lo
  // Note that Hi has already had one subtracted from it, above.
  ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
  Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
  Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
  return new ICmpInst(ICmpInst::ICMP_UGT, Add, LowerBound);
}
// isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
// any number of 0s on either side.  The 1s are allowed to wrap from LSB to
// MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs.  0x0F0F0000 is
// not, since all 1s are not contiguous.
static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
  const APInt &V = Val->getValue();
  uint32_t BitWidth = Val->getType()->getBitWidth();
  if (!APIntOps::isShiftedMask(BitWidth, V)) return false;

  // look for the first zero bit after the run of ones
  MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
  // look for the first non-zero bit
  ME = V.getActiveBits();
  return true;
}
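// Example values (for a 32-bit constant): 0x00FFF000 is a contiguous run and
// is accepted, reporting its bounds via MB/ME; 0x0F0F0000 is rejected because
// its set bits are not contiguous. Wrapped runs such as 0xFF0000FF are also
// accepted, as the comment above notes.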
/// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
/// where isSub determines whether the operator is a sub.  If we can fold one of
/// the following xforms:
///
/// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
/// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
/// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
///
/// return (A +/- B).
///
Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
                                        ConstantInt *Mask, bool isSub,
                                        Instruction &I) {
  Instruction *LHSI = dyn_cast<Instruction>(LHS);
  if (!LHSI || LHSI->getNumOperands() != 2 ||
      !isa<ConstantInt>(LHSI->getOperand(1))) return 0;

  ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));

  switch (LHSI->getOpcode()) {
  default: return 0;
  case Instruction::And:
    if (ConstantExpr::getAnd(N, Mask) == Mask) {
      // If the AndRHS is a power of two minus one (0+1+), this is simple.
      if ((Mask->getValue().countLeadingZeros() +
           Mask->getValue().countPopulation()) ==
          Mask->getValue().getBitWidth())
        break;

      // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
      // part, we don't need any explicit masks to take them out of A.  If that
      // is all N is, ignore it.
      uint32_t MB = 0, ME = 0;
      if (isRunOfOnes(Mask, MB, ME)) {  // begin/end bit of run, inclusive
        uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
        APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
        if (MaskedValueIsZero(RHS, Mask))
          break;
      }
    }
    return 0;
  case Instruction::Or:
  case Instruction::Xor:
    // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
    if ((Mask->getValue().countLeadingZeros() +
         Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
        && ConstantExpr::getAnd(N, Mask)->isNullValue())
      break;
    return 0;
  }

  if (isSub)
    return Builder->CreateSub(LHSI->getOperand(0), RHS, "fold");
  return Builder->CreateAdd(LHSI->getOperand(0), RHS, "fold");
}
/// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
Instruction *InstCombiner::FoldAndOfICmps(Instruction &I,
                                          ICmpInst *LHS, ICmpInst *RHS) {
  Value *Val, *Val2;
  ConstantInt *LHSCst, *RHSCst;
  ICmpInst::Predicate LHSCC, RHSCC;

  // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
  if (!match(LHS, m_ICmp(LHSCC, m_Value(Val),
                         m_ConstantInt(LHSCst))) ||
      !match(RHS, m_ICmp(RHSCC, m_Value(Val2),
                         m_ConstantInt(RHSCst))))
    return 0;

  // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
  // where C is a power of 2
  if (LHSCst == RHSCst && LHSCC == RHSCC && LHSCC == ICmpInst::ICMP_ULT &&
      LHSCst->getValue().isPowerOf2()) {
    Value *NewOr = Builder->CreateOr(Val, Val2);
    return new ICmpInst(LHSCC, NewOr, LHSCst);
  }

  // From here on, we only handle:
  //   (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
  if (Val != Val2) return 0;

  // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
  if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
      RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
      LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
      RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
    return 0;

  // We can't fold (ugt x, C) & (sgt x, C2).
  if (!PredicatesFoldable(LHSCC, RHSCC))
    return 0;

  // Ensure that the larger constant is on the RHS.
  bool ShouldSwap;
  if (ICmpInst::isSignedPredicate(LHSCC) ||
      (ICmpInst::isEquality(LHSCC) &&
       ICmpInst::isSignedPredicate(RHSCC)))
    ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
  else
    ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());

  if (ShouldSwap) {
    std::swap(LHS, RHS);
    std::swap(LHSCst, RHSCst);
    std::swap(LHSCC, RHSCC);
  }

  // At this point, we know we have two icmp instructions
  // comparing a value against two constants and and'ing the result
  // together.  Because of the above check, we know that we only have
  // icmp eq, icmp ne, icmp [su]lt, and icmp [SU]gt here.  We also know
  // (from the FoldICmpLogical check above), that the two constants
  // are not equal and that the larger constant is on the RHS
  assert(LHSCst != RHSCst && "Compares not folded above?");

  switch (LHSCC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ICmpInst::ICMP_EQ:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X == 13 & X == 15) -> false
    case ICmpInst::ICMP_UGT:        // (X == 13 & X >  15) -> false
    case ICmpInst::ICMP_SGT:        // (X == 13 & X >  15) -> false
      return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
    case ICmpInst::ICMP_NE:         // (X == 13 & X != 15) -> X == 13
    case ICmpInst::ICMP_ULT:        // (X == 13 & X <  15) -> X == 13
    case ICmpInst::ICMP_SLT:        // (X == 13 & X <  15) -> X == 13
      return ReplaceInstUsesWith(I, LHS);
    }
  case ICmpInst::ICMP_NE:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_ULT:
      if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13
        return new ICmpInst(ICmpInst::ICMP_ULT, Val, LHSCst);
      break;                        // (X != 13 & X u< 15) -> no change
    case ICmpInst::ICMP_SLT:
      if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13
        return new ICmpInst(ICmpInst::ICMP_SLT, Val, LHSCst);
      break;                        // (X != 13 & X s< 15) -> no change
    case ICmpInst::ICMP_EQ:         // (X != 13 & X == 15) -> X == 15
    case ICmpInst::ICMP_UGT:        // (X != 13 & X u> 15) -> X u> 15
    case ICmpInst::ICMP_SGT:        // (X != 13 & X s> 15) -> X s> 15
      return ReplaceInstUsesWith(I, RHS);
    case ICmpInst::ICMP_NE:
      if (LHSCst == SubOne(RHSCst)){// (X != 13 & X != 14) -> X-13 >u 1
        Constant *AddCST = ConstantExpr::getNeg(LHSCst);
        Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
        return new ICmpInst(ICmpInst::ICMP_UGT, Add,
                            ConstantInt::get(Add->getType(), 1));
      }
      break;                        // (X != 13 & X != 15) -> no change
    }
    break;
  case ICmpInst::ICMP_ULT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X u< 13 & X == 15) -> false
    case ICmpInst::ICMP_UGT:        // (X u< 13 & X u> 15) -> false
      return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
    case ICmpInst::ICMP_SGT:        // (X u< 13 & X s> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:         // (X u< 13 & X != 15) -> X u< 13
    case ICmpInst::ICMP_ULT:        // (X u< 13 & X u< 15) -> X u< 13
      return ReplaceInstUsesWith(I, LHS);
    case ICmpInst::ICMP_SLT:        // (X u< 13 & X s< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_SLT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X s< 13 & X == 15) -> false
    case ICmpInst::ICMP_SGT:        // (X s< 13 & X s> 15) -> false
      return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
    case ICmpInst::ICMP_UGT:        // (X s< 13 & X u> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:         // (X s< 13 & X != 15) -> X < 13
    case ICmpInst::ICMP_SLT:        // (X s< 13 & X s< 15) -> X < 13
      return ReplaceInstUsesWith(I, LHS);
    case ICmpInst::ICMP_ULT:        // (X s< 13 & X u< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_UGT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X u> 13 & X == 15) -> X == 15
    case ICmpInst::ICMP_UGT:        // (X u> 13 & X u> 15) -> X u> 15
      return ReplaceInstUsesWith(I, RHS);
    case ICmpInst::ICMP_SGT:        // (X u> 13 & X s> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:
      if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14
        return new ICmpInst(LHSCC, Val, RHSCst);
      break;                        // (X u> 13 & X != 15) -> no change
    case ICmpInst::ICMP_ULT:        // (X u> 13 & X u< 15) -> (X-14) <u 1
      return InsertRangeTest(Val, AddOne(LHSCst),
                             RHSCst, false, true, I);
    case ICmpInst::ICMP_SLT:        // (X u> 13 & X s< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_SGT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X s> 13 & X == 15) -> X == 15
    case ICmpInst::ICMP_SGT:        // (X s> 13 & X s> 15) -> X s> 15
      return ReplaceInstUsesWith(I, RHS);
    case ICmpInst::ICMP_UGT:        // (X s> 13 & X u> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:
      if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14
        return new ICmpInst(LHSCC, Val, RHSCst);
      break;                        // (X s> 13 & X != 15) -> no change
    case ICmpInst::ICMP_SLT:        // (X s> 13 & X s< 15) -> (X-14) s< 1
      return InsertRangeTest(Val, AddOne(LHSCst),
                             RHSCst, true, true, I);
    case ICmpInst::ICMP_ULT:        // (X s> 13 & X u< 15) -> no change
      break;
    }
    break;
  }

  return 0;
}
Instruction *InstCombiner::FoldAndOfFCmps(Instruction &I, FCmpInst *LHS,
                                          FCmpInst *RHS) {
  if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
      RHS->getPredicate() == FCmpInst::FCMP_ORD) {
    // (fcmp ord x, c) & (fcmp ord y, c)  -> (fcmp ord x, y)
    if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
      if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
        // If either of the constants are nans, then the whole thing returns
        // false.
        if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
          return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
        return new FCmpInst(FCmpInst::FCMP_ORD,
                            LHS->getOperand(0), RHS->getOperand(0));
      }

    // Handle vector zeros.  This occurs because the canonical form of
    // "fcmp ord x,x" is "fcmp ord x, 0".
    if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
        isa<ConstantAggregateZero>(RHS->getOperand(1)))
      return new FCmpInst(FCmpInst::FCMP_ORD,
                          LHS->getOperand(0), RHS->getOperand(0));
    return 0;
  }

  Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
  Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
  FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();

  if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
    // Swap RHS operands to match LHS.
    Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
    std::swap(Op1LHS, Op1RHS);
  }

  if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
    // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
    if (Op0CC == Op1CC)
      return new FCmpInst((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);

    if (Op0CC == FCmpInst::FCMP_FALSE || Op1CC == FCmpInst::FCMP_FALSE)
      return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
    if (Op0CC == FCmpInst::FCMP_TRUE)
      return ReplaceInstUsesWith(I, RHS);
    if (Op1CC == FCmpInst::FCMP_TRUE)
      return ReplaceInstUsesWith(I, LHS);

    bool Op0Ordered;
    bool Op1Ordered;
    unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
    unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
    if (Op1Pred == 0) {
      std::swap(LHS, RHS);
      std::swap(Op0Pred, Op1Pred);
      std::swap(Op0Ordered, Op1Ordered);
    }
    if (Op0Pred == 0) {
      // uno && ueq -> uno && (uno || eq) -> ueq
      // ord && olt -> ord && (ord && lt) -> olt
      if (Op0Ordered == Op1Ordered)
        return ReplaceInstUsesWith(I, RHS);

      // uno && oeq -> uno && (ord && eq) -> false
      // uno && ord -> false
      if (!Op0Ordered)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      // ord && ueq -> ord && (uno || eq) -> oeq
      return cast<Instruction>(getFCmpValue(true, Op1Pred,
                                            Op0LHS, Op0RHS, Context));
    }
  }

  return 0;
}
Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
  bool Changed = SimplifyCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (isa<UndefValue>(Op1))                         // X & undef -> 0
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

  // and X, X = X
  if (Op0 == Op1)
    return ReplaceInstUsesWith(I, Op1);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;
  if (isa<VectorType>(I.getType())) {
    if (ConstantVector *CP = dyn_cast<ConstantVector>(Op1)) {
      if (CP->isAllOnesValue())            // X & <-1,-1> -> X
        return ReplaceInstUsesWith(I, I.getOperand(0));
    } else if (isa<ConstantAggregateZero>(Op1)) {
      return ReplaceInstUsesWith(I, Op1);  // X & <0,0> -> <0,0>
    }
  }

  if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
    const APInt &AndRHSMask = AndRHS->getValue();
    APInt NotAndRHS(~AndRHSMask);

    // Optimize a variety of ((val OP C1) & C2) combinations...
    if (isa<BinaryOperator>(Op0)) {
      Instruction *Op0I = cast<Instruction>(Op0);
      Value *Op0LHS = Op0I->getOperand(0);
      Value *Op0RHS = Op0I->getOperand(1);
      switch (Op0I->getOpcode()) {
      case Instruction::Xor:
      case Instruction::Or:
        // If the mask is only needed on one incoming arm, push it up.
        if (Op0I->hasOneUse()) {
          if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
            // Not masking anything out for the LHS, move to RHS.
            Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
                                               Op0RHS->getName()+".masked");
            return BinaryOperator::Create(
                       cast<BinaryOperator>(Op0I)->getOpcode(), Op0LHS, NewRHS);
          }
          if (!isa<Constant>(Op0RHS) &&
              MaskedValueIsZero(Op0RHS, NotAndRHS)) {
            // Not masking anything out for the RHS, move to LHS.
            Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
                                               Op0LHS->getName()+".masked");
            return BinaryOperator::Create(
                       cast<BinaryOperator>(Op0I)->getOpcode(), NewLHS, Op0RHS);
          }
        }
        break;
      case Instruction::Add:
        // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
        // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
        // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
        if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
          return BinaryOperator::CreateAnd(V, AndRHS);
        if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
          return BinaryOperator::CreateAnd(V, AndRHS);  // Add commutes
        break;

      case Instruction::Sub:
        // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
        // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
        // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
        if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
          return BinaryOperator::CreateAnd(V, AndRHS);

        // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
        // has 1's for all bits that the subtraction with A might affect.
        if (Op0I->hasOneUse()) {
          uint32_t BitWidth = AndRHSMask.getBitWidth();
          uint32_t Zeros = AndRHSMask.countLeadingZeros();
          APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);

          ConstantInt *A = dyn_cast<ConstantInt>(Op0LHS);
          if (!(A && A->isZero()) &&               // avoid infinite recursion.
              MaskedValueIsZero(Op0LHS, Mask)) {
            Value *NewNeg = Builder->CreateNeg(Op0RHS);
            return BinaryOperator::CreateAnd(NewNeg, AndRHS);
          }
        }
        break;

      case Instruction::Shl:
      case Instruction::LShr:
        // (1 << x) & 1 --> zext(x == 0)
        // (1 >> x) & 1 --> zext(x == 0)
        if (AndRHSMask == 1 && Op0LHS == AndRHS) {
          Value *NewICmp =
            Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
          return new ZExtInst(NewICmp, I.getType());
        }
        break;
      }

      if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
        if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
          return Res;
    } else if (CastInst *CI = dyn_cast<CastInst>(Op0)) {
      // If this is an integer truncation or change from signed-to-unsigned, and
      // if the source is an and/or with immediate, transform it.  This
      // frequently occurs for bitfield accesses.
      if (Instruction *CastOp = dyn_cast<Instruction>(CI->getOperand(0))) {
        if ((isa<TruncInst>(CI) || isa<BitCastInst>(CI)) &&
            CastOp->getNumOperands() == 2)
          if (ConstantInt *AndCI = dyn_cast<ConstantInt>(CastOp->getOperand(1))) {
            if (CastOp->getOpcode() == Instruction::And) {
              // Change: and (cast (and X, C1) to T), C2
              // into  : and (cast X to T), trunc_or_bitcast(C1)&C2
              // This will fold the two constants together, which may allow
              // other simplifications.
              Value *NewCast = Builder->CreateTruncOrBitCast(
                CastOp->getOperand(0), I.getType(),
                CastOp->getName()+".shrunk");
              // trunc_or_bitcast(C1)&C2
              Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
              C3 = ConstantExpr::getAnd(C3, AndRHS);
              return BinaryOperator::CreateAnd(NewCast, C3);
            } else if (CastOp->getOpcode() == Instruction::Or) {
              // Change: and (cast (or X, C1) to T), C2
              // into  : trunc(C1)&C2 iff trunc(C1)&C2 == C2
              Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
              if (ConstantExpr::getAnd(C3, AndRHS) == AndRHS)
                return ReplaceInstUsesWith(I, AndRHS);
            }
          }
      }
    }

    // Try to fold constant and into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI, this))
        return R;
    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  Value *Op0NotVal = dyn_castNotVal(Op0);
  Value *Op1NotVal = dyn_castNotVal(Op1);

  if (Op0NotVal == Op1 || Op1NotVal == Op0)  // A & ~A  == ~A & A == 0
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

  // (~A & ~B) == (~(A | B)) - De Morgan's Law
  if (Op0NotVal && Op1NotVal && isOnlyUse(Op0) && isOnlyUse(Op1)) {
    Value *Or = Builder->CreateOr(Op0NotVal, Op1NotVal,
                                  I.getName()+".demorgan");
    return BinaryOperator::CreateNot(Or);
  }

  {
    Value *A = 0, *B = 0, *C = 0, *D = 0;
    if (match(Op0, m_Or(m_Value(A), m_Value(B)))) {
      if (A == Op1 || B == Op1)    // (A | ?) & A  --> A
        return ReplaceInstUsesWith(I, Op1);

      // (A|B) & ~(A&B) -> A^B
      if (match(Op1, m_Not(m_And(m_Value(C), m_Value(D))))) {
        if ((A == C && B == D) || (A == D && B == C))
          return BinaryOperator::CreateXor(A, B);
      }
    }

    if (match(Op1, m_Or(m_Value(A), m_Value(B)))) {
      if (A == Op0 || B == Op0)    // A & (A | ?)  --> A
        return ReplaceInstUsesWith(I, Op0);

      // ~(A&B) & (A|B) -> A^B
      if (match(Op0, m_Not(m_And(m_Value(C), m_Value(D))))) {
        if ((A == C && B == D) || (A == D && B == C))
          return BinaryOperator::CreateXor(A, B);
      }
    }

    if (Op0->hasOneUse() &&
        match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
      if (A == Op1) {                                // (A^B)&A -> A&(A^B)
        I.swapOperands();     // Simplify below
        std::swap(Op0, Op1);
      } else if (B == Op1) {                         // (A^B)&B -> B&(B^A)
        cast<BinaryOperator>(Op0)->swapOperands();
        I.swapOperands();     // Simplify below
        std::swap(Op0, Op1);
      }
    }

    if (Op1->hasOneUse() &&
        match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
      if (B == Op0) {                                // B&(A^B) -> B&(B^A)
        cast<BinaryOperator>(Op1)->swapOperands();
        std::swap(A, B);
      }
      if (A == Op0)                                  // A&(A^B) -> A & ~B
        return BinaryOperator::CreateAnd(A, Builder->CreateNot(B, "tmp"));
    }

    // (A&((~A)|B)) -> A&B
    if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) ||
        match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1)))))
      return BinaryOperator::CreateAnd(A, Op1);
    if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) ||
        match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0)))))
      return BinaryOperator::CreateAnd(A, Op0);
  }

  if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1)) {
    // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
    if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
      return R;

    if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0))
      if (Instruction *Res = FoldAndOfICmps(I, LHS, RHS))
        return Res;
  }

  // fold (and (cast A), (cast B)) -> (cast (and A, B))
  if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
    if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
      if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind ?
        const Type *SrcTy = Op0C->getOperand(0)->getType();
        if (SrcTy == Op1C->getOperand(0)->getType() &&
            SrcTy->isIntOrIntVector() &&
            // Only do this if the casts both really cause code to be generated.
            ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
                              I.getType()) &&
            ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
                              I.getType())) {
          Value *NewOp = Builder->CreateAnd(Op0C->getOperand(0),
                                            Op1C->getOperand(0), I.getName());
          return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
        }
      }

  // (X >> Z) & (Y >> Z)  -> (X&Y) >> Z  for all shifts.
  if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
    if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
      if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
          SI0->getOperand(1) == SI1->getOperand(1) &&
          (SI0->hasOneUse() || SI1->hasOneUse())) {
        Value *NewOp =
          Builder->CreateAnd(SI0->getOperand(0), SI1->getOperand(0),
                             SI0->getName());
        return BinaryOperator::Create(SI1->getOpcode(), NewOp,
                                      SI1->getOperand(1));
      }
  }

  // If and'ing two fcmp, try combine them into one.
  if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
    if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
      if (Instruction *Res = FoldAndOfFCmps(I, LHS, RHS))
        return Res;
  }

  return Changed ? &I : 0;
}
/// CollectBSwapParts - Analyze the specified subexpression and see if it is
/// capable of providing pieces of a bswap.  The subexpression provides pieces
/// of a bswap if it is proven that each of the non-zero bytes in the output of
/// the expression came from the corresponding "byte swapped" byte in some other
/// value.  For example, if the current subexpression is "(shl i32 %X, 24)" then
/// we know that the expression deposits the low byte of %X into the high byte
/// of the bswap result and that all other bytes are zero.  This expression is
/// accepted, and the high byte of ByteValues is set to X to indicate a correct
/// match.
///
/// This function returns true if the match was unsuccessful and false if it
/// succeeded.  On entry to the function the "OverallLeftShift" is a signed
/// integer value indicating the number of bytes that the subexpression is
/// later shifted.  For example, if the expression is later right shifted by
/// 16 bits, the OverallLeftShift value would be -2 on entry.  This is used to
/// specify which byte of ByteValues is actually being set.
///
/// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
/// byte is masked to zero by a user.  For example, in (X & 255), X will be
/// processed with a bytemask of 1.  Because bytemask is 32-bits, this limits
/// this function to working on up to 32-byte (256 bit) values.  ByteMask is
/// always in the local (OverallLeftShift) coordinate space.
///
static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
                              SmallVector<Value*, 8> &ByteValues) {
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If this is an or instruction, it may be an inner node of the bswap.
    if (I->getOpcode() == Instruction::Or) {
      return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
                               ByteValues) ||
             CollectBSwapParts(I->getOperand(1), OverallLeftShift, ByteMask,
                               ByteValues);
    }

    // If this is a logical shift by a constant multiple of 8, recurse with
    // OverallLeftShift and ByteMask adjusted.
    if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
      unsigned ShAmt =
        cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
      // Ensure the shift amount is defined and of a byte value.
      if ((ShAmt & 7) || (ShAmt > 8*ByteValues.size()))
        return true;

      unsigned ByteShift = ShAmt >> 3;
      if (I->getOpcode() == Instruction::Shl) {
        // X << 2 -> collect(X, +2)
        OverallLeftShift += ByteShift;
        ByteMask >>= ByteShift;
      } else {
        // X >>u 2 -> collect(X, -2)
        OverallLeftShift -= ByteShift;
        ByteMask <<= ByteShift;
        ByteMask &= (~0U >> (32-ByteValues.size()));
      }

      if (OverallLeftShift >= (int)ByteValues.size()) return true;
      if (OverallLeftShift <= -(int)ByteValues.size()) return true;

      return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
                               ByteValues);
    }

    // If this is a logical 'and' with a mask that clears bytes, clear the
    // corresponding bytes in ByteMask.
    if (I->getOpcode() == Instruction::And &&
        isa<ConstantInt>(I->getOperand(1))) {
      // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
      unsigned NumBytes = ByteValues.size();
      APInt Byte(I->getType()->getPrimitiveSizeInBits(), 255);
      const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();

      for (unsigned i = 0; i != NumBytes; ++i, Byte <<= 8) {
        // If this byte is masked out by a later operation, we don't care what
        // the and mask is.
        if ((ByteMask & (1 << i)) == 0)
          continue;

        // If the AndMask is all zeros for this byte, clear the bit.
        APInt MaskB = AndMask & Byte;
        if (MaskB == 0) {
          ByteMask &= ~(1U << i);
          continue;
        }

        // If the AndMask is not all ones for this byte, it's not a bytezap.
        if (MaskB != Byte)
          return true;

        // Otherwise, this byte is kept.
      }

      return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
                               ByteValues);
    }
  }

  // Okay, we got to something that isn't a shift, 'or' or 'and'.  This must be
  // the input value to the bswap.  Some observations: 1) if more than one byte
  // is demanded from this input, then it could not be successfully assembled
  // into a byteswap.  At least one of the two bytes would not be aligned with
  // their ultimate destination.
  if (!isPowerOf2_32(ByteMask)) return true;
  unsigned InputByteNo = CountTrailingZeros_32(ByteMask);

  // 2) The input and ultimate destinations must line up: if byte 3 of an i32
  // is demanded, it needs to go into byte 0 of the result.  This means that the
  // byte needs to be shifted until it lands in the right byte bucket.  The
  // shift amount depends on the position: if the byte is coming from the high
  // part of the value (e.g. byte 3) then it must be shifted right.  If from the
  // low part, it must be shifted left.
  unsigned DestByteNo = InputByteNo + OverallLeftShift;
  if (InputByteNo < ByteValues.size()/2) {
    if (ByteValues.size()-1-DestByteNo != InputByteNo)
      return true;
  } else {
    if (ByteValues.size()-1-DestByteNo != InputByteNo)
      return true;
  }

  // If the destination byte value is already defined, the values are or'd
  // together, which isn't a bswap (unless it's an or of the same bits).
  if (ByteValues[DestByteNo] && ByteValues[DestByteNo] != V)
    return true;
  ByteValues[DestByteNo] = V;
  return false;
}
/// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
/// If so, insert the new bswap intrinsic and return it.
Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
  const IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
  if (!ITy || ITy->getBitWidth() % 16 ||
      // ByteMask only allows up to 32-byte values.
      ITy->getBitWidth() > 32*8)
    return 0;   // Can only bswap pairs of bytes.  Can't do vectors.

  /// ByteValues - For each byte of the result, we keep track of which value
  /// defines each byte.
  SmallVector<Value*, 8> ByteValues;
  ByteValues.resize(ITy->getBitWidth()/8);

  // Try to find all the pieces corresponding to the bswap.
  uint32_t ByteMask = ~0U >> (32-ByteValues.size());
  if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
    return 0;

  // Check to see if all of the bytes come from the same value.
  Value *V = ByteValues[0];
  if (V == 0) return 0;  // Didn't find a byte?  Must be zero.

  // Check to make sure that all of the bytes come from the same value.
  for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
    if (ByteValues[i] != V)
      return 0;
  const Type *Tys[] = { ITy };
  Module *M = I.getParent()->getParent()->getParent();
  Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
  return CallInst::Create(F, V);
}
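// Typical input this matcher recognizes (a hand-written i32 byte swap, shown
// schematically, not taken from a test):
//   (X << 24) | ((X & 0xFF00) << 8) | ((X >> 8) & 0xFF00) | (X >> 24)
// Each 'or' arm deposits one source byte into its mirrored position, so
// CollectBSwapParts fills every slot of ByteValues with X and the whole tree
// is replaced by a call to the bswap intrinsic.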
/// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D).  Check
/// if A is (cond?-1:0) and either B or D is ~(cond?-1:0) or (cond?0:-1); if so,
/// we can simplify this expression to "cond ? C : D or B".
static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
                                         Value *C, Value *D,
                                         LLVMContext *Context) {
  // If A is not a select of -1/0, this cannot match.
  Value *Cond = 0;
  if (!match(A, m_SelectCst<-1, 0>(m_Value(Cond))))
    return 0;

  // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
  if (match(D, m_SelectCst<0, -1>(m_Specific(Cond))))
    return SelectInst::Create(Cond, C, B);
  if (match(D, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond)))))
    return SelectInst::Create(Cond, C, B);
  // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
  if (match(B, m_SelectCst<0, -1>(m_Specific(Cond))))
    return SelectInst::Create(Cond, C, D);
  if (match(B, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond)))))
    return SelectInst::Create(Cond, C, D);
  return 0;
}
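// Illustrative instance of the pattern handled above: with
//   A = (cond ? -1 : 0)  and  D = (cond ? 0 : -1),
// the expression (A & C) | (B & D) keeps C when cond is true and B when it is
// false, so it simplifies to "cond ? C : B".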
/// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
Instruction *InstCombiner::FoldOrOfICmps(Instruction &I,
                                         ICmpInst *LHS, ICmpInst *RHS) {
  Value *Val, *Val2;
  ConstantInt *LHSCst, *RHSCst;
  ICmpInst::Predicate LHSCC, RHSCC;

  // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
  if (!match(LHS, m_ICmp(LHSCC, m_Value(Val),
                         m_ConstantInt(LHSCst))) ||
      !match(RHS, m_ICmp(RHSCC, m_Value(Val2),
                         m_ConstantInt(RHSCst))))
    return 0;

  // From here on, we only handle:
  //   (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
  if (Val != Val2) return 0;

  // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
  if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
      RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
      LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
      RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
    return 0;

  // We can't fold (ugt x, C) | (sgt x, C2).
  if (!PredicatesFoldable(LHSCC, RHSCC))
    return 0;

  // Ensure that the larger constant is on the RHS.
  bool ShouldSwap;
  if (ICmpInst::isSignedPredicate(LHSCC) ||
      (ICmpInst::isEquality(LHSCC) &&
       ICmpInst::isSignedPredicate(RHSCC)))
    ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
  else
    ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());

  if (ShouldSwap) {
    std::swap(LHS, RHS);
    std::swap(LHSCst, RHSCst);
    std::swap(LHSCC, RHSCC);
  }

  // At this point, we know we have two icmp instructions
  // comparing a value against two constants and or'ing the result
  // together.  Because of the above check, we know that we only have
  // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
  // FoldICmpLogical check above), that the two constants are not
  // equal and that the larger constant is on the RHS.
  assert(LHSCst != RHSCst && "Compares not folded above?");

  switch (LHSCC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ICmpInst::ICMP_EQ:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:
      if (LHSCst == SubOne(RHSCst)) {
        // (X == 13 | X == 14) -> X-13 <u 2
        Constant *AddCST = ConstantExpr::getNeg(LHSCst);
        Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
        AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst);
        return new ICmpInst(ICmpInst::ICMP_ULT, Add, AddCST);
      }
      break;                        // (X == 13 | X == 15) -> no change
    case ICmpInst::ICMP_UGT:        // (X == 13 | X u> 14) -> no change
    case ICmpInst::ICMP_SGT:        // (X == 13 | X s> 14) -> no change
      break;
    case ICmpInst::ICMP_NE:         // (X == 13 | X != 15) -> X != 15
    case ICmpInst::ICMP_ULT:        // (X == 13 | X u< 15) -> X u< 15
    case ICmpInst::ICMP_SLT:        // (X == 13 | X s< 15) -> X s< 15
      return ReplaceInstUsesWith(I, RHS);
    }
    break;
  case ICmpInst::ICMP_NE:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X != 13 | X == 15) -> X != 13
    case ICmpInst::ICMP_UGT:        // (X != 13 | X u> 15) -> X != 13
    case ICmpInst::ICMP_SGT:        // (X != 13 | X s> 15) -> X != 13
      return ReplaceInstUsesWith(I, LHS);
    case ICmpInst::ICMP_NE:         // (X != 13 | X != 15) -> true
    case ICmpInst::ICMP_ULT:        // (X != 13 | X u< 15) -> true
    case ICmpInst::ICMP_SLT:        // (X != 13 | X s< 15) -> true
      return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
    }
    break;
  case ICmpInst::ICMP_ULT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X u< 13 | X == 14) -> no change
      break;
    case ICmpInst::ICMP_UGT:        // (X u< 13 | X u> 15) -> (X-13) u> 2
      // If RHSCst is [us]MAXINT, it is always false.  Not handling
      // this can cause overflow.
      if (RHSCst->isMaxValue(false))
        return ReplaceInstUsesWith(I, LHS);
      return InsertRangeTest(Val, LHSCst, AddOne(RHSCst),
                             false, false, I);
    case ICmpInst::ICMP_SGT:        // (X u< 13 | X s> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:         // (X u< 13 | X != 15) -> X != 15
    case ICmpInst::ICMP_ULT:        // (X u< 13 | X u< 15) -> X u< 15
      return ReplaceInstUsesWith(I, RHS);
    case ICmpInst::ICMP_SLT:        // (X u< 13 | X s< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_SLT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X s< 13 | X == 14) -> no change
      break;
    case ICmpInst::ICMP_SGT:        // (X s< 13 | X s> 15) -> (X-13) s> 2
      // If RHSCst is [us]MAXINT, it is always false.  Not handling
      // this can cause overflow.
      if (RHSCst->isMaxValue(true))
        return ReplaceInstUsesWith(I, LHS);
      return InsertRangeTest(Val, LHSCst, AddOne(RHSCst),
                             true, false, I);
    case ICmpInst::ICMP_UGT:        // (X s< 13 | X u> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:         // (X s< 13 | X != 15) -> X != 15
    case ICmpInst::ICMP_SLT:        // (X s< 13 | X s< 15) -> X s< 15
      return ReplaceInstUsesWith(I, RHS);
    case ICmpInst::ICMP_ULT:        // (X s< 13 | X u< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_UGT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X u> 13 | X == 15) -> X u> 13
    case ICmpInst::ICMP_UGT:        // (X u> 13 | X u> 15) -> X u> 13
      return ReplaceInstUsesWith(I, LHS);
    case ICmpInst::ICMP_SGT:        // (X u> 13 | X s> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:         // (X u> 13 | X != 15) -> true
    case ICmpInst::ICMP_ULT:        // (X u> 13 | X u< 15) -> true
      return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
    case ICmpInst::ICMP_SLT:        // (X u> 13 | X s< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_SGT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X s> 13 | X == 15) -> X > 13
    case ICmpInst::ICMP_SGT:        // (X s> 13 | X s> 15) -> X > 13
      return ReplaceInstUsesWith(I, LHS);
    case ICmpInst::ICMP_UGT:        // (X s> 13 | X u> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:         // (X s> 13 | X != 15) -> true
    case ICmpInst::ICMP_SLT:        // (X s> 13 | X s< 15) -> true
      return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
    case ICmpInst::ICMP_ULT:        // (X s> 13 | X u< 15) -> no change
      break;
    }
    break;
  }

  return 0;
}
Instruction *InstCombiner::FoldOrOfFCmps(Instruction &I, FCmpInst *LHS,
                                         FCmpInst *RHS) {
  if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
      RHS->getPredicate() == FCmpInst::FCMP_UNO &&
      LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
    if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
      if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
        // If either of the constants are nans, then the whole thing returns
        // true.
        if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
          return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));

        // Otherwise, no need to compare the two constants, compare the
        // rest.
        return new FCmpInst(FCmpInst::FCMP_UNO,
                            LHS->getOperand(0), RHS->getOperand(0));
      }

    // Handle vector zeros.  This occurs because the canonical form of
    // "fcmp uno x,x" is "fcmp uno x, 0".
    if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
        isa<ConstantAggregateZero>(RHS->getOperand(1)))
      return new FCmpInst(FCmpInst::FCMP_UNO,
                          LHS->getOperand(0), RHS->getOperand(0));

    return 0;
  }

  Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
  Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
  FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();

  if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
    // Swap RHS operands to match LHS.
    Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
    std::swap(Op1LHS, Op1RHS);
  }
  if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
    // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
    if (Op0CC == Op1CC)
      return new FCmpInst((FCmpInst::Predicate)Op0CC,
                          Op0LHS, Op0RHS);
    if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE)
      return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
    if (Op0CC == FCmpInst::FCMP_FALSE)
      return ReplaceInstUsesWith(I, RHS);
    if (Op1CC == FCmpInst::FCMP_FALSE)
      return ReplaceInstUsesWith(I, LHS);
    bool Op0Ordered;
    bool Op1Ordered;
    unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
    unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
    if (Op0Ordered == Op1Ordered) {
      // If both are ordered or unordered, return a new fcmp with
      // or'ed predicates.
      Value *RV = getFCmpValue(Op0Ordered, Op0Pred|Op1Pred,
                               Op0LHS, Op0RHS, Context);
      if (Instruction *I = dyn_cast<Instruction>(RV))
        return I;
      // Otherwise, it's a constant boolean value...
      return ReplaceInstUsesWith(I, RV);
    }
  }

  return 0;
}
/// FoldOrWithConstants - This helper function folds:
///
///     ((A | B) & C1) | (B & C2)
///
/// into:
///
///     (A & C1) | B
///
/// when the XOR of the two constants is "all ones" (-1).
Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
                                               Value *A, Value *B, Value *C) {
  ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
  if (!CI1) return 0;

  Value *V1 = 0;
  ConstantInt *CI2 = 0;
  if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0;

  APInt Xor = CI1->getValue() ^ CI2->getValue();
  if (!Xor.isAllOnesValue()) return 0;

  if (V1 == A || V1 == B) {
    Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
    return BinaryOperator::CreateOr(NewOp, V1);
  }

  return 0;
}
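// Illustrative instance of the fold above (constants chosen here for the
// sketch): with C1 = 1 and C2 = -2, C1 ^ C2 is all-ones, so
//   ((A | B) & 1) | (B & -2)  -->  (A & 1) | B
// which is exactly the pattern visitOr hands to this helper below.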
Instruction *InstCombiner::visitOr(BinaryOperator &I) {
  bool Changed = SimplifyCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (isa<UndefValue>(Op1))                       // X | undef -> -1
    return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));

  // X | X = X
  if (Op0 == Op1)
    return ReplaceInstUsesWith(I, Op0);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  if (isa<VectorType>(I.getType())) {
    if (isa<ConstantAggregateZero>(Op1)) {
      return ReplaceInstUsesWith(I, Op0);  // X | <0,0> -> X
    } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Op1)) {
      if (CP->isAllOnesValue())            // X | <-1,-1> -> <-1,-1>
        return ReplaceInstUsesWith(I, I.getOperand(1));
    }
  }

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    ConstantInt *C1 = 0; Value *X = 0;
    // (X & C1) | C2 --> (X | C2) & (C1|C2)
    if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
        isOnlyUse(Op0)) {
      Value *Or = Builder->CreateOr(X, RHS);
      Or->takeName(Op0);
      return BinaryOperator::CreateAnd(Or,
               ConstantInt::get(*Context, RHS->getValue() | C1->getValue()));
    }

    // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
    if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) &&
        isOnlyUse(Op0)) {
      Value *Or = Builder->CreateOr(X, RHS);
      Or->takeName(Op0);
      return BinaryOperator::CreateXor(Or,
                 ConstantInt::get(*Context, C1->getValue() & ~RHS->getValue()));
    }

    // Try to fold constant and into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI, this))
        return R;
    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  Value *A = 0, *B = 0;
  ConstantInt *C1 = 0, *C2 = 0;

  if (match(Op0, m_And(m_Value(A), m_Value(B))))
    if (A == Op1 || B == Op1)    // (A & ?) | A --> A
      return ReplaceInstUsesWith(I, Op1);
  if (match(Op1, m_And(m_Value(A), m_Value(B))))
    if (A == Op0 || B == Op0)    // A | (A & ?) --> A
      return ReplaceInstUsesWith(I, Op0);

  // (A | B) | C  and  A | (B | C)                  -> bswap if possible.
  // (A >> B) | (C << D)  and  (A << B) | (B >> C)  -> bswap if possible.
  if (match(Op0, m_Or(m_Value(), m_Value())) ||
      match(Op1, m_Or(m_Value(), m_Value())) ||
      (match(Op0, m_Shift(m_Value(), m_Value())) &&
       match(Op1, m_Shift(m_Value(), m_Value())))) {
    if (Instruction *BSwap = MatchBSwap(I))
      return BSwap;
  }

  // (X^C)|Y -> (X|Y)^C iff Y&C == 0
  if (Op0->hasOneUse() &&
      match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
      MaskedValueIsZero(Op1, C1->getValue())) {
    Value *NOr = Builder->CreateOr(A, Op1);
    NOr->takeName(Op0);
    return BinaryOperator::CreateXor(NOr, C1);
  }

  // Y|(X^C) -> (X|Y)^C iff Y&C == 0
  if (Op1->hasOneUse() &&
      match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
      MaskedValueIsZero(Op0, C1->getValue())) {
    Value *NOr = Builder->CreateOr(A, Op0);
    NOr->takeName(Op0);
    return BinaryOperator::CreateXor(NOr, C1);
  }

  Value *C = 0, *D = 0;
  if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
      match(Op1, m_And(m_Value(B), m_Value(D)))) {
    Value *V1 = 0, *V2 = 0, *V3 = 0;
    C1 = dyn_cast<ConstantInt>(C);
    C2 = dyn_cast<ConstantInt>(D);
    if (C1 && C2) {  // (A & C1)|(B & C2)
      // If we have: ((V + N) & C1) | (V & C2)
      // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
      // replace with V+N.
      if (C1->getValue() == ~C2->getValue()) {
        if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
            match(A, m_Add(m_Value(V1), m_Value(V2)))) {
          // Add commutes, try both ways.
          if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
            return ReplaceInstUsesWith(I, A);
          if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
            return ReplaceInstUsesWith(I, A);
        }
        // Or commutes, try both ways.
        if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
            match(B, m_Add(m_Value(V1), m_Value(V2)))) {
          // Add commutes, try both ways.
          if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
            return ReplaceInstUsesWith(I, B);
          if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
            return ReplaceInstUsesWith(I, B);
        }
      }
      V1 = 0; V2 = 0; V3 = 0;
    }

    // Check to see if we have any common things being and'ed.  If so, find the
    // terms for V1 & (V2|V3).
    if (isOnlyUse(Op0) || isOnlyUse(Op1)) {
      if (A == B)      // (A & C)|(A & D) == A & (C|D)
        V1 = A, V2 = C, V3 = D;
      else if (A == D) // (A & C)|(B & A) == A & (B|C)
        V1 = A, V2 = B, V3 = C;
      else if (C == B) // (A & C)|(C & D) == C & (A|D)
        V1 = C, V2 = A, V3 = D;
      else if (C == D) // (A & C)|(B & C) == C & (A|B)
        V1 = C, V2 = A, V3 = B;

      if (V1) {
        Value *Or = Builder->CreateOr(V2, V3, "tmp");
        return BinaryOperator::CreateAnd(V1, Or);
      }
    }

    // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants
    if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D, Context))
      return Match;
    if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C, Context))
      return Match;
    if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D, Context))
      return Match;
    if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C, Context))
      return Match;

    // ((A&~B)|(~A&B)) -> A^B
    if ((match(C, m_Not(m_Specific(D))) &&
         match(B, m_Not(m_Specific(A)))))
      return BinaryOperator::CreateXor(A, D);
    // ((~B&A)|(~A&B)) -> A^B
    if ((match(A, m_Not(m_Specific(D))) &&
         match(B, m_Not(m_Specific(C)))))
      return BinaryOperator::CreateXor(C, D);
    // ((A&~B)|(B&~A)) -> A^B
    if ((match(C, m_Not(m_Specific(B))) &&
         match(D, m_Not(m_Specific(A)))))
      return BinaryOperator::CreateXor(A, B);
    // ((~B&A)|(B&~A)) -> A^B
    if ((match(A, m_Not(m_Specific(B))) &&
         match(D, m_Not(m_Specific(C)))))
      return BinaryOperator::CreateXor(C, B);
  }

  // (X >> Z) | (Y >> Z)  -> (X|Y) >> Z  for all shifts.
  if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
    if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
      if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
          SI0->getOperand(1) == SI1->getOperand(1) &&
          (SI0->hasOneUse() || SI1->hasOneUse())) {
        Value *NewOp = Builder->CreateOr(SI0->getOperand(0), SI1->getOperand(0),
                                         SI0->getName());
        return BinaryOperator::Create(SI1->getOpcode(), NewOp,
                                      SI1->getOperand(1));
      }
  }

  // ((A|B)&1)|(B&-2) -> (A&1) | B
  if (match(Op0, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
      match(Op0, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
    Instruction *Ret = FoldOrWithConstants(I, Op1, A, B, C);
    if (Ret) return Ret;
  }
  // (B&-2)|((A|B)&1) -> (A&1) | B
  if (match(Op1, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
      match(Op1, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
    Instruction *Ret = FoldOrWithConstants(I, Op0, A, B, C);
    if (Ret) return Ret;
  }

  if (match(Op0, m_Not(m_Value(A)))) {   // ~A | Op1
    if (A == Op1)   // ~A | A == -1
      return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
  } else {
    A = 0;
  }
  // Note, A is still live here!
  if (match(Op1, m_Not(m_Value(B)))) {   // Op0 | ~B
    if (Op0 == B)
      return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));

    // (~A | ~B) == (~(A & B)) - De Morgan's Law
    if (A && isOnlyUse(Op0) && isOnlyUse(Op1)) {
      Value *And = Builder->CreateAnd(A, B, I.getName()+".demorgan");
      return BinaryOperator::CreateNot(And);
    }
  }

  // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
  if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1))) {
    if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
      return R;

    if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
      if (Instruction *Res = FoldOrOfICmps(I, LHS, RHS))
        return Res;
  }

  // fold (or (cast A), (cast B)) -> (cast (or A, B))
  if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
    if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
      if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
        if (!isa<ICmpInst>(Op0C->getOperand(0)) ||
            !isa<ICmpInst>(Op1C->getOperand(0))) {
          const Type *SrcTy = Op0C->getOperand(0)->getType();
          if (SrcTy == Op1C->getOperand(0)->getType() &&
              SrcTy->isIntOrIntVector() &&
              // Only do this if the casts both really cause code to be
              // generated.
              ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
                                I.getType()) &&
              ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
                                I.getType())) {
            Value *NewOp = Builder->CreateOr(Op0C->getOperand(0),
                                             Op1C->getOperand(0), I.getName());
            return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
          }
        }
      }
  }

  // (fcmp uno x, c) | (fcmp uno y, c)  -> (fcmp uno x, y)
  if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
    if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
      if (Instruction *Res = FoldOrOfFCmps(I, LHS, RHS))
        return Res;
  }

  return Changed ? &I : 0;
}
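// Example of the De Morgan fold performed in visitOr above (illustrative IR,
// value names invented for this sketch):
//   %na = xor i32 %a, -1
//   %nb = xor i32 %b, -1
//   %r  = or i32 %na, %nb
// becomes
//   %r.demorgan = and i32 %a, %b
//   %r          = xor i32 %r.demorgan, -1      ; ~(a & b)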
namespace {
  // XorSelf - Implements: X ^ X --> 0
  struct XorSelf {
    Value *RHS;
    XorSelf(Value *rhs) : RHS(rhs) {}
    bool shouldApply(Value *LHS) const { return LHS == RHS; }
    Instruction *apply(BinaryOperator &Xor) const {
      return &Xor;
    }
  };
}
Instruction *InstCombiner::visitXor(BinaryOperator &I) {
  bool Changed = SimplifyCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (isa<UndefValue>(Op1)) {
    if (isa<UndefValue>(Op0))
      // Handle undef ^ undef -> 0 special case. This is a common
      // idiom (misuse).
      return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
    return ReplaceInstUsesWith(I, Op1);  // X ^ undef -> undef
  }

  // xor X, X = 0, even if X is nested in a sequence of Xor's.
  if (Instruction *Result = AssociativeOpt(I, XorSelf(Op1))) {
    assert(Result == &I && "AssociativeOpt didn't work?"); Result=Result;
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
  }

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;
  if (isa<VectorType>(I.getType()))
    if (isa<ConstantAggregateZero>(Op1))
      return ReplaceInstUsesWith(I, Op0);  // X ^ <0,0> -> X

  // Is this a ~ operation?
  if (Value *NotOp = dyn_castNotVal(&I)) {
    // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
    // ~(~X | Y) === (X & ~Y) - De Morgan's Law
    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
      if (Op0I->getOpcode() == Instruction::And ||
          Op0I->getOpcode() == Instruction::Or) {
        if (dyn_castNotVal(Op0I->getOperand(1))) Op0I->swapOperands();
        if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
          Value *NotY =
            Builder->CreateNot(Op0I->getOperand(1),
                               Op0I->getOperand(1)->getName()+".not");
          if (Op0I->getOpcode() == Instruction::And)
            return BinaryOperator::CreateOr(Op0NotVal, NotY);
          return BinaryOperator::CreateAnd(Op0NotVal, NotY);
        }
      }
    }
  }

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    if (RHS == ConstantInt::getTrue(*Context) && Op0->hasOneUse()) {
      // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
      if (ICmpInst *ICI = dyn_cast<ICmpInst>(Op0))
        return new ICmpInst(ICI->getInversePredicate(),
                            ICI->getOperand(0), ICI->getOperand(1));

      if (FCmpInst *FCI = dyn_cast<FCmpInst>(Op0))
        return new FCmpInst(FCI->getInversePredicate(),
                            FCI->getOperand(0), FCI->getOperand(1));
    }

    // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
    if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
      if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
        if (CI->hasOneUse() && Op0C->hasOneUse()) {
          Instruction::CastOps Opcode = Op0C->getOpcode();
          if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
              (RHS == ConstantExpr::getCast(Opcode,
                                            ConstantInt::getTrue(*Context),
                                            Op0C->getDestTy()))) {
            CI->setPredicate(CI->getInversePredicate());
            return CastInst::Create(Opcode, CI, Op0C->getType());
          }
        }
      }
    }

    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
      // ~(c-X) == X-c-1 == X+(-c-1)
      if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
        if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
          Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
          Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C,
                                             ConstantInt::get(I.getType(), 1));
          return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS);
        }

      if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
        if (Op0I->getOpcode() == Instruction::Add) {
          // ~(X-c) --> (-c-1)-X
          if (RHS->isAllOnesValue()) {
            Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
            return BinaryOperator::CreateSub(
                          ConstantExpr::getSub(NegOp0CI,
                                             ConstantInt::get(I.getType(), 1)),
                          Op0I->getOperand(0));
          } else if (RHS->getValue().isSignBit()) {
            // (X + C) ^ signbit -> (X + C + signbit)
            Constant *C = ConstantInt::get(*Context,
                                           RHS->getValue() + Op0CI->getValue());
            return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
          }
        } else if (Op0I->getOpcode() == Instruction::Or) {
          // (X|C1)^C2 -> X^(C1|C2) iff X&~C1 == 0
          if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
            Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
            // Anything in both C1 and C2 is known to be zero, remove it from
            // NewRHS.
            Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS);
            NewRHS = ConstantExpr::getAnd(NewRHS,
                                          ConstantExpr::getNot(CommonBits));
            Worklist.Add(Op0I);
            I.setOperand(0, Op0I->getOperand(0));
            I.setOperand(1, NewRHS);
            return &I;
          }
        }
      }
    }

    // Try to fold constant and into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI, this))
        return R;
    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  if (Value *X = dyn_castNotVal(Op0))   // ~A ^ A == -1
    if (X == Op1)
      return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));

  if (Value *X = dyn_castNotVal(Op1))   // A ^ ~A == -1
    if (X == Op0)
      return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));

  BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
  if (Op1I) {
    Value *A, *B;
    if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) {
      if (A == Op0) {              // B^(B|A) == (A|B)^B
        Op1I->swapOperands();
        I.swapOperands();
        std::swap(Op0, Op1);
      } else if (B == Op0) {       // B^(A|B) == (A|B)^B
        I.swapOperands();          // Simplified below.
        std::swap(Op0, Op1);
      }
    } else if (match(Op1I, m_Xor(m_Specific(Op0), m_Value(B)))) {
      return ReplaceInstUsesWith(I, B);  // A^(A^B) == B
    } else if (match(Op1I, m_Xor(m_Value(A), m_Specific(Op0)))) {
      return ReplaceInstUsesWith(I, A);  // A^(B^A) == B
    } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) &&
               Op1I->hasOneUse()) {
      if (A == Op0) {              // A^(A&B) -> A^(B&A)
        Op1I->swapOperands();
        std::swap(A, B);
      }
      if (B == Op0) {              // A^(B&A) -> (B&A)^A
        I.swapOperands();          // Simplified below.
        std::swap(Op0, Op1);
      }
    }
  }

  BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0);
  if (Op0I) {
    Value *A, *B;
    if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
        Op0I->hasOneUse()) {
      if (A == Op1)                // (B|A)^B == (A|B)^B
        std::swap(A, B);
      if (B == Op1)                // (A|B)^B == A & ~B
        return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1, "tmp"));
    } else if (match(Op0I, m_Xor(m_Specific(Op1), m_Value(B)))) {
      return ReplaceInstUsesWith(I, B);  // (A^B)^A == B
    } else if (match(Op0I, m_Xor(m_Value(A), m_Specific(Op1)))) {
      return ReplaceInstUsesWith(I, A);  // (B^A)^A == B
    } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
               Op0I->hasOneUse()) {
      if (A == Op1)                                  // (A&B)^A -> (B&A)^A
        std::swap(A, B);
      if (B == Op1 &&                                // (B&A)^A == ~B & A
          !isa<ConstantInt>(Op1)) {  // Canonical form is (B&C)^C
        return BinaryOperator::CreateAnd(Builder->CreateNot(A, "tmp"), Op1);
      }
    }
  }

  // (X >> Z) ^ (Y >> Z)  -> (X^Y) >> Z  for all shifts.
  if (Op0I && Op1I && Op0I->isShift() &&
      Op0I->getOpcode() == Op1I->getOpcode() &&
      Op0I->getOperand(1) == Op1I->getOperand(1) &&
      (Op0I->hasOneUse() || Op1I->hasOneUse())) {
    Value *NewOp =
      Builder->CreateXor(Op0I->getOperand(0), Op1I->getOperand(0),
                         Op0I->getName());
    return BinaryOperator::Create(Op1I->getOpcode(), NewOp,
                                  Op1I->getOperand(1));
  }

  if (Op0I && Op1I) {
    Value *A, *B, *C, *D;
    // (A & B)^(A | B) -> A ^ B
    if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
        match(Op1I, m_Or(m_Value(C), m_Value(D)))) {
      if ((A == C && B == D) || (A == D && B == C))
        return BinaryOperator::CreateXor(A, B);
    }
    // (A | B)^(A & B) -> A ^ B
    if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
        match(Op1I, m_And(m_Value(C), m_Value(D)))) {
      if ((A == C && B == D) || (A == D && B == C))
        return BinaryOperator::CreateXor(A, B);
    }

    // (A & B)^(C & D)
    if ((Op0I->hasOneUse() || Op1I->hasOneUse()) &&
        match(Op0I, m_And(m_Value(A), m_Value(B))) &&
        match(Op1I, m_And(m_Value(C), m_Value(D)))) {
      // (X & Y)^(X & Z) -> (Y^Z) & X
      Value *X = 0, *Y = 0, *Z = 0;
      if (A == C)
        X = A, Y = B, Z = D;
      else if (A == D)
        X = A, Y = B, Z = C;
      else if (B == C)
        X = B, Y = A, Z = D;
      else if (B == D)
        X = B, Y = A, Z = C;

      if (X) {
        Value *NewOp = Builder->CreateXor(Y, Z, Op0->getName());
        return BinaryOperator::CreateAnd(NewOp, X);
      }
    }
  }

  // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
  if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
    if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
      return R;

  // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
  if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
    if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
      if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
        const Type *SrcTy = Op0C->getOperand(0)->getType();
        if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
            // Only do this if the casts both really cause code to be generated.
            ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
                              I.getType()) &&
            ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
                              I.getType())) {
          Value *NewOp = Builder->CreateXor(Op0C->getOperand(0),
                                            Op1C->getOperand(0), I.getName());
          return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
        }
      }
  }

  return Changed ? &I : 0;
}
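// Example of the compare inversion handled in visitXor above (illustrative
// IR, names invented for this sketch):
//   %c = icmp eq i32 %a, %b
//   %r = xor i1 %c, true
// becomes
//   %r = icmp ne i32 %a, %b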
static ConstantInt *ExtractElement(Constant *V, Constant *Idx,
                                   LLVMContext *Context) {
  return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
}
static bool HasAddOverflow(ConstantInt *Result,
                           ConstantInt *In1, ConstantInt *In2,
                           bool IsSigned) {
  if (IsSigned)
    if (In2->getValue().isNegative())
      return Result->getValue().sgt(In1->getValue());
    else
      return Result->getValue().slt(In1->getValue());
  else
    return Result->getValue().ult(In1->getValue());
}

/// AddWithOverflow - Compute Result = In1+In2, returning true if the result
/// overflowed for this type.
static bool AddWithOverflow(Constant *&Result, Constant *In1,
                            Constant *In2, LLVMContext *Context,
                            bool IsSigned = false) {
  Result = ConstantExpr::getAdd(In1, In2);

  if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
      Constant *Idx = ConstantInt::get(Type::getInt32Ty(*Context), i);
      if (HasAddOverflow(ExtractElement(Result, Idx, Context),
                         ExtractElement(In1, Idx, Context),
                         ExtractElement(In2, Idx, Context),
                         IsSigned))
        return true;
    }
    return false;
  }

  return HasAddOverflow(cast<ConstantInt>(Result),
                        cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                        IsSigned);
}
*Result
,
5273 ConstantInt
*In1
, ConstantInt
*In2
,
5276 if (In2
->getValue().isNegative())
5277 return Result
->getValue().slt(In1
->getValue());
5279 return Result
->getValue().sgt(In1
->getValue());
5281 return Result
->getValue().ugt(In1
->getValue());
5284 /// SubWithOverflow - Compute Result = In1-In2, returning true if the result
5285 /// overflowed for this type.
5286 static bool SubWithOverflow(Constant
*&Result
, Constant
*In1
,
5287 Constant
*In2
, LLVMContext
*Context
,
5288 bool IsSigned
= false) {
5289 Result
= ConstantExpr::getSub(In1
, In2
);
5291 if (const VectorType
*VTy
= dyn_cast
<VectorType
>(In1
->getType())) {
5292 for (unsigned i
= 0, e
= VTy
->getNumElements(); i
!= e
; ++i
) {
5293 Constant
*Idx
= ConstantInt::get(Type::getInt32Ty(*Context
), i
);
5294 if (HasSubOverflow(ExtractElement(Result
, Idx
, Context
),
5295 ExtractElement(In1
, Idx
, Context
),
5296 ExtractElement(In2
, Idx
, Context
),
5303 return HasSubOverflow(cast
<ConstantInt
>(Result
),
5304 cast
<ConstantInt
>(In1
), cast
<ConstantInt
>(In2
),
/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
/// code necessary to compute the offset from the base pointer (without adding
/// in the base pointer). Return the result as a signed integer of intptr size.
static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) {
  TargetData &TD = *IC.getTargetData();
  gep_type_iterator GTI = gep_type_begin(GEP);
  const Type *IntPtrTy = TD.getIntPtrType(I.getContext());
  Value *Result = Constant::getNullValue(IntPtrTy);

  // Build a mask for high order bits.
  unsigned IntPtrWidth = TD.getPointerSizeInBits();
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
       ++i, ++GTI) {
    Value *Op = *i;
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
      if (OpC->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());

        Result = IC.Builder->CreateAdd(Result,
                                       ConstantInt::get(IntPtrTy, Size),
                                       GEP->getName()+".offs");
        continue;
      }

      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
      Constant *OC =
        ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
      Scale = ConstantExpr::getMul(OC, Scale);
      // Emit an add instruction.
      Result = IC.Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
      continue;
    }
    // Convert to correct type.
    if (Op->getType() != IntPtrTy)
      Op = IC.Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
    if (Size != 1) {
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
      // We'll let instcombine(mul) convert this to a shl if possible.
      Op = IC.Builder->CreateMul(Op, Scale, GEP->getName()+".idx");
    }

    // Emit an add instruction.
    Result = IC.Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
  }
  return Result;
}
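// Worked example for EmitGEPOffset (illustrative; 64-bit pointers assumed):
// for "getelementptr i32* %A, i64 %i" the loop emits roughly
//   %A.idx  = mul i64 %i, 4            ; scale by the i32 allocation size
//   %A.offs = add i64 %A.idx, 0        ; accumulate into the running offset
// Constant struct-field indices are instead folded in as immediate offsets.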
/// EvaluateGEPOffsetExpression - Return a value that can be used to compare
/// the *offset* implied by a GEP to zero. For example, if we have &A[i], we
/// want to return 'i' for "icmp ne i, 0". Note that, in general, indices can
/// be complex, and scales are involved. The above expression would also be
/// legal to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32).
/// This latter form is less amenable to optimization though, and we are
/// allowed to generate the first by knowing that pointer arithmetic doesn't
/// overflow.
///
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
                                          InstCombiner &IC) {
  TargetData &TD = *IC.getTargetData();
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index. If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index. For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
        Offset += Size*CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset, just
  // evaluate it the general way.
  if (i == e) return 0;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element. For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());

  // Verify that there are no other variable indices. If so, emit the hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return 0;

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += Size*CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index. If there is no offset, life is simple, return
  // the index.
  unsigned IntPtrWidth = TD.getPointerSizeInBits();
  if (Offset == 0) {
    // Cast to intptrty in case a truncation occurs. If an extension is needed,
    // we don't need to bother extending: the extension won't affect where the
    // computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth)
      VariableIdx = new TruncInst(VariableIdx,
                                  TD.getIntPtrType(VariableIdx->getContext()),
                                  VariableIdx->getName(), &I);
    return VariableIdx;
  }

  // Otherwise, there is an index. The computation we will do will be modulo
  // the pointer size, so get it.
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  Offset &= PtrSizeMask;
  VariableScale &= PtrSizeMask;

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs*(int64_t)VariableScale)
    return 0;

  // Okay, we can do this evaluation. Start by converting the index to intptr.
  const Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = CastInst::CreateIntegerCast(VariableIdx, IntPtrTy,
                                              true /*SExt*/,
                                              VariableIdx->getName(), &I);
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return BinaryOperator::CreateAdd(VariableIdx, OffsetVal, "offset", &I);
}
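// Numeric example of the divisibility check above (constants illustrative):
// an implied offset of "12 + 4*i" (constant part 12, variable scale 4) is
// rewritten as "3 + i" because 12 is a multiple of 4, while "10 + 3*i" fails
// the NewOffs*VariableScale test and the caller falls back to EmitGEPOffset.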
/// FoldGEPICmp - Fold comparisons between a GEP instruction and something
/// else.  At this point we know that the GEP is on the LHS of the comparison.
Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                       ICmpInst::Predicate Cond,
                                       Instruction &I) {
  // Look through bitcasts.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(RHS))
    RHS = BCI->getOperand(0);

  Value *PtrBase = GEPLHS->getOperand(0);
  if (TD && PtrBase == RHS && GEPLHS->isInBounds()) {
    // ((gep Ptr, OFFSET) cmp Ptr)   ---> (OFFSET cmp 0).
    // This transformation (ignoring the base and scales) is valid because we
    // know pointers can't overflow since the gep is inbounds.  See if we can
    // output an optimized form.
    Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, I, *this);

    // If not, synthesize the offset the hard way.
    if (Offset == 0)
      Offset = EmitGEPOffset(GEPLHS, I, *this);
    return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
                        Constant::getNullValue(Offset->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (PtrBase != GEPRHS->getOperand(0)) {
      bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
      IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
                        GEPRHS->getOperand(0)->getType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      if (IndicesTheSame)
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond),
                            GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // Otherwise, the base pointers are different and the indices are
      // different, bail out.
      return 0;
    }

    // If one of the GEPs has all zero indices, recurse.
    bool AllZeros = true;
    for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
      if (!isa<Constant>(GEPLHS->getOperand(i)) ||
          !cast<Constant>(GEPLHS->getOperand(i))->isNullValue()) {
        AllZeros = false;
        break;
      }
    if (AllZeros)
      return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
                         ICmpInst::getSwappedPredicate(Cond), I);

    // If the other GEP has all zero indices, recurse.
    AllZeros = true;
    for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
      if (!isa<Constant>(GEPRHS->getOperand(i)) ||
          !cast<Constant>(GEPRHS->getOperand(i))->isNullValue()) {
        AllZeros = false;
        break;
      }
    if (AllZeros)
      return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);

    if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0;  // Keep track of # differences.
      unsigned DiffOperand = 0;     // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
                   GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          } else {
            if (NumDifferences++) break;
            DiffOperand = i;
          }
        }

      if (NumDifferences == 0)   // SAME GEP?
        return ReplaceInstUsesWith(I, // No comparison is needed here.
                                   ConstantInt::get(Type::getInt1Ty(*Context),
                                             ICmpInst::isTrueWhenEqual(Cond)));

      else if (NumDifferences == 1) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        // Make sure we do a signed comparison here.
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
      }
    }

    // Only lower this if the icmp is the only user of the GEP or if we expect
    // the result to fold to a constant!
    if (TD &&
        (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
        (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)  --->  (OFFSET1 cmp OFFSET2)
      Value *L = EmitGEPOffset(GEPLHS, I, *this);
      Value *R = EmitGEPOffset(GEPRHS, I, *this);
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
    }
  }
  return 0;
}
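// Example of the inbounds case at the top of FoldGEPICmp (illustrative IR):
//   %p2 = getelementptr inbounds i32* %p, i64 %i
//   %c  = icmp eq i32* %p2, %p
// becomes a comparison of the offset against zero:
//   %c  = icmp eq i64 %i, 0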
/// FoldFCmp_IntToFP_Cst - Fold fcmp ([us]itofp x, cst) if possible.
///
Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
                                                Instruction *LHSI,
                                                Constant *RHSC) {
  if (!isa<ConstantFP>(RHSC)) return 0;
  const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();

  // Get the width of the mantissa.  We don't want to hack on conversions that
  // might lose information from the integer, e.g. "i64 -> float"
  int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
  if (MantissaWidth == -1) return 0;  // Unknown.

  // Check to see that the input is converted from an integer type that is
  // small enough that it preserves all bits.  TODO: check here for "known"
  // sign bits.  This would allow us to handle (fptosi (x >>s 62) to float)
  // if x is i64 f.e.
  unsigned InputSize = LHSI->getOperand(0)->getType()->getScalarSizeInBits();

  // If this is a uitofp instruction, we need an extra bit to hold the sign.
  bool LHSUnsigned = isa<UIToFPInst>(LHSI);
  if (LHSUnsigned)
    ++InputSize;

  // If the conversion would lose info, don't hack on this.
  if ((int)InputSize > MantissaWidth)
    return 0;

  // Otherwise, we can potentially simplify the comparison.  We know that it
  // will always come through as an integer value and we know the constant is
  // not a NAN (it would have been previously simplified).
  assert(!RHS.isNaN() && "NaN comparison not already folded!");

  ICmpInst::Predicate Pred;
  switch (I.getPredicate()) {
  default: llvm_unreachable("Unexpected predicate!");
  case FCmpInst::FCMP_UEQ:
  case FCmpInst::FCMP_OEQ:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case FCmpInst::FCMP_UGT:
  case FCmpInst::FCMP_OGT:
    Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
    break;
  case FCmpInst::FCMP_UGE:
  case FCmpInst::FCMP_OGE:
    Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
    break;
  case FCmpInst::FCMP_ULT:
  case FCmpInst::FCMP_OLT:
    Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
    break;
  case FCmpInst::FCMP_ULE:
  case FCmpInst::FCMP_OLE:
    Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
    break;
  case FCmpInst::FCMP_UNE:
  case FCmpInst::FCMP_ONE:
    Pred = ICmpInst::ICMP_NE;
    break;
  case FCmpInst::FCMP_ORD:
    return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
  case FCmpInst::FCMP_UNO:
    return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
  }

  const IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());

  // Now we know that the APFloat is a normal number, zero or inf.

  // See if the FP constant is too large for the integer.  For example,
  // comparing an i8 to 300.0.
  unsigned IntWidth = IntTy->getScalarSizeInBits();

  if (!LHSUnsigned) {
    // If the RHS value is > SignedMax, fold the comparison.  This handles +INF
    // and large values.
    APFloat SMax(RHS.getSemantics(), APFloat::fcZero, false);
    SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMax.compare(RHS) == APFloat::cmpLessThan) {  // smax < 13123.0
      if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_SLT ||
          Pred == ICmpInst::ICMP_SLE)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
    }
  } else {
    // If the RHS value is > UnsignedMax, fold the comparison. This handles
    // +INF and large values.
    APFloat UMax(RHS.getSemantics(), APFloat::fcZero, false);
    UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMax.compare(RHS) == APFloat::cmpLessThan) {  // umax < 13123.0
      if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_ULT ||
          Pred == ICmpInst::ICMP_ULE)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
    }
  }

  if (!LHSUnsigned) {
    // See if the RHS value is < SignedMin.
    APFloat SMin(RHS.getSemantics(), APFloat::fcZero, false);
    SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
          Pred == ICmpInst::ICMP_SGE)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
    }
  }

  // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
  // [0, UMAX], but it may still be fractional.  See if it is fractional by
  // casting the FP value to the integer value and back, checking for equality.
  // Don't do this for zero, because -0.0 is not fractional.
  Constant *RHSInt = LHSUnsigned
    ? ConstantExpr::getFPToUI(RHSC, IntTy)
    : ConstantExpr::getFPToSI(RHSC, IntTy);
  if (!RHS.isZero()) {
    bool Equal = LHSUnsigned
      ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
      : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
    if (!Equal) {
      // If we had a comparison against a fractional value, we have to adjust
      // the compare predicate and sometimes the value.  RHSC is rounded towards
      // zero at this point.
      switch (Pred) {
      default: llvm_unreachable("Unexpected integer comparison!");
      case ICmpInst::ICMP_NE:  // (float)int != 4.4   --> true
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      case ICmpInst::ICMP_EQ:  // (float)int == 4.4   --> false
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      case ICmpInst::ICMP_ULE:
        // (float)int <= 4.4   --> int <= 4
        // (float)int <= -4.4  --> false
        if (RHS.isNegative())
          return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
        break;
      case ICmpInst::ICMP_SLE:
        // (float)int <= 4.4   --> int <= 4
        // (float)int <= -4.4  --> int < -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SLT;
        break;
      case ICmpInst::ICMP_ULT:
        // (float)int < -4.4   --> false
        // (float)int < 4.4    --> int <= 4
        if (RHS.isNegative())
          return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
        Pred = ICmpInst::ICMP_ULE;
        break;
      case ICmpInst::ICMP_SLT:
        // (float)int < -4.4   --> int < -4
        // (float)int < 4.4    --> int <= 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SLE;
        break;
      case ICmpInst::ICMP_UGT:
        // (float)int > 4.4    --> int > 4
        // (float)int > -4.4   --> true
        if (RHS.isNegative())
          return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
        break;
      case ICmpInst::ICMP_SGT:
        // (float)int > 4.4    --> int > 4
        // (float)int > -4.4   --> int >= -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SGE;
        break;
      case ICmpInst::ICMP_UGE:
        // (float)int >= -4.4   --> true
        // (float)int >= 4.4    --> int > 4
        if (!RHS.isNegative())
          return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
        Pred = ICmpInst::ICMP_UGT;
        break;
      case ICmpInst::ICMP_SGE:
        // (float)int >= -4.4   --> int >= -4
        // (float)int >= 4.4    --> int > 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SGT;
        break;
      }
    }
  }

  // Lower this FP comparison into an appropriate integer version of the
  // comparison.
  return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
}
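// Examples of the folds above (constants chosen for illustration): comparing
// an i8 value against 300.0 is decided outright, since 300.0 exceeds the
// type's signed maximum, and a fractional constant adjusts the predicate:
//   fcmp olt (sitofp i32 %x to double), 4.4  -->  icmp sle i32 %x, 4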
Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
  bool Changed = SimplifyCompare(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // Fold trivial predicates.
  if (I.getPredicate() == FCmpInst::FCMP_FALSE)
    return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(), 0));
  if (I.getPredicate() == FCmpInst::FCMP_TRUE)
    return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(), 1));

  // Simplify 'fcmp pred X, X'
  if (Op0 == Op1) {
    switch (I.getPredicate()) {
    default: llvm_unreachable("Unknown predicate!");
    case FCmpInst::FCMP_UEQ:    // True if unordered or equal
    case FCmpInst::FCMP_UGE:    // True if unordered, greater than, or equal
    case FCmpInst::FCMP_ULE:    // True if unordered, less than, or equal
      return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(), 1));
    case FCmpInst::FCMP_OGT:    // True if ordered and greater than
    case FCmpInst::FCMP_OLT:    // True if ordered and less than
    case FCmpInst::FCMP_ONE:    // True if ordered and operands are unequal
      return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(), 0));

    case FCmpInst::FCMP_UNO:    // True if unordered: isnan(X) | isnan(Y)
    case FCmpInst::FCMP_ULT:    // True if unordered or less than
    case FCmpInst::FCMP_UGT:    // True if unordered or greater than
    case FCmpInst::FCMP_UNE:    // True if unordered or not equal
      // Canonicalize these to be 'fcmp uno %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_UNO);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;

    case FCmpInst::FCMP_ORD:    // True if ordered (no nans)
    case FCmpInst::FCMP_OEQ:    // True if ordered and equal
    case FCmpInst::FCMP_OGE:    // True if ordered and greater than or equal
    case FCmpInst::FCMP_OLE:    // True if ordered and less than or equal
      // Canonicalize these to be 'fcmp ord %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_ORD);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;
    }
  }

  if (isa<UndefValue>(Op1))                  // fcmp pred X, undef -> undef
    return ReplaceInstUsesWith(I, UndefValue::get(I.getType()));

  // Handle fcmp with constant RHS
  if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
    // If the constant is a nan, see if we can fold the comparison based on it.
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) {
      if (CFP->getValueAPF().isNaN()) {
        if (FCmpInst::isOrdered(I.getPredicate()))   // True if ordered and...
          return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
        assert(FCmpInst::isUnordered(I.getPredicate()) &&
               "Comparison must be either ordered or unordered!");
        // True if unordered.
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      }
    }

    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      switch (LHSI->getOpcode()) {
      case Instruction::PHI:
        // Only fold fcmp into the PHI if the phi and fcmp are in the same
        // block.  If in the same block, we're encouraging jump threading.  If
        // not, we are just pessimizing the code by making an i1 phi.
        if (LHSI->getParent() == I.getParent())
          if (Instruction *NV = FoldOpIntoPhi(I))
            return NV;
        break;
      case Instruction::SIToFP:
      case Instruction::UIToFP:
        if (Instruction *NV = FoldFCmp_IntToFP_Cst(I, LHSI, RHSC))
          return NV;
        break;
      case Instruction::Select:
        // If either operand of the select is a constant, we can fold the
        // comparison into the select arms, which will cause one to be
        // constant folded and the select turned into a bitwise or.
        Value *Op1 = 0, *Op2 = 0;
        if (LHSI->hasOneUse()) {
          if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
            // Fold the known value into the constant operand.
            Op1 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
            // Insert a new FCmp of the other select operand.
            Op2 = Builder->CreateFCmp(I.getPredicate(),
                                      LHSI->getOperand(2), RHSC, I.getName());
          } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
            // Fold the known value into the constant operand.
            Op2 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
            // Insert a new FCmp of the other select operand.
            Op1 = Builder->CreateFCmp(I.getPredicate(), LHSI->getOperand(1),
                                      RHSC, I.getName());
          }
        }

        if (Op1 && Op2)
          return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
        break;
      }
  }

  return Changed ? &I : 0;
}
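// Example of the self-compare canonicalization in visitFCmpInst above
// (illustrative IR): "fcmp ult double %x, %x" is true only when %x is a NaN,
// so it is rewritten to the canonical form "fcmp uno double %x, 0.0".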
Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
  bool Changed = SimplifyCompare(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  const Type *Ty = Op0->getType();

  // icmp X, X
  if (Op0 == Op1)
    return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(),
                                                   I.isTrueWhenEqual()));

  if (isa<UndefValue>(Op1))                  // X icmp undef -> undef
    return ReplaceInstUsesWith(I, UndefValue::get(I.getType()));

  // icmp <global/alloca*/null>, <global/alloca*/null> - Global/Stack value
  // addresses never equal each other!  We already know that Op0 != Op1.
  if ((isa<GlobalValue>(Op0) || isa<AllocaInst>(Op0) ||
       isa<ConstantPointerNull>(Op0)) &&
      (isa<GlobalValue>(Op1) || isa<AllocaInst>(Op1) ||
       isa<ConstantPointerNull>(Op1)))
    return ReplaceInstUsesWith(I, ConstantInt::get(Type::getInt1Ty(*Context),
                                                   !I.isTrueWhenEqual()));

  // icmp's with boolean values can always be turned into bitwise operations
  if (Ty == Type::getInt1Ty(*Context)) {
    switch (I.getPredicate()) {
    default: llvm_unreachable("Invalid icmp instruction!");
    case ICmpInst::ICMP_EQ: {                // icmp eq i1 A, B -> ~(A^B)
      Value *Xor = Builder->CreateXor(Op0, Op1, I.getName()+"tmp");
      return BinaryOperator::CreateNot(Xor);
    }
    case ICmpInst::ICMP_NE:                  // icmp ne i1 A, B -> A^B
      return BinaryOperator::CreateXor(Op0, Op1);

    case ICmpInst::ICMP_UGT:
      std::swap(Op0, Op1);                   // Change icmp ugt -> icmp ult
      // FALL THROUGH
    case ICmpInst::ICMP_ULT: {               // icmp ult i1 A, B -> ~A & B
      Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
      return BinaryOperator::CreateAnd(Not, Op1);
    }
    case ICmpInst::ICMP_SGT:
      std::swap(Op0, Op1);                   // Change icmp sgt -> icmp slt
      // FALL THROUGH
    case ICmpInst::ICMP_SLT: {               // icmp slt i1 A, B -> A & ~B
      Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
      return BinaryOperator::CreateAnd(Not, Op0);
    }
    case ICmpInst::ICMP_UGE:
      std::swap(Op0, Op1);                   // Change icmp uge -> icmp ule
      // FALL THROUGH
    case ICmpInst::ICMP_ULE: {               // icmp ule i1 A, B -> ~A | B
      Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
      return BinaryOperator::CreateOr(Not, Op1);
    }
    case ICmpInst::ICMP_SGE:
      std::swap(Op0, Op1);                   // Change icmp sge -> icmp sle
      // FALL THROUGH
    case ICmpInst::ICMP_SLE: {               // icmp sle i1 A, B -> A | ~B
      Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
      return BinaryOperator::CreateOr(Not, Op0);
    }
    }
  }

  unsigned BitWidth = 0;
  if (TD)
    BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
  else if (Ty->isIntOrIntVector())
    BitWidth = Ty->getScalarSizeInBits();

  bool isSignBit = false;

  // See if we are doing a comparison with a constant.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    Value *A = 0, *B = 0;

    // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B)
    if (I.isEquality() && CI->isNullValue() &&
        match(Op0, m_Sub(m_Value(A), m_Value(B)))) {
      // (icmp cond A B) if cond is equality
      return new ICmpInst(I.getPredicate(), A, B);
    }

    // If we have an icmp le or icmp ge instruction, turn it into the
    // appropriate icmp lt or icmp gt instruction.  This allows us to rely on
    // them being folded in the code below.
    switch (I.getPredicate()) {
    default: break;
    case ICmpInst::ICMP_ULE:
      if (CI->isMaxValue(false))                 // A <=u MAX -> TRUE
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      return new ICmpInst(ICmpInst::ICMP_ULT, Op0,
                          AddOne(CI));
    case ICmpInst::ICMP_SLE:
      if (CI->isMaxValue(true))                  // A <=s MAX -> TRUE
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
                          AddOne(CI));
    case ICmpInst::ICMP_UGE:
      if (CI->isMinValue(false))                 // A >=u MIN -> TRUE
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      return new ICmpInst(ICmpInst::ICMP_UGT, Op0,
                          SubOne(CI));
    case ICmpInst::ICMP_SGE:
      if (CI->isMinValue(true))                  // A >=s MIN -> TRUE
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
                          SubOne(CI));
    }

    // If this comparison is a normal comparison, it demands all
    // bits, if it is a sign bit comparison, it only demands the sign bit.
    bool UnusedBit;
    isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit);
  }

  // See if we can fold the comparison based on range information we can get
  // by checking whether bits are known to be zero or one in the input.
  if (BitWidth != 0) {
    APInt Op0KnownZero(BitWidth, 0), Op0KnownOne(BitWidth, 0);
    APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0);

    if (SimplifyDemandedBits(I.getOperandUse(0),
                             isSignBit ? APInt::getSignBit(BitWidth)
                                       : APInt::getAllOnesValue(BitWidth),
                             Op0KnownZero, Op0KnownOne, 0))
      return &I;
    if (SimplifyDemandedBits(I.getOperandUse(1),
                             APInt::getAllOnesValue(BitWidth),
                             Op1KnownZero, Op1KnownOne, 0))
      return &I;

    // Given the known and unknown bits, compute a range that the LHS could be
    // in.  Compute the Min, Max and RHS values based on the known bits. For the
    // EQ and NE we use unsigned values.
    APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
    APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
    if (ICmpInst::isSignedPredicate(I.getPredicate())) {
      ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
                                             Op0Min, Op0Max);
      ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
                                             Op1Min, Op1Max);
    } else {
      ComputeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
                                               Op0Min, Op0Max);
      ComputeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
                                               Op1Min, Op1Max);
    }

    // If Min and Max are known to be the same, then SimplifyDemandedBits
    // figured out that the LHS is a constant.  Just constant fold this now so
    // that code below can assume that Min != Max.
    if (!isa<Constant>(Op0) && Op0Min == Op0Max)
      return new ICmpInst(I.getPredicate(),
                          ConstantInt::get(*Context, Op0Min), Op1);
    if (!isa<Constant>(Op1) && Op1Min == Op1Max)
      return new ICmpInst(I.getPredicate(), Op0,
                          ConstantInt::get(*Context, Op1Min));

    // Based on the range information we know about the LHS, see if we can
    // simplify this comparison.  For example, (x&4) < 8 is always true.
    switch (I.getPredicate()) {
    default: llvm_unreachable("Unknown icmp opcode!");
    case ICmpInst::ICMP_EQ:
      if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      break;
    case ICmpInst::ICMP_NE:
      if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      break;
    case ICmpInst::ICMP_ULT:
      if (Op0Max.ult(Op1Min))          // A <u B -> true if max(A) < min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Min.uge(Op1Max))          // A <u B -> false if min(A) >= max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      if (Op1Min == Op0Max)            // A <u B -> A != B if max(A) == min(B)
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
        if (Op1Max == Op0Min+1)        // A <u C -> A == C-1 if min(A)+1 == C
          return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
                              SubOne(CI));

        // (x <u 2147483648) -> (x >s -1)  -> true if sign bit clear
        if (CI->isMinValue(true))
          return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
                              Constant::getAllOnesValue(Op0->getType()));
      }
      break;
    case ICmpInst::ICMP_UGT:
      if (Op0Min.ugt(Op1Max))          // A >u B -> true if min(A) > max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Max.ule(Op1Min))          // A >u B -> false if max(A) <= min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));

      if (Op1Max == Op0Min)            // A >u B -> A != B if min(A) == max(B)
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
        if (Op1Min == Op0Max-1)        // A >u C -> A == C+1 if max(a)-1 == C
          return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
                              AddOne(CI));

        // (x >u 2147483647) -> (x <s 0)  -> true if sign bit set
        if (CI->isMaxValue(true))
          return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
                              Constant::getNullValue(Op0->getType()));
      }
      break;
    case ICmpInst::ICMP_SLT:
      if (Op0Max.slt(Op1Min))          // A <s B -> true if max(A) < min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Min.sge(Op1Max))          // A <s B -> false if min(A) >= max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      if (Op1Min == Op0Max)            // A <s B -> A != B if max(A) == min(B)
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
        if (Op1Max == Op0Min+1)        // A <s C -> A == C-1 if min(A)+1 == C
          return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
                              SubOne(CI));
      }
      break;
    case ICmpInst::ICMP_SGT:
      if (Op0Min.sgt(Op1Max))          // A >s B -> true if min(A) > max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Max.sle(Op1Min))          // A >s B -> false if max(A) <= min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));

      if (Op1Max == Op0Min)            // A >s B -> A != B if min(A) == max(B)
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
        if (Op1Min == Op0Max-1)        // A >s C -> A == C+1 if max(A)-1 == C
          return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
                              AddOne(CI));
      }
      break;
    case ICmpInst::ICMP_SGE:
      assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
      if (Op0Min.sge(Op1Max))          // A >=s B -> true if min(A) >= max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Max.slt(Op1Min))          // A >=s B -> false if max(A) < min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      break;
    case ICmpInst::ICMP_SLE:
      assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
      if (Op0Max.sle(Op1Min))          // A <=s B -> true if max(A) <= min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Min.sgt(Op1Max))          // A <=s B -> false if min(A) > max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      break;
    case ICmpInst::ICMP_UGE:
      assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
      if (Op0Min.uge(Op1Max))          // A >=u B -> true if min(A) >= max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Max.ult(Op1Min))          // A >=u B -> false if max(A) < min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      break;
    case ICmpInst::ICMP_ULE:
      assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
      if (Op0Max.ule(Op1Min))          // A <=u B -> true if max(A) <= min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Min.ugt(Op1Max))          // A <=u B -> false if min(A) > max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      break;
    }

    // Turn a signed comparison into an unsigned one if both operands
    // are known to have the same sign.
    if (I.isSignedPredicate() &&
        ((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) ||
         (Op0KnownOne.isNegative() && Op1KnownOne.isNegative())))
      return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
  }

  // Test if the ICmpInst instruction is used exclusively by a select as
  // part of a minimum or maximum operation. If so, refrain from doing
  // any other folding. This helps out other analyses which understand
  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
  // and CodeGen. And in this case, at least one of the comparison
  // operands has at least one user besides the compare (the select),
  // which would often largely negate the benefit of folding anyway.
  if (I.hasOneUse())
    if (SelectInst *SI = dyn_cast<SelectInst>(*I.use_begin()))
      if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
          (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
        return 0;

  // See if we are doing a comparison between a constant and an instruction
  // that can be folded into the comparison.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    // Since the RHS is a ConstantInt (CI), if the left hand side is an
    // instruction, see if that instruction also has constants so that the
    // instruction can be folded into the icmp.
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      if (Instruction *Res = visitICmpInstWithInstAndIntCst(I, LHSI, CI))
        return Res;
  }

  // Handle icmp with constant (but not simple integer constant) RHS
  if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      switch (LHSI->getOpcode()) {
      case Instruction::GetElementPtr:
        if (RHSC->isNullValue()) {
          // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
          bool isAllZeros = true;
          for (unsigned i = 1, e = LHSI->getNumOperands(); i != e; ++i)
            if (!isa<Constant>(LHSI->getOperand(i)) ||
                !cast<Constant>(LHSI->getOperand(i))->isNullValue()) {
              isAllZeros = false;
              break;
            }
          if (isAllZeros)
            return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
                    Constant::getNullValue(LHSI->getOperand(0)->getType()));
        }
        break;

      case Instruction::PHI:
        // Only fold icmp into the PHI if the phi and icmp are in the same
        // block.  If in the same block, we're encouraging jump threading.  If
        // not, we are just pessimizing the code by making an i1 phi.
        if (LHSI->getParent() == I.getParent())
          if (Instruction *NV = FoldOpIntoPhi(I))
            return NV;
        break;
      case Instruction::Select: {
        // If either operand of the select is a constant, we can fold the
        // comparison into the select arms, which will cause one to be
        // constant folded and the select turned into a bitwise or.
        Value *Op1 = 0, *Op2 = 0;
        if (LHSI->hasOneUse()) {
          if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
            // Fold the known value into the constant operand.
            Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
            // Insert a new ICmp of the other select operand.
            Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2),
                                      RHSC, I.getName());
          } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
            // Fold the known value into the constant operand.
            Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
            // Insert a new ICmp of the other select operand.
            Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1),
                                      RHSC, I.getName());
          }
        }

        if (Op1 && Op2)
          return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
        break;
      }
      case Instruction::Malloc:
        // If we have (malloc != null), and if the malloc has a single use, we
        // can assume it is successful and remove the malloc.
        if (LHSI->hasOneUse() && isa<ConstantPointerNull>(RHSC)) {
          Worklist.Add(LHSI);
          return ReplaceInstUsesWith(I,
                                     ConstantInt::get(Type::getInt1Ty(*Context),
                                                      !I.isTrueWhenEqual()));
        }
        break;
      }
  }

  // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
    if (Instruction *NI = FoldGEPICmp(GEP, Op1, I.getPredicate(), I))
      return NI;
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
    if (Instruction *NI = FoldGEPICmp(GEP, Op0,
                           ICmpInst::getSwappedPredicate(I.getPredicate()), I))
      return NI;

  // Test to see if the operands of the icmp are casted versions of other
  // values.  If the ptr->ptr cast can be stripped off both arguments, we do so
  // now.
  if (BitCastInst *CI = dyn_cast
<BitCastInst
>(Op0
)) {
6254 if (isa
<PointerType
>(Op0
->getType()) &&
6255 (isa
<Constant
>(Op1
) || isa
<BitCastInst
>(Op1
))) {
6256 // We keep moving the cast from the left operand over to the right
6257 // operand, where it can often be eliminated completely.
6258 Op0
= CI
->getOperand(0);
6260 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
6261 // so eliminate it as well.
6262 if (BitCastInst
*CI2
= dyn_cast
<BitCastInst
>(Op1
))
6263 Op1
= CI2
->getOperand(0);
6265 // If Op1 is a constant, we can fold the cast into the constant.
6266 if (Op0
->getType() != Op1
->getType()) {
6267 if (Constant
*Op1C
= dyn_cast
<Constant
>(Op1
)) {
6268 Op1
= ConstantExpr::getBitCast(Op1C
, Op0
->getType());
6270 // Otherwise, cast the RHS right before the icmp
6271 Op1
= Builder
->CreateBitCast(Op1
, Op0
->getType());
6274 return new ICmpInst(I
.getPredicate(), Op0
, Op1
);
6278 if (isa
<CastInst
>(Op0
)) {
6279 // Handle the special case of: icmp (cast bool to X), <cst>
6280 // This comes up when you have code like
6283 // For generality, we handle any zero-extension of any operand comparison
6284 // with a constant or another cast from the same type.
6285 if (isa
<ConstantInt
>(Op1
) || isa
<CastInst
>(Op1
))
6286 if (Instruction
*R
= visitICmpInstWithCastAndCast(I
))
6290 // See if it's the same type of instruction on the left and right.
6291 if (BinaryOperator
*Op0I
= dyn_cast
<BinaryOperator
>(Op0
)) {
6292 if (BinaryOperator
*Op1I
= dyn_cast
<BinaryOperator
>(Op1
)) {
6293 if (Op0I
->getOpcode() == Op1I
->getOpcode() && Op0I
->hasOneUse() &&
6294 Op1I
->hasOneUse() && Op0I
->getOperand(1) == Op1I
->getOperand(1)) {
6295 switch (Op0I
->getOpcode()) {
6297 case Instruction::Add
:
6298 case Instruction::Sub
:
6299 case Instruction::Xor
:
6300 if (I
.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
6301 return new ICmpInst(I
.getPredicate(), Op0I
->getOperand(0),
6302 Op1I
->getOperand(0));
6303 // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
6304 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Op0I
->getOperand(1))) {
6305 if (CI
->getValue().isSignBit()) {
6306 ICmpInst::Predicate Pred
= I
.isSignedPredicate()
6307 ? I
.getUnsignedPredicate()
6308 : I
.getSignedPredicate();
6309 return new ICmpInst(Pred
, Op0I
->getOperand(0),
6310 Op1I
->getOperand(0));
6313 if (CI
->getValue().isMaxSignedValue()) {
6314 ICmpInst::Predicate Pred
= I
.isSignedPredicate()
6315 ? I
.getUnsignedPredicate()
6316 : I
.getSignedPredicate();
6317 Pred
= I
.getSwappedPredicate(Pred
);
6318 return new ICmpInst(Pred
, Op0I
->getOperand(0),
6319 Op1I
->getOperand(0));
6323 case Instruction::Mul
:
6324 if (!I
.isEquality())
6327 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Op0I
->getOperand(1))) {
6328 // a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask
6329 // Mask = -1 >> count-trailing-zeros(Cst).
6330 if (!CI
->isZero() && !CI
->isOne()) {
6331 const APInt
&AP
= CI
->getValue();
6332 ConstantInt
*Mask
= ConstantInt::get(*Context
,
6333 APInt::getLowBitsSet(AP
.getBitWidth(),
6335 AP
.countTrailingZeros()));
6336 Value
*And1
= Builder
->CreateAnd(Op0I
->getOperand(0), Mask
);
6337 Value
*And2
= Builder
->CreateAnd(Op1I
->getOperand(0), Mask
);
6338 return new ICmpInst(I
.getPredicate(), And1
, And2
);
6347 // ~x < ~y --> y < x
6349 if (match(Op0
, m_Not(m_Value(A
))) &&
6350 match(Op1
, m_Not(m_Value(B
))))
6351 return new ICmpInst(I
.getPredicate(), B
, A
);
6354 if (I
.isEquality()) {
6355 Value
*A
, *B
, *C
, *D
;
6357 // -x == -y --> x == y
6358 if (match(Op0
, m_Neg(m_Value(A
))) &&
6359 match(Op1
, m_Neg(m_Value(B
))))
6360 return new ICmpInst(I
.getPredicate(), A
, B
);
6362 if (match(Op0
, m_Xor(m_Value(A
), m_Value(B
)))) {
6363 if (A
== Op1
|| B
== Op1
) { // (A^B) == A -> B == 0
6364 Value
*OtherVal
= A
== Op1
? B
: A
;
6365 return new ICmpInst(I
.getPredicate(), OtherVal
,
6366 Constant::getNullValue(A
->getType()));
6369 if (match(Op1
, m_Xor(m_Value(C
), m_Value(D
)))) {
6370 // A^c1 == C^c2 --> A == C^(c1^c2)
6371 ConstantInt
*C1
, *C2
;
6372 if (match(B
, m_ConstantInt(C1
)) &&
6373 match(D
, m_ConstantInt(C2
)) && Op1
->hasOneUse()) {
6375 ConstantInt::get(*Context
, C1
->getValue() ^ C2
->getValue());
6376 Value
*Xor
= Builder
->CreateXor(C
, NC
, "tmp");
6377 return new ICmpInst(I
.getPredicate(), A
, Xor
);
6380 // A^B == A^D -> B == D
6381 if (A
== C
) return new ICmpInst(I
.getPredicate(), B
, D
);
6382 if (A
== D
) return new ICmpInst(I
.getPredicate(), B
, C
);
6383 if (B
== C
) return new ICmpInst(I
.getPredicate(), A
, D
);
6384 if (B
== D
) return new ICmpInst(I
.getPredicate(), A
, C
);
6388 if (match(Op1
, m_Xor(m_Value(A
), m_Value(B
))) &&
6389 (A
== Op0
|| B
== Op0
)) {
6390 // A == (A^B) -> B == 0
6391 Value
*OtherVal
= A
== Op0
? B
: A
;
6392 return new ICmpInst(I
.getPredicate(), OtherVal
,
6393 Constant::getNullValue(A
->getType()));
6396 // (A-B) == A -> B == 0
6397 if (match(Op0
, m_Sub(m_Specific(Op1
), m_Value(B
))))
6398 return new ICmpInst(I
.getPredicate(), B
,
6399 Constant::getNullValue(B
->getType()));
6401 // A == (A-B) -> B == 0
6402 if (match(Op1
, m_Sub(m_Specific(Op0
), m_Value(B
))))
6403 return new ICmpInst(I
.getPredicate(), B
,
6404 Constant::getNullValue(B
->getType()));
6406 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
6407 if (Op0
->hasOneUse() && Op1
->hasOneUse() &&
6408 match(Op0
, m_And(m_Value(A
), m_Value(B
))) &&
6409 match(Op1
, m_And(m_Value(C
), m_Value(D
)))) {
6410 Value
*X
= 0, *Y
= 0, *Z
= 0;
6413 X
= B
; Y
= D
; Z
= A
;
6414 } else if (A
== D
) {
6415 X
= B
; Y
= C
; Z
= A
;
6416 } else if (B
== C
) {
6417 X
= A
; Y
= D
; Z
= B
;
6418 } else if (B
== D
) {
6419 X
= A
; Y
= C
; Z
= B
;
6422 if (X
) { // Build (X^Y) & Z
6423 Op1
= Builder
->CreateXor(X
, Y
, "tmp");
6424 Op1
= Builder
->CreateAnd(Op1
, Z
, "tmp");
6425 I
.setOperand(0, Op1
);
6426 I
.setOperand(1, Constant::getNullValue(Op1
->getType()));
6431 return Changed
? &I
: 0;
6435 /// FoldICmpDivCst - Fold "icmp pred, ([su]div X, DivRHS), CmpRHS" where DivRHS
6436 /// and CmpRHS are both known to be integer constants.
6437 Instruction
*InstCombiner::FoldICmpDivCst(ICmpInst
&ICI
, BinaryOperator
*DivI
,
6438 ConstantInt
*DivRHS
) {
6439 ConstantInt
*CmpRHS
= cast
<ConstantInt
>(ICI
.getOperand(1));
6440 const APInt
&CmpRHSV
= CmpRHS
->getValue();
6442 // FIXME: If the operand types don't match the type of the divide
6443 // then don't attempt this transform. The code below doesn't have the
6444 // logic to deal with a signed divide and an unsigned compare (and
6445 // vice versa). This is because (x /s C1) <s C2 produces different
6446 // results than (x /s C1) <u C2 or (x /u C1) <s C2 or even
6447 // (x /u C1) <u C2. Simply casting the operands and result won't
6448 // work. :( The if statement below tests that condition and bails
6450 bool DivIsSigned
= DivI
->getOpcode() == Instruction::SDiv
;
6451 if (!ICI
.isEquality() && DivIsSigned
!= ICI
.isSignedPredicate())
6453 if (DivRHS
->isZero())
6454 return 0; // The ProdOV computation fails on divide by zero.
6455 if (DivIsSigned
&& DivRHS
->isAllOnesValue())
6456 return 0; // The overflow computation also screws up here
6457 if (DivRHS
->isOne())
6458 return 0; // Not worth bothering, and eliminates some funny cases
6461 // Compute Prod = CI * DivRHS. We are essentially solving an equation
6462 // of form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and
6463 // C2 (CI). By solving for X we can turn this into a range check
6464 // instead of computing a divide.
6465 Constant
*Prod
= ConstantExpr::getMul(CmpRHS
, DivRHS
);
6467 // Determine if the product overflows by seeing if the product is
6468 // not equal to the divide. Make sure we do the same kind of divide
6469 // as in the LHS instruction that we're folding.
6470 bool ProdOV
= (DivIsSigned
? ConstantExpr::getSDiv(Prod
, DivRHS
) :
6471 ConstantExpr::getUDiv(Prod
, DivRHS
)) != CmpRHS
;
6473 // Get the ICmp opcode
6474 ICmpInst::Predicate Pred
= ICI
.getPredicate();
6476 // Figure out the interval that is being checked. For example, a comparison
6477 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
6478 // Compute this interval based on the constants involved and the signedness of
6479 // the compare/divide. This computes a half-open interval, keeping track of
6480 // whether either value in the interval overflows. After analysis each
6481 // overflow variable is set to 0 if it's corresponding bound variable is valid
6482 // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
6483 int LoOverflow
= 0, HiOverflow
= 0;
6484 Constant
*LoBound
= 0, *HiBound
= 0;
6486 if (!DivIsSigned
) { // udiv
6487 // e.g. X/5 op 3 --> [15, 20)
6489 HiOverflow
= LoOverflow
= ProdOV
;
6491 HiOverflow
= AddWithOverflow(HiBound
, LoBound
, DivRHS
, Context
, false);
6492 } else if (DivRHS
->getValue().isStrictlyPositive()) { // Divisor is > 0.
6493 if (CmpRHSV
== 0) { // (X / pos) op 0
6494 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
6495 LoBound
= cast
<ConstantInt
>(ConstantExpr::getNeg(SubOne(DivRHS
)));
6497 } else if (CmpRHSV
.isStrictlyPositive()) { // (X / pos) op pos
6498 LoBound
= Prod
; // e.g. X/5 op 3 --> [15, 20)
6499 HiOverflow
= LoOverflow
= ProdOV
;
6501 HiOverflow
= AddWithOverflow(HiBound
, Prod
, DivRHS
, Context
, true);
6502 } else { // (X / pos) op neg
6503 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
6504 HiBound
= AddOne(Prod
);
6505 LoOverflow
= HiOverflow
= ProdOV
? -1 : 0;
6507 ConstantInt
* DivNeg
=
6508 cast
<ConstantInt
>(ConstantExpr::getNeg(DivRHS
));
6509 LoOverflow
= AddWithOverflow(LoBound
, HiBound
, DivNeg
, Context
,
6513 } else if (DivRHS
->getValue().isNegative()) { // Divisor is < 0.
6514 if (CmpRHSV
== 0) { // (X / neg) op 0
6515 // e.g. X/-5 op 0 --> [-4, 5)
6516 LoBound
= AddOne(DivRHS
);
6517 HiBound
= cast
<ConstantInt
>(ConstantExpr::getNeg(DivRHS
));
6518 if (HiBound
== DivRHS
) { // -INTMIN = INTMIN
6519 HiOverflow
= 1; // [INTMIN+1, overflow)
6520 HiBound
= 0; // e.g. X/INTMIN = 0 --> X > INTMIN
6522 } else if (CmpRHSV
.isStrictlyPositive()) { // (X / neg) op pos
6523 // e.g. X/-5 op 3 --> [-19, -14)
6524 HiBound
= AddOne(Prod
);
6525 HiOverflow
= LoOverflow
= ProdOV
? -1 : 0;
6527 LoOverflow
= AddWithOverflow(LoBound
, HiBound
,
6528 DivRHS
, Context
, true) ? -1 : 0;
6529 } else { // (X / neg) op neg
6530 LoBound
= Prod
; // e.g. X/-5 op -3 --> [15, 20)
6531 LoOverflow
= HiOverflow
= ProdOV
;
6533 HiOverflow
= SubWithOverflow(HiBound
, Prod
, DivRHS
, Context
, true);
6536 // Dividing by a negative swaps the condition. LT <-> GT
6537 Pred
= ICmpInst::getSwappedPredicate(Pred
);
6540 Value
*X
= DivI
->getOperand(0);
6542 default: llvm_unreachable("Unhandled icmp opcode!");
6543 case ICmpInst::ICMP_EQ
:
6544 if (LoOverflow
&& HiOverflow
)
6545 return ReplaceInstUsesWith(ICI
, ConstantInt::getFalse(*Context
));
6546 else if (HiOverflow
)
6547 return new ICmpInst(DivIsSigned
? ICmpInst::ICMP_SGE
:
6548 ICmpInst::ICMP_UGE
, X
, LoBound
);
6549 else if (LoOverflow
)
6550 return new ICmpInst(DivIsSigned
? ICmpInst::ICMP_SLT
:
6551 ICmpInst::ICMP_ULT
, X
, HiBound
);
6553 return InsertRangeTest(X
, LoBound
, HiBound
, DivIsSigned
, true, ICI
);
6554 case ICmpInst::ICMP_NE
:
6555 if (LoOverflow
&& HiOverflow
)
6556 return ReplaceInstUsesWith(ICI
, ConstantInt::getTrue(*Context
));
6557 else if (HiOverflow
)
6558 return new ICmpInst(DivIsSigned
? ICmpInst::ICMP_SLT
:
6559 ICmpInst::ICMP_ULT
, X
, LoBound
);
6560 else if (LoOverflow
)
6561 return new ICmpInst(DivIsSigned
? ICmpInst::ICMP_SGE
:
6562 ICmpInst::ICMP_UGE
, X
, HiBound
);
6564 return InsertRangeTest(X
, LoBound
, HiBound
, DivIsSigned
, false, ICI
);
6565 case ICmpInst::ICMP_ULT
:
6566 case ICmpInst::ICMP_SLT
:
6567 if (LoOverflow
== +1) // Low bound is greater than input range.
6568 return ReplaceInstUsesWith(ICI
, ConstantInt::getTrue(*Context
));
6569 if (LoOverflow
== -1) // Low bound is less than input range.
6570 return ReplaceInstUsesWith(ICI
, ConstantInt::getFalse(*Context
));
6571 return new ICmpInst(Pred
, X
, LoBound
);
6572 case ICmpInst::ICMP_UGT
:
6573 case ICmpInst::ICMP_SGT
:
6574 if (HiOverflow
== +1) // High bound greater than input range.
6575 return ReplaceInstUsesWith(ICI
, ConstantInt::getFalse(*Context
));
6576 else if (HiOverflow
== -1) // High bound less than input range.
6577 return ReplaceInstUsesWith(ICI
, ConstantInt::getTrue(*Context
));
6578 if (Pred
== ICmpInst::ICMP_UGT
)
6579 return new ICmpInst(ICmpInst::ICMP_UGE
, X
, HiBound
);
6581 return new ICmpInst(ICmpInst::ICMP_SGE
, X
, HiBound
);
6586 /// visitICmpInstWithInstAndIntCst - Handle "icmp (instr, intcst)".
6588 Instruction
*InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst
&ICI
,
6591 const APInt
&RHSV
= RHS
->getValue();
6593 switch (LHSI
->getOpcode()) {
6594 case Instruction::Trunc
:
6595 if (ICI
.isEquality() && LHSI
->hasOneUse()) {
6596 // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
6597 // of the high bits truncated out of x are known.
6598 unsigned DstBits
= LHSI
->getType()->getPrimitiveSizeInBits(),
6599 SrcBits
= LHSI
->getOperand(0)->getType()->getPrimitiveSizeInBits();
6600 APInt
Mask(APInt::getHighBitsSet(SrcBits
, SrcBits
-DstBits
));
6601 APInt
KnownZero(SrcBits
, 0), KnownOne(SrcBits
, 0);
6602 ComputeMaskedBits(LHSI
->getOperand(0), Mask
, KnownZero
, KnownOne
);
6604 // If all the high bits are known, we can do this xform.
6605 if ((KnownZero
|KnownOne
).countLeadingOnes() >= SrcBits
-DstBits
) {
6606 // Pull in the high bits from known-ones set.
6607 APInt
NewRHS(RHS
->getValue());
6608 NewRHS
.zext(SrcBits
);
6610 return new ICmpInst(ICI
.getPredicate(), LHSI
->getOperand(0),
6611 ConstantInt::get(*Context
, NewRHS
));
6616 case Instruction::Xor
: // (icmp pred (xor X, XorCST), CI)
6617 if (ConstantInt
*XorCST
= dyn_cast
<ConstantInt
>(LHSI
->getOperand(1))) {
6618 // If this is a comparison that tests the signbit (X < 0) or (x > -1),
6620 if ((ICI
.getPredicate() == ICmpInst::ICMP_SLT
&& RHSV
== 0) ||
6621 (ICI
.getPredicate() == ICmpInst::ICMP_SGT
&& RHSV
.isAllOnesValue())) {
6622 Value
*CompareVal
= LHSI
->getOperand(0);
6624 // If the sign bit of the XorCST is not set, there is no change to
6625 // the operation, just stop using the Xor.
6626 if (!XorCST
->getValue().isNegative()) {
6627 ICI
.setOperand(0, CompareVal
);
6632 // Was the old condition true if the operand is positive?
6633 bool isTrueIfPositive
= ICI
.getPredicate() == ICmpInst::ICMP_SGT
;
6635 // If so, the new one isn't.
6636 isTrueIfPositive
^= true;
6638 if (isTrueIfPositive
)
6639 return new ICmpInst(ICmpInst::ICMP_SGT
, CompareVal
,
6642 return new ICmpInst(ICmpInst::ICMP_SLT
, CompareVal
,
6646 if (LHSI
->hasOneUse()) {
6647 // (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
6648 if (!ICI
.isEquality() && XorCST
->getValue().isSignBit()) {
6649 const APInt
&SignBit
= XorCST
->getValue();
6650 ICmpInst::Predicate Pred
= ICI
.isSignedPredicate()
6651 ? ICI
.getUnsignedPredicate()
6652 : ICI
.getSignedPredicate();
6653 return new ICmpInst(Pred
, LHSI
->getOperand(0),
6654 ConstantInt::get(*Context
, RHSV
^ SignBit
));
6657 // (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
6658 if (!ICI
.isEquality() && XorCST
->getValue().isMaxSignedValue()) {
6659 const APInt
&NotSignBit
= XorCST
->getValue();
6660 ICmpInst::Predicate Pred
= ICI
.isSignedPredicate()
6661 ? ICI
.getUnsignedPredicate()
6662 : ICI
.getSignedPredicate();
6663 Pred
= ICI
.getSwappedPredicate(Pred
);
6664 return new ICmpInst(Pred
, LHSI
->getOperand(0),
6665 ConstantInt::get(*Context
, RHSV
^ NotSignBit
));
6670 case Instruction::And
: // (icmp pred (and X, AndCST), RHS)
6671 if (LHSI
->hasOneUse() && isa
<ConstantInt
>(LHSI
->getOperand(1)) &&
6672 LHSI
->getOperand(0)->hasOneUse()) {
6673 ConstantInt
*AndCST
= cast
<ConstantInt
>(LHSI
->getOperand(1));
6675 // If the LHS is an AND of a truncating cast, we can widen the
6676 // and/compare to be the input width without changing the value
6677 // produced, eliminating a cast.
6678 if (TruncInst
*Cast
= dyn_cast
<TruncInst
>(LHSI
->getOperand(0))) {
6679 // We can do this transformation if either the AND constant does not
6680 // have its sign bit set or if it is an equality comparison.
6681 // Extending a relational comparison when we're checking the sign
6682 // bit would not work.
6683 if (Cast
->hasOneUse() &&
6684 (ICI
.isEquality() ||
6685 (AndCST
->getValue().isNonNegative() && RHSV
.isNonNegative()))) {
6687 cast
<IntegerType
>(Cast
->getOperand(0)->getType())->getBitWidth();
6688 APInt NewCST
= AndCST
->getValue();
6689 NewCST
.zext(BitWidth
);
6691 NewCI
.zext(BitWidth
);
6693 Builder
->CreateAnd(Cast
->getOperand(0),
6694 ConstantInt::get(*Context
, NewCST
), LHSI
->getName());
6695 return new ICmpInst(ICI
.getPredicate(), NewAnd
,
6696 ConstantInt::get(*Context
, NewCI
));
6700 // If this is: (X >> C1) & C2 != C3 (where any shift and any compare
6701 // could exist), turn it into (X & (C2 << C1)) != (C3 << C1). This
6702 // happens a LOT in code produced by the C front-end, for bitfield
6704 BinaryOperator
*Shift
= dyn_cast
<BinaryOperator
>(LHSI
->getOperand(0));
6705 if (Shift
&& !Shift
->isShift())
6709 ShAmt
= Shift
? dyn_cast
<ConstantInt
>(Shift
->getOperand(1)) : 0;
6710 const Type
*Ty
= Shift
? Shift
->getType() : 0; // Type of the shift.
6711 const Type
*AndTy
= AndCST
->getType(); // Type of the and.
6713 // We can fold this as long as we can't shift unknown bits
6714 // into the mask. This can only happen with signed shift
6715 // rights, as they sign-extend.
6717 bool CanFold
= Shift
->isLogicalShift();
6719 // To test for the bad case of the signed shr, see if any
6720 // of the bits shifted in could be tested after the mask.
6721 uint32_t TyBits
= Ty
->getPrimitiveSizeInBits();
6722 int ShAmtVal
= TyBits
- ShAmt
->getLimitedValue(TyBits
);
6724 uint32_t BitWidth
= AndTy
->getPrimitiveSizeInBits();
6725 if ((APInt::getHighBitsSet(BitWidth
, BitWidth
-ShAmtVal
) &
6726 AndCST
->getValue()) == 0)
6732 if (Shift
->getOpcode() == Instruction::Shl
)
6733 NewCst
= ConstantExpr::getLShr(RHS
, ShAmt
);
6735 NewCst
= ConstantExpr::getShl(RHS
, ShAmt
);
6737 // Check to see if we are shifting out any of the bits being
6739 if (ConstantExpr::get(Shift
->getOpcode(),
6740 NewCst
, ShAmt
) != RHS
) {
6741 // If we shifted bits out, the fold is not going to work out.
6742 // As a special case, check to see if this means that the
6743 // result is always true or false now.
6744 if (ICI
.getPredicate() == ICmpInst::ICMP_EQ
)
6745 return ReplaceInstUsesWith(ICI
, ConstantInt::getFalse(*Context
));
6746 if (ICI
.getPredicate() == ICmpInst::ICMP_NE
)
6747 return ReplaceInstUsesWith(ICI
, ConstantInt::getTrue(*Context
));
6749 ICI
.setOperand(1, NewCst
);
6750 Constant
*NewAndCST
;
6751 if (Shift
->getOpcode() == Instruction::Shl
)
6752 NewAndCST
= ConstantExpr::getLShr(AndCST
, ShAmt
);
6754 NewAndCST
= ConstantExpr::getShl(AndCST
, ShAmt
);
6755 LHSI
->setOperand(1, NewAndCST
);
6756 LHSI
->setOperand(0, Shift
->getOperand(0));
6757 Worklist
.Add(Shift
); // Shift is dead.
6763 // Turn ((X >> Y) & C) == 0 into (X & (C << Y)) == 0. The later is
6764 // preferable because it allows the C<<Y expression to be hoisted out
6765 // of a loop if Y is invariant and X is not.
6766 if (Shift
&& Shift
->hasOneUse() && RHSV
== 0 &&
6767 ICI
.isEquality() && !Shift
->isArithmeticShift() &&
6768 !isa
<Constant
>(Shift
->getOperand(0))) {
6771 if (Shift
->getOpcode() == Instruction::LShr
) {
6772 NS
= Builder
->CreateShl(AndCST
, Shift
->getOperand(1), "tmp");
6774 // Insert a logical shift.
6775 NS
= Builder
->CreateLShr(AndCST
, Shift
->getOperand(1), "tmp");
6778 // Compute X & (C << Y).
6780 Builder
->CreateAnd(Shift
->getOperand(0), NS
, LHSI
->getName());
6782 ICI
.setOperand(0, NewAnd
);
6788 case Instruction::Shl
: { // (icmp pred (shl X, ShAmt), CI)
6789 ConstantInt
*ShAmt
= dyn_cast
<ConstantInt
>(LHSI
->getOperand(1));
6792 uint32_t TypeBits
= RHSV
.getBitWidth();
6794 // Check that the shift amount is in range. If not, don't perform
6795 // undefined shifts. When the shift is visited it will be
6797 if (ShAmt
->uge(TypeBits
))
6800 if (ICI
.isEquality()) {
6801 // If we are comparing against bits always shifted out, the
6802 // comparison cannot succeed.
6804 ConstantExpr::getShl(ConstantExpr::getLShr(RHS
, ShAmt
),
6806 if (Comp
!= RHS
) {// Comparing against a bit that we know is zero.
6807 bool IsICMP_NE
= ICI
.getPredicate() == ICmpInst::ICMP_NE
;
6808 Constant
*Cst
= ConstantInt::get(Type::getInt1Ty(*Context
), IsICMP_NE
);
6809 return ReplaceInstUsesWith(ICI
, Cst
);
6812 if (LHSI
->hasOneUse()) {
6813 // Otherwise strength reduce the shift into an and.
6814 uint32_t ShAmtVal
= (uint32_t)ShAmt
->getLimitedValue(TypeBits
);
6816 ConstantInt::get(*Context
, APInt::getLowBitsSet(TypeBits
,
6817 TypeBits
-ShAmtVal
));
6820 Builder
->CreateAnd(LHSI
->getOperand(0),Mask
, LHSI
->getName()+".mask");
6821 return new ICmpInst(ICI
.getPredicate(), And
,
6822 ConstantInt::get(*Context
, RHSV
.lshr(ShAmtVal
)));
6826 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
6827 bool TrueIfSigned
= false;
6828 if (LHSI
->hasOneUse() &&
6829 isSignBitCheck(ICI
.getPredicate(), RHS
, TrueIfSigned
)) {
6830 // (X << 31) <s 0 --> (X&1) != 0
6831 Constant
*Mask
= ConstantInt::get(*Context
, APInt(TypeBits
, 1) <<
6832 (TypeBits
-ShAmt
->getZExtValue()-1));
6834 Builder
->CreateAnd(LHSI
->getOperand(0), Mask
, LHSI
->getName()+".mask");
6835 return new ICmpInst(TrueIfSigned
? ICmpInst::ICMP_NE
: ICmpInst::ICMP_EQ
,
6836 And
, Constant::getNullValue(And
->getType()));
6841 case Instruction::LShr
: // (icmp pred (shr X, ShAmt), CI)
6842 case Instruction::AShr
: {
6843 // Only handle equality comparisons of shift-by-constant.
6844 ConstantInt
*ShAmt
= dyn_cast
<ConstantInt
>(LHSI
->getOperand(1));
6845 if (!ShAmt
|| !ICI
.isEquality()) break;
6847 // Check that the shift amount is in range. If not, don't perform
6848 // undefined shifts. When the shift is visited it will be
6850 uint32_t TypeBits
= RHSV
.getBitWidth();
6851 if (ShAmt
->uge(TypeBits
))
6854 uint32_t ShAmtVal
= (uint32_t)ShAmt
->getLimitedValue(TypeBits
);
6856 // If we are comparing against bits always shifted out, the
6857 // comparison cannot succeed.
6858 APInt Comp
= RHSV
<< ShAmtVal
;
6859 if (LHSI
->getOpcode() == Instruction::LShr
)
6860 Comp
= Comp
.lshr(ShAmtVal
);
6862 Comp
= Comp
.ashr(ShAmtVal
);
6864 if (Comp
!= RHSV
) { // Comparing against a bit that we know is zero.
6865 bool IsICMP_NE
= ICI
.getPredicate() == ICmpInst::ICMP_NE
;
6866 Constant
*Cst
= ConstantInt::get(Type::getInt1Ty(*Context
), IsICMP_NE
);
6867 return ReplaceInstUsesWith(ICI
, Cst
);
6870 // Otherwise, check to see if the bits shifted out are known to be zero.
6871 // If so, we can compare against the unshifted value:
6872 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
6873 if (LHSI
->hasOneUse() &&
6874 MaskedValueIsZero(LHSI
->getOperand(0),
6875 APInt::getLowBitsSet(Comp
.getBitWidth(), ShAmtVal
))) {
6876 return new ICmpInst(ICI
.getPredicate(), LHSI
->getOperand(0),
6877 ConstantExpr::getShl(RHS
, ShAmt
));
6880 if (LHSI
->hasOneUse()) {
6881 // Otherwise strength reduce the shift into an and.
6882 APInt
Val(APInt::getHighBitsSet(TypeBits
, TypeBits
- ShAmtVal
));
6883 Constant
*Mask
= ConstantInt::get(*Context
, Val
);
6885 Value
*And
= Builder
->CreateAnd(LHSI
->getOperand(0),
6886 Mask
, LHSI
->getName()+".mask");
6887 return new ICmpInst(ICI
.getPredicate(), And
,
6888 ConstantExpr::getShl(RHS
, ShAmt
));
6893 case Instruction::SDiv
:
6894 case Instruction::UDiv
:
6895 // Fold: icmp pred ([us]div X, C1), C2 -> range test
6896 // Fold this div into the comparison, producing a range check.
6897 // Determine, based on the divide type, what the range is being
6898 // checked. If there is an overflow on the low or high side, remember
6899 // it, otherwise compute the range [low, hi) bounding the new value.
6900 // See: InsertRangeTest above for the kinds of replacements possible.
6901 if (ConstantInt
*DivRHS
= dyn_cast
<ConstantInt
>(LHSI
->getOperand(1)))
6902 if (Instruction
*R
= FoldICmpDivCst(ICI
, cast
<BinaryOperator
>(LHSI
),
6907 case Instruction::Add
:
6908 // Fold: icmp pred (add, X, C1), C2
6910 if (!ICI
.isEquality()) {
6911 ConstantInt
*LHSC
= dyn_cast
<ConstantInt
>(LHSI
->getOperand(1));
6913 const APInt
&LHSV
= LHSC
->getValue();
6915 ConstantRange CR
= ICI
.makeConstantRange(ICI
.getPredicate(), RHSV
)
6918 if (ICI
.isSignedPredicate()) {
6919 if (CR
.getLower().isSignBit()) {
6920 return new ICmpInst(ICmpInst::ICMP_SLT
, LHSI
->getOperand(0),
6921 ConstantInt::get(*Context
, CR
.getUpper()));
6922 } else if (CR
.getUpper().isSignBit()) {
6923 return new ICmpInst(ICmpInst::ICMP_SGE
, LHSI
->getOperand(0),
6924 ConstantInt::get(*Context
, CR
.getLower()));
6927 if (CR
.getLower().isMinValue()) {
6928 return new ICmpInst(ICmpInst::ICMP_ULT
, LHSI
->getOperand(0),
6929 ConstantInt::get(*Context
, CR
.getUpper()));
6930 } else if (CR
.getUpper().isMinValue()) {
6931 return new ICmpInst(ICmpInst::ICMP_UGE
, LHSI
->getOperand(0),
6932 ConstantInt::get(*Context
, CR
.getLower()));
6939 // Simplify icmp_eq and icmp_ne instructions with integer constant RHS.
6940 if (ICI
.isEquality()) {
6941 bool isICMP_NE
= ICI
.getPredicate() == ICmpInst::ICMP_NE
;
6943 // If the first operand is (add|sub|and|or|xor|rem) with a constant, and
6944 // the second operand is a constant, simplify a bit.
6945 if (BinaryOperator
*BO
= dyn_cast
<BinaryOperator
>(LHSI
)) {
6946 switch (BO
->getOpcode()) {
6947 case Instruction::SRem
:
6948 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
6949 if (RHSV
== 0 && isa
<ConstantInt
>(BO
->getOperand(1)) &&BO
->hasOneUse()){
6950 const APInt
&V
= cast
<ConstantInt
>(BO
->getOperand(1))->getValue();
6951 if (V
.sgt(APInt(V
.getBitWidth(), 1)) && V
.isPowerOf2()) {
6953 Builder
->CreateURem(BO
->getOperand(0), BO
->getOperand(1),
6955 return new ICmpInst(ICI
.getPredicate(), NewRem
,
6956 Constant::getNullValue(BO
->getType()));
6960 case Instruction::Add
:
6961 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
6962 if (ConstantInt
*BOp1C
= dyn_cast
<ConstantInt
>(BO
->getOperand(1))) {
6963 if (BO
->hasOneUse())
6964 return new ICmpInst(ICI
.getPredicate(), BO
->getOperand(0),
6965 ConstantExpr::getSub(RHS
, BOp1C
));
6966 } else if (RHSV
== 0) {
6967 // Replace ((add A, B) != 0) with (A != -B) if A or B is
6968 // efficiently invertible, or if the add has just this one use.
6969 Value
*BOp0
= BO
->getOperand(0), *BOp1
= BO
->getOperand(1);
6971 if (Value
*NegVal
= dyn_castNegVal(BOp1
))
6972 return new ICmpInst(ICI
.getPredicate(), BOp0
, NegVal
);
6973 else if (Value
*NegVal
= dyn_castNegVal(BOp0
))
6974 return new ICmpInst(ICI
.getPredicate(), NegVal
, BOp1
);
6975 else if (BO
->hasOneUse()) {
6976 Value
*Neg
= Builder
->CreateNeg(BOp1
);
6978 return new ICmpInst(ICI
.getPredicate(), BOp0
, Neg
);
6982 case Instruction::Xor
:
6983 // For the xor case, we can xor two constants together, eliminating
6984 // the explicit xor.
6985 if (Constant
*BOC
= dyn_cast
<Constant
>(BO
->getOperand(1)))
6986 return new ICmpInst(ICI
.getPredicate(), BO
->getOperand(0),
6987 ConstantExpr::getXor(RHS
, BOC
));
6990 case Instruction::Sub
:
6991 // Replace (([sub|xor] A, B) != 0) with (A != B)
6993 return new ICmpInst(ICI
.getPredicate(), BO
->getOperand(0),
6997 case Instruction::Or
:
6998 // If bits are being or'd in that are not present in the constant we
6999 // are comparing against, then the comparison could never succeed!
7000 if (Constant
*BOC
= dyn_cast
<Constant
>(BO
->getOperand(1))) {
7001 Constant
*NotCI
= ConstantExpr::getNot(RHS
);
7002 if (!ConstantExpr::getAnd(BOC
, NotCI
)->isNullValue())
7003 return ReplaceInstUsesWith(ICI
,
7004 ConstantInt::get(Type::getInt1Ty(*Context
),
7009 case Instruction::And
:
7010 if (ConstantInt
*BOC
= dyn_cast
<ConstantInt
>(BO
->getOperand(1))) {
7011 // If bits are being compared against that are and'd out, then the
7012 // comparison can never succeed!
7013 if ((RHSV
& ~BOC
->getValue()) != 0)
7014 return ReplaceInstUsesWith(ICI
,
7015 ConstantInt::get(Type::getInt1Ty(*Context
),
7018 // If we have ((X & C) == C), turn it into ((X & C) != 0).
7019 if (RHS
== BOC
&& RHSV
.isPowerOf2())
7020 return new ICmpInst(isICMP_NE
? ICmpInst::ICMP_EQ
:
7021 ICmpInst::ICMP_NE
, LHSI
,
7022 Constant::getNullValue(RHS
->getType()));
7024 // Replace (and X, (1 << size(X)-1) != 0) with x s< 0
7025 if (BOC
->getValue().isSignBit()) {
7026 Value
*X
= BO
->getOperand(0);
7027 Constant
*Zero
= Constant::getNullValue(X
->getType());
7028 ICmpInst::Predicate pred
= isICMP_NE
?
7029 ICmpInst::ICMP_SLT
: ICmpInst::ICMP_SGE
;
7030 return new ICmpInst(pred
, X
, Zero
);
7033 // ((X & ~7) == 0) --> X < 8
7034 if (RHSV
== 0 && isHighOnes(BOC
)) {
7035 Value
*X
= BO
->getOperand(0);
7036 Constant
*NegX
= ConstantExpr::getNeg(BOC
);
7037 ICmpInst::Predicate pred
= isICMP_NE
?
7038 ICmpInst::ICMP_UGE
: ICmpInst::ICMP_ULT
;
7039 return new ICmpInst(pred
, X
, NegX
);
7044 } else if (IntrinsicInst
*II
= dyn_cast
<IntrinsicInst
>(LHSI
)) {
7045 // Handle icmp {eq|ne} <intrinsic>, intcst.
7046 if (II
->getIntrinsicID() == Intrinsic::bswap
) {
7048 ICI
.setOperand(0, II
->getOperand(1));
7049 ICI
.setOperand(1, ConstantInt::get(*Context
, RHSV
.byteSwap()));
7057 /// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst).
7058 /// We only handle extending casts so far.
7060 Instruction
*InstCombiner::visitICmpInstWithCastAndCast(ICmpInst
&ICI
) {
7061 const CastInst
*LHSCI
= cast
<CastInst
>(ICI
.getOperand(0));
7062 Value
*LHSCIOp
= LHSCI
->getOperand(0);
7063 const Type
*SrcTy
= LHSCIOp
->getType();
7064 const Type
*DestTy
= LHSCI
->getType();
7067 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
7068 // integer type is the same size as the pointer type.
7069 if (TD
&& LHSCI
->getOpcode() == Instruction::PtrToInt
&&
7070 TD
->getPointerSizeInBits() ==
7071 cast
<IntegerType
>(DestTy
)->getBitWidth()) {
7073 if (Constant
*RHSC
= dyn_cast
<Constant
>(ICI
.getOperand(1))) {
7074 RHSOp
= ConstantExpr::getIntToPtr(RHSC
, SrcTy
);
7075 } else if (PtrToIntInst
*RHSC
= dyn_cast
<PtrToIntInst
>(ICI
.getOperand(1))) {
7076 RHSOp
= RHSC
->getOperand(0);
7077 // If the pointer types don't match, insert a bitcast.
7078 if (LHSCIOp
->getType() != RHSOp
->getType())
7079 RHSOp
= Builder
->CreateBitCast(RHSOp
, LHSCIOp
->getType());
7083 return new ICmpInst(ICI
.getPredicate(), LHSCIOp
, RHSOp
);
7086 // The code below only handles extension cast instructions, so far.
7088 if (LHSCI
->getOpcode() != Instruction::ZExt
&&
7089 LHSCI
->getOpcode() != Instruction::SExt
)
7092 bool isSignedExt
= LHSCI
->getOpcode() == Instruction::SExt
;
7093 bool isSignedCmp
= ICI
.isSignedPredicate();
7095 if (CastInst
*CI
= dyn_cast
<CastInst
>(ICI
.getOperand(1))) {
7096 // Not an extension from the same type?
7097 RHSCIOp
= CI
->getOperand(0);
7098 if (RHSCIOp
->getType() != LHSCIOp
->getType())
7101 // If the signedness of the two casts doesn't agree (i.e. one is a sext
7102 // and the other is a zext), then we can't handle this.
7103 if (CI
->getOpcode() != LHSCI
->getOpcode())
7106 // Deal with equality cases early.
7107 if (ICI
.isEquality())
7108 return new ICmpInst(ICI
.getPredicate(), LHSCIOp
, RHSCIOp
);
7110 // A signed comparison of sign extended values simplifies into a
7111 // signed comparison.
7112 if (isSignedCmp
&& isSignedExt
)
7113 return new ICmpInst(ICI
.getPredicate(), LHSCIOp
, RHSCIOp
);
7115 // The other three cases all fold into an unsigned comparison.
7116 return new ICmpInst(ICI
.getUnsignedPredicate(), LHSCIOp
, RHSCIOp
);
7119 // If we aren't dealing with a constant on the RHS, exit early
7120 ConstantInt
*CI
= dyn_cast
<ConstantInt
>(ICI
.getOperand(1));
7124 // Compute the constant that would happen if we truncated to SrcTy then
7125 // reextended to DestTy.
7126 Constant
*Res1
= ConstantExpr::getTrunc(CI
, SrcTy
);
7127 Constant
*Res2
= ConstantExpr::getCast(LHSCI
->getOpcode(),
7130 // If the re-extended constant didn't change...
7132 // Make sure that sign of the Cmp and the sign of the Cast are the same.
7133 // For example, we might have:
7134 // %A = sext i16 %X to i32
7135 // %B = icmp ugt i32 %A, 1330
7136 // It is incorrect to transform this into
7137 // %B = icmp ugt i16 %X, 1330
7138 // because %A may have negative value.
7140 // However, we allow this when the compare is EQ/NE, because they are
7142 if (isSignedExt
== isSignedCmp
|| ICI
.isEquality())
7143 return new ICmpInst(ICI
.getPredicate(), LHSCIOp
, Res1
);
7147 // The re-extended constant changed so the constant cannot be represented
7148 // in the shorter type. Consequently, we cannot emit a simple comparison.
7150 // First, handle some easy cases. We know the result cannot be equal at this
7151 // point so handle the ICI.isEquality() cases
7152 if (ICI
.getPredicate() == ICmpInst::ICMP_EQ
)
7153 return ReplaceInstUsesWith(ICI
, ConstantInt::getFalse(*Context
));
7154 if (ICI
.getPredicate() == ICmpInst::ICMP_NE
)
7155 return ReplaceInstUsesWith(ICI
, ConstantInt::getTrue(*Context
));
7157 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases
7158 // should have been folded away previously and not enter in here.
7161 // We're performing a signed comparison.
7162 if (cast
<ConstantInt
>(CI
)->getValue().isNegative())
7163 Result
= ConstantInt::getFalse(*Context
); // X < (small) --> false
7165 Result
= ConstantInt::getTrue(*Context
); // X < (large) --> true
7167 // We're performing an unsigned comparison.
7169 // We're performing an unsigned comp with a sign extended value.
7170 // This is true if the input is >= 0. [aka >s -1]
7171 Constant
*NegOne
= Constant::getAllOnesValue(SrcTy
);
7172 Result
= Builder
->CreateICmpSGT(LHSCIOp
, NegOne
, ICI
.getName());
7174 // Unsigned extend & unsigned compare -> always true.
7175 Result
= ConstantInt::getTrue(*Context
);
7179 // Finally, return the value computed.
7180 if (ICI
.getPredicate() == ICmpInst::ICMP_ULT
||
7181 ICI
.getPredicate() == ICmpInst::ICMP_SLT
)
7182 return ReplaceInstUsesWith(ICI
, Result
);
7184 assert((ICI
.getPredicate()==ICmpInst::ICMP_UGT
||
7185 ICI
.getPredicate()==ICmpInst::ICMP_SGT
) &&
7186 "ICmp should be folded!");
7187 if (Constant
*CI
= dyn_cast
<Constant
>(Result
))
7188 return ReplaceInstUsesWith(ICI
, ConstantExpr::getNot(CI
));
7189 return BinaryOperator::CreateNot(Result
);
7192 Instruction
*InstCombiner::visitShl(BinaryOperator
&I
) {
7193 return commonShiftTransforms(I
);
7196 Instruction
*InstCombiner::visitLShr(BinaryOperator
&I
) {
7197 return commonShiftTransforms(I
);
7200 Instruction
*InstCombiner::visitAShr(BinaryOperator
&I
) {
7201 if (Instruction
*R
= commonShiftTransforms(I
))
7204 Value
*Op0
= I
.getOperand(0);
7206 // ashr int -1, X = -1 (for any arithmetic shift rights of ~0)
7207 if (ConstantInt
*CSI
= dyn_cast
<ConstantInt
>(Op0
))
7208 if (CSI
->isAllOnesValue())
7209 return ReplaceInstUsesWith(I
, CSI
);
7211 // See if we can turn a signed shr into an unsigned shr.
7212 if (MaskedValueIsZero(Op0
,
7213 APInt::getSignBit(I
.getType()->getScalarSizeInBits())))
7214 return BinaryOperator::CreateLShr(Op0
, I
.getOperand(1));
7216 // Arithmetic shifting an all-sign-bit value is a no-op.
7217 unsigned NumSignBits
= ComputeNumSignBits(Op0
);
7218 if (NumSignBits
== Op0
->getType()->getScalarSizeInBits())
7219 return ReplaceInstUsesWith(I
, Op0
);
7224 Instruction
*InstCombiner::commonShiftTransforms(BinaryOperator
&I
) {
7225 assert(I
.getOperand(1)->getType() == I
.getOperand(0)->getType());
7226 Value
*Op0
= I
.getOperand(0), *Op1
= I
.getOperand(1);
7228 // shl X, 0 == X and shr X, 0 == X
7229 // shl 0, X == 0 and shr 0, X == 0
7230 if (Op1
== Constant::getNullValue(Op1
->getType()) ||
7231 Op0
== Constant::getNullValue(Op0
->getType()))
7232 return ReplaceInstUsesWith(I
, Op0
);
7234 if (isa
<UndefValue
>(Op0
)) {
7235 if (I
.getOpcode() == Instruction::AShr
) // undef >>s X -> undef
7236 return ReplaceInstUsesWith(I
, Op0
);
7237 else // undef << X -> 0, undef >>u X -> 0
7238 return ReplaceInstUsesWith(I
, Constant::getNullValue(I
.getType()));
7240 if (isa
<UndefValue
>(Op1
)) {
7241 if (I
.getOpcode() == Instruction::AShr
) // X >>s undef -> X
7242 return ReplaceInstUsesWith(I
, Op0
);
7243 else // X << undef, X >>u undef -> 0
7244 return ReplaceInstUsesWith(I
, Constant::getNullValue(I
.getType()));
7247 // See if we can fold away this shift.
7248 if (SimplifyDemandedInstructionBits(I
))
7251 // Try to fold constant and into select arguments.
7252 if (isa
<Constant
>(Op0
))
7253 if (SelectInst
*SI
= dyn_cast
<SelectInst
>(Op1
))
7254 if (Instruction
*R
= FoldOpIntoSelect(I
, SI
, this))
7257 if (ConstantInt
*CUI
= dyn_cast
<ConstantInt
>(Op1
))
7258 if (Instruction
*Res
= FoldShiftByConstant(Op0
, CUI
, I
))
7263 Instruction
*InstCombiner::FoldShiftByConstant(Value
*Op0
, ConstantInt
*Op1
,
7264 BinaryOperator
&I
) {
7265 bool isLeftShift
= I
.getOpcode() == Instruction::Shl
;
7267 // See if we can simplify any instructions used by the instruction whose sole
7268 // purpose is to compute bits we don't care about.
7269 uint32_t TypeBits
= Op0
->getType()->getScalarSizeInBits();
7271 // shl i32 X, 32 = 0 and srl i8 Y, 9 = 0, ... just don't eliminate
7274 if (Op1
->uge(TypeBits
)) {
7275 if (I
.getOpcode() != Instruction::AShr
)
7276 return ReplaceInstUsesWith(I
, Constant::getNullValue(Op0
->getType()));
7278 I
.setOperand(1, ConstantInt::get(I
.getType(), TypeBits
-1));
7283 // ((X*C1) << C2) == (X * (C1 << C2))
7284 if (BinaryOperator
*BO
= dyn_cast
<BinaryOperator
>(Op0
))
7285 if (BO
->getOpcode() == Instruction::Mul
&& isLeftShift
)
7286 if (Constant
*BOOp
= dyn_cast
<Constant
>(BO
->getOperand(1)))
7287 return BinaryOperator::CreateMul(BO
->getOperand(0),
7288 ConstantExpr::getShl(BOOp
, Op1
));
7290 // Try to fold constant and into select arguments.
7291 if (SelectInst
*SI
= dyn_cast
<SelectInst
>(Op0
))
7292 if (Instruction
*R
= FoldOpIntoSelect(I
, SI
, this))
7294 if (isa
<PHINode
>(Op0
))
7295 if (Instruction
*NV
= FoldOpIntoPhi(I
))
7298 // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
7299 if (TruncInst
*TI
= dyn_cast
<TruncInst
>(Op0
)) {
7300 Instruction
*TrOp
= dyn_cast
<Instruction
>(TI
->getOperand(0));
7301 // If 'shift2' is an ashr, we would have to get the sign bit into a funny
7302 // place. Don't try to do this transformation in this case. Also, we
7303 // require that the input operand is a shift-by-constant so that we have
7304 // confidence that the shifts will get folded together. We could do this
7305 // xform in more cases, but it is unlikely to be profitable.
7306 if (TrOp
&& I
.isLogicalShift() && TrOp
->isShift() &&
7307 isa
<ConstantInt
>(TrOp
->getOperand(1))) {
7308 // Okay, we'll do this xform. Make the shift of shift.
7309 Constant
*ShAmt
= ConstantExpr::getZExt(Op1
, TrOp
->getType());
7310 // (shift2 (shift1 & 0x00FF), c2)
7311 Value
*NSh
= Builder
->CreateBinOp(I
.getOpcode(), TrOp
, ShAmt
,I
.getName());
7313 // For logical shifts, the truncation has the effect of making the high
7314 // part of the register be zeros. Emulate this by inserting an AND to
7315 // clear the top bits as needed. This 'and' will usually be zapped by
7316 // other xforms later if dead.
7317 unsigned SrcSize
= TrOp
->getType()->getScalarSizeInBits();
7318 unsigned DstSize
= TI
->getType()->getScalarSizeInBits();
7319 APInt
MaskV(APInt::getLowBitsSet(SrcSize
, DstSize
));
7321 // The mask we constructed says what the trunc would do if occurring
7322 // between the shifts. We want to know the effect *after* the second
7323 // shift. We know that it is a logical shift by a constant, so adjust the
7324 // mask as appropriate.
7325 if (I
.getOpcode() == Instruction::Shl
)
7326 MaskV
<<= Op1
->getZExtValue();
7328 assert(I
.getOpcode() == Instruction::LShr
&& "Unknown logical shift");
7329 MaskV
= MaskV
.lshr(Op1
->getZExtValue());
7333 Value
*And
= Builder
->CreateAnd(NSh
, ConstantInt::get(*Context
, MaskV
),
7336 // Return the value truncated to the interesting size.
7337 return new TruncInst(And
, I
.getType());
7341 if (Op0
->hasOneUse()) {
7342 if (BinaryOperator
*Op0BO
= dyn_cast
<BinaryOperator
>(Op0
)) {
7343 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
7346 switch (Op0BO
->getOpcode()) {
7348 case Instruction::Add
:
7349 case Instruction::And
:
7350 case Instruction::Or
:
7351 case Instruction::Xor
: {
7352 // These operators commute.
7353 // Turn (Y + (X >> C)) << C -> (X + (Y << C)) & (~0 << C)
7354 if (isLeftShift
&& Op0BO
->getOperand(1)->hasOneUse() &&
7355 match(Op0BO
->getOperand(1), m_Shr(m_Value(V1
),
7356 m_Specific(Op1
)))) {
7357 Value
*YS
= // (Y << C)
7358 Builder
->CreateShl(Op0BO
->getOperand(0), Op1
, Op0BO
->getName());
7360 Value
*X
= Builder
->CreateBinOp(Op0BO
->getOpcode(), YS
, V1
,
7361 Op0BO
->getOperand(1)->getName());
7362 uint32_t Op1Val
= Op1
->getLimitedValue(TypeBits
);
7363 return BinaryOperator::CreateAnd(X
, ConstantInt::get(*Context
,
7364 APInt::getHighBitsSet(TypeBits
, TypeBits
-Op1Val
)));
7367 // Turn (Y + ((X >> C) & CC)) << C -> ((X & (CC << C)) + (Y << C))
7368 Value
*Op0BOOp1
= Op0BO
->getOperand(1);
7369 if (isLeftShift
&& Op0BOOp1
->hasOneUse() &&
7371 m_And(m_Shr(m_Value(V1
), m_Specific(Op1
)),
7372 m_ConstantInt(CC
))) &&
7373 cast
<BinaryOperator
>(Op0BOOp1
)->getOperand(0)->hasOneUse()) {
7374 Value
*YS
= // (Y << C)
7375 Builder
->CreateShl(Op0BO
->getOperand(0), Op1
,
7378 Value
*XM
= Builder
->CreateAnd(V1
, ConstantExpr::getShl(CC
, Op1
),
7379 V1
->getName()+".mask");
7380 return BinaryOperator::Create(Op0BO
->getOpcode(), YS
, XM
);
7385 case Instruction::Sub
: {
7386 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
7387 if (isLeftShift
&& Op0BO
->getOperand(0)->hasOneUse() &&
7388 match(Op0BO
->getOperand(0), m_Shr(m_Value(V1
),
7389 m_Specific(Op1
)))) {
7390 Value
*YS
= // (Y << C)
7391 Builder
->CreateShl(Op0BO
->getOperand(1), Op1
, Op0BO
->getName());
7393 Value
*X
= Builder
->CreateBinOp(Op0BO
->getOpcode(), V1
, YS
,
7394 Op0BO
->getOperand(0)->getName());
7395 uint32_t Op1Val
= Op1
->getLimitedValue(TypeBits
);
7396 return BinaryOperator::CreateAnd(X
, ConstantInt::get(*Context
,
7397 APInt::getHighBitsSet(TypeBits
, TypeBits
-Op1Val
)));
7400 // Turn (((X >> C)&CC) + Y) << C -> (X + (Y << C)) & (CC << C)
7401 if (isLeftShift
&& Op0BO
->getOperand(0)->hasOneUse() &&
7402 match(Op0BO
->getOperand(0),
7403 m_And(m_Shr(m_Value(V1
), m_Value(V2
)),
7404 m_ConstantInt(CC
))) && V2
== Op1
&&
7405 cast
<BinaryOperator
>(Op0BO
->getOperand(0))
7406 ->getOperand(0)->hasOneUse()) {
7407 Value
*YS
= // (Y << C)
7408 Builder
->CreateShl(Op0BO
->getOperand(1), Op1
, Op0BO
->getName());
7410 Value
*XM
= Builder
->CreateAnd(V1
, ConstantExpr::getShl(CC
, Op1
),
7411 V1
->getName()+".mask");
7413 return BinaryOperator::Create(Op0BO
->getOpcode(), XM
, YS
);
7421 // If the operand is an bitwise operator with a constant RHS, and the
7422 // shift is the only use, we can pull it out of the shift.
7423 if (ConstantInt
*Op0C
= dyn_cast
<ConstantInt
>(Op0BO
->getOperand(1))) {
7424 bool isValid
= true; // Valid only for And, Or, Xor
7425 bool highBitSet
= false; // Transform if high bit of constant set?
7427 switch (Op0BO
->getOpcode()) {
7428 default: isValid
= false; break; // Do not perform transform!
7429 case Instruction::Add
:
7430 isValid
= isLeftShift
;
7432 case Instruction::Or
:
7433 case Instruction::Xor
:
7436 case Instruction::And
:
7441 // If this is a signed shift right, and the high bit is modified
7442 // by the logical operation, do not perform the transformation.
7443 // The highBitSet boolean indicates the value of the high bit of
7444 // the constant which would cause it to be modified for this
7447 if (isValid
&& I
.getOpcode() == Instruction::AShr
)
7448 isValid
= Op0C
->getValue()[TypeBits
-1] == highBitSet
;
7451 Constant
*NewRHS
= ConstantExpr::get(I
.getOpcode(), Op0C
, Op1
);
7454 Builder
->CreateBinOp(I
.getOpcode(), Op0BO
->getOperand(0), Op1
);
7455 NewShift
->takeName(Op0BO
);
7457 return BinaryOperator::Create(Op0BO
->getOpcode(), NewShift
,
7464 // Find out if this is a shift of a shift by a constant.
7465 BinaryOperator
*ShiftOp
= dyn_cast
<BinaryOperator
>(Op0
);
7466 if (ShiftOp
&& !ShiftOp
->isShift())
7469 if (ShiftOp
&& isa
<ConstantInt
>(ShiftOp
->getOperand(1))) {
7470 ConstantInt
*ShiftAmt1C
= cast
<ConstantInt
>(ShiftOp
->getOperand(1));
7471 uint32_t ShiftAmt1
= ShiftAmt1C
->getLimitedValue(TypeBits
);
7472 uint32_t ShiftAmt2
= Op1
->getLimitedValue(TypeBits
);
7473 assert(ShiftAmt2
!= 0 && "Should have been simplified earlier");
7474 if (ShiftAmt1
== 0) return 0; // Will be simplified in the future.
7475 Value
*X
= ShiftOp
->getOperand(0);
7477 uint32_t AmtSum
= ShiftAmt1
+ShiftAmt2
; // Fold into one big shift.
7479 const IntegerType
*Ty
= cast
<IntegerType
>(I
.getType());
7481 // Check for (X << c1) << c2 and (X >> c1) >> c2
7482 if (I
.getOpcode() == ShiftOp
->getOpcode()) {
7483 // If this is oversized composite shift, then unsigned shifts get 0, ashr
7485 if (AmtSum
>= TypeBits
) {
7486 if (I
.getOpcode() != Instruction::AShr
)
7487 return ReplaceInstUsesWith(I
, Constant::getNullValue(I
.getType()));
7488 AmtSum
= TypeBits
-1; // Saturate to 31 for i32 ashr.
7491 return BinaryOperator::Create(I
.getOpcode(), X
,
7492 ConstantInt::get(Ty
, AmtSum
));
7495 if (ShiftOp
->getOpcode() == Instruction::LShr
&&
7496 I
.getOpcode() == Instruction::AShr
) {
7497 if (AmtSum
>= TypeBits
)
7498 return ReplaceInstUsesWith(I
, Constant::getNullValue(I
.getType()));
7500 // ((X >>u C1) >>s C2) -> (X >>u (C1+C2)) since C1 != 0.
7501 return BinaryOperator::CreateLShr(X
, ConstantInt::get(Ty
, AmtSum
));
7504 if (ShiftOp
->getOpcode() == Instruction::AShr
&&
7505 I
.getOpcode() == Instruction::LShr
) {
7506 // ((X >>s C1) >>u C2) -> ((X >>s (C1+C2)) & mask) since C1 != 0.
7507 if (AmtSum
>= TypeBits
)
7508 AmtSum
= TypeBits
-1;
7510 Value
*Shift
= Builder
->CreateAShr(X
, ConstantInt::get(Ty
, AmtSum
));
7512 APInt
Mask(APInt::getLowBitsSet(TypeBits
, TypeBits
- ShiftAmt2
));
7513 return BinaryOperator::CreateAnd(Shift
, ConstantInt::get(*Context
, Mask
));
7516 // Okay, if we get here, one shift must be left, and the other shift must be
7517 // right. See if the amounts are equal.
7518 if (ShiftAmt1
== ShiftAmt2
) {
7519 // If we have ((X >>? C) << C), turn this into X & (-1 << C).
7520 if (I
.getOpcode() == Instruction::Shl
) {
7521 APInt
Mask(APInt::getHighBitsSet(TypeBits
, TypeBits
- ShiftAmt1
));
7522 return BinaryOperator::CreateAnd(X
, ConstantInt::get(*Context
, Mask
));
7524 // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
7525 if (I
.getOpcode() == Instruction::LShr
) {
7526 APInt
Mask(APInt::getLowBitsSet(TypeBits
, TypeBits
- ShiftAmt1
));
7527 return BinaryOperator::CreateAnd(X
, ConstantInt::get(*Context
, Mask
));
7529 // We can simplify ((X << C) >>s C) into a trunc + sext.
7530 // NOTE: we could do this for any C, but that would make 'unusual' integer
7531 // types. For now, just stick to ones well-supported by the code
7533 const Type
*SExtType
= 0;
7534 switch (Ty
->getBitWidth() - ShiftAmt1
) {
7541 SExtType
= IntegerType::get(*Context
, Ty
->getBitWidth() - ShiftAmt1
);
7546 return new SExtInst(Builder
->CreateTrunc(X
, SExtType
, "sext"), Ty
);
7547 // Otherwise, we can't handle it yet.
7548 } else if (ShiftAmt1
< ShiftAmt2
) {
7549 uint32_t ShiftDiff
= ShiftAmt2
-ShiftAmt1
;
7551 // (X >>? C1) << C2 --> X << (C2-C1) & (-1 << C2)
7552 if (I
.getOpcode() == Instruction::Shl
) {
7553 assert(ShiftOp
->getOpcode() == Instruction::LShr
||
7554 ShiftOp
->getOpcode() == Instruction::AShr
);
7555 Value
*Shift
= Builder
->CreateShl(X
, ConstantInt::get(Ty
, ShiftDiff
));
7557 APInt
Mask(APInt::getHighBitsSet(TypeBits
, TypeBits
- ShiftAmt2
));
7558 return BinaryOperator::CreateAnd(Shift
,
7559 ConstantInt::get(*Context
, Mask
));
7562 // (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2)
7563 if (I
.getOpcode() == Instruction::LShr
) {
7564 assert(ShiftOp
->getOpcode() == Instruction::Shl
);
7565 Value
*Shift
= Builder
->CreateLShr(X
, ConstantInt::get(Ty
, ShiftDiff
));
7567 APInt
Mask(APInt::getLowBitsSet(TypeBits
, TypeBits
- ShiftAmt2
));
7568 return BinaryOperator::CreateAnd(Shift
,
7569 ConstantInt::get(*Context
, Mask
));
7572 // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in.
7574 assert(ShiftAmt2
< ShiftAmt1
);
7575 uint32_t ShiftDiff
= ShiftAmt1
-ShiftAmt2
;
7577 // (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)
7578 if (I
.getOpcode() == Instruction::Shl
) {
7579 assert(ShiftOp
->getOpcode() == Instruction::LShr
||
7580 ShiftOp
->getOpcode() == Instruction::AShr
);
7581 Value
*Shift
= Builder
->CreateBinOp(ShiftOp
->getOpcode(), X
,
7582 ConstantInt::get(Ty
, ShiftDiff
));
7584 APInt
Mask(APInt::getHighBitsSet(TypeBits
, TypeBits
- ShiftAmt2
));
7585 return BinaryOperator::CreateAnd(Shift
,
7586 ConstantInt::get(*Context
, Mask
));
7589 // (X << C1) >>u C2 --> X << (C1-C2) & (-1 >> C2)
7590 if (I
.getOpcode() == Instruction::LShr
) {
7591 assert(ShiftOp
->getOpcode() == Instruction::Shl
);
7592 Value
*Shift
= Builder
->CreateShl(X
, ConstantInt::get(Ty
, ShiftDiff
));
7594 APInt
Mask(APInt::getLowBitsSet(TypeBits
, TypeBits
- ShiftAmt2
));
7595 return BinaryOperator::CreateAnd(Shift
,
7596 ConstantInt::get(*Context
, Mask
));
7599 // We can't handle (X << C1) >>a C2, it shifts arbitrary bits in.
7606 /// DecomposeSimpleLinearExpr - Analyze 'Val', seeing if it is a simple linear
7607 /// expression. If so, decompose it, returning some value X, such that Val is
7610 static Value
*DecomposeSimpleLinearExpr(Value
*Val
, unsigned &Scale
,
7611 int &Offset
, LLVMContext
*Context
) {
7612 assert(Val
->getType() == Type::getInt32Ty(*Context
) &&
7613 "Unexpected allocation size type!");
7614 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Val
)) {
7615 Offset
= CI
->getZExtValue();
7617 return ConstantInt::get(Type::getInt32Ty(*Context
), 0);
7618 } else if (BinaryOperator
*I
= dyn_cast
<BinaryOperator
>(Val
)) {
7619 if (ConstantInt
*RHS
= dyn_cast
<ConstantInt
>(I
->getOperand(1))) {
7620 if (I
->getOpcode() == Instruction::Shl
) {
7621 // This is a value scaled by '1 << the shift amt'.
7622 Scale
= 1U << RHS
->getZExtValue();
7624 return I
->getOperand(0);
7625 } else if (I
->getOpcode() == Instruction::Mul
) {
7626 // This value is scaled by 'RHS'.
7627 Scale
= RHS
->getZExtValue();
7629 return I
->getOperand(0);
7630 } else if (I
->getOpcode() == Instruction::Add
) {
7631 // We have X+C. Check to see if we really have (X*C2)+C1,
7632 // where C1 is divisible by C2.
7635 DecomposeSimpleLinearExpr(I
->getOperand(0), SubScale
,
7637 Offset
+= RHS
->getZExtValue();
7644 // Otherwise, we can't look past this.
/// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
/// try to eliminate the cast by moving the type information into the alloc.
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
                                                   AllocationInst &AI) {
  const PointerType *PTy = cast<PointerType>(CI.getType());

  BuilderTy AllocaBuilder(*Builder);
  AllocaBuilder.SetInsertPoint(AI.getParent(), &AI);

  // Remove any uses of AI that are dead.
  assert(!CI.use_empty() && "Dead instructions should be removed earlier!");

  for (Value::use_iterator UI = AI.use_begin(), E = AI.use_end(); UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    if (isInstructionTriviallyDead(User)) {
      while (UI != E && *UI == User)
        ++UI; // If this instruction uses AI more than once, don't break UI.

      ++NumDeadInst;
      DEBUG(errs() << "IC: DCE: " << *User << '\n');
      EraseInstFromFunction(*User);
    }
  }

  // This requires TargetData to get the alloca alignment and size information.
  if (!TD) return 0;

  // Get the type really allocated and the type casted to.
  const Type *AllocElTy = AI.getAllocatedType();
  const Type *CastElTy = PTy->getElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;

  unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
  unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return 0;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation.  If we keep it the
  // same, we open the door to infinite loops of various kinds.  (A reference
  // from a dbg.declare doesn't count as a use for this purpose.)
  if (!AI.hasOneUse() && !hasOneUsePlusDeclare(&AI) &&
      CastElTyAlign == AllocElTyAlign) return 0;

  uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);
  uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);
  if (CastElTySize == 0 || AllocElTySize == 0) return 0;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  int ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
    DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale,
                              ArrayOffset, Context);

  // If we can now satisfy the modulus, by using a non-1 scale, we really can
  // do the xform.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset) % CastElTySize != 0) return 0;

  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = 0;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(Type::getInt32Ty(*Context), Scale);
    // Insert before the alloca, not before the cast.
    Amt = AllocaBuilder.CreateMul(Amt, NumElements, "tmp");
  }

  if (int Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(Type::getInt32Ty(*Context), Offset, true);
    Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
  }

  AllocationInst *New;
  if (isa<MallocInst>(AI))
    New = AllocaBuilder.CreateMalloc(CastElTy, Amt);
  else
    New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
  New->setAlignment(AI.getAlignment());
  New->takeName(&AI);

  // If the allocation has one real use plus a dbg.declare, just remove the
  // declare.
  if (DbgDeclareInst *DI = hasOneUsePlusDeclare(&AI)) {
    EraseInstFromFunction(*DI);
  }
  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast.  This will also hack on CI, but it
  // will die soon.
  else if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
    Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
    AI.replaceAllUsesWith(NewCast);
  }
  return ReplaceInstUsesWith(CI, New);
}
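
// Worked example (illustrative; hypothetical IR, assuming i32 is 4 bytes and
// at least as aligned as [8 x i8] on the target):
//   %buf = alloca [8 x i8]
//   %cst = bitcast [8 x i8]* %buf to i32*
// is promoted to
//   %buf = alloca i32, i32 2
// and the bitcast's users then use the new allocation directly.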
/// CanEvaluateInDifferentType - Return true if we can take the specified value
/// and return it as type Ty without inserting any new casts and without
/// changing the computed value.  This is used by code that tries to decide
/// whether promoting or shrinking integer operations to wider or smaller types
/// will allow us to eliminate a truncate or extend.
///
/// This is a truncation operation if Ty is smaller than V->getType(), or an
/// extension operation if Ty is larger.
///
/// If CastOpc is a truncation, then Ty will be a type smaller than V.  We
/// should return true if trunc(V) can be computed by computing V in the smaller
/// type.  If V is an instruction, then trunc(inst(x,y)) can be computed as
/// inst(trunc(x),trunc(y)), which only makes sense if x and y can be
/// efficiently truncated.
///
/// If CastOpc is a sext or zext, we are asking if the low bits of the value can
/// be computed in a larger type, which is then and'd or sext_in_reg'd to get
/// the final result.
7769 bool InstCombiner::CanEvaluateInDifferentType(Value
*V
, const Type
*Ty
,
7771 int &NumCastsRemoved
){
7772 // We can always evaluate constants in another type.
7773 if (isa
<Constant
>(V
))
7776 Instruction
*I
= dyn_cast
<Instruction
>(V
);
7777 if (!I
) return false;
7779 const Type
*OrigTy
= V
->getType();
7781 // If this is an extension or truncate, we can often eliminate it.
7782 if (isa
<TruncInst
>(I
) || isa
<ZExtInst
>(I
) || isa
<SExtInst
>(I
)) {
7783 // If this is a cast from the destination type, we can trivially eliminate
7784 // it, and this will remove a cast overall.
7785 if (I
->getOperand(0)->getType() == Ty
) {
7786 // If the first operand is itself a cast, and is eliminable, do not count
7787 // this as an eliminable cast. We would prefer to eliminate those two
7789 if (!isa
<CastInst
>(I
->getOperand(0)) && I
->hasOneUse())
7795 // We can't extend or shrink something that has multiple uses: doing so would
7796 // require duplicating the instruction in general, which isn't profitable.
7797 if (!I
->hasOneUse()) return false;
7799 unsigned Opc
= I
->getOpcode();
7801 case Instruction::Add
:
7802 case Instruction::Sub
:
7803 case Instruction::Mul
:
7804 case Instruction::And
:
7805 case Instruction::Or
:
7806 case Instruction::Xor
:
7807 // These operators can all arbitrarily be extended or truncated.
7808 return CanEvaluateInDifferentType(I
->getOperand(0), Ty
, CastOpc
,
7810 CanEvaluateInDifferentType(I
->getOperand(1), Ty
, CastOpc
,
7813 case Instruction::UDiv
:
7814 case Instruction::URem
: {
7815 // UDiv and URem can be truncated if all the truncated bits are zero.
7816 uint32_t OrigBitWidth
= OrigTy
->getScalarSizeInBits();
7817 uint32_t BitWidth
= Ty
->getScalarSizeInBits();
7818 if (BitWidth
< OrigBitWidth
) {
7819 APInt Mask
= APInt::getHighBitsSet(OrigBitWidth
, OrigBitWidth
-BitWidth
);
7820 if (MaskedValueIsZero(I
->getOperand(0), Mask
) &&
7821 MaskedValueIsZero(I
->getOperand(1), Mask
)) {
7822 return CanEvaluateInDifferentType(I
->getOperand(0), Ty
, CastOpc
,
7824 CanEvaluateInDifferentType(I
->getOperand(1), Ty
, CastOpc
,
7830 case Instruction::Shl
:
7831 // If we are truncating the result of this SHL, and if it's a shift of a
7832 // constant amount, we can always perform a SHL in a smaller type.
7833 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(I
->getOperand(1))) {
7834 uint32_t BitWidth
= Ty
->getScalarSizeInBits();
7835 if (BitWidth
< OrigTy
->getScalarSizeInBits() &&
7836 CI
->getLimitedValue(BitWidth
) < BitWidth
)
7837 return CanEvaluateInDifferentType(I
->getOperand(0), Ty
, CastOpc
,
7841 case Instruction::LShr
:
7842 // If this is a truncate of a logical shr, we can truncate it to a smaller
7843 // lshr iff we know that the bits we would otherwise be shifting in are
7845 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(I
->getOperand(1))) {
7846 uint32_t OrigBitWidth
= OrigTy
->getScalarSizeInBits();
7847 uint32_t BitWidth
= Ty
->getScalarSizeInBits();
7848 if (BitWidth
< OrigBitWidth
&&
7849 MaskedValueIsZero(I
->getOperand(0),
7850 APInt::getHighBitsSet(OrigBitWidth
, OrigBitWidth
-BitWidth
)) &&
7851 CI
->getLimitedValue(BitWidth
) < BitWidth
) {
7852 return CanEvaluateInDifferentType(I
->getOperand(0), Ty
, CastOpc
,
7857 case Instruction::ZExt
:
7858 case Instruction::SExt
:
7859 case Instruction::Trunc
:
7860 // If this is the same kind of case as our original (e.g. zext+zext), we
7861 // can safely replace it. Note that replacing it does not reduce the number
7862 // of casts in the input.
7866 // sext (zext ty1), ty2 -> zext ty2
7867 if (CastOpc
== Instruction::SExt
&& Opc
== Instruction::ZExt
)
7870 case Instruction::Select
: {
7871 SelectInst
*SI
= cast
<SelectInst
>(I
);
7872 return CanEvaluateInDifferentType(SI
->getTrueValue(), Ty
, CastOpc
,
7874 CanEvaluateInDifferentType(SI
->getFalseValue(), Ty
, CastOpc
,
7877 case Instruction::PHI
: {
7878 // We can change a phi if we can change all operands.
7879 PHINode
*PN
= cast
<PHINode
>(I
);
7880 for (unsigned i
= 0, e
= PN
->getNumIncomingValues(); i
!= e
; ++i
)
7881 if (!CanEvaluateInDifferentType(PN
->getIncomingValue(i
), Ty
, CastOpc
,
7887 // TODO: Can handle more cases here.
7894 /// EvaluateInDifferentType - Given an expression that
7895 /// CanEvaluateInDifferentType returns true for, actually insert the code to
7896 /// evaluate the expression.
7897 Value
*InstCombiner::EvaluateInDifferentType(Value
*V
, const Type
*Ty
,
7899 if (Constant
*C
= dyn_cast
<Constant
>(V
))
7900 return ConstantExpr::getIntegerCast(C
, Ty
,
7901 isSigned
/*Sext or ZExt*/);
7903 // Otherwise, it must be an instruction.
7904 Instruction
*I
= cast
<Instruction
>(V
);
7905 Instruction
*Res
= 0;
7906 unsigned Opc
= I
->getOpcode();
7908 case Instruction::Add
:
7909 case Instruction::Sub
:
7910 case Instruction::Mul
:
7911 case Instruction::And
:
7912 case Instruction::Or
:
7913 case Instruction::Xor
:
7914 case Instruction::AShr
:
7915 case Instruction::LShr
:
7916 case Instruction::Shl
:
7917 case Instruction::UDiv
:
7918 case Instruction::URem
: {
7919 Value
*LHS
= EvaluateInDifferentType(I
->getOperand(0), Ty
, isSigned
);
7920 Value
*RHS
= EvaluateInDifferentType(I
->getOperand(1), Ty
, isSigned
);
7921 Res
= BinaryOperator::Create((Instruction::BinaryOps
)Opc
, LHS
, RHS
);
7924 case Instruction::Trunc
:
7925 case Instruction::ZExt
:
7926 case Instruction::SExt
:
7927 // If the source type of the cast is the type we're trying for then we can
7928 // just return the source. There's no need to insert it because it is not
7930 if (I
->getOperand(0)->getType() == Ty
)
7931 return I
->getOperand(0);
7933 // Otherwise, must be the same type of cast, so just reinsert a new one.
7934 Res
= CastInst::Create(cast
<CastInst
>(I
)->getOpcode(), I
->getOperand(0),
7937 case Instruction::Select
: {
7938 Value
*True
= EvaluateInDifferentType(I
->getOperand(1), Ty
, isSigned
);
7939 Value
*False
= EvaluateInDifferentType(I
->getOperand(2), Ty
, isSigned
);
7940 Res
= SelectInst::Create(I
->getOperand(0), True
, False
);
7943 case Instruction::PHI
: {
7944 PHINode
*OPN
= cast
<PHINode
>(I
);
7945 PHINode
*NPN
= PHINode::Create(Ty
);
7946 for (unsigned i
= 0, e
= OPN
->getNumIncomingValues(); i
!= e
; ++i
) {
7947 Value
*V
=EvaluateInDifferentType(OPN
->getIncomingValue(i
), Ty
, isSigned
);
7948 NPN
->addIncoming(V
, OPN
->getIncomingBlock(i
));
7954 // TODO: Can handle more cases here.
7955 llvm_unreachable("Unreachable!");
7960 return InsertNewInstBefore(Res
, *I
);
/// @brief Implement the transforms common to all CastInst visitors.
Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  // Many cases of "cast of a cast" are eliminable. If it's eliminable we just
  // eliminate it now.
  if (CastInst *CSrc = dyn_cast<CastInst>(Src)) {   // A->B->C cast
    if (Instruction::CastOps opc =
        isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      return CastInst::Create(opc, CSrc->getOperand(0), CI.getType());
    }
  }

  // If we are casting a select then fold the cast into the select
  if (SelectInst *SI = dyn_cast<SelectInst>(Src))
    if (Instruction *NV = FoldOpIntoSelect(CI, SI, this))
      return NV;

  // If we are casting a PHI then fold the cast into the PHI
  if (isa<PHINode>(Src))
    if (Instruction *NV = FoldOpIntoPhi(CI))
      return NV;

  return 0;
}
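
// Worked example (illustrative; hypothetical IR): the cast-pair elimination
// above turns
//   %b = trunc i64 %a to i32
//   %c = trunc i32 %b to i16
// into a single "trunc i64 %a to i16", leaving %b to be deleted as dead if it
// has no other users.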
/// FindElementAtOffset - Given a type and a constant offset, determine whether
/// or not there is a sequence of GEP indices into the type that will land us at
/// the specified offset.  If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
                                       SmallVectorImpl<Value*> &NewIndices,
                                       const TargetData *TD,
                                       LLVMContext *Context) {
  if (!Ty->isSized()) return 0;

  // Start with the index over the outer type.  Note that the type size
  // might be zero (even if the offset isn't zero) if the indexed type
  // is something like [0 x {int, int}]
  const Type *IntPtrTy = TD->getIntPtrType(*Context);
  int64_t FirstIdx = 0;
  if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
    FirstIdx = Offset/TySize;
    Offset -= FirstIdx*TySize;

    // Handle hosts where % returns negative instead of values [0..TySize).
    if (Offset < 0) {
      --FirstIdx;
      Offset += TySize;
      assert(Offset >= 0);
    }
    assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
  }

  NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));

  // Index into the types.  If we fail, set OrigBase to null.
  while (Offset) {
    // Indexing into tail padding between struct/array elements.
    if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
      return 0;

    if (const StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = TD->getStructLayout(STy);
      assert(Offset < (int64_t)SL->getSizeInBytes() &&
             "Offset must stay within the indexed type");

      unsigned Elt = SL->getElementContainingOffset(Offset);
      NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(*Context), Elt));

      Offset -= SL->getElementOffset(Elt);
      Ty = STy->getElementType(Elt);
    } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
      uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
      assert(EltSize && "Cannot index into a zero-sized array");
      NewIndices.push_back(ConstantInt::get(IntPtrTy, Offset/EltSize));
      Offset %= EltSize;
      Ty = AT->getElementType();
    } else {
      // Otherwise, we can't index into the middle of this atomic type, bail.
      return 0;
    }
  }

  return Ty;
}
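
// Worked example (illustrative; hypothetical type): for Ty = { i32, i32, i32 }
// and Offset = 8, the indices pushed onto NewIndices are [0, 2] and the
// returned element type is i32, i.e. the byte offset names the element reached
// by "getelementptr %p, i32 0, i32 2".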
/// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
    // If casting the result of a getelementptr instruction with no offset, turn
    // this into a cast of the original pointer!
    if (GEP->hasAllZeroIndices()) {
      // Changing the cast operand is usually not a good idea but it is safe
      // here because the pointer operand is being replaced with another
      // pointer operand so the opcode doesn't need to change.
      CI.setOperand(0, GEP->getOperand(0));
      return &CI;
    }

    // If the GEP has a single use, and the base pointer is a bitcast, and the
    // GEP computes a constant offset, see if we can convert these three
    // instructions into fewer.  This typically happens with unions and other
    // non-type-safe code.
    if (TD && GEP->hasOneUse() && isa<BitCastInst>(GEP->getOperand(0))) {
      if (GEP->hasAllConstantIndices()) {
        // We are guaranteed to get a constant from EmitGEPOffset.
        ConstantInt *OffsetV =
          cast<ConstantInt>(EmitGEPOffset(GEP, CI, *this));
        int64_t Offset = OffsetV->getSExtValue();

        // Get the base pointer input of the bitcast, and the type it points to.
        Value *OrigBase = cast<BitCastInst>(GEP->getOperand(0))->getOperand(0);
        const Type *GEPIdxTy =
          cast<PointerType>(OrigBase->getType())->getElementType();
        SmallVector<Value*, 8> NewIndices;
        if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices, TD, Context)) {
          // If we were able to index down into an element, create the GEP
          // and bitcast the result.  This eliminates one bitcast, potentially
          // two.
          Value *NGEP = cast<GEPOperator>(GEP)->isInBounds() ?
            Builder->CreateInBoundsGEP(OrigBase,
                                       NewIndices.begin(), NewIndices.end()) :
            Builder->CreateGEP(OrigBase, NewIndices.begin(), NewIndices.end());
          NGEP->takeName(GEP);

          if (isa<BitCastInst>(CI))
            return new BitCastInst(NGEP, CI.getType());
          assert(isa<PtrToIntInst>(CI));
          return new PtrToIntInst(NGEP, CI.getType());
        }
      }
    }
  }

  return commonCastTransforms(CI);
}

/// isSafeIntegerType - Return true if this is a basic integer type, not a crazy
/// type like i42.  We don't want to introduce operations on random non-legal
/// integer types where they don't already exist in the code.  In the future,
/// we should consider making this based off target-data, so that 32-bit targets
/// won't get i64 operations etc.
static bool isSafeIntegerType(const Type *Ty) {
  switch (Ty->getPrimitiveSizeInBits()) {
  case 8:
  case 16:
  case 32:
  case 64:
    return true;
  default:
    return false;
  }
}

8124 /// commonIntCastTransforms - This function implements the common transforms
8125 /// for trunc, zext, and sext.
8126 Instruction
*InstCombiner::commonIntCastTransforms(CastInst
&CI
) {
8127 if (Instruction
*Result
= commonCastTransforms(CI
))
8130 Value
*Src
= CI
.getOperand(0);
8131 const Type
*SrcTy
= Src
->getType();
8132 const Type
*DestTy
= CI
.getType();
8133 uint32_t SrcBitSize
= SrcTy
->getScalarSizeInBits();
8134 uint32_t DestBitSize
= DestTy
->getScalarSizeInBits();
8136 // See if we can simplify any instructions used by the LHS whose sole
8137 // purpose is to compute bits we don't care about.
8138 if (SimplifyDemandedInstructionBits(CI
))
8141 // If the source isn't an instruction or has more than one use then we
8142 // can't do anything more.
8143 Instruction
*SrcI
= dyn_cast
<Instruction
>(Src
);
8144 if (!SrcI
|| !Src
->hasOneUse())
8147 // Attempt to propagate the cast into the instruction for int->int casts.
8148 int NumCastsRemoved
= 0;
8149 // Only do this if the dest type is a simple type, don't convert the
8150 // expression tree to something weird like i93 unless the source is also
8152 if ((isSafeIntegerType(DestTy
->getScalarType()) ||
8153 !isSafeIntegerType(SrcI
->getType()->getScalarType())) &&
8154 CanEvaluateInDifferentType(SrcI
, DestTy
,
8155 CI
.getOpcode(), NumCastsRemoved
)) {
8156 // If this cast is a truncate, evaluting in a different type always
8157 // eliminates the cast, so it is always a win. If this is a zero-extension,
8158 // we need to do an AND to maintain the clear top-part of the computation,
8159 // so we require that the input have eliminated at least one cast. If this
8160 // is a sign extension, we insert two new casts (to do the extension) so we
8161 // require that two casts have been eliminated.
8162 bool DoXForm
= false;
8163 bool JustReplace
= false;
8164 switch (CI
.getOpcode()) {
8166 // All the others use floating point so we shouldn't actually
8167 // get here because of the check above.
8168 llvm_unreachable("Unknown cast type");
8169 case Instruction::Trunc
:
8172 case Instruction::ZExt
: {
8173 DoXForm
= NumCastsRemoved
>= 1;
8174 if (!DoXForm
&& 0) {
8175 // If it's unnecessary to issue an AND to clear the high bits, it's
8176 // always profitable to do this xform.
8177 Value
*TryRes
= EvaluateInDifferentType(SrcI
, DestTy
, false);
8178 APInt
Mask(APInt::getBitsSet(DestBitSize
, SrcBitSize
, DestBitSize
));
8179 if (MaskedValueIsZero(TryRes
, Mask
))
8180 return ReplaceInstUsesWith(CI
, TryRes
);
8182 if (Instruction
*TryI
= dyn_cast
<Instruction
>(TryRes
))
8183 if (TryI
->use_empty())
8184 EraseInstFromFunction(*TryI
);
8188 case Instruction::SExt
: {
8189 DoXForm
= NumCastsRemoved
>= 2;
8190 if (!DoXForm
&& !isa
<TruncInst
>(SrcI
) && 0) {
8191 // If we do not have to emit the truncate + sext pair, then it's always
8192 // profitable to do this xform.
8194 // It's not safe to eliminate the trunc + sext pair if one of the
8195 // eliminated cast is a truncate. e.g.
8196 // t2 = trunc i32 t1 to i16
8197 // t3 = sext i16 t2 to i32
8200 Value
*TryRes
= EvaluateInDifferentType(SrcI
, DestTy
, true);
8201 unsigned NumSignBits
= ComputeNumSignBits(TryRes
);
8202 if (NumSignBits
> (DestBitSize
- SrcBitSize
))
8203 return ReplaceInstUsesWith(CI
, TryRes
);
8205 if (Instruction
*TryI
= dyn_cast
<Instruction
>(TryRes
))
8206 if (TryI
->use_empty())
8207 EraseInstFromFunction(*TryI
);
8214 DEBUG(errs() << "ICE: EvaluateInDifferentType converting expression type"
8215 " to avoid cast: " << CI
);
8216 Value
*Res
= EvaluateInDifferentType(SrcI
, DestTy
,
8217 CI
.getOpcode() == Instruction::SExt
);
8219 // Just replace this cast with the result.
8220 return ReplaceInstUsesWith(CI
, Res
);
8222 assert(Res
->getType() == DestTy
);
8223 switch (CI
.getOpcode()) {
8224 default: llvm_unreachable("Unknown cast type!");
8225 case Instruction::Trunc
:
8226 // Just replace this cast with the result.
8227 return ReplaceInstUsesWith(CI
, Res
);
8228 case Instruction::ZExt
: {
8229 assert(SrcBitSize
< DestBitSize
&& "Not a zext?");
8231 // If the high bits are already zero, just replace this cast with the
8233 APInt
Mask(APInt::getBitsSet(DestBitSize
, SrcBitSize
, DestBitSize
));
8234 if (MaskedValueIsZero(Res
, Mask
))
8235 return ReplaceInstUsesWith(CI
, Res
);
8237 // We need to emit an AND to clear the high bits.
8238 Constant
*C
= ConstantInt::get(*Context
,
8239 APInt::getLowBitsSet(DestBitSize
, SrcBitSize
));
8240 return BinaryOperator::CreateAnd(Res
, C
);
8242 case Instruction::SExt
: {
8243 // If the high bits are already filled with sign bit, just replace this
8244 // cast with the result.
8245 unsigned NumSignBits
= ComputeNumSignBits(Res
);
8246 if (NumSignBits
> (DestBitSize
- SrcBitSize
))
8247 return ReplaceInstUsesWith(CI
, Res
);
8249 // We need to emit a cast to truncate, then a cast to sext.
8250 return new SExtInst(Builder
->CreateTrunc(Res
, Src
->getType()), DestTy
);
8256 Value
*Op0
= SrcI
->getNumOperands() > 0 ? SrcI
->getOperand(0) : 0;
8257 Value
*Op1
= SrcI
->getNumOperands() > 1 ? SrcI
->getOperand(1) : 0;
8259 switch (SrcI
->getOpcode()) {
8260 case Instruction::Add
:
8261 case Instruction::Mul
:
8262 case Instruction::And
:
8263 case Instruction::Or
:
8264 case Instruction::Xor
:
8265 // If we are discarding information, rewrite.
8266 if (DestBitSize
< SrcBitSize
&& DestBitSize
!= 1) {
8267 // Don't insert two casts unless at least one can be eliminated.
8268 if (!ValueRequiresCast(CI
.getOpcode(), Op1
, DestTy
, TD
) ||
8269 !ValueRequiresCast(CI
.getOpcode(), Op0
, DestTy
, TD
)) {
8270 Value
*Op0c
= Builder
->CreateTrunc(Op0
, DestTy
, Op0
->getName());
8271 Value
*Op1c
= Builder
->CreateTrunc(Op1
, DestTy
, Op1
->getName());
8272 return BinaryOperator::Create(
8273 cast
<BinaryOperator
>(SrcI
)->getOpcode(), Op0c
, Op1c
);
8277 // cast (xor bool X, true) to int --> xor (cast bool X to int), 1
8278 if (isa
<ZExtInst
>(CI
) && SrcBitSize
== 1 &&
8279 SrcI
->getOpcode() == Instruction::Xor
&&
8280 Op1
== ConstantInt::getTrue(*Context
) &&
8281 (!Op0
->hasOneUse() || !isa
<CmpInst
>(Op0
))) {
8282 Value
*New
= Builder
->CreateZExt(Op0
, DestTy
, Op0
->getName());
8283 return BinaryOperator::CreateXor(New
,
8284 ConstantInt::get(CI
.getType(), 1));
8288 case Instruction::Shl
: {
8289 // Canonicalize trunc inside shl, if we can.
8290 ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Op1
);
8291 if (CI
&& DestBitSize
< SrcBitSize
&&
8292 CI
->getLimitedValue(DestBitSize
) < DestBitSize
) {
8293 Value
*Op0c
= Builder
->CreateTrunc(Op0
, DestTy
, Op0
->getName());
8294 Value
*Op1c
= Builder
->CreateTrunc(Op1
, DestTy
, Op1
->getName());
8295 return BinaryOperator::CreateShl(Op0c
, Op1c
);
Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
  if (Instruction *Result = commonIntCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  const Type *Ty = CI.getType();
  uint32_t DestBitWidth = Ty->getScalarSizeInBits();
  uint32_t SrcBitWidth = Src->getType()->getScalarSizeInBits();

  // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0)
  if (DestBitWidth == 1) {
    Constant *One = ConstantInt::get(Src->getType(), 1);
    Src = Builder->CreateAnd(Src, One, "tmp");
    Value *Zero = Constant::getNullValue(Src->getType());
    return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
  }

  // Optimize trunc(lshr(), c) to pull the shift through the truncate.
  ConstantInt *ShAmtV = 0;
  Value *ShiftOp = 0;
  if (Src->hasOneUse() &&
      match(Src, m_LShr(m_Value(ShiftOp), m_ConstantInt(ShAmtV)))) {
    uint32_t ShAmt = ShAmtV->getLimitedValue(SrcBitWidth);

    // Get a mask for the bits shifting in.
    APInt Mask(APInt::getLowBitsSet(SrcBitWidth, ShAmt).shl(DestBitWidth));
    if (MaskedValueIsZero(ShiftOp, Mask)) {
      if (ShAmt >= DestBitWidth)        // All zeros.
        return ReplaceInstUsesWith(CI, Constant::getNullValue(Ty));

      // Okay, we can shrink this.  Truncate the input, then return a new
      // shift.
      Value *V1 = Builder->CreateTrunc(ShiftOp, Ty, ShiftOp->getName());
      Value *V2 = ConstantExpr::getTrunc(ShAmtV, Ty);
      return BinaryOperator::CreateLShr(V1, V2);
    }
  }

  return 0;
}
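
// Worked example (illustrative; hypothetical IR): the i1 canonicalization
// above rewrites
//   %b = trunc i32 %x to i1
// as
//   %t = and i32 %x, 1
//   %b = icmp ne i32 %t, 0
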
8344 /// transformZExtICmp - Transform (zext icmp) to bitwise / integer operations
8345 /// in order to eliminate the icmp.
8346 Instruction
*InstCombiner::transformZExtICmp(ICmpInst
*ICI
, Instruction
&CI
,
8348 // If we are just checking for a icmp eq of a single bit and zext'ing it
8349 // to an integer, then shift the bit to the appropriate place and then
8350 // cast to integer to avoid the comparison.
8351 if (ConstantInt
*Op1C
= dyn_cast
<ConstantInt
>(ICI
->getOperand(1))) {
8352 const APInt
&Op1CV
= Op1C
->getValue();
8354 // zext (x <s 0) to i32 --> x>>u31 true if signbit set.
8355 // zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear.
8356 if ((ICI
->getPredicate() == ICmpInst::ICMP_SLT
&& Op1CV
== 0) ||
8357 (ICI
->getPredicate() == ICmpInst::ICMP_SGT
&&Op1CV
.isAllOnesValue())) {
8358 if (!DoXform
) return ICI
;
8360 Value
*In
= ICI
->getOperand(0);
8361 Value
*Sh
= ConstantInt::get(In
->getType(),
8362 In
->getType()->getScalarSizeInBits()-1);
8363 In
= Builder
->CreateLShr(In
, Sh
, In
->getName()+".lobit");
8364 if (In
->getType() != CI
.getType())
8365 In
= Builder
->CreateIntCast(In
, CI
.getType(), false/*ZExt*/, "tmp");
8367 if (ICI
->getPredicate() == ICmpInst::ICMP_SGT
) {
8368 Constant
*One
= ConstantInt::get(In
->getType(), 1);
8369 In
= Builder
->CreateXor(In
, One
, In
->getName()+".not");
8372 return ReplaceInstUsesWith(CI
, In
);
8377 // zext (X == 0) to i32 --> X^1 iff X has only the low bit set.
8378 // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
8379 // zext (X == 1) to i32 --> X iff X has only the low bit set.
8380 // zext (X == 2) to i32 --> X>>1 iff X has only the 2nd bit set.
8381 // zext (X != 0) to i32 --> X iff X has only the low bit set.
8382 // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
8383 // zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
8384 // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
8385 if ((Op1CV
== 0 || Op1CV
.isPowerOf2()) &&
8386 // This only works for EQ and NE
8387 ICI
->isEquality()) {
8388 // If Op1C some other power of two, convert:
8389 uint32_t BitWidth
= Op1C
->getType()->getBitWidth();
8390 APInt
KnownZero(BitWidth
, 0), KnownOne(BitWidth
, 0);
8391 APInt
TypeMask(APInt::getAllOnesValue(BitWidth
));
8392 ComputeMaskedBits(ICI
->getOperand(0), TypeMask
, KnownZero
, KnownOne
);
8394 APInt
KnownZeroMask(~KnownZero
);
8395 if (KnownZeroMask
.isPowerOf2()) { // Exactly 1 possible 1?
8396 if (!DoXform
) return ICI
;
8398 bool isNE
= ICI
->getPredicate() == ICmpInst::ICMP_NE
;
8399 if (Op1CV
!= 0 && (Op1CV
!= KnownZeroMask
)) {
8400 // (X&4) == 2 --> false
8401 // (X&4) != 2 --> true
8402 Constant
*Res
= ConstantInt::get(Type::getInt1Ty(*Context
), isNE
);
8403 Res
= ConstantExpr::getZExt(Res
, CI
.getType());
8404 return ReplaceInstUsesWith(CI
, Res
);
8407 uint32_t ShiftAmt
= KnownZeroMask
.logBase2();
8408 Value
*In
= ICI
->getOperand(0);
8410 // Perform a logical shr by shiftamt.
8411 // Insert the shift to put the result in the low bit.
8412 In
= Builder
->CreateLShr(In
, ConstantInt::get(In
->getType(),ShiftAmt
),
8413 In
->getName()+".lobit");
8416 if ((Op1CV
!= 0) == isNE
) { // Toggle the low bit.
8417 Constant
*One
= ConstantInt::get(In
->getType(), 1);
8418 In
= Builder
->CreateXor(In
, One
, "tmp");
8421 if (CI
.getType() == In
->getType())
8422 return ReplaceInstUsesWith(CI
, In
);
8424 return CastInst::CreateIntegerCast(In
, CI
.getType(), false/*ZExt*/);
8432 Instruction
*InstCombiner::visitZExt(ZExtInst
&CI
) {
8433 // If one of the common conversion will work ..
8434 if (Instruction
*Result
= commonIntCastTransforms(CI
))
8437 Value
*Src
= CI
.getOperand(0);
8439 // If this is a TRUNC followed by a ZEXT then we are dealing with integral
8440 // types and if the sizes are just right we can convert this into a logical
8441 // 'and' which will be much cheaper than the pair of casts.
8442 if (TruncInst
*CSrc
= dyn_cast
<TruncInst
>(Src
)) { // A->B->C cast
8443 // Get the sizes of the types involved. We know that the intermediate type
8444 // will be smaller than A or C, but don't know the relation between A and C.
8445 Value
*A
= CSrc
->getOperand(0);
8446 unsigned SrcSize
= A
->getType()->getScalarSizeInBits();
8447 unsigned MidSize
= CSrc
->getType()->getScalarSizeInBits();
8448 unsigned DstSize
= CI
.getType()->getScalarSizeInBits();
8449 // If we're actually extending zero bits, then if
8450 // SrcSize < DstSize: zext(a & mask)
8451 // SrcSize == DstSize: a & mask
8452 // SrcSize > DstSize: trunc(a) & mask
8453 if (SrcSize
< DstSize
) {
8454 APInt
AndValue(APInt::getLowBitsSet(SrcSize
, MidSize
));
8455 Constant
*AndConst
= ConstantInt::get(A
->getType(), AndValue
);
8456 Value
*And
= Builder
->CreateAnd(A
, AndConst
, CSrc
->getName()+".mask");
8457 return new ZExtInst(And
, CI
.getType());
8460 if (SrcSize
== DstSize
) {
8461 APInt
AndValue(APInt::getLowBitsSet(SrcSize
, MidSize
));
8462 return BinaryOperator::CreateAnd(A
, ConstantInt::get(A
->getType(),
8465 if (SrcSize
> DstSize
) {
8466 Value
*Trunc
= Builder
->CreateTrunc(A
, CI
.getType(), "tmp");
8467 APInt
AndValue(APInt::getLowBitsSet(DstSize
, MidSize
));
8468 return BinaryOperator::CreateAnd(Trunc
,
8469 ConstantInt::get(Trunc
->getType(),
8474 if (ICmpInst
*ICI
= dyn_cast
<ICmpInst
>(Src
))
8475 return transformZExtICmp(ICI
, CI
);
8477 BinaryOperator
*SrcI
= dyn_cast
<BinaryOperator
>(Src
);
8478 if (SrcI
&& SrcI
->getOpcode() == Instruction::Or
) {
8479 // zext (or icmp, icmp) --> or (zext icmp), (zext icmp) if at least one
8480 // of the (zext icmp) will be transformed.
8481 ICmpInst
*LHS
= dyn_cast
<ICmpInst
>(SrcI
->getOperand(0));
8482 ICmpInst
*RHS
= dyn_cast
<ICmpInst
>(SrcI
->getOperand(1));
8483 if (LHS
&& RHS
&& LHS
->hasOneUse() && RHS
->hasOneUse() &&
8484 (transformZExtICmp(LHS
, CI
, false) ||
8485 transformZExtICmp(RHS
, CI
, false))) {
8486 Value
*LCast
= Builder
->CreateZExt(LHS
, CI
.getType(), LHS
->getName());
8487 Value
*RCast
= Builder
->CreateZExt(RHS
, CI
.getType(), RHS
->getName());
8488 return BinaryOperator::Create(Instruction::Or
, LCast
, RCast
);
8492 // zext(trunc(t) & C) -> (t & zext(C)).
8493 if (SrcI
&& SrcI
->getOpcode() == Instruction::And
&& SrcI
->hasOneUse())
8494 if (ConstantInt
*C
= dyn_cast
<ConstantInt
>(SrcI
->getOperand(1)))
8495 if (TruncInst
*TI
= dyn_cast
<TruncInst
>(SrcI
->getOperand(0))) {
8496 Value
*TI0
= TI
->getOperand(0);
8497 if (TI0
->getType() == CI
.getType())
8499 BinaryOperator::CreateAnd(TI0
,
8500 ConstantExpr::getZExt(C
, CI
.getType()));
8503 // zext((trunc(t) & C) ^ C) -> ((t & zext(C)) ^ zext(C)).
8504 if (SrcI
&& SrcI
->getOpcode() == Instruction::Xor
&& SrcI
->hasOneUse())
8505 if (ConstantInt
*C
= dyn_cast
<ConstantInt
>(SrcI
->getOperand(1)))
8506 if (BinaryOperator
*And
= dyn_cast
<BinaryOperator
>(SrcI
->getOperand(0)))
8507 if (And
->getOpcode() == Instruction::And
&& And
->hasOneUse() &&
8508 And
->getOperand(1) == C
)
8509 if (TruncInst
*TI
= dyn_cast
<TruncInst
>(And
->getOperand(0))) {
8510 Value
*TI0
= TI
->getOperand(0);
8511 if (TI0
->getType() == CI
.getType()) {
8512 Constant
*ZC
= ConstantExpr::getZExt(C
, CI
.getType());
8513 Value
*NewAnd
= Builder
->CreateAnd(TI0
, ZC
, "tmp");
8514 return BinaryOperator::CreateXor(NewAnd
, ZC
);
8521 Instruction
*InstCombiner::visitSExt(SExtInst
&CI
) {
8522 if (Instruction
*I
= commonIntCastTransforms(CI
))
8525 Value
*Src
= CI
.getOperand(0);
8527 // Canonicalize sign-extend from i1 to a select.
8528 if (Src
->getType() == Type::getInt1Ty(*Context
))
8529 return SelectInst::Create(Src
,
8530 Constant::getAllOnesValue(CI
.getType()),
8531 Constant::getNullValue(CI
.getType()));
8533 // See if the value being truncated is already sign extended. If so, just
8534 // eliminate the trunc/sext pair.
8535 if (Operator::getOpcode(Src
) == Instruction::Trunc
) {
8536 Value
*Op
= cast
<User
>(Src
)->getOperand(0);
8537 unsigned OpBits
= Op
->getType()->getScalarSizeInBits();
8538 unsigned MidBits
= Src
->getType()->getScalarSizeInBits();
8539 unsigned DestBits
= CI
.getType()->getScalarSizeInBits();
8540 unsigned NumSignBits
= ComputeNumSignBits(Op
);
8542 if (OpBits
== DestBits
) {
8543 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign
8544 // bits, it is already ready.
8545 if (NumSignBits
> DestBits
-MidBits
)
8546 return ReplaceInstUsesWith(CI
, Op
);
8547 } else if (OpBits
< DestBits
) {
8548 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign
8549 // bits, just sext from i32.
8550 if (NumSignBits
> OpBits
-MidBits
)
8551 return new SExtInst(Op
, CI
.getType(), "tmp");
8553 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign
8554 // bits, just truncate to i32.
8555 if (NumSignBits
> OpBits
-MidBits
)
8556 return new TruncInst(Op
, CI
.getType(), "tmp");
8560 // If the input is a shl/ashr pair of a same constant, then this is a sign
8561 // extension from a smaller value. If we could trust arbitrary bitwidth
8562 // integers, we could turn this into a truncate to the smaller bit and then
8563 // use a sext for the whole extension. Since we don't, look deeper and check
8564 // for a truncate. If the source and dest are the same type, eliminate the
8565 // trunc and extend and just do shifts. For example, turn:
8566 // %a = trunc i32 %i to i8
8567 // %b = shl i8 %a, 6
8568 // %c = ashr i8 %b, 6
8569 // %d = sext i8 %c to i32
8571 // %a = shl i32 %i, 30
8572 // %d = ashr i32 %a, 30
8574 ConstantInt
*BA
= 0, *CA
= 0;
8575 if (match(Src
, m_AShr(m_Shl(m_Value(A
), m_ConstantInt(BA
)),
8576 m_ConstantInt(CA
))) &&
8577 BA
== CA
&& isa
<TruncInst
>(A
)) {
8578 Value
*I
= cast
<TruncInst
>(A
)->getOperand(0);
8579 if (I
->getType() == CI
.getType()) {
8580 unsigned MidSize
= Src
->getType()->getScalarSizeInBits();
8581 unsigned SrcDstSize
= CI
.getType()->getScalarSizeInBits();
8582 unsigned ShAmt
= CA
->getZExtValue()+SrcDstSize
-MidSize
;
8583 Constant
*ShAmtV
= ConstantInt::get(CI
.getType(), ShAmt
);
8584 I
= Builder
->CreateShl(I
, ShAmtV
, CI
.getName());
8585 return BinaryOperator::CreateAShr(I
, ShAmtV
);
/// FitsInFPType - Return a Constant* for the specified FP constant if it fits
/// in the specified FP type without changing its value.
static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem,
                              LLVMContext *Context) {
  bool losesInfo;
  APFloat F = CFP->getValueAPF();
  (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
  if (!losesInfo)
    return ConstantFP::get(*Context, F);
  return 0;
}

/// LookThroughFPExtensions - If this is an fp extension instruction, look
/// through it until we get the source value.
static Value *LookThroughFPExtensions(Value *V, LLVMContext *Context) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (I->getOpcode() == Instruction::FPExt)
      return LookThroughFPExtensions(I->getOperand(0), Context);

  // If this value is a constant, return the constant in the smallest FP type
  // that can accurately represent it.  This allows us to turn
  // (float)((double)X+2.0) into x+2.0f.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType() == Type::getPPC_FP128Ty(*Context))
      return V;  // No constant folding of this.
    // See if the value can be truncated to float and then reextended.
    if (Value *V = FitsInFPType(CFP, APFloat::IEEEsingle, Context))
      return V;
    if (CFP->getType() == Type::getDoubleTy(*Context))
      return V;  // Won't shrink.
    if (Value *V = FitsInFPType(CFP, APFloat::IEEEdouble, Context))
      return V;
    // Don't try to shrink to various long double types.
  }

  return V;
}
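
// Worked example (illustrative; hypothetical IR): LookThroughFPExtensions maps
// "fpext float %x to double" back to %x, and maps the f64 constant 2.0 to the
// f32 constant 2.0 because the value is exactly representable in the narrower
// type; visitFPTrunc below uses this to turn (float)((double)%x + 2.0) into
// "fadd float %x, 2.0".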
8630 Instruction
*InstCombiner::visitFPTrunc(FPTruncInst
&CI
) {
8631 if (Instruction
*I
= commonCastTransforms(CI
))
8634 // If we have fptrunc(fadd (fpextend x), (fpextend y)), where x and y are
8635 // smaller than the destination type, we can eliminate the truncate by doing
8636 // the add as the smaller type. This applies to fadd/fsub/fmul/fdiv as well as
8637 // many builtins (sqrt, etc).
8638 BinaryOperator
*OpI
= dyn_cast
<BinaryOperator
>(CI
.getOperand(0));
8639 if (OpI
&& OpI
->hasOneUse()) {
8640 switch (OpI
->getOpcode()) {
8642 case Instruction::FAdd
:
8643 case Instruction::FSub
:
8644 case Instruction::FMul
:
8645 case Instruction::FDiv
:
8646 case Instruction::FRem
:
8647 const Type
*SrcTy
= OpI
->getType();
8648 Value
*LHSTrunc
= LookThroughFPExtensions(OpI
->getOperand(0), Context
);
8649 Value
*RHSTrunc
= LookThroughFPExtensions(OpI
->getOperand(1), Context
);
8650 if (LHSTrunc
->getType() != SrcTy
&&
8651 RHSTrunc
->getType() != SrcTy
) {
8652 unsigned DstSize
= CI
.getType()->getScalarSizeInBits();
8653 // If the source types were both smaller than the destination type of
8654 // the cast, do this xform.
8655 if (LHSTrunc
->getType()->getScalarSizeInBits() <= DstSize
&&
8656 RHSTrunc
->getType()->getScalarSizeInBits() <= DstSize
) {
8657 LHSTrunc
= Builder
->CreateFPExt(LHSTrunc
, CI
.getType());
8658 RHSTrunc
= Builder
->CreateFPExt(RHSTrunc
, CI
.getType());
8659 return BinaryOperator::Create(OpI
->getOpcode(), LHSTrunc
, RHSTrunc
);
8668 Instruction
*InstCombiner::visitFPExt(CastInst
&CI
) {
8669 return commonCastTransforms(CI
);
Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
  Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
  if (OpI == 0)
    return commonCastTransforms(FI);

  // fptoui(uitofp(X)) --> X
  // fptoui(sitofp(X)) --> X
  // This is safe if the intermediate type has enough bits in its mantissa to
  // accurately represent all values of X.  For example, do not do this with
  // i64->float->i64.  This is also safe for the sitofp case, because any
  // negative 'X' value would cause an undefined result for the fptoui.
  if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
      OpI->getOperand(0)->getType() == FI.getType() &&
      (int)FI.getType()->getScalarSizeInBits() < /*extra bit for sign */
                    OpI->getType()->getFPMantissaWidth())
    return ReplaceInstUsesWith(FI, OpI->getOperand(0));

  return commonCastTransforms(FI);
}

Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
  Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
  if (OpI == 0)
    return commonCastTransforms(FI);

  // fptosi(sitofp(X)) --> X
  // fptosi(uitofp(X)) --> X
  // This is safe if the intermediate type has enough bits in its mantissa to
  // accurately represent all values of X.  For example, do not do this with
  // i64->float->i64.  This is also safe for the uitofp case, because an 'X'
  // value too large for the signed result would make the fptosi undefined
  // anyway.
  if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
      OpI->getOperand(0)->getType() == FI.getType() &&
      (int)FI.getType()->getScalarSizeInBits() <=
                    OpI->getType()->getFPMantissaWidth())
    return ReplaceInstUsesWith(FI, OpI->getOperand(0));

  return commonCastTransforms(FI);
}

Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}
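
// Worked example (illustrative; hypothetical IR):
//   %d = sitofp i32 %x to double
//   %y = fptosi double %d to i32
// folds to %x, because a double's 53-bit mantissa represents every i32
// exactly; the same fold is not done through float for i64 values, which do
// not fit in float's 24 mantissa bits.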
Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
  // If the destination integer type is smaller than the intptr_t type for
  // this target, do a ptrtoint to intptr_t then do a trunc.  This allows the
  // trunc to be exposed to other transforms.  Don't do this for extending
  // ptrtoint's, because we don't know if the target sign or zero extends its
  // pointers.
  if (TD &&
      CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) {
    Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
                                       TD->getIntPtrType(CI.getContext()),
                                       "tmp");
    return new TruncInst(P, CI.getType());
  }

  return commonPointerCastTransforms(CI);
}

Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
  // If the source integer type is larger than the intptr_t type for
  // this target, do a trunc to the intptr_t type, then inttoptr of it.  This
  // allows the trunc to be exposed to other transforms.  Don't do this for
  // extending inttoptr's, because we don't know if the target sign or zero
  // extends to pointers.
  if (TD && CI.getOperand(0)->getType()->getScalarSizeInBits() >
      TD->getPointerSizeInBits()) {
    Value *P = Builder->CreateTrunc(CI.getOperand(0),
                                    TD->getIntPtrType(CI.getContext()), "tmp");
    return new IntToPtrInst(P, CI.getType());
  }

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  return 0;
}
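
// Worked example (illustrative; hypothetical IR): with 64-bit pointers,
//   %i = ptrtoint i8* %p to i32
// becomes
//   %t = ptrtoint i8* %p to i64
//   %i = trunc i64 %t to i32
// so the trunc is exposed to the integer transforms above.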
8756 Instruction
*InstCombiner::visitBitCast(BitCastInst
&CI
) {
8757 // If the operands are integer typed then apply the integer transforms,
8758 // otherwise just apply the common ones.
8759 Value
*Src
= CI
.getOperand(0);
8760 const Type
*SrcTy
= Src
->getType();
8761 const Type
*DestTy
= CI
.getType();
8763 if (isa
<PointerType
>(SrcTy
)) {
8764 if (Instruction
*I
= commonPointerCastTransforms(CI
))
8767 if (Instruction
*Result
= commonCastTransforms(CI
))
8772 // Get rid of casts from one type to the same type. These are useless and can
8773 // be replaced by the operand.
8774 if (DestTy
== Src
->getType())
8775 return ReplaceInstUsesWith(CI
, Src
);
8777 if (const PointerType
*DstPTy
= dyn_cast
<PointerType
>(DestTy
)) {
8778 const PointerType
*SrcPTy
= cast
<PointerType
>(SrcTy
);
8779 const Type
*DstElTy
= DstPTy
->getElementType();
8780 const Type
*SrcElTy
= SrcPTy
->getElementType();
8782 // If the address spaces don't match, don't eliminate the bitcast, which is
8783 // required for changing types.
8784 if (SrcPTy
->getAddressSpace() != DstPTy
->getAddressSpace())
8787 // If we are casting a malloc or alloca to a pointer to a type of the same
8788 // size, rewrite the allocation instruction to allocate the "right" type.
8789 if (AllocationInst
*AI
= dyn_cast
<AllocationInst
>(Src
))
8790 if (Instruction
*V
= PromoteCastOfAllocation(CI
, *AI
))
8793 // If the source and destination are pointers, and this cast is equivalent
8794 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
8795 // This can enhance SROA and other transforms that want type-safe pointers.
8796 Constant
*ZeroUInt
= Constant::getNullValue(Type::getInt32Ty(*Context
));
8797 unsigned NumZeros
= 0;
8798 while (SrcElTy
!= DstElTy
&&
8799 isa
<CompositeType
>(SrcElTy
) && !isa
<PointerType
>(SrcElTy
) &&
8800 SrcElTy
->getNumContainedTypes() /* not "{}" */) {
8801 SrcElTy
= cast
<CompositeType
>(SrcElTy
)->getTypeAtIndex(ZeroUInt
);
8805 // If we found a path from the src to dest, create the getelementptr now.
8806 if (SrcElTy
== DstElTy
) {
8807 SmallVector
<Value
*, 8> Idxs(NumZeros
+1, ZeroUInt
);
8808 return GetElementPtrInst::CreateInBounds(Src
, Idxs
.begin(), Idxs
.end(), "",
8809 ((Instruction
*) NULL
));
8813 if (const VectorType
*DestVTy
= dyn_cast
<VectorType
>(DestTy
)) {
8814 if (DestVTy
->getNumElements() == 1) {
8815 if (!isa
<VectorType
>(SrcTy
)) {
8816 Value
*Elem
= Builder
->CreateBitCast(Src
, DestVTy
->getElementType());
8817 return InsertElementInst::Create(UndefValue::get(DestTy
), Elem
,
8818 Constant::getNullValue(Type::getInt32Ty(*Context
)));
8820 // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
8824 if (const VectorType
*SrcVTy
= dyn_cast
<VectorType
>(SrcTy
)) {
8825 if (SrcVTy
->getNumElements() == 1) {
8826 if (!isa
<VectorType
>(DestTy
)) {
8828 Builder
->CreateExtractElement(Src
,
8829 Constant::getNullValue(Type::getInt32Ty(*Context
)));
8830 return CastInst::Create(Instruction::BitCast
, Elem
, DestTy
);
8835 if (ShuffleVectorInst
*SVI
= dyn_cast
<ShuffleVectorInst
>(Src
)) {
8836 if (SVI
->hasOneUse()) {
8837 // Okay, we have (bitconvert (shuffle ..)). Check to see if this is
8838 // a bitconvert to a vector with the same # elts.
8839 if (isa
<VectorType
>(DestTy
) &&
8840 cast
<VectorType
>(DestTy
)->getNumElements() ==
8841 SVI
->getType()->getNumElements() &&
8842 SVI
->getType()->getNumElements() ==
8843 cast
<VectorType
>(SVI
->getOperand(0)->getType())->getNumElements()) {
8845 // If either of the operands is a cast from CI.getType(), then
8846 // evaluating the shuffle in the casted destination's type will allow
8847 // us to eliminate at least one cast.
8848 if (((Tmp
= dyn_cast
<CastInst
>(SVI
->getOperand(0))) &&
8849 Tmp
->getOperand(0)->getType() == DestTy
) ||
8850 ((Tmp
= dyn_cast
<CastInst
>(SVI
->getOperand(1))) &&
8851 Tmp
->getOperand(0)->getType() == DestTy
)) {
8852 Value
*LHS
= Builder
->CreateBitCast(SVI
->getOperand(0), DestTy
);
8853 Value
*RHS
= Builder
->CreateBitCast(SVI
->getOperand(1), DestTy
);
8854 // Return a new shuffle vector. Use the same element ID's, as we
8855 // know the vector types match #elts.
8856 return new ShuffleVectorInst(LHS
, RHS
, SVI
->getOperand(2));
/// GetSelectFoldableOperands - We want to turn code that looks like this:
///   %C = or %A, %B
///   %D = select %cond, %C, %A
/// into:
///   %C = select %cond, %B, 0
///   %D = or %A, %C
///
/// Assuming that the specified instruction is an operand to the select, return
/// a bitmask indicating which operands of this instruction are foldable if they
/// equal the other incoming value of the select.
static unsigned GetSelectFoldableOperands(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    return 3;              // Can fold through either operand.
  case Instruction::Sub:   // Can only fold on the amount subtracted.
  case Instruction::Shl:   // Can only fold on the shift amount.
  case Instruction::LShr:
  case Instruction::AShr:
    return 1;
  default:
    return 0;              // Cannot fold
  }
}

/// GetSelectFoldableConstant - For the same transformation as the previous
/// function, return the identity constant that goes into the select.
static Constant *GetSelectFoldableConstant(Instruction *I,
                                           LLVMContext *Context) {
  switch (I->getOpcode()) {
  default: llvm_unreachable("This cannot happen!");
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return Constant::getNullValue(I->getType());
  case Instruction::And:
    return Constant::getAllOnesValue(I->getType());
  case Instruction::Mul:
    return ConstantInt::get(I->getType(), 1);
  }
}
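
// Worked example (illustrative; hypothetical IR): FoldSelectIntoOp below uses
// the two helpers above to rewrite
//   %sum = add i32 %x, %y
//   %r   = select i1 %c, i32 %sum, i32 %x
// as
//   %sel = select i1 %c, i32 %y, i32 0
//   %r   = add i32 %x, %sel
// with 0 being the foldable identity constant for 'add'.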
8914 /// FoldSelectOpOp - Here we have (select c, TI, FI), and we know that TI and FI
8915 /// have the same opcode and only one use each. Try to simplify this.
8916 Instruction
*InstCombiner::FoldSelectOpOp(SelectInst
&SI
, Instruction
*TI
,
8918 if (TI
->getNumOperands() == 1) {
8919 // If this is a non-volatile load or a cast from the same type,
8922 if (TI
->getOperand(0)->getType() != FI
->getOperand(0)->getType())
8925 return 0; // unknown unary op.
8928 // Fold this by inserting a select from the input values.
8929 SelectInst
*NewSI
= SelectInst::Create(SI
.getCondition(), TI
->getOperand(0),
8930 FI
->getOperand(0), SI
.getName()+".v");
8931 InsertNewInstBefore(NewSI
, SI
);
8932 return CastInst::Create(Instruction::CastOps(TI
->getOpcode()), NewSI
,
8936 // Only handle binary operators here.
8937 if (!isa
<BinaryOperator
>(TI
))
8940 // Figure out if the operations have any operands in common.
8941 Value
*MatchOp
, *OtherOpT
, *OtherOpF
;
8943 if (TI
->getOperand(0) == FI
->getOperand(0)) {
8944 MatchOp
= TI
->getOperand(0);
8945 OtherOpT
= TI
->getOperand(1);
8946 OtherOpF
= FI
->getOperand(1);
8947 MatchIsOpZero
= true;
8948 } else if (TI
->getOperand(1) == FI
->getOperand(1)) {
8949 MatchOp
= TI
->getOperand(1);
8950 OtherOpT
= TI
->getOperand(0);
8951 OtherOpF
= FI
->getOperand(0);
8952 MatchIsOpZero
= false;
8953 } else if (!TI
->isCommutative()) {
8955 } else if (TI
->getOperand(0) == FI
->getOperand(1)) {
8956 MatchOp
= TI
->getOperand(0);
8957 OtherOpT
= TI
->getOperand(1);
8958 OtherOpF
= FI
->getOperand(0);
8959 MatchIsOpZero
= true;
8960 } else if (TI
->getOperand(1) == FI
->getOperand(0)) {
8961 MatchOp
= TI
->getOperand(1);
8962 OtherOpT
= TI
->getOperand(0);
8963 OtherOpF
= FI
->getOperand(1);
8964 MatchIsOpZero
= true;
8969 // If we reach here, they do have operations in common.
8970 SelectInst
*NewSI
= SelectInst::Create(SI
.getCondition(), OtherOpT
,
8971 OtherOpF
, SI
.getName()+".v");
8972 InsertNewInstBefore(NewSI
, SI
);
8974 if (BinaryOperator
*BO
= dyn_cast
<BinaryOperator
>(TI
)) {
8976 return BinaryOperator::Create(BO
->getOpcode(), MatchOp
, NewSI
);
8978 return BinaryOperator::Create(BO
->getOpcode(), NewSI
, MatchOp
);
8980 llvm_unreachable("Shouldn't get here");
static bool isSelect01(Constant *C1, Constant *C2) {
  ConstantInt *C1I = dyn_cast<ConstantInt>(C1);
  if (!C1I)
    return false;
  ConstantInt *C2I = dyn_cast<ConstantInt>(C2);
  if (!C2I)
    return false;
  return (C1I->isZero() || C1I->isOne()) && (C2I->isZero() || C2I->isOne());
}

8994 /// FoldSelectIntoOp - Try fold the select into one of the operands to
8995 /// facilitate further optimization.
8996 Instruction
*InstCombiner::FoldSelectIntoOp(SelectInst
&SI
, Value
*TrueVal
,
8998 // See the comment above GetSelectFoldableOperands for a description of the
8999 // transformation we are doing here.
9000 if (Instruction
*TVI
= dyn_cast
<Instruction
>(TrueVal
)) {
9001 if (TVI
->hasOneUse() && TVI
->getNumOperands() == 2 &&
9002 !isa
<Constant
>(FalseVal
)) {
9003 if (unsigned SFO
= GetSelectFoldableOperands(TVI
)) {
9004 unsigned OpToFold
= 0;
9005 if ((SFO
& 1) && FalseVal
== TVI
->getOperand(0)) {
9007 } else if ((SFO
& 2) && FalseVal
== TVI
->getOperand(1)) {
9012 Constant
*C
= GetSelectFoldableConstant(TVI
, Context
);
9013 Value
*OOp
= TVI
->getOperand(2-OpToFold
);
9014 // Avoid creating select between 2 constants unless it's selecting
9016 if (!isa
<Constant
>(OOp
) || isSelect01(C
, cast
<Constant
>(OOp
))) {
9017 Instruction
*NewSel
= SelectInst::Create(SI
.getCondition(), OOp
, C
);
9018 InsertNewInstBefore(NewSel
, SI
);
9019 NewSel
->takeName(TVI
);
9020 if (BinaryOperator
*BO
= dyn_cast
<BinaryOperator
>(TVI
))
9021 return BinaryOperator::Create(BO
->getOpcode(), FalseVal
, NewSel
);
9022 llvm_unreachable("Unknown instruction!!");
9029 if (Instruction
*FVI
= dyn_cast
<Instruction
>(FalseVal
)) {
9030 if (FVI
->hasOneUse() && FVI
->getNumOperands() == 2 &&
9031 !isa
<Constant
>(TrueVal
)) {
9032 if (unsigned SFO
= GetSelectFoldableOperands(FVI
)) {
9033 unsigned OpToFold
= 0;
9034 if ((SFO
& 1) && TrueVal
== FVI
->getOperand(0)) {
9036 } else if ((SFO
& 2) && TrueVal
== FVI
->getOperand(1)) {
9041 Constant
*C
= GetSelectFoldableConstant(FVI
, Context
);
9042 Value
*OOp
= FVI
->getOperand(2-OpToFold
);
9043 // Avoid creating select between 2 constants unless it's selecting
9045 if (!isa
<Constant
>(OOp
) || isSelect01(C
, cast
<Constant
>(OOp
))) {
9046 Instruction
*NewSel
= SelectInst::Create(SI
.getCondition(), C
, OOp
);
9047 InsertNewInstBefore(NewSel
, SI
);
9048 NewSel
->takeName(FVI
);
9049 if (BinaryOperator
*BO
= dyn_cast
<BinaryOperator
>(FVI
))
9050 return BinaryOperator::Create(BO
->getOpcode(), TrueVal
, NewSel
);
9051 llvm_unreachable("Unknown instruction!!");
9061 /// visitSelectInstWithICmp - Visit a SelectInst that has an
9062 /// ICmpInst as its first operand.
9064 Instruction
*InstCombiner::visitSelectInstWithICmp(SelectInst
&SI
,
9066 bool Changed
= false;
9067 ICmpInst::Predicate Pred
= ICI
->getPredicate();
9068 Value
*CmpLHS
= ICI
->getOperand(0);
9069 Value
*CmpRHS
= ICI
->getOperand(1);
9070 Value
*TrueVal
= SI
.getTrueValue();
9071 Value
*FalseVal
= SI
.getFalseValue();
9073 // Check cases where the comparison is with a constant that
9074 // can be adjusted to fit the min/max idiom. We may edit ICI in
9075 // place here, so make sure the select is the only user.
9076 if (ICI
->hasOneUse())
9077 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(CmpRHS
)) {
9080 case ICmpInst::ICMP_ULT
:
9081 case ICmpInst::ICMP_SLT
: {
9082 // X < MIN ? T : F --> F
9083 if (CI
->isMinValue(Pred
== ICmpInst::ICMP_SLT
))
9084 return ReplaceInstUsesWith(SI
, FalseVal
);
9085 // X < C ? X : C-1 --> X > C-1 ? C-1 : X
9086 Constant
*AdjustedRHS
= SubOne(CI
);
9087 if ((CmpLHS
== TrueVal
&& AdjustedRHS
== FalseVal
) ||
9088 (CmpLHS
== FalseVal
&& AdjustedRHS
== TrueVal
)) {
9089 Pred
= ICmpInst::getSwappedPredicate(Pred
);
9090 CmpRHS
= AdjustedRHS
;
9091 std::swap(FalseVal
, TrueVal
);
9092 ICI
->setPredicate(Pred
);
9093 ICI
->setOperand(1, CmpRHS
);
9094 SI
.setOperand(1, TrueVal
);
9095 SI
.setOperand(2, FalseVal
);
9100 case ICmpInst::ICMP_UGT
:
9101 case ICmpInst::ICMP_SGT
: {
9102 // X > MAX ? T : F --> F
9103 if (CI
->isMaxValue(Pred
== ICmpInst::ICMP_SGT
))
9104 return ReplaceInstUsesWith(SI
, FalseVal
);
9105 // X > C ? X : C+1 --> X < C+1 ? C+1 : X
9106 Constant
*AdjustedRHS
= AddOne(CI
);
9107 if ((CmpLHS
== TrueVal
&& AdjustedRHS
== FalseVal
) ||
9108 (CmpLHS
== FalseVal
&& AdjustedRHS
== TrueVal
)) {
9109 Pred
= ICmpInst::getSwappedPredicate(Pred
);
9110 CmpRHS
= AdjustedRHS
;
9111 std::swap(FalseVal
, TrueVal
);
9112 ICI
->setPredicate(Pred
);
9113 ICI
->setOperand(1, CmpRHS
);
9114 SI
.setOperand(1, TrueVal
);
9115 SI
.setOperand(2, FalseVal
);
9122 // (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if signed
9123 // (x >s -1) ? -1 : 0 -> ashr x, 31 -> all ones if not signed
9124 CmpInst::Predicate Pred
= CmpInst::BAD_ICMP_PREDICATE
;
9125 if (match(TrueVal
, m_ConstantInt
<-1>()) &&
9126 match(FalseVal
, m_ConstantInt
<0>()))
9127 Pred
= ICI
->getPredicate();
9128 else if (match(TrueVal
, m_ConstantInt
<0>()) &&
9129 match(FalseVal
, m_ConstantInt
<-1>()))
9130 Pred
= CmpInst::getInversePredicate(ICI
->getPredicate());
9132 if (Pred
!= CmpInst::BAD_ICMP_PREDICATE
) {
9133 // If we are just checking for a icmp eq of a single bit and zext'ing it
9134 // to an integer, then shift the bit to the appropriate place and then
9135 // cast to integer to avoid the comparison.
9136 const APInt
&Op1CV
= CI
->getValue();
9138 // sext (x <s 0) to i32 --> x>>s31 true if signbit set.
9139 // sext (x >s -1) to i32 --> (x>>s31)^-1 true if signbit clear.
9140 if ((Pred
== ICmpInst::ICMP_SLT
&& Op1CV
== 0) ||
9141 (Pred
== ICmpInst::ICMP_SGT
&& Op1CV
.isAllOnesValue())) {
9142 Value
*In
= ICI
->getOperand(0);
9143 Value
*Sh
= ConstantInt::get(In
->getType(),
9144 In
->getType()->getScalarSizeInBits()-1);
9145 In
= InsertNewInstBefore(BinaryOperator::CreateAShr(In
, Sh
,
9146 In
->getName()+".lobit"),
9148 if (In
->getType() != SI
.getType())
9149 In
= CastInst::CreateIntegerCast(In
, SI
.getType(),
9150 true/*SExt*/, "tmp", ICI
);
9152 if (Pred
== ICmpInst::ICMP_SGT
)
9153 In
= InsertNewInstBefore(BinaryOperator::CreateNot(In
,
9154 In
->getName()+".not"), *ICI
);
9156 return ReplaceInstUsesWith(SI
, In
);
9161 if (CmpLHS
== TrueVal
&& CmpRHS
== FalseVal
) {
9162 // Transform (X == Y) ? X : Y -> Y
9163 if (Pred
== ICmpInst::ICMP_EQ
)
9164 return ReplaceInstUsesWith(SI
, FalseVal
);
9165 // Transform (X != Y) ? X : Y -> X
9166 if (Pred
== ICmpInst::ICMP_NE
)
9167 return ReplaceInstUsesWith(SI
, TrueVal
);
9168 /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
9170 } else if (CmpLHS
== FalseVal
&& CmpRHS
== TrueVal
) {
9171 // Transform (X == Y) ? Y : X -> X
9172 if (Pred
== ICmpInst::ICMP_EQ
)
9173 return ReplaceInstUsesWith(SI
, FalseVal
);
9174 // Transform (X != Y) ? Y : X -> Y
9175 if (Pred
== ICmpInst::ICMP_NE
)
9176 return ReplaceInstUsesWith(SI
, TrueVal
);
9177 /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
9180 /// NOTE: if we wanted to, this is where to detect integer ABS
9182 return Changed
? &SI
: 0;
Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
  Value *CondVal = SI.getCondition();
  Value *TrueVal = SI.getTrueValue();
  Value *FalseVal = SI.getFalseValue();

  // select true, X, Y  -> X
  // select false, X, Y -> Y
  if (ConstantInt *C = dyn_cast<ConstantInt>(CondVal))
    return ReplaceInstUsesWith(SI, C->getZExtValue() ? TrueVal : FalseVal);

  // select C, X, X -> X
  if (TrueVal == FalseVal)
    return ReplaceInstUsesWith(SI, TrueVal);

  if (isa<UndefValue>(TrueVal))   // select C, undef, X -> X
    return ReplaceInstUsesWith(SI, FalseVal);
  if (isa<UndefValue>(FalseVal))  // select C, X, undef -> X
    return ReplaceInstUsesWith(SI, TrueVal);
  if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y
    if (isa<Constant>(TrueVal))
      return ReplaceInstUsesWith(SI, TrueVal);
    return ReplaceInstUsesWith(SI, FalseVal);
  }

  if (SI.getType() == Type::getInt1Ty(*Context)) {
    if (ConstantInt *C = dyn_cast<ConstantInt>(TrueVal)) {
      if (C->getZExtValue()) {
        // Change: A = select B, true, C --> A = or B, C
        return BinaryOperator::CreateOr(CondVal, FalseVal);
      }
      // Change: A = select B, false, C --> A = and !B, C
      Value *NotCond =
        InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
                                           "not."+CondVal->getName()), SI);
      return BinaryOperator::CreateAnd(NotCond, FalseVal);
    } else if (ConstantInt *C = dyn_cast<ConstantInt>(FalseVal)) {
      if (C->getZExtValue() == false) {
        // Change: A = select B, C, false --> A = and B, C
        return BinaryOperator::CreateAnd(CondVal, TrueVal);
      }
      // Change: A = select B, C, true --> A = or !B, C
      Value *NotCond =
        InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
                                           "not."+CondVal->getName()), SI);
      return BinaryOperator::CreateOr(NotCond, TrueVal);
    }

    // select a, b, a  -> a&b
    // select a, a, b  -> a|b
    if (CondVal == TrueVal)
      return BinaryOperator::CreateOr(CondVal, FalseVal);
    else if (CondVal == FalseVal)
      return BinaryOperator::CreateAnd(CondVal, TrueVal);
  }

  // Selecting between two integer constants?
  if (ConstantInt *TrueValC = dyn_cast<ConstantInt>(TrueVal))
    if (ConstantInt *FalseValC = dyn_cast<ConstantInt>(FalseVal)) {
      // select C, 1, 0 -> zext C to int
      if (FalseValC->isZero() && TrueValC->getValue() == 1) {
        return CastInst::Create(Instruction::ZExt, CondVal, SI.getType());
      } else if (TrueValC->isZero() && FalseValC->getValue() == 1) {
        // select C, 0, 1 -> zext !C to int
        Value *NotCond =
          InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
                                             "not."+CondVal->getName()), SI);
        return CastInst::Create(Instruction::ZExt, NotCond, SI.getType());
      }

      if (ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition())) {
        // If one of the constants is zero (we know they can't both be) and we
        // have an icmp instruction with zero, and we have an 'and' with the
        // non-constant value, eliminate this whole mess.  This corresponds to
        // cases like this: ((X & 27) ? 27 : 0)
        if (TrueValC->isZero() || FalseValC->isZero())
          if (IC->isEquality() && isa<ConstantInt>(IC->getOperand(1)) &&
              cast<Constant>(IC->getOperand(1))->isNullValue())
            if (Instruction *ICA = dyn_cast<Instruction>(IC->getOperand(0)))
              if (ICA->getOpcode() == Instruction::And &&
                  isa<ConstantInt>(ICA->getOperand(1)) &&
                  (ICA->getOperand(1) == TrueValC ||
                   ICA->getOperand(1) == FalseValC) &&
                  isOneBitSet(cast<ConstantInt>(ICA->getOperand(1)))) {
                // Okay, now we know that everything is set up, we just don't
                // know whether we have a icmp_ne or icmp_eq and whether the
                // true or false val is the zero.
                bool ShouldNotVal = !TrueValC->isZero();
                ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
                Value *V = ICA;
                if (ShouldNotVal)
                  V = InsertNewInstBefore(BinaryOperator::Create(
                                 Instruction::Xor, V, ICA->getOperand(1)), SI);
                return ReplaceInstUsesWith(SI, V);
              }
      }
    }

  // See if we are selecting two values based on a comparison of the two values.
  if (FCmpInst *FCI = dyn_cast<FCmpInst>(CondVal)) {
    if (FCI->getOperand(0) == TrueVal && FCI->getOperand(1) == FalseVal) {
      // Transform (X == Y) ? X : Y  -> Y
      if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
        // This is not safe in general for floating point:
        // consider X== -0, Y== +0.
        // It becomes safe if either operand is a nonzero constant.
        ConstantFP *CFPt, *CFPf;
        if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
             !CFPt->getValueAPF().isZero()) ||
            ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
             !CFPf->getValueAPF().isZero()))
          return ReplaceInstUsesWith(SI, FalseVal);
      }
      // Transform (X != Y) ? X : Y  -> X
      if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
        return ReplaceInstUsesWith(SI, TrueVal);
      // NOTE: if we wanted to, this is where to detect MIN/MAX
    } else if (FCI->getOperand(0) == FalseVal && FCI->getOperand(1) == TrueVal){
      // Transform (X == Y) ? Y : X  -> X
      if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
        // This is not safe in general for floating point:
        // consider X== -0, Y== +0.
        // It becomes safe if either operand is a nonzero constant.
        ConstantFP *CFPt, *CFPf;
        if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
             !CFPt->getValueAPF().isZero()) ||
            ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
             !CFPf->getValueAPF().isZero()))
          return ReplaceInstUsesWith(SI, FalseVal);
      }
      // Transform (X != Y) ? Y : X  -> Y
      if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
        return ReplaceInstUsesWith(SI, TrueVal);
      // NOTE: if we wanted to, this is where to detect MIN/MAX
    }
    // NOTE: if we wanted to, this is where to detect ABS
  }

  // See if we are selecting two values based on a comparison of the two values.
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(CondVal))
    if (Instruction *Result = visitSelectInstWithICmp(SI, ICI))
      return Result;

  if (Instruction *TI = dyn_cast<Instruction>(TrueVal))
    if (Instruction *FI = dyn_cast<Instruction>(FalseVal))
      if (TI->hasOneUse() && FI->hasOneUse()) {
        Instruction *AddOp = 0, *SubOp = 0;

        // Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
        if (TI->getOpcode() == FI->getOpcode())
          if (Instruction *IV = FoldSelectOpOp(SI, TI, FI))
            return IV;

        // Turn select C, (X+Y), (X-Y) --> (X+(select C, Y, (-Y))).  This is
        // even legal for FP.
        if ((TI->getOpcode() == Instruction::Sub &&
             FI->getOpcode() == Instruction::Add) ||
            (TI->getOpcode() == Instruction::FSub &&
             FI->getOpcode() == Instruction::FAdd)) {
          AddOp = FI; SubOp = TI;
        } else if ((FI->getOpcode() == Instruction::Sub &&
                    TI->getOpcode() == Instruction::Add) ||
                   (FI->getOpcode() == Instruction::FSub &&
                    TI->getOpcode() == Instruction::FAdd)) {
          AddOp = TI; SubOp = FI;
        }

        if (AddOp) {
          Value *OtherAddOp = 0;
          if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
            OtherAddOp = AddOp->getOperand(1);
          } else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
            OtherAddOp = AddOp->getOperand(0);
          }

          if (OtherAddOp) {
            // So at this point we know we have (Y -> OtherAddOp):
            //        select C, (add X, Y), (sub X, Z)
            Value *NegVal;  // Compute -Z
            if (Constant *C = dyn_cast<Constant>(SubOp->getOperand(1))) {
              NegVal = ConstantExpr::getNeg(C);
            } else {
              NegVal = InsertNewInstBefore(
                   BinaryOperator::CreateNeg(SubOp->getOperand(1), "tmp"), SI);
            }

            Value *NewTrueOp = OtherAddOp;
            Value *NewFalseOp = NegVal;
            if (AddOp != TI)
              std::swap(NewTrueOp, NewFalseOp);
            Instruction *NewSel =
              SelectInst::Create(CondVal, NewTrueOp,
                                 NewFalseOp, SI.getName() + ".p");

            NewSel = InsertNewInstBefore(NewSel, SI);
            return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
          }
        }
      }

  // See if we can fold the select into one of our operands.
  if (SI.getType()->isInteger()) {
    Instruction *FoldI = FoldSelectIntoOp(SI, TrueVal, FalseVal);
    if (FoldI)
      return FoldI;
  }

  if (BinaryOperator::isNot(CondVal)) {
    SI.setOperand(0, BinaryOperator::getNotArgument(CondVal));
    SI.setOperand(1, FalseVal);
    SI.setOperand(2, TrueVal);
    return &SI;
  }

  return 0;
}
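// For reference, the boolean-select canonicalizations above behave roughly as
// (operand names are placeholders):
//   select i1 %B, i1 true,  i1 %C  -->  or  i1 %B, %C
//   select i1 %B, i1 false, i1 %C  -->  and i1 (not %B), %C
//   select i1 %C, i32 1, i32 0     -->  zext i1 %C to i32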
/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    break;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the alignment
    // of the global.
    if (!GV->isDeclaration()) {
      if (GV->getAlignment() >= PrefAlign)
        Align = GV->getAlignment();
      else {
        GV->setAlignment(PrefAlign);
        Align = PrefAlign;
      }
    }
  } else if (AllocationInst *AI = dyn_cast<AllocationInst>(V)) {
    // If there is a requested alignment and if this is an alloca, round up.  We
    // don't do this for malloc, because some systems can't respect the request.
    if (isa<AllocaInst>(AI)) {
      if (AI->getAlignment() >= PrefAlign)
        Align = AI->getAlignment();
      else {
        AI->setAlignment(PrefAlign);
        Align = PrefAlign;
      }
    }
  }

  return Align;
}
/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0.  If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
                      sizeof(PrefAlign) * CHAR_BIT;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();
  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}
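// Sketch of the computation above: the alignment is derived from the number of
// low bits of the pointer that are known to be zero.  For example, if
// ComputeMaskedBits proves the low 4 bits of a pointer are zero, the value is
// at least 16-byte aligned; if PrefAlign asks for more, EnforceKnownAlignment
// may bump the alignment of a global or alloca we control.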
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for intrinsic.  See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  const Type *NewPtrTy =
    PointerType::getUnqual(IntegerType::get(*Context, Size<<3));

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store rather than
  // an i64 load+store, here because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than the
  // integer datatype.
  if (Value *Op = getBitCastOperand(MI->getOperand(1))) {
    const Type *SrcETy = cast<PointerType>(Op->getType())->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType())
        NewPtrTy = PointerType::getUnqual(SrcETy);
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
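// Roughly, for a small constant-length copy the code above turns a memcpy or
// memmove of 8 bytes into a single typed load/store pair, e.g. (schematic IR,
// placeholder names):
//   %tmp = load i64* %src.cast, align 8
//   store i64 %tmp, i64* %dst.cast, align 8
// with the intrinsic's length then set to 0 so it is erased on a later pass.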
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || FillC->getType() != Type::getInt8Ty(*Context))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(*Context, Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}
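// Roughly, a small constant memset such as memset(%p, 0x2A, 4) becomes a
// single splatted store (schematic IR, placeholder names):
//   store i32 0x2A2A2A2A, i32* %p.cast, align 4
// The fill byte is replicated across the integer via the 0x0101...01 multiply
// above.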
9610 /// visitCallInst - CallInst simplification. This mostly only handles folding
9611 /// of intrinsic instructions. For normal calls, it allows visitCallSite to do
9612 /// the heavy lifting.
9614 Instruction
*InstCombiner::visitCallInst(CallInst
&CI
) {
9615 // If the caller function is nounwind, mark the call as nounwind, even if the
9617 if (CI
.getParent()->getParent()->doesNotThrow() &&
9618 !CI
.doesNotThrow()) {
9619 CI
.setDoesNotThrow();
9623 IntrinsicInst
*II
= dyn_cast
<IntrinsicInst
>(&CI
);
9624 if (!II
) return visitCallSite(&CI
);
9626 // Intrinsics cannot occur in an invoke, so handle them here instead of in
9628 if (MemIntrinsic
*MI
= dyn_cast
<MemIntrinsic
>(II
)) {
9629 bool Changed
= false;
9631 // memmove/cpy/set of zero bytes is a noop.
9632 if (Constant
*NumBytes
= dyn_cast
<Constant
>(MI
->getLength())) {
9633 if (NumBytes
->isNullValue()) return EraseInstFromFunction(CI
);
9635 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(NumBytes
))
9636 if (CI
->getZExtValue() == 1) {
9637 // Replace the instruction with just byte operations. We would
9638 // transform other cases to loads/stores, but we don't know if
9639 // alignment is sufficient.
9643 // If we have a memmove and the source operation is a constant global,
9644 // then the source and dest pointers can't alias, so we can change this
9645 // into a call to memcpy.
9646 if (MemMoveInst
*MMI
= dyn_cast
<MemMoveInst
>(MI
)) {
9647 if (GlobalVariable
*GVSrc
= dyn_cast
<GlobalVariable
>(MMI
->getSource()))
9648 if (GVSrc
->isConstant()) {
9649 Module
*M
= CI
.getParent()->getParent()->getParent();
9650 Intrinsic::ID MemCpyID
= Intrinsic::memcpy
;
9652 Tys
[0] = CI
.getOperand(3)->getType();
9654 Intrinsic::getDeclaration(M
, MemCpyID
, Tys
, 1));
9658 // memmove(x,x,size) -> noop.
9659 if (MMI
->getSource() == MMI
->getDest())
9660 return EraseInstFromFunction(CI
);
9663 // If we can determine a pointer alignment that is bigger than currently
9664 // set, update the alignment.
9665 if (isa
<MemTransferInst
>(MI
)) {
9666 if (Instruction
*I
= SimplifyMemTransfer(MI
))
9668 } else if (MemSetInst
*MSI
= dyn_cast
<MemSetInst
>(MI
)) {
9669 if (Instruction
*I
= SimplifyMemSet(MSI
))
9673 if (Changed
) return II
;
9676 switch (II
->getIntrinsicID()) {
9678 case Intrinsic::bswap
:
9679 // bswap(bswap(x)) -> x
9680 if (IntrinsicInst
*Operand
= dyn_cast
<IntrinsicInst
>(II
->getOperand(1)))
9681 if (Operand
->getIntrinsicID() == Intrinsic::bswap
)
9682 return ReplaceInstUsesWith(CI
, Operand
->getOperand(1));
9684 case Intrinsic::ppc_altivec_lvx
:
9685 case Intrinsic::ppc_altivec_lvxl
:
9686 case Intrinsic::x86_sse_loadu_ps
:
9687 case Intrinsic::x86_sse2_loadu_pd
:
9688 case Intrinsic::x86_sse2_loadu_dq
:
9689 // Turn PPC lvx -> load if the pointer is known aligned.
9690 // Turn X86 loadups -> load if the pointer is known aligned.
9691 if (GetOrEnforceKnownAlignment(II
->getOperand(1), 16) >= 16) {
9692 Value
*Ptr
= Builder
->CreateBitCast(II
->getOperand(1),
9693 PointerType::getUnqual(II
->getType()));
9694 return new LoadInst(Ptr
);
9697 case Intrinsic::ppc_altivec_stvx
:
9698 case Intrinsic::ppc_altivec_stvxl
:
9699 // Turn stvx -> store if the pointer is known aligned.
9700 if (GetOrEnforceKnownAlignment(II
->getOperand(2), 16) >= 16) {
9701 const Type
*OpPtrTy
=
9702 PointerType::getUnqual(II
->getOperand(1)->getType());
9703 Value
*Ptr
= Builder
->CreateBitCast(II
->getOperand(2), OpPtrTy
);
9704 return new StoreInst(II
->getOperand(1), Ptr
);
9707 case Intrinsic::x86_sse_storeu_ps
:
9708 case Intrinsic::x86_sse2_storeu_pd
:
9709 case Intrinsic::x86_sse2_storeu_dq
:
9710 // Turn X86 storeu -> store if the pointer is known aligned.
9711 if (GetOrEnforceKnownAlignment(II
->getOperand(1), 16) >= 16) {
9712 const Type
*OpPtrTy
=
9713 PointerType::getUnqual(II
->getOperand(2)->getType());
9714 Value
*Ptr
= Builder
->CreateBitCast(II
->getOperand(1), OpPtrTy
);
9715 return new StoreInst(II
->getOperand(2), Ptr
);
9719 case Intrinsic::x86_sse_cvttss2si
: {
9720 // These intrinsics only demands the 0th element of its input vector. If
9721 // we can simplify the input based on that, do so now.
9723 cast
<VectorType
>(II
->getOperand(1)->getType())->getNumElements();
9724 APInt
DemandedElts(VWidth
, 1);
9725 APInt
UndefElts(VWidth
, 0);
9726 if (Value
*V
= SimplifyDemandedVectorElts(II
->getOperand(1), DemandedElts
,
9728 II
->setOperand(1, V
);
9734 case Intrinsic::ppc_altivec_vperm
:
9735 // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
9736 if (ConstantVector
*Mask
= dyn_cast
<ConstantVector
>(II
->getOperand(3))) {
9737 assert(Mask
->getNumOperands() == 16 && "Bad type for intrinsic!");
9739 // Check that all of the elements are integer constants or undefs.
9740 bool AllEltsOk
= true;
9741 for (unsigned i
= 0; i
!= 16; ++i
) {
9742 if (!isa
<ConstantInt
>(Mask
->getOperand(i
)) &&
9743 !isa
<UndefValue
>(Mask
->getOperand(i
))) {
9750 // Cast the input vectors to byte vectors.
9751 Value
*Op0
= Builder
->CreateBitCast(II
->getOperand(1), Mask
->getType());
9752 Value
*Op1
= Builder
->CreateBitCast(II
->getOperand(2), Mask
->getType());
9753 Value
*Result
= UndefValue::get(Op0
->getType());
9755 // Only extract each element once.
9756 Value
*ExtractedElts
[32];
9757 memset(ExtractedElts
, 0, sizeof(ExtractedElts
));
9759 for (unsigned i
= 0; i
!= 16; ++i
) {
9760 if (isa
<UndefValue
>(Mask
->getOperand(i
)))
9762 unsigned Idx
=cast
<ConstantInt
>(Mask
->getOperand(i
))->getZExtValue();
9763 Idx
&= 31; // Match the hardware behavior.
9765 if (ExtractedElts
[Idx
] == 0) {
9766 ExtractedElts
[Idx
] =
9767 Builder
->CreateExtractElement(Idx
< 16 ? Op0
: Op1
,
9768 ConstantInt::get(Type::getInt32Ty(*Context
), Idx
&15, false),
9772 // Insert this value into the result vector.
9773 Result
= Builder
->CreateInsertElement(Result
, ExtractedElts
[Idx
],
9774 ConstantInt::get(Type::getInt32Ty(*Context
), i
, false),
9777 return CastInst::Create(Instruction::BitCast
, Result
, CI
.getType());
9782 case Intrinsic::stackrestore
: {
9783 // If the save is right next to the restore, remove the restore. This can
9784 // happen when variable allocas are DCE'd.
9785 if (IntrinsicInst
*SS
= dyn_cast
<IntrinsicInst
>(II
->getOperand(1))) {
9786 if (SS
->getIntrinsicID() == Intrinsic::stacksave
) {
9787 BasicBlock::iterator BI
= SS
;
9789 return EraseInstFromFunction(CI
);
9793 // Scan down this block to see if there is another stack restore in the
9794 // same block without an intervening call/alloca.
9795 BasicBlock::iterator BI
= II
;
9796 TerminatorInst
*TI
= II
->getParent()->getTerminator();
9797 bool CannotRemove
= false;
9798 for (++BI
; &*BI
!= TI
; ++BI
) {
9799 if (isa
<AllocaInst
>(BI
)) {
9800 CannotRemove
= true;
9803 if (CallInst
*BCI
= dyn_cast
<CallInst
>(BI
)) {
9804 if (IntrinsicInst
*II
= dyn_cast
<IntrinsicInst
>(BCI
)) {
9805 // If there is a stackrestore below this one, remove this one.
9806 if (II
->getIntrinsicID() == Intrinsic::stackrestore
)
9807 return EraseInstFromFunction(CI
);
9808 // Otherwise, ignore the intrinsic.
9810 // If we found a non-intrinsic call, we can't remove the stack
9812 CannotRemove
= true;
9818 // If the stack restore is in a return/unwind block and if there are no
9819 // allocas or calls between the restore and the return, nuke the restore.
9820 if (!CannotRemove
&& (isa
<ReturnInst
>(TI
) || isa
<UnwindInst
>(TI
)))
9821 return EraseInstFromFunction(CI
);
9826 return visitCallSite(II
);
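// A few of the intrinsic folds handled above, shown schematically:
//   bswap(bswap(x))                                   --> x
//   x86 loadu / ppc lvx on a 16-byte-aligned pointer  --> plain load
//   x86 storeu / ppc stvx to a 16-byte-aligned pointer --> plain store
//   stackrestore immediately after its stacksave       --> removed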
// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}
/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const unsigned ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    if (CalleeF->getCallingConv() != CS.getCallingConv()) {
      Instruction *OldCall = CS.getInstruction();
      // If the call and callee calling conventions don't match, this call must
      // be unreachable, as the call is undefined.
      new StoreInst(ConstantInt::getTrue(*Context),
              UndefValue::get(PointerType::getUnqual(Type::getInt1Ty(*Context))),
                    OldCall);
      if (!OldCall->use_empty())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))   // Not worth removing an invoke here.
        return EraseInstFromFunction(*OldCall);
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(*Context),
              UndefValue::get(PointerType::getUnqual(Type::getInt1Ty(*Context))),
                  CS.getInstruction());

    if (!CS.getInstruction()->use_empty())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(*Context), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  return Changed ? CS.getInstruction() : 0;
}
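// Schematic summary of the call-site cleanups above:
//   call through a null or undef callee     --> treated as unreachable (marked
//     via a store to undef), its uses replaced with undef;
//   callee reached through init_trampoline  --> rewritten as a direct call;
//   lossless casts in the varargs area      --> stripped;
//   inline asm call sites                   --> marked 'nounwind'.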
9936 // transformConstExprCastCall - If the callee is a constexpr cast of a function,
9937 // attempt to move the cast to the arguments of the call/invoke.
9939 bool InstCombiner::transformConstExprCastCall(CallSite CS
) {
9940 if (!isa
<ConstantExpr
>(CS
.getCalledValue())) return false;
9941 ConstantExpr
*CE
= cast
<ConstantExpr
>(CS
.getCalledValue());
9942 if (CE
->getOpcode() != Instruction::BitCast
||
9943 !isa
<Function
>(CE
->getOperand(0)))
9945 Function
*Callee
= cast
<Function
>(CE
->getOperand(0));
9946 Instruction
*Caller
= CS
.getInstruction();
9947 const AttrListPtr
&CallerPAL
= CS
.getAttributes();
9949 // Okay, this is a cast from a function to a different type. Unless doing so
9950 // would cause a type conversion of one of our arguments, change this call to
9951 // be a direct call with arguments casted to the appropriate types.
9953 const FunctionType
*FT
= Callee
->getFunctionType();
9954 const Type
*OldRetTy
= Caller
->getType();
9955 const Type
*NewRetTy
= FT
->getReturnType();
9957 if (isa
<StructType
>(NewRetTy
))
9958 return false; // TODO: Handle multiple return values.
9960 // Check to see if we are changing the return type...
9961 if (OldRetTy
!= NewRetTy
) {
9962 if (Callee
->isDeclaration() &&
9963 // Conversion is ok if changing from one pointer type to another or from
9964 // a pointer to an integer of the same size.
9965 !((isa
<PointerType
>(OldRetTy
) || !TD
||
9966 OldRetTy
== TD
->getIntPtrType(Caller
->getContext())) &&
9967 (isa
<PointerType
>(NewRetTy
) || !TD
||
9968 NewRetTy
== TD
->getIntPtrType(Caller
->getContext()))))
9969 return false; // Cannot transform this return value.
9971 if (!Caller
->use_empty() &&
9972 // void -> non-void is handled specially
9973 NewRetTy
!= Type::getVoidTy(*Context
) && !CastInst::isCastable(NewRetTy
, OldRetTy
))
9974 return false; // Cannot transform this return value.
9976 if (!CallerPAL
.isEmpty() && !Caller
->use_empty()) {
9977 Attributes RAttrs
= CallerPAL
.getRetAttributes();
9978 if (RAttrs
& Attribute::typeIncompatible(NewRetTy
))
9979 return false; // Attribute not compatible with transformed value.
9982 // If the callsite is an invoke instruction, and the return value is used by
9983 // a PHI node in a successor, we cannot change the return type of the call
9984 // because there is no place to put the cast instruction (without breaking
9985 // the critical edge). Bail out in this case.
9986 if (!Caller
->use_empty())
9987 if (InvokeInst
*II
= dyn_cast
<InvokeInst
>(Caller
))
9988 for (Value::use_iterator UI
= II
->use_begin(), E
= II
->use_end();
9990 if (PHINode
*PN
= dyn_cast
<PHINode
>(*UI
))
9991 if (PN
->getParent() == II
->getNormalDest() ||
9992 PN
->getParent() == II
->getUnwindDest())
9996 unsigned NumActualArgs
= unsigned(CS
.arg_end()-CS
.arg_begin());
9997 unsigned NumCommonArgs
= std::min(FT
->getNumParams(), NumActualArgs
);
9999 CallSite::arg_iterator AI
= CS
.arg_begin();
10000 for (unsigned i
= 0, e
= NumCommonArgs
; i
!= e
; ++i
, ++AI
) {
10001 const Type
*ParamTy
= FT
->getParamType(i
);
10002 const Type
*ActTy
= (*AI
)->getType();
10004 if (!CastInst::isCastable(ActTy
, ParamTy
))
10005 return false; // Cannot transform this parameter value.
10007 if (CallerPAL
.getParamAttributes(i
+ 1)
10008 & Attribute::typeIncompatible(ParamTy
))
10009 return false; // Attribute not compatible with transformed value.
10011 // Converting from one pointer type to another or between a pointer and an
10012 // integer of the same size is safe even if we do not have a body.
10013 bool isConvertible
= ActTy
== ParamTy
||
10014 (TD
&& ((isa
<PointerType
>(ParamTy
) ||
10015 ParamTy
== TD
->getIntPtrType(Caller
->getContext())) &&
10016 (isa
<PointerType
>(ActTy
) ||
10017 ActTy
== TD
->getIntPtrType(Caller
->getContext()))));
10018 if (Callee
->isDeclaration() && !isConvertible
) return false;
10021 if (FT
->getNumParams() < NumActualArgs
&& !FT
->isVarArg() &&
10022 Callee
->isDeclaration())
10023 return false; // Do not delete arguments unless we have a function body.
10025 if (FT
->getNumParams() < NumActualArgs
&& FT
->isVarArg() &&
10026 !CallerPAL
.isEmpty())
10027 // In this case we have more arguments than the new function type, but we
10028 // won't be dropping them. Check that these extra arguments have attributes
10029 // that are compatible with being a vararg call argument.
10030 for (unsigned i
= CallerPAL
.getNumSlots(); i
; --i
) {
10031 if (CallerPAL
.getSlot(i
- 1).Index
<= FT
->getNumParams())
10033 Attributes PAttrs
= CallerPAL
.getSlot(i
- 1).Attrs
;
10034 if (PAttrs
& Attribute::VarArgsIncompatible
)
10038 // Okay, we decided that this is a safe thing to do: go ahead and start
10039 // inserting cast instructions as necessary...
10040 std::vector
<Value
*> Args
;
10041 Args
.reserve(NumActualArgs
);
10042 SmallVector
<AttributeWithIndex
, 8> attrVec
;
10043 attrVec
.reserve(NumCommonArgs
);
10045 // Get any return attributes.
10046 Attributes RAttrs
= CallerPAL
.getRetAttributes();
10048 // If the return value is not being used, the type may not be compatible
10049 // with the existing attributes. Wipe out any problematic attributes.
10050 RAttrs
&= ~Attribute::typeIncompatible(NewRetTy
);
10052 // Add the new return attributes.
10054 attrVec
.push_back(AttributeWithIndex::get(0, RAttrs
));
10056 AI
= CS
.arg_begin();
10057 for (unsigned i
= 0; i
!= NumCommonArgs
; ++i
, ++AI
) {
10058 const Type
*ParamTy
= FT
->getParamType(i
);
10059 if ((*AI
)->getType() == ParamTy
) {
10060 Args
.push_back(*AI
);
10062 Instruction::CastOps opcode
= CastInst::getCastOpcode(*AI
,
10063 false, ParamTy
, false);
10064 Args
.push_back(Builder
->CreateCast(opcode
, *AI
, ParamTy
, "tmp"));
10067 // Add any parameter attributes.
10068 if (Attributes PAttrs
= CallerPAL
.getParamAttributes(i
+ 1))
10069 attrVec
.push_back(AttributeWithIndex::get(i
+ 1, PAttrs
));
10072 // If the function takes more arguments than the call was taking, add them
10074 for (unsigned i
= NumCommonArgs
; i
!= FT
->getNumParams(); ++i
)
10075 Args
.push_back(Constant::getNullValue(FT
->getParamType(i
)));
10077 // If we are removing arguments to the function, emit an obnoxious warning.
10078 if (FT
->getNumParams() < NumActualArgs
) {
10079 if (!FT
->isVarArg()) {
10080 errs() << "WARNING: While resolving call to function '"
10081 << Callee
->getName() << "' arguments were dropped!\n";
10083 // Add all of the arguments in their promoted form to the arg list.
10084 for (unsigned i
= FT
->getNumParams(); i
!= NumActualArgs
; ++i
, ++AI
) {
10085 const Type
*PTy
= getPromotedType((*AI
)->getType());
10086 if (PTy
!= (*AI
)->getType()) {
10087 // Must promote to pass through va_arg area!
10088 Instruction::CastOps opcode
=
10089 CastInst::getCastOpcode(*AI
, false, PTy
, false);
10090 Args
.push_back(Builder
->CreateCast(opcode
, *AI
, PTy
, "tmp"));
10092 Args
.push_back(*AI
);
10095 // Add any parameter attributes.
10096 if (Attributes PAttrs
= CallerPAL
.getParamAttributes(i
+ 1))
10097 attrVec
.push_back(AttributeWithIndex::get(i
+ 1, PAttrs
));
10102 if (Attributes FnAttrs
= CallerPAL
.getFnAttributes())
10103 attrVec
.push_back(AttributeWithIndex::get(~0, FnAttrs
));
10105 if (NewRetTy
== Type::getVoidTy(*Context
))
10106 Caller
->setName(""); // Void type should not have a name.
10108 const AttrListPtr
&NewCallerPAL
= AttrListPtr::get(attrVec
.begin(),
10112 if (InvokeInst
*II
= dyn_cast
<InvokeInst
>(Caller
)) {
10113 NC
= InvokeInst::Create(Callee
, II
->getNormalDest(), II
->getUnwindDest(),
10114 Args
.begin(), Args
.end(),
10115 Caller
->getName(), Caller
);
10116 cast
<InvokeInst
>(NC
)->setCallingConv(II
->getCallingConv());
10117 cast
<InvokeInst
>(NC
)->setAttributes(NewCallerPAL
);
10119 NC
= CallInst::Create(Callee
, Args
.begin(), Args
.end(),
10120 Caller
->getName(), Caller
);
10121 CallInst
*CI
= cast
<CallInst
>(Caller
);
10122 if (CI
->isTailCall())
10123 cast
<CallInst
>(NC
)->setTailCall();
10124 cast
<CallInst
>(NC
)->setCallingConv(CI
->getCallingConv());
10125 cast
<CallInst
>(NC
)->setAttributes(NewCallerPAL
);
10128 // Insert a cast of the return type as necessary.
10130 if (OldRetTy
!= NV
->getType() && !Caller
->use_empty()) {
10131 if (NV
->getType() != Type::getVoidTy(*Context
)) {
10132 Instruction::CastOps opcode
= CastInst::getCastOpcode(NC
, false,
10134 NV
= NC
= CastInst::Create(opcode
, NC
, OldRetTy
, "tmp");
10136 // If this is an invoke instruction, we should insert it after the first
10137 // non-phi, instruction in the normal successor block.
10138 if (InvokeInst
*II
= dyn_cast
<InvokeInst
>(Caller
)) {
10139 BasicBlock::iterator I
= II
->getNormalDest()->getFirstNonPHI();
10140 InsertNewInstBefore(NC
, *I
);
10142 // Otherwise, it's a call, just insert cast right after the call instr
10143 InsertNewInstBefore(NC
, *Caller
);
10145 Worklist
.AddUsersToWorkList(*Caller
);
10147 NV
= UndefValue::get(Caller
->getType());
10152 if (!Caller
->use_empty())
10153 Caller
->replaceAllUsesWith(NV
);
10155 EraseInstFromFunction(*Caller
);
10159 // transformCallThroughTrampoline - Turn a call to a function created by the
10160 // init_trampoline intrinsic into a direct call to the underlying function.
10162 Instruction
*InstCombiner::transformCallThroughTrampoline(CallSite CS
) {
10163 Value
*Callee
= CS
.getCalledValue();
10164 const PointerType
*PTy
= cast
<PointerType
>(Callee
->getType());
10165 const FunctionType
*FTy
= cast
<FunctionType
>(PTy
->getElementType());
10166 const AttrListPtr
&Attrs
= CS
.getAttributes();
10168 // If the call already has the 'nest' attribute somewhere then give up -
10169 // otherwise 'nest' would occur twice after splicing in the chain.
10170 if (Attrs
.hasAttrSomewhere(Attribute::Nest
))
10173 IntrinsicInst
*Tramp
=
10174 cast
<IntrinsicInst
>(cast
<BitCastInst
>(Callee
)->getOperand(0));
10176 Function
*NestF
= cast
<Function
>(Tramp
->getOperand(2)->stripPointerCasts());
10177 const PointerType
*NestFPTy
= cast
<PointerType
>(NestF
->getType());
10178 const FunctionType
*NestFTy
= cast
<FunctionType
>(NestFPTy
->getElementType());
10180 const AttrListPtr
&NestAttrs
= NestF
->getAttributes();
10181 if (!NestAttrs
.isEmpty()) {
10182 unsigned NestIdx
= 1;
10183 const Type
*NestTy
= 0;
10184 Attributes NestAttr
= Attribute::None
;
10186 // Look for a parameter marked with the 'nest' attribute.
10187 for (FunctionType::param_iterator I
= NestFTy
->param_begin(),
10188 E
= NestFTy
->param_end(); I
!= E
; ++NestIdx
, ++I
)
10189 if (NestAttrs
.paramHasAttr(NestIdx
, Attribute::Nest
)) {
10190 // Record the parameter type and any other attributes.
10192 NestAttr
= NestAttrs
.getParamAttributes(NestIdx
);
10197 Instruction
*Caller
= CS
.getInstruction();
10198 std::vector
<Value
*> NewArgs
;
10199 NewArgs
.reserve(unsigned(CS
.arg_end()-CS
.arg_begin())+1);
10201 SmallVector
<AttributeWithIndex
, 8> NewAttrs
;
10202 NewAttrs
.reserve(Attrs
.getNumSlots() + 1);
10204 // Insert the nest argument into the call argument list, which may
10205 // mean appending it. Likewise for attributes.
10207 // Add any result attributes.
10208 if (Attributes Attr
= Attrs
.getRetAttributes())
10209 NewAttrs
.push_back(AttributeWithIndex::get(0, Attr
));
10213 CallSite::arg_iterator I
= CS
.arg_begin(), E
= CS
.arg_end();
10215 if (Idx
== NestIdx
) {
10216 // Add the chain argument and attributes.
10217 Value
*NestVal
= Tramp
->getOperand(3);
10218 if (NestVal
->getType() != NestTy
)
10219 NestVal
= new BitCastInst(NestVal
, NestTy
, "nest", Caller
);
10220 NewArgs
.push_back(NestVal
);
10221 NewAttrs
.push_back(AttributeWithIndex::get(NestIdx
, NestAttr
));
10227 // Add the original argument and attributes.
10228 NewArgs
.push_back(*I
);
10229 if (Attributes Attr
= Attrs
.getParamAttributes(Idx
))
10231 (AttributeWithIndex::get(Idx
+ (Idx
>= NestIdx
), Attr
));
10237 // Add any function attributes.
10238 if (Attributes Attr
= Attrs
.getFnAttributes())
10239 NewAttrs
.push_back(AttributeWithIndex::get(~0, Attr
));
10241 // The trampoline may have been bitcast to a bogus type (FTy).
10242 // Handle this by synthesizing a new function type, equal to FTy
10243 // with the chain parameter inserted.
10245 std::vector
<const Type
*> NewTypes
;
10246 NewTypes
.reserve(FTy
->getNumParams()+1);
10248 // Insert the chain's type into the list of parameter types, which may
10249 // mean appending it.
10252 FunctionType::param_iterator I
= FTy
->param_begin(),
10253 E
= FTy
->param_end();
10256 if (Idx
== NestIdx
)
10257 // Add the chain's type.
10258 NewTypes
.push_back(NestTy
);
10263 // Add the original type.
10264 NewTypes
.push_back(*I
);
10270 // Replace the trampoline call with a direct call. Let the generic
10271 // code sort out any function type mismatches.
10272 FunctionType
*NewFTy
= FunctionType::get(FTy
->getReturnType(), NewTypes
,
10274 Constant
*NewCallee
=
10275 NestF
->getType() == PointerType::getUnqual(NewFTy
) ?
10276 NestF
: ConstantExpr::getBitCast(NestF
,
10277 PointerType::getUnqual(NewFTy
));
10278 const AttrListPtr
&NewPAL
= AttrListPtr::get(NewAttrs
.begin(),
10281 Instruction
*NewCaller
;
10282 if (InvokeInst
*II
= dyn_cast
<InvokeInst
>(Caller
)) {
10283 NewCaller
= InvokeInst::Create(NewCallee
,
10284 II
->getNormalDest(), II
->getUnwindDest(),
10285 NewArgs
.begin(), NewArgs
.end(),
10286 Caller
->getName(), Caller
);
10287 cast
<InvokeInst
>(NewCaller
)->setCallingConv(II
->getCallingConv());
10288 cast
<InvokeInst
>(NewCaller
)->setAttributes(NewPAL
);
10290 NewCaller
= CallInst::Create(NewCallee
, NewArgs
.begin(), NewArgs
.end(),
10291 Caller
->getName(), Caller
);
10292 if (cast
<CallInst
>(Caller
)->isTailCall())
10293 cast
<CallInst
>(NewCaller
)->setTailCall();
10294 cast
<CallInst
>(NewCaller
)->
10295 setCallingConv(cast
<CallInst
>(Caller
)->getCallingConv());
10296 cast
<CallInst
>(NewCaller
)->setAttributes(NewPAL
);
10298 if (Caller
->getType() != Type::getVoidTy(*Context
) && !Caller
->use_empty())
10299 Caller
->replaceAllUsesWith(NewCaller
);
10300 Caller
->eraseFromParent();
10301 Worklist
.Remove(Caller
);
10306 // Replace the trampoline call with a direct call. Since there is no 'nest'
10307 // parameter, there is no need to adjust the argument list. Let the generic
10308 // code sort out any function type mismatches.
10309 Constant
*NewCallee
=
10310 NestF
->getType() == PTy
? NestF
:
10311 ConstantExpr::getBitCast(NestF
, PTy
);
10312 CS
.setCalledFunction(NewCallee
);
10313 return CS
.getInstruction();
/// FoldPHIArgBinOpIntoPHI - If we have something like phi [add (a,b), add(c,d)]
/// and if a/b/c/d and the add's all have a single use, turn this into two phi's
/// and a single binop.
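/// Roughly, with placeholder value names:
///   %x = phi [ add(%a, %b), BB0 ], [ add(%c, %d), BB1 ]
/// becomes
///   %lhs = phi [ %a, BB0 ], [ %c, BB1 ]
///   %rhs = phi [ %b, BB0 ], [ %d, BB1 ]
///   %x   = add %lhs, %rhs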
10319 Instruction
*InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode
&PN
) {
10320 Instruction
*FirstInst
= cast
<Instruction
>(PN
.getIncomingValue(0));
10321 assert(isa
<BinaryOperator
>(FirstInst
) || isa
<CmpInst
>(FirstInst
));
10322 unsigned Opc
= FirstInst
->getOpcode();
10323 Value
*LHSVal
= FirstInst
->getOperand(0);
10324 Value
*RHSVal
= FirstInst
->getOperand(1);
10326 const Type
*LHSType
= LHSVal
->getType();
10327 const Type
*RHSType
= RHSVal
->getType();
10329 // Scan to see if all operands are the same opcode, all have one use, and all
10330 // kill their operands (i.e. the operands have one use).
10331 for (unsigned i
= 1; i
!= PN
.getNumIncomingValues(); ++i
) {
10332 Instruction
*I
= dyn_cast
<Instruction
>(PN
.getIncomingValue(i
));
10333 if (!I
|| I
->getOpcode() != Opc
|| !I
->hasOneUse() ||
10334 // Verify type of the LHS matches so we don't fold cmp's of different
10335 // types or GEP's with different index types.
10336 I
->getOperand(0)->getType() != LHSType
||
10337 I
->getOperand(1)->getType() != RHSType
)
10340 // If they are CmpInst instructions, check their predicates
10341 if (Opc
== Instruction::ICmp
|| Opc
== Instruction::FCmp
)
10342 if (cast
<CmpInst
>(I
)->getPredicate() !=
10343 cast
<CmpInst
>(FirstInst
)->getPredicate())
10346 // Keep track of which operand needs a phi node.
10347 if (I
->getOperand(0) != LHSVal
) LHSVal
= 0;
10348 if (I
->getOperand(1) != RHSVal
) RHSVal
= 0;
10351 // Otherwise, this is safe to transform!
10353 Value
*InLHS
= FirstInst
->getOperand(0);
10354 Value
*InRHS
= FirstInst
->getOperand(1);
10355 PHINode
*NewLHS
= 0, *NewRHS
= 0;
10357 NewLHS
= PHINode::Create(LHSType
,
10358 FirstInst
->getOperand(0)->getName() + ".pn");
10359 NewLHS
->reserveOperandSpace(PN
.getNumOperands()/2);
10360 NewLHS
->addIncoming(InLHS
, PN
.getIncomingBlock(0));
10361 InsertNewInstBefore(NewLHS
, PN
);
10366 NewRHS
= PHINode::Create(RHSType
,
10367 FirstInst
->getOperand(1)->getName() + ".pn");
10368 NewRHS
->reserveOperandSpace(PN
.getNumOperands()/2);
10369 NewRHS
->addIncoming(InRHS
, PN
.getIncomingBlock(0));
10370 InsertNewInstBefore(NewRHS
, PN
);
10374 // Add all operands to the new PHIs.
10375 if (NewLHS
|| NewRHS
) {
10376 for (unsigned i
= 1, e
= PN
.getNumIncomingValues(); i
!= e
; ++i
) {
10377 Instruction
*InInst
= cast
<Instruction
>(PN
.getIncomingValue(i
));
10379 Value
*NewInLHS
= InInst
->getOperand(0);
10380 NewLHS
->addIncoming(NewInLHS
, PN
.getIncomingBlock(i
));
10383 Value
*NewInRHS
= InInst
->getOperand(1);
10384 NewRHS
->addIncoming(NewInRHS
, PN
.getIncomingBlock(i
));
10389 if (BinaryOperator
*BinOp
= dyn_cast
<BinaryOperator
>(FirstInst
))
10390 return BinaryOperator::Create(BinOp
->getOpcode(), LHSVal
, RHSVal
);
10391 CmpInst
*CIOp
= cast
<CmpInst
>(FirstInst
);
10392 return CmpInst::Create(CIOp
->getOpcode(), CIOp
->getPredicate(),
10396 Instruction
*InstCombiner::FoldPHIArgGEPIntoPHI(PHINode
&PN
) {
10397 GetElementPtrInst
*FirstInst
=cast
<GetElementPtrInst
>(PN
.getIncomingValue(0));
10399 SmallVector
<Value
*, 16> FixedOperands(FirstInst
->op_begin(),
10400 FirstInst
->op_end());
10401 // This is true if all GEP bases are allocas and if all indices into them are
10403 bool AllBasePointersAreAllocas
= true;
10405 // Scan to see if all operands are the same opcode, all have one use, and all
10406 // kill their operands (i.e. the operands have one use).
10407 for (unsigned i
= 1; i
!= PN
.getNumIncomingValues(); ++i
) {
10408 GetElementPtrInst
*GEP
= dyn_cast
<GetElementPtrInst
>(PN
.getIncomingValue(i
));
10409 if (!GEP
|| !GEP
->hasOneUse() || GEP
->getType() != FirstInst
->getType() ||
10410 GEP
->getNumOperands() != FirstInst
->getNumOperands())
10413 // Keep track of whether or not all GEPs are of alloca pointers.
10414 if (AllBasePointersAreAllocas
&&
10415 (!isa
<AllocaInst
>(GEP
->getOperand(0)) ||
10416 !GEP
->hasAllConstantIndices()))
10417 AllBasePointersAreAllocas
= false;
10419 // Compare the operand lists.
10420 for (unsigned op
= 0, e
= FirstInst
->getNumOperands(); op
!= e
; ++op
) {
10421 if (FirstInst
->getOperand(op
) == GEP
->getOperand(op
))
10424 // Don't merge two GEPs when two operands differ (introducing phi nodes)
10425 // if one of the PHIs has a constant for the index. The index may be
10426 // substantially cheaper to compute for the constants, so making it a
10427 // variable index could pessimize the path. This also handles the case
10428 // for struct indices, which must always be constant.
10429 if (isa
<ConstantInt
>(FirstInst
->getOperand(op
)) ||
10430 isa
<ConstantInt
>(GEP
->getOperand(op
)))
10433 if (FirstInst
->getOperand(op
)->getType() !=GEP
->getOperand(op
)->getType())
10435 FixedOperands
[op
] = 0; // Needs a PHI.
10439 // If all of the base pointers of the PHI'd GEPs are from allocas, don't
10440 // bother doing this transformation. At best, this will just save a bit of
10441 // offset calculation, but all the predecessors will have to materialize the
10442 // stack address into a register anyway. We'd actually rather *clone* the
10443 // load up into the predecessors so that we have a load of a gep of an alloca,
10444 // which can usually all be folded into the load.
10445 if (AllBasePointersAreAllocas
)
10448 // Otherwise, this is safe to transform. Insert PHI nodes for each operand
10449 // that is variable.
10450 SmallVector
<PHINode
*, 16> OperandPhis(FixedOperands
.size());
10452 bool HasAnyPHIs
= false;
10453 for (unsigned i
= 0, e
= FixedOperands
.size(); i
!= e
; ++i
) {
10454 if (FixedOperands
[i
]) continue; // operand doesn't need a phi.
10455 Value
*FirstOp
= FirstInst
->getOperand(i
);
10456 PHINode
*NewPN
= PHINode::Create(FirstOp
->getType(),
10457 FirstOp
->getName()+".pn");
10458 InsertNewInstBefore(NewPN
, PN
);
10460 NewPN
->reserveOperandSpace(e
);
10461 NewPN
->addIncoming(FirstOp
, PN
.getIncomingBlock(0));
10462 OperandPhis
[i
] = NewPN
;
10463 FixedOperands
[i
] = NewPN
;
10468 // Add all operands to the new PHIs.
10470 for (unsigned i
= 1, e
= PN
.getNumIncomingValues(); i
!= e
; ++i
) {
10471 GetElementPtrInst
*InGEP
=cast
<GetElementPtrInst
>(PN
.getIncomingValue(i
));
10472 BasicBlock
*InBB
= PN
.getIncomingBlock(i
);
10474 for (unsigned op
= 0, e
= OperandPhis
.size(); op
!= e
; ++op
)
10475 if (PHINode
*OpPhi
= OperandPhis
[op
])
10476 OpPhi
->addIncoming(InGEP
->getOperand(op
), InBB
);
10480 Value
*Base
= FixedOperands
[0];
10481 return cast
<GEPOperator
>(FirstInst
)->isInBounds() ?
10482 GetElementPtrInst::CreateInBounds(Base
, FixedOperands
.begin()+1,
10483 FixedOperands
.end()) :
10484 GetElementPtrInst::Create(Base
, FixedOperands
.begin()+1,
10485 FixedOperands
.end());
/// isSafeAndProfitableToSinkLoad - Return true if we know that it is safe to
/// sink the load out of the block that defines it.  This means that it must be
/// obvious the value of the load is not changed from the point of the load to
/// the end of the block it is in.
///
/// Finally, it is safe, but not profitable, to sink a load targetting a
/// non-address-taken alloca.  Doing so will cause us to not promote the alloca
/// to a register.
static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
  BasicBlock::iterator BBI = L, E = L->getParent()->end();

  for (++BBI; BBI != E; ++BBI)
    if (BBI->mayWriteToMemory())
      return false;

  // Check for non-address taken alloca.  If not address-taken already, it isn't
  // profitable to do this xform.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
    bool isAddressTaken = false;
    for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
         UI != E; ++UI) {
      if (isa<LoadInst>(UI)) continue;
      if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
        // If storing TO the alloca, then the address isn't taken.
        if (SI->getOperand(1) == AI) continue;
      }
      isAddressTaken = true;
      break;
    }

    if (!isAddressTaken && AI->isStaticAlloca())
      return false;
  }

  // If this load is a load from a GEP with a constant offset from an alloca,
  // then we don't want to sink it.  In its present form, it will be
  // load [constant stack offset].  Sinking it will cause us to have to
  // materialize the stack addresses in each predecessor in a register only to
  // do a shared load from register in the successor.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
    if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
      if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
        return false;

  return true;
}
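// Note on the alloca check above: if the only uses of the alloca are loads and
// stores *to* it, its address is never taken; keeping the load in place lets
// mem2reg later promote the alloca, which is usually better than sinking the
// load.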
// FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
// operator and they all are only used by the PHI, PHI together their
// inputs, and do the operation once, to the result of the PHI.
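// Roughly, with placeholder names: if every incoming value is the same
// single-use operation, e.g.
//   %x = phi [ trunc(%a), BB0 ], [ trunc(%b), BB1 ]
// the operation is hoisted past the PHI:
//   %in = phi [ %a, BB0 ], [ %b, BB1 ]
//   %x  = trunc %in
// The same idea applies to casts, binops with a constant RHS, loads, and GEPs
// per the dispatch below.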
10540 Instruction
*InstCombiner::FoldPHIArgOpIntoPHI(PHINode
&PN
) {
10541 Instruction
*FirstInst
= cast
<Instruction
>(PN
.getIncomingValue(0));
10543 // Scan the instruction, looking for input operations that can be folded away.
10544 // If all input operands to the phi are the same instruction (e.g. a cast from
10545 // the same type or "+42") we can pull the operation through the PHI, reducing
10546 // code size and simplifying code.
10547 Constant
*ConstantOp
= 0;
10548 const Type
*CastSrcTy
= 0;
10549 bool isVolatile
= false;
10550 if (isa
<CastInst
>(FirstInst
)) {
10551 CastSrcTy
= FirstInst
->getOperand(0)->getType();
10552 } else if (isa
<BinaryOperator
>(FirstInst
) || isa
<CmpInst
>(FirstInst
)) {
10553 // Can fold binop, compare or shift here if the RHS is a constant,
10554 // otherwise call FoldPHIArgBinOpIntoPHI.
10555 ConstantOp
= dyn_cast
<Constant
>(FirstInst
->getOperand(1));
10556 if (ConstantOp
== 0)
10557 return FoldPHIArgBinOpIntoPHI(PN
);
10558 } else if (LoadInst
*LI
= dyn_cast
<LoadInst
>(FirstInst
)) {
10559 isVolatile
= LI
->isVolatile();
10560 // We can't sink the load if the loaded value could be modified between the
10561 // load and the PHI.
10562 if (LI
->getParent() != PN
.getIncomingBlock(0) ||
10563 !isSafeAndProfitableToSinkLoad(LI
))
10566 // If the PHI is of volatile loads and the load block has multiple
10567 // successors, sinking it would remove a load of the volatile value from
10568 // the path through the other successor.
10570 LI
->getParent()->getTerminator()->getNumSuccessors() != 1)
10573 } else if (isa
<GetElementPtrInst
>(FirstInst
)) {
10574 return FoldPHIArgGEPIntoPHI(PN
);
10576 return 0; // Cannot fold this operation.
10579 // Check to see if all arguments are the same operation.
10580 for (unsigned i
= 1, e
= PN
.getNumIncomingValues(); i
!= e
; ++i
) {
10581 if (!isa
<Instruction
>(PN
.getIncomingValue(i
))) return 0;
10582 Instruction
*I
= cast
<Instruction
>(PN
.getIncomingValue(i
));
10583 if (!I
->hasOneUse() || !I
->isSameOperationAs(FirstInst
))
10586 if (I
->getOperand(0)->getType() != CastSrcTy
)
10587 return 0; // Cast operation must match.
10588 } else if (LoadInst
*LI
= dyn_cast
<LoadInst
>(I
)) {
10589 // We can't sink the load if the loaded value could be modified between
10590 // the load and the PHI.
10591 if (LI
->isVolatile() != isVolatile
||
10592 LI
->getParent() != PN
.getIncomingBlock(i
) ||
10593 !isSafeAndProfitableToSinkLoad(LI
))
10596 // If the PHI is of volatile loads and the load block has multiple
10597 // successors, sinking it would remove a load of the volatile value from
10598 // the path through the other successor.
10600 LI
->getParent()->getTerminator()->getNumSuccessors() != 1)
10603 } else if (I
->getOperand(1) != ConstantOp
) {
10608 // Okay, they are all the same operation. Create a new PHI node of the
10609 // correct type, and PHI together all of the LHS's of the instructions.
10610 PHINode
*NewPN
= PHINode::Create(FirstInst
->getOperand(0)->getType(),
10611 PN
.getName()+".in");
10612 NewPN
->reserveOperandSpace(PN
.getNumOperands()/2);
10614 Value
*InVal
= FirstInst
->getOperand(0);
10615 NewPN
->addIncoming(InVal
, PN
.getIncomingBlock(0));
10617 // Add all operands to the new PHI.
10618 for (unsigned i
= 1, e
= PN
.getNumIncomingValues(); i
!= e
; ++i
) {
10619 Value
*NewInVal
= cast
<Instruction
>(PN
.getIncomingValue(i
))->getOperand(0);
10620 if (NewInVal
!= InVal
)
10622 NewPN
->addIncoming(NewInVal
, PN
.getIncomingBlock(i
));
10627 // The new PHI unions all of the same values together. This is really
10628 // common, so we handle it intelligently here for compile-time speed.
10632 InsertNewInstBefore(NewPN
, PN
);
10636 // Insert and return the new operation.
10637 if (CastInst
* FirstCI
= dyn_cast
<CastInst
>(FirstInst
))
10638 return CastInst::Create(FirstCI
->getOpcode(), PhiVal
, PN
.getType());
10639 if (BinaryOperator
*BinOp
= dyn_cast
<BinaryOperator
>(FirstInst
))
10640 return BinaryOperator::Create(BinOp
->getOpcode(), PhiVal
, ConstantOp
);
10641 if (CmpInst
*CIOp
= dyn_cast
<CmpInst
>(FirstInst
))
10642 return CmpInst::Create(CIOp
->getOpcode(), CIOp
->getPredicate(),
10643 PhiVal
, ConstantOp
);
10644 assert(isa
<LoadInst
>(FirstInst
) && "Unknown operation");
10646 // If this was a volatile load that we are merging, make sure to loop through
10647 // and mark all the input loads as non-volatile. If we don't do this, we will
10648 // insert a new volatile load and the old ones will not be deletable.
10650 for (unsigned i
= 0, e
= PN
.getNumIncomingValues(); i
!= e
; ++i
)
10651 cast
<LoadInst
>(PN
.getIncomingValue(i
))->setVolatile(false);
10653 return new LoadInst(PhiVal
, "", isVolatile
);
/// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle
/// that is dead.
static bool DeadPHICycle(PHINode *PN,
                         SmallPtrSet<PHINode*, 16> &PotentiallyDeadPHIs) {
  if (PN->use_empty()) return true;
  if (!PN->hasOneUse()) return false;

  // Remember this node, and if we find the cycle, return.
  if (!PotentiallyDeadPHIs.insert(PN))
    return true;

  // Don't scan crazily complex things.
  if (PotentiallyDeadPHIs.size() == 16)
    return false;

  if (PHINode *PU = dyn_cast<PHINode>(PN->use_back()))
    return DeadPHICycle(PU, PotentiallyDeadPHIs);

  return false;
}
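// Example: a chain such as "%a = phi(%b); %b = phi(%a)" whose members are only
// used by each other forms a dead cycle; visitPHINode uses this helper to
// replace such a PHI with undef.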
/// PHIsEqualValue - Return true if this phi node is always equal to
/// NonPhiInVal.  This happens with mutually cyclic phi nodes like:
///   z = some value; x = phi (y, z); y = phi (x, z)
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
                           SmallPtrSet<PHINode*, 16> &ValueEqualPHIs) {
  // See if we already saw this PHI node.
  if (!ValueEqualPHIs.insert(PN))
    return true;

  // Don't scan crazily complex things.
  if (ValueEqualPHIs.size() == 16)
    return false;

  // Scan the operands to see if they are either phi nodes or are equal to
  // the value.
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *Op = PN->getIncomingValue(i);
    if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
      if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
        return false;
    } else if (Op != NonPhiInVal)
      return false;
  }

  return true;
}
// PHINode simplification
//
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
  // If LCSSA is around, don't mess with Phi nodes
  if (MustPreserveLCSSA) return 0;

  if (Value *V = PN.hasConstantValue())
    return ReplaceInstUsesWith(PN, V);

  // If all PHI operands are the same operation, pull them through the PHI,
  // reducing code size.
  if (isa<Instruction>(PN.getIncomingValue(0)) &&
      isa<Instruction>(PN.getIncomingValue(1)) &&
      cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
      cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
      // FIXME: The hasOneUse check will fail for PHIs that use the value more
      // than themselves more than once.
      PN.getIncomingValue(0)->hasOneUse())
    if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
      return Result;

  // If this is a trivial cycle in the PHI node graph, remove it.  Basically, if
  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
  // PHI)... break the cycle.
  if (PN.hasOneUse()) {
    Instruction *PHIUser = cast<Instruction>(PN.use_back());
    if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
      SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
      PotentiallyDeadPHIs.insert(&PN);
      if (DeadPHICycle(PU, PotentiallyDeadPHIs))
        return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }

    // If this phi has a single use, and if that use just computes a value for
    // the next iteration of a loop, delete the phi.  This occurs with unused
    // induction variables, e.g. "for (int j = 0; ; ++j);".  Detecting this
    // common case here is good because the only other things that catch this
    // are induction variable analysis (sometimes) and ADCE, which is only run
    // late.
    if (PHIUser->hasOneUse() &&
        (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
        PHIUser->use_back() == &PN) {
      return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }
  }

  // We sometimes end up with phi cycles that non-obviously end up being the
  // same value, for example:
  //   z = some value; x = phi (y, z); y = phi (x, z)
  // where the phi nodes don't necessarily need to be in the same block.  Do a
  // quick check to see if the PHI node only contains a single non-phi value, if
  // so, scan to see if the phi cycle is actually equal to that value.
  unsigned InValNo = 0, NumOperandVals = PN.getNumIncomingValues();
  // Scan for the first non-phi operand.
  while (InValNo != NumOperandVals &&
         isa<PHINode>(PN.getIncomingValue(InValNo)))
    ++InValNo;

  if (InValNo != NumOperandVals) {
    Value *NonPhiInVal = PN.getOperand(InValNo);

    // Scan the rest of the operands to see if there are any conflicts, if so
    // there is no need to recursively scan other phis.
    for (++InValNo; InValNo != NumOperandVals; ++InValNo) {
      Value *OpVal = PN.getIncomingValue(InValNo);
      if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
        break;
    }

    // If we scanned over all operands, then we have one unique value plus
    // phi values.  Scan PHI nodes to see if they all merge in each other or
    // the value.
    if (InValNo == NumOperandVals) {
      SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
      if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
        return ReplaceInstUsesWith(PN, NonPhiInVal);
    }
  }

  return 0;
}
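// Illustrative example (schematic IR, not from the original comments; block
// structure elided): the dead-PHI-cycle check above turns a cycle such as
//   %x = phi i32 [ 0, %entry ], [ %y, %loop2 ]
//   %y = phi i32 [ %x, %loop1 ]
// into undef when %x and %y have no uses other than each other, since no
// value produced by the cycle ever escapes it.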
Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  Value *PtrOp = GEP.getOperand(0);
  // Eliminate 'getelementptr %P, i32 0' and 'getelementptr %P', they are noops.
  if (GEP.getNumOperands() == 1)
    return ReplaceInstUsesWith(GEP, PtrOp);

  if (isa<UndefValue>(GEP.getOperand(0)))
    return ReplaceInstUsesWith(GEP, UndefValue::get(GEP.getType()));

  bool HasZeroPointerIndex = false;
  if (Constant *C = dyn_cast<Constant>(GEP.getOperand(1)))
    HasZeroPointerIndex = C->isNullValue();

  if (GEP.getNumOperands() == 2 && HasZeroPointerIndex)
    return ReplaceInstUsesWith(GEP, PtrOp);

  // Eliminate unneeded casts for indices.
  if (TD) {
    bool MadeChange = false;
    unsigned PtrSize = TD->getPointerSizeInBits();

    gep_type_iterator GTI = gep_type_begin(GEP);
    for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
         I != E; ++I, ++GTI) {
      if (!isa<SequentialType>(*GTI)) continue;

      // If we are using a wider index than needed for this platform, shrink it
      // to what we need.  If narrower, sign-extend it to what we need.  This
      // explicit cast can make subsequent optimizations more obvious.
      unsigned OpBits = cast<IntegerType>((*I)->getType())->getBitWidth();
      if (OpBits == PtrSize)
        continue;

      *I = Builder->CreateIntCast(*I, TD->getIntPtrType(GEP.getContext()), true);
      MadeChange = true;
    }
    if (MadeChange) return &GEP;
  }

  // Combine Indices - If the source pointer to this getelementptr instruction
  // is a getelementptr instruction, combine the indices of the two
  // getelementptr instructions into a single instruction.
  //
  if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
    // Note that if our source is a gep chain itself that we wait for that
    // chain to be resolved before we perform this transformation.  This
    // avoids us creating a TON of code in some cases.
    //
    if (GetElementPtrInst *SrcGEP =
          dyn_cast<GetElementPtrInst>(Src->getOperand(0)))
      if (SrcGEP->getNumOperands() == 2)
        return 0;   // Wait until our source is folded to completion.

    SmallVector<Value*, 8> Indices;

    // Find out whether the last index in the source GEP is a sequential idx.
    bool EndsWithSequential = false;
    for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
         I != E; ++I)
      EndsWithSequential = !isa<StructType>(*I);

    // Can we combine the two pointer arithmetics offsets?
    if (EndsWithSequential) {
      // Replace: gep (gep %P, long B), long A, ...
      // With:    T = long A+B; gep %P, T, ...
      //
      Value *Sum;
      Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
      Value *GO1 = GEP.getOperand(1);
      if (SO1 == Constant::getNullValue(SO1->getType())) {
        Sum = GO1;
      } else if (GO1 == Constant::getNullValue(GO1->getType())) {
        Sum = SO1;
      } else {
        // If they aren't the same type, then the input hasn't been processed
        // by the loop above yet (which canonicalizes sequential index types to
        // intptr_t).  Just avoid transforming this until the input has been
        // normalized.
        if (SO1->getType() != GO1->getType())
          return 0;
        Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
      }

      // Update the GEP in place if possible.
      if (Src->getNumOperands() == 2) {
        GEP.setOperand(0, Src->getOperand(0));
        GEP.setOperand(1, Sum);
        return &GEP;
      }
      Indices.append(Src->op_begin()+1, Src->op_end()-1);
      Indices.push_back(Sum);
      Indices.append(GEP.op_begin()+2, GEP.op_end());
    } else if (isa<Constant>(*GEP.idx_begin()) &&
               cast<Constant>(*GEP.idx_begin())->isNullValue() &&
               Src->getNumOperands() != 1) {
      // Otherwise we can do the fold if the first index of the GEP is a zero
      Indices.append(Src->op_begin()+1, Src->op_end());
      Indices.append(GEP.idx_begin()+1, GEP.idx_end());
    }

    if (!Indices.empty())
      return (cast<GEPOperator>(&GEP)->isInBounds() &&
              Src->isInBounds()) ?
        GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices.begin(),
                                          Indices.end(), GEP.getName()) :
        GetElementPtrInst::Create(Src->getOperand(0), Indices.begin(),
                                  Indices.end(), GEP.getName());
  }

  // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
  if (Value *X = getBitCastOperand(PtrOp)) {
    assert(isa<PointerType>(X->getType()) && "Must be cast from pointer");

    // If the input bitcast is actually "bitcast(bitcast(x))", then we don't
    // want to change the gep until the bitcasts are eliminated.
    if (getBitCastOperand(X)) {
      Worklist.AddValue(PtrOp);
      return 0;
    }

    // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
    // into     : GEP [10 x i8]* X, i32 0, ...
    //
    // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
    //           into     : GEP i8* X, ...
    //
    // This occurs when the program declares an array extern like "int X[];"
    if (HasZeroPointerIndex) {
      const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
      const PointerType *XTy = cast<PointerType>(X->getType());
      if (const ArrayType *CATy =
          dyn_cast<ArrayType>(CPTy->getElementType())) {
        // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
        if (CATy->getElementType() == XTy->getElementType()) {
          // -> GEP i8* X, ...
          SmallVector<Value*, 8> Indices(GEP.idx_begin()+1, GEP.idx_end());
          return cast<GEPOperator>(&GEP)->isInBounds() ?
            GetElementPtrInst::CreateInBounds(X, Indices.begin(), Indices.end(),
                                              GEP.getName()) :
            GetElementPtrInst::Create(X, Indices.begin(), Indices.end(),
                                      GEP.getName());
        }

        if (const ArrayType *XATy = dyn_cast<ArrayType>(XTy->getElementType())){
          // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
          if (CATy->getElementType() == XATy->getElementType()) {
            // -> GEP [10 x i8]* X, i32 0, ...
            // At this point, we know that the cast source type is a pointer
            // to an array of the same type as the destination pointer
            // array.  Because the array type is never stepped over (there
            // is a leading zero) we can fold the cast into this GEP.
            GEP.setOperand(0, X);
            return &GEP;
          }
        }
      }
    } else if (GEP.getNumOperands() == 2) {
      // Transform things like:
      // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
      // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
      const Type *SrcElTy = cast<PointerType>(X->getType())->getElementType();
      const Type *ResElTy = cast<PointerType>(PtrOp->getType())->getElementType();
      if (TD && isa<ArrayType>(SrcElTy) &&
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
          TD->getTypeAllocSize(ResElTy)) {
        Value *Idx[2];
        Idx[0] = Constant::getNullValue(Type::getInt32Ty(*Context));
        Idx[1] = GEP.getOperand(1);
        Value *NewGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
          Builder->CreateInBoundsGEP(X, Idx, Idx + 2, GEP.getName()) :
          Builder->CreateGEP(X, Idx, Idx + 2, GEP.getName());
        // V and GEP are both pointer types --> BitCast
        return new BitCastInst(NewGEP, GEP.getType());
      }

      // Transform things like:
      // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
      //   (where tmp = 8*tmp2) into:
      // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast

      if (TD && isa<ArrayType>(SrcElTy) && ResElTy == Type::getInt8Ty(*Context)) {
        uint64_t ArrayEltSize =
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());

        // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.  We
        // allow either a mul, shift, or constant here.
        Value *NewIdx = 0;
        ConstantInt *Scale = 0;
        if (ArrayEltSize == 1) {
          NewIdx = GEP.getOperand(1);
          Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
        } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
          NewIdx = ConstantInt::get(CI->getType(), 1);
          Scale = CI;
        } else if (Instruction *Inst = dyn_cast<Instruction>(GEP.getOperand(1))){
          if (Inst->getOpcode() == Instruction::Shl &&
              isa<ConstantInt>(Inst->getOperand(1))) {
            ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
            uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
            Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
                                     1ULL << ShAmtVal);
            NewIdx = Inst->getOperand(0);
          } else if (Inst->getOpcode() == Instruction::Mul &&
                     isa<ConstantInt>(Inst->getOperand(1))) {
            Scale = cast<ConstantInt>(Inst->getOperand(1));
            NewIdx = Inst->getOperand(0);
          }
        }

        // If the index will be to exactly the right offset with the scale taken
        // out, perform the transformation. Note, we don't know whether Scale is
        // signed or not. We'll use unsigned version of division/modulo
        // operation after making sure Scale doesn't have the sign bit set.
        if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
            Scale->getZExtValue() % ArrayEltSize == 0) {
          Scale = ConstantInt::get(Scale->getType(),
                                   Scale->getZExtValue() / ArrayEltSize);
          if (Scale->getZExtValue() != 1) {
            Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
                                                       false /*ZExt*/);
            NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
          }

          // Insert the new GEP instruction.
          Value *Idx[2];
          Idx[0] = Constant::getNullValue(Type::getInt32Ty(*Context));
          Idx[1] = NewIdx;
          Value *NewGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
            Builder->CreateInBoundsGEP(X, Idx, Idx + 2, GEP.getName()) :
            Builder->CreateGEP(X, Idx, Idx + 2, GEP.getName());
          // The NewGEP must be pointer typed, so must the old one -> BitCast
          return new BitCastInst(NewGEP, GEP.getType());
        }
      }
    }
  }

  /// See if we can simplify:
  ///   X = bitcast A* to B*
  ///   Y = gep X, <...constant indices...>
  /// into a gep of the original struct.  This is important for SROA and alias
  /// analysis of unions.  If "A" is also a bitcast, wait for A/X to be merged.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
    if (TD &&
        !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) {
      // Determine how much the GEP moves the pointer.  We are guaranteed to get
      // a constant back from EmitGEPOffset.
      ConstantInt *OffsetV =
        cast<ConstantInt>(EmitGEPOffset(&GEP, GEP, *this));
      int64_t Offset = OffsetV->getSExtValue();

      // If this GEP instruction doesn't move the pointer, just replace the GEP
      // with a bitcast of the real input to the dest type.
      if (Offset == 0) {
        // If the bitcast is of an allocation, and the allocation will be
        // converted to match the type of the cast, don't touch this.
        if (isa<AllocationInst>(BCI->getOperand(0))) {
          // See if the bitcast simplifies, if so, don't nuke this GEP yet.
          if (Instruction *I = visitBitCast(*BCI)) {
            if (I != BCI) {
              I->takeName(BCI);
              BCI->getParent()->getInstList().insert(BCI, I);
              ReplaceInstUsesWith(*BCI, I);
            }
            return &GEP;
          }
        }
        return new BitCastInst(BCI->getOperand(0), GEP.getType());
      }

      // Otherwise, if the offset is non-zero, we need to find out if there is a
      // field at Offset in 'A's type.  If so, we can pull the cast through the
      // GEP.
      SmallVector<Value*, 8> NewIndices;
      const Type *InTy =
        cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
      if (FindElementAtOffset(InTy, Offset, NewIndices, TD, Context)) {
        Value *NGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
          Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices.begin(),
                                     NewIndices.end()) :
          Builder->CreateGEP(BCI->getOperand(0), NewIndices.begin(),
                             NewIndices.end());

        if (NGEP->getType() == GEP.getType())
          return ReplaceInstUsesWith(GEP, NGEP);
        NGEP->takeName(&GEP);
        return new BitCastInst(NGEP, GEP.getType());
      }
    }
  }

  return 0;
}
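// Illustrative example (schematic IR, not from the original comments): the
// gep-of-bitcast fold above handles the "extern array" pattern where the
// element types match, e.g.
//   %p = getelementptr [0 x i8]* bitcast ([10 x i8]* @X to [0 x i8]*), i32 0, i32 %i
// is rewritten as
//   %p = getelementptr [10 x i8]* @X, i32 0, i32 %i
// so the bitcast disappears and the index applies to the real array type.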
Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
  // Convert: malloc Ty, C - where C is a constant != 1 into: malloc [C x Ty], 1
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      const Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocationInst *New = 0;

      // Create and insert the replacement instruction...
      if (isa<MallocInst>(AI))
        New = Builder->CreateMalloc(NewTy, 0, AI.getName());
      else {
        assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
        New = Builder->CreateAlloca(NewTy, 0, AI.getName());
      }
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It = New;
      while (isa<AllocationInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that I is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Value *NullIdx = Constant::getNullValue(Type::getInt32Ty(*Context));
      Value *Idx[2];
      Idx[0] = NullIdx;
      Idx[1] = NullIdx;
      Value *V = GetElementPtrInst::CreateInBounds(New, Idx, Idx + 2,
                                                   New->getName()+".sub", It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, V);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (TD && isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized()) {
    // If alloca'ing a zero byte object, replace the alloca with a null pointer.
    // Note that we only do this for alloca's, because malloc should allocate
    // and return a unique pointer, even for a zero byte allocation.
    if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
  }

  return 0;
}
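// Illustrative example (schematic IR, not from the original comments): for an
// alloca, the array-allocation canonicalization above turns
//   %p = alloca i32, i32 4
// into
//   %a = alloca [4 x i32]
//   %p = getelementptr inbounds [4 x i32]* %a, i32 0, i32 0   ; "%a.sub"
// so later passes see a fixed-size object addressed through a plain GEP.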
Instruction *InstCombiner::visitFreeInst(FreeInst &FI) {
  Value *Op = FI.getOperand(0);

  // free undef -> unreachable.
  if (isa<UndefValue>(Op)) {
    // Insert a new store to null because we cannot modify the CFG here.
    new StoreInst(ConstantInt::getTrue(*Context),
           UndefValue::get(PointerType::getUnqual(Type::getInt1Ty(*Context))), &FI);
    return EraseInstFromFunction(FI);
  }

  // If we have 'free null' delete the instruction.  This can happen in stl code
  // when lots of inlining happens.
  if (isa<ConstantPointerNull>(Op))
    return EraseInstFromFunction(FI);

  // Change free <ty>* (cast <ty2>* X to <ty>*) into free <ty2>* X
  if (BitCastInst *CI = dyn_cast<BitCastInst>(Op)) {
    FI.setOperand(0, CI->getOperand(0));
    return &FI;
  }

  // Change free (gep X, 0,0,0,0) into free(X)
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    if (GEPI->hasAllZeroIndices()) {
      Worklist.Add(GEPI);
      FI.setOperand(0, GEPI->getOperand(0));
      return &FI;
    }
  }

  // Change free(malloc) into nothing, if the malloc has a single use.
  if (MallocInst *MI = dyn_cast<MallocInst>(Op))
    if (MI->hasOneUse()) {
      EraseInstFromFunction(FI);
      return EraseInstFromFunction(*MI);
    }

  return 0;
}
/// InstCombineLoadCast - Fold 'load (cast P)' -> cast (load P)' when possible.
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const TargetData *TD) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);
  LLVMContext *Context = IC.getContext();

  if (TD) {
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(CI)) {
      // Instead of loading constant c string, use corresponding integer value
      // directly if string length is small enough.
      std::string Str;
      if (GetConstantStringInfo(CE->getOperand(0), Str) && !Str.empty()) {
        unsigned len = Str.length();
        const Type *Ty = cast<PointerType>(CE->getType())->getElementType();
        unsigned numBits = Ty->getPrimitiveSizeInBits();
        // Replace LI with immediate integer store.
        if ((numBits >> 3) == len + 1) {
          APInt StrVal(numBits, 0);
          APInt SingleChar(numBits, 0);
          if (TD->isLittleEndian()) {
            for (signed i = len-1; i >= 0; i--) {
              SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
              StrVal = (StrVal << 8) | SingleChar;
            }
          } else {
            for (unsigned i = 0; i < len; i++) {
              SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
              StrVal = (StrVal << 8) | SingleChar;
            }
            // Append NULL at the end.
            SingleChar = 0;
            StrVal = (StrVal << 8) | SingleChar;
          }
          Value *NL = ConstantInt::get(*Context, StrVal);
          return IC.ReplaceInstUsesWith(LI, NL);
        }
      }
    }
  }

  const PointerType *DestTy = cast<PointerType>(CI->getType());
  const Type *DestPTy = DestTy->getElementType();
  if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {

    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    const Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isInteger() || isa<PointerType>(DestPTy) ||
        isa<VectorType>(DestPTy)) {
      // If the source is an array, the code below will not succeed.  Check to
      // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
      // constants.
      if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Value *Idxs[2];
            Idxs[0] = Idxs[1] = Constant::getNullValue(Type::getInt32Ty(*Context));
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if (IC.getTargetData() &&
          (SrcPTy->isInteger() || isa<PointerType>(SrcPTy) ||
           isa<VectorType>(SrcPTy)) &&
          // Do not allow turning this into a load of an integer, which is then
          // casted to a pointer, this pessimizes pointer analysis a lot.
          (isa<PointerType>(SrcPTy) == isa<PointerType>(LI.getType())) &&
          IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
          IC.getTargetData()->getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size.  Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        Value *NewLoad =
          IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}
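// Illustrative example (schematic IR, not from the original comments): since
// float and i32 have the same size in bits, the fold above moves the cast from
// the pointer to the loaded value:
//   %v = load i32* bitcast (float* @G to i32*)
// becomes
//   %f = load float* @G
//   %v = bitcast float %f to i32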
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      GetOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()));
    if (KnownAlign >
        (LI.getAlignment() == 0 ? TD->getABITypeAlignment(LI.getType()) :
                                  LI.getAlignment()))
      LI.setAlignment(KnownAlign);
  }

  // load (cast X) --> cast (load X) iff safe.
  if (isa<CastInst>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
      return Res;

  // None of the following transforms are legal for volatile loads.
  if (LI.isVolatile()) return 0;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI, 6))
    return ReplaceInstUsesWith(LI, AvailableVal);

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  if (Constant *C = dyn_cast<Constant>(Op)) {
    // load null/undef -> undef
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<UndefValue>(C) ||
        (C->isNullValue() && LI.getPointerAddressSpace() == 0)) {
      // Insert a new store to null instruction before the load to indicate that
      // this code is not reachable.  We do this instead of inserting an
      // unreachable instruction directly because we cannot modify the CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }

    // Instcombine load (constant global) into the value loaded.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op))
      if (GV->isConstant() && GV->hasDefinitiveInitializer())
        return ReplaceInstUsesWith(LI, GV->getInitializer());

    // Instcombine load (constantexpr_GEP global, 0, ...) into the value loaded.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
          if (GV->isConstant() && GV->hasDefinitiveInitializer())
            if (Constant *V =
                  ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE,
                                                         Context))
              return ReplaceInstUsesWith(LI, V);
        if (CE->getOperand(0)->isNullValue()) {
          // Insert a new store to null instruction before the load to indicate
          // that this code is not reachable.  We do this instead of inserting
          // an unreachable instruction directly because we cannot modify the
          // CFG.
          new StoreInst(UndefValue::get(LI.getType()),
                        Constant::getNullValue(Op->getType()), &LI);
          return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
        }
      } else if (CE->isCast()) {
        if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
          return Res;
      }
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op->getUnderlyingObject())){
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return ReplaceInstUsesWith(LI, Constant::getNullValue(LI.getType()));
      else if (isa<UndefValue>(GV->getInitializer()))
        return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI)) {
        Value *V1 = Builder->CreateLoad(SI->getOperand(1),
                                        SI->getOperand(1)->getName()+".val");
        Value *V2 = Builder->CreateLoad(SI->getOperand(2),
                                        SI->getOperand(2)->getName()+".val");
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }

      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return 0;
}
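// Illustrative example (schematic IR, not from the original comments) of the
// simple store-to-load forwarding done in visitLoadInst: in
//   store i32 %x, i32* %p
//   %t = add i32 %y, 1
//   %v = load i32* %p
// the load is replaced by %x, provided nothing between the store and the load
// can write to %p (the scan is limited to a few instructions).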
/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible.  This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (SrcTy == 0) return 0;

  const Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isInteger() && !isa<PointerType>(DestPTy))
    return 0;

  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element.  This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
  /// on 32-bit hosts.
  SmallVector<Value*, 4> NewGEPIndices;

  // If the source is an array, the code below will not succeed.  Check to
  // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
  // constants.
  if (isa<ArrayType>(SrcPTy) || isa<StructType>(SrcPTy)) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::getInt32Ty(*IC.getContext()));
    NewGEPIndices.push_back(Zero);

    while (1) {
      if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          break;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }

  if (!SrcPTy->isInteger() && !isa<PointerType>(SrcPTy))
    return 0;

  // If the pointers point into different address spaces or if they point to
  // values with different sizes, we can't do the transformation.
  if (!IC.getTargetData() ||
      SrcTy->getAddressSpace() !=
        cast<PointerType>(CI->getType())->getAddressSpace() ||
      IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
      IC.getTargetData()->getTypeSizeInBits(DestPTy))
    return 0;

  // Okay, we are casting from one integer or pointer type to another of
  // the same size.  Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *NewCast;
  Value *SIOp0 = SI.getOperand(0);
  Instruction::CastOps opcode = Instruction::BitCast;
  const Type *CastSrcTy = SIOp0->getType();
  const Type *CastDstTy = SrcPTy;
  if (isa<PointerType>(CastDstTy)) {
    if (CastSrcTy->isInteger())
      opcode = Instruction::IntToPtr;
  } else if (isa<IntegerType>(CastDstTy)) {
    if (isa<PointerType>(SIOp0->getType()))
      opcode = Instruction::PtrToInt;
  }

  // SIOp0 is a pointer to aggregate and this is a store to the first field,
  // emit a GEP to index into its first field.
  if (!NewGEPIndices.empty())
    CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices.begin(),
                                           NewGEPIndices.end());

  NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
                                   SIOp0->getName()+".c");
  return new StoreInst(NewCast, CastOp);
}
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
// If this instruction has two uses, one of which is a llvm.dbg.declare,
// return the llvm.dbg.declare.
DbgDeclareInst *InstCombiner::hasOneUsePlusDeclare(Value *V) {
  if (!V->hasNUses(2))
    return 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI))
      return DI;
    if (isa<BitCastInst>(UI) && UI->hasOneUse()) {
      if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI->use_begin()))
        return DI;
    }
  }
  return 0;
}
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  if (isa<UndefValue>(Ptr)) {     // store X, undef -> noop (even if volatile)
    EraseInstFromFunction(SI);
    ++NumCombined;
    return 0;
  }

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  // If the RHS is an alloca with two uses, the other one being a
  // llvm.dbg.declare, zapify the store and the declare, making the
  // alloca dead.  We must do this to prevent declares from affecting
  // codegen.
  if (!SI.isVolatile()) {
    if (Ptr->hasOneUse()) {
      if (isa<AllocaInst>(Ptr)) {
        EraseInstFromFunction(SI);
        ++NumCombined;
        return 0;
      }
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
        if (isa<AllocaInst>(GEP->getOperand(0))) {
          if (GEP->getOperand(0)->hasOneUse()) {
            EraseInstFromFunction(SI);
            ++NumCombined;
            return 0;
          }
          if (DbgDeclareInst *DI = hasOneUsePlusDeclare(GEP->getOperand(0))) {
            EraseInstFromFunction(*DI);
            EraseInstFromFunction(SI);
            ++NumCombined;
            return 0;
          }
        }
      }
    }
    if (DbgDeclareInst *DI = hasOneUsePlusDeclare(Ptr)) {
      EraseInstFromFunction(*DI);
      EraseInstFromFunction(SI);
      ++NumCombined;
      return 0;
    }
  }

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()));
    if (KnownAlign >
        (SI.getAlignment() == 0 ? TD->getABITypeAlignment(Val->getType()) :
                                  SI.getAlignment()))
      SI.setAlignment(KnownAlign);
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    // It is necessary for correctness to skip those that feed into a
    // llvm.dbg.declare, as these are not present when debugging is off.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (!PrevSI->isVolatile() && equivalentAddressValues(PrevSI->getOperand(1),
                                                           SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          !SI.isVolatile()) {
        EraseInstFromFunction(SI);
        ++NumCombined;
        return 0;
      }
      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  if (SI.isVolatile()) return 0;  // Don't hack volatile stores.

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
      ++NumCombined;
    }
    return 0;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val)) {
    EraseInstFromFunction(SI);
    ++NumCombined;
    return 0;
  }

  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions and the pointer bitcasts that feed
  // into them), and if the block ends with an unconditional branch, try
  // to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType())));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return 0;  // xform done!

  return 0;
}
/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // if so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *OtherBB = 0;
  if (*PI != StoreBB)
    OtherBB = *PI;
  ++PI;
  if (PI == pred_end(DestBB))
    return false;

  if (*PI != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = *PI;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop)
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case.  There is an instruction before the branch.
  StoreInst *OtherStore = 0;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, or isn't a store to the same location, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), "storemerge");
    PN->reserveOperandSpace(2);
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstNonPHI();
  InsertNewInstBefore(new StoreInst(MergedVal, SI.getOperand(1),
                                    OtherStore->isVolatile()), *BBI);

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  ++NumCombined;
  return true;
}
Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
  // Change br (not X), label True, label False to: br X, label False, True
  Value *X = 0;
  BasicBlock *TrueDest;
  BasicBlock *FalseDest;
  if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
      !isa<Constant>(X)) {
    // Swap Destinations and condition...
    BI.setCondition(X);
    BI.setSuccessor(0, FalseDest);
    BI.setSuccessor(1, TrueDest);
    return &BI;
  }

  // Canonicalize fcmp_one -> fcmp_oeq
  FCmpInst::Predicate FPred; Value *Y;
  if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
        FPred == FCmpInst::FCMP_OGE) {
      FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
      Cond->setPredicate(FCmpInst::getInversePredicate(FPred));

      // Swap Destinations and condition.
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);
      Worklist.Add(Cond);
      return &BI;
    }

  // Canonicalize icmp_ne -> icmp_eq
  ICmpInst::Predicate IPred;
  if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (IPred == ICmpInst::ICMP_NE  || IPred == ICmpInst::ICMP_ULE ||
        IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
        IPred == ICmpInst::ICMP_SGE) {
      ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
      Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
      // Swap Destinations and condition.
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);
      Worklist.Add(Cond);
      return &BI;
    }

  return 0;
}
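// Illustrative example (schematic IR, not from the original comments): the
// fcmp canonicalization above rewrites
//   %c = fcmp one float %a, %b
//   br i1 %c, label %T, label %F
// as
//   %c = fcmp oeq float %a, %b
//   br i1 %c, label %F, label %T
// by inverting the predicate and swapping the successors, provided the compare
// has no other users.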
Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
  Value *Cond = SI.getCondition();
  if (Instruction *I = dyn_cast<Instruction>(Cond)) {
    if (I->getOpcode() == Instruction::Add)
      if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // change 'switch (X+4) case 1:' into 'switch (X) case -3'
        for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
          SI.setOperand(i,
                        ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
                                             AddRHS));
        SI.setOperand(0, I->getOperand(0));
        Worklist.Add(I);
        return &SI;
      }
  }
  return 0;
}
Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
  Value *Agg = EV.getAggregateOperand();

  if (!EV.hasIndices())
    return ReplaceInstUsesWith(EV, Agg);

  if (Constant *C = dyn_cast<Constant>(Agg)) {
    if (isa<UndefValue>(C))
      return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));

    if (isa<ConstantAggregateZero>(C))
      return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));

    if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
      // Extract the element indexed by the first index out of the constant
      Value *V = C->getOperand(*EV.idx_begin());
      if (EV.getNumIndices() > 1)
        // Extract the remaining indices out of the constant indexed by the
        // first index
        return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end());
      else
        return ReplaceInstUsesWith(EV, V);
    }
    return 0; // Can't handle other constants
  }
  if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
    // We're extracting from an insertvalue instruction, compare the indices
    const unsigned *exti, *exte, *insi, *inse;
    for (exti = EV.idx_begin(), insi = IV->idx_begin(),
         exte = EV.idx_end(), inse = IV->idx_end();
         exti != exte && insi != inse;
         ++exti, ++insi) {
      if (*insi != *exti)
        // The insert and extract both reference distinctly different elements.
        // This means the extract is not influenced by the insert, and we can
        // replace the aggregate operand of the extract with the aggregate
        // operand of the insert. i.e., replace
        // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
        // %E = extractvalue { i32, { i32 } } %I, 0
        // with
        // %E = extractvalue { i32, { i32 } } %A, 0
        return ExtractValueInst::Create(IV->getAggregateOperand(),
                                        EV.idx_begin(), EV.idx_end());
    }
    if (exti == exte && insi == inse)
      // Both iterators are at the end: Index lists are identical. Replace
      // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %C = extractvalue { i32, { i32 } } %B, 1, 0
      // with "i32 42"
      return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
    if (exti == exte) {
      // The extract list is a prefix of the insert list. i.e. replace
      // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %E = extractvalue { i32, { i32 } } %I, 1
      // with
      // %X = extractvalue { i32, { i32 } } %A, 1
      // %E = insertvalue { i32 } %X, i32 42, 0
      // by switching the order of the insert and extract (though the
      // insertvalue should be left in, since it may have other uses).
      Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
                                                 EV.idx_begin(), EV.idx_end());
      return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
                                     insi, inse);
    }
    if (insi == inse)
      // The insert list is a prefix of the extract list
      // We can simply remove the common indices from the extract and make it
      // operate on the inserted value instead of the insertvalue result.
      // i.e., replace
      // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
      // %E = extractvalue { i32, { i32 } } %I, 1, 0
      // with
      // %E extractvalue { i32 } { i32 42 }, 0
      return ExtractValueInst::Create(IV->getInsertedValueOperand(),
                                      exti, exte);
  }
  // Can't simplify extracts from other values. Note that nested extracts are
  // already simplified implicitly by the above (extract ( extract (insert) )
  // will be translated into extract ( insert ( extract ) ) first and then just
  // the value inserted, if appropriate).
  return 0;
}
/// CheapToScalarize - Return true if the value is cheaper to scalarize than it
/// is to leave as a vector operation.
static bool CheapToScalarize(Value *V, bool isConstant) {
  if (isa<ConstantAggregateZero>(V))
    return true;
  if (ConstantVector *C = dyn_cast<ConstantVector>(V)) {
    if (isConstant) return true;
    // If all elts are the same, we can extract.
    Constant *Op0 = C->getOperand(0);
    for (unsigned i = 1; i < C->getNumOperands(); ++i)
      if (C->getOperand(i) != Op0)
        return false;
    return true;
  }
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Insert element gets simplified to the inserted element or is deleted if
  // this is constant idx extract element and it's a constant idx insertelt.
  if (I->getOpcode() == Instruction::InsertElement && isConstant &&
      isa<ConstantInt>(I->getOperand(2)))
    return true;
  if (I->getOpcode() == Instruction::Load && I->hasOneUse())
    return true;
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I))
    if (BO->hasOneUse() &&
        (CheapToScalarize(BO->getOperand(0), isConstant) ||
         CheapToScalarize(BO->getOperand(1), isConstant)))
      return true;
  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    if (CI->hasOneUse() &&
        (CheapToScalarize(CI->getOperand(0), isConstant) ||
         CheapToScalarize(CI->getOperand(1), isConstant)))
      return true;

  return false;
}
/// Read and decode a shufflevector mask.
///
/// It turns undef elements into values that are larger than the number of
/// elements in the input.
static std::vector<unsigned> getShuffleMask(const ShuffleVectorInst *SVI) {
  unsigned NElts = SVI->getType()->getNumElements();
  if (isa<ConstantAggregateZero>(SVI->getOperand(2)))
    return std::vector<unsigned>(NElts, 0);
  if (isa<UndefValue>(SVI->getOperand(2)))
    return std::vector<unsigned>(NElts, 2*NElts);

  std::vector<unsigned> Result;
  const ConstantVector *CP = cast<ConstantVector>(SVI->getOperand(2));
  for (User::const_op_iterator i = CP->op_begin(), e = CP->op_end(); i != e; ++i)
    if (isa<UndefValue>(*i))
      Result.push_back(NElts*2);  // undef -> out of range (2*NElts)
    else
      Result.push_back(cast<ConstantInt>(*i)->getZExtValue());
  return Result;
}
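// Illustrative example (not from the original comments): for a <4 x i32>
// shuffle with mask
//   <i32 0, i32 5, i32 undef, i32 2>
// getShuffleMask returns {0, 5, 8, 2}; the undef lane is encoded as 2*NElts
// (8 here), which is out of range for any real element, so callers can
// distinguish it from a genuine index into either input vector.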
/// FindScalarElement - Given a vector and an element number, see if the scalar
/// value is already around as a register, for example if it were inserted then
/// extracted from the vector.
static Value *FindScalarElement(Value *V, unsigned EltNo,
                                LLVMContext *Context) {
  assert(isa<VectorType>(V->getType()) && "Not looking at a vector?");
  const VectorType *PTy = cast<VectorType>(V->getType());
  unsigned Width = PTy->getNumElements();
  if (EltNo >= Width)  // Out of range access.
    return UndefValue::get(PTy->getElementType());

  if (isa<UndefValue>(V))
    return UndefValue::get(PTy->getElementType());
  else if (isa<ConstantAggregateZero>(V))
    return Constant::getNullValue(PTy->getElementType());
  else if (ConstantVector *CP = dyn_cast<ConstantVector>(V))
    return CP->getOperand(EltNo);
  else if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return 0;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return FindScalarElement(III->getOperand(0), EltNo, Context);
  } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
    unsigned LHSWidth =
      cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
    unsigned InEl = getShuffleMask(SVI)[EltNo];
    if (InEl < LHSWidth)
      return FindScalarElement(SVI->getOperand(0), InEl, Context);
    else if (InEl < LHSWidth*2)
      return FindScalarElement(SVI->getOperand(1), InEl - LHSWidth, Context);
    else
      return UndefValue::get(PTy->getElementType());
  }

  // Otherwise, we don't know.
  return 0;
}
*InstCombiner::visitExtractElementInst(ExtractElementInst
&EI
) {
12070 // If vector val is undef, replace extract with scalar undef.
12071 if (isa
<UndefValue
>(EI
.getOperand(0)))
12072 return ReplaceInstUsesWith(EI
, UndefValue::get(EI
.getType()));
12074 // If vector val is constant 0, replace extract with scalar 0.
12075 if (isa
<ConstantAggregateZero
>(EI
.getOperand(0)))
12076 return ReplaceInstUsesWith(EI
, Constant::getNullValue(EI
.getType()));
12078 if (ConstantVector
*C
= dyn_cast
<ConstantVector
>(EI
.getOperand(0))) {
12079 // If vector val is constant with all elements the same, replace EI with
12080 // that element. When the elements are not identical, we cannot replace yet
12081 // (we do that below, but only when the index is constant).
12082 Constant
*op0
= C
->getOperand(0);
12083 for (unsigned i
= 1; i
!= C
->getNumOperands(); ++i
)
12084 if (C
->getOperand(i
) != op0
) {
12089 return ReplaceInstUsesWith(EI
, op0
);
12092 // If extracting a specified index from the vector, see if we can recursively
12093 // find a previously computed scalar that was inserted into the vector.
12094 if (ConstantInt
*IdxC
= dyn_cast
<ConstantInt
>(EI
.getOperand(1))) {
12095 unsigned IndexVal
= IdxC
->getZExtValue();
12096 unsigned VectorWidth
= EI
.getVectorOperandType()->getNumElements();
12098 // If this is extracting an invalid index, turn this into undef, to avoid
12099 // crashing the code below.
12100 if (IndexVal
>= VectorWidth
)
12101 return ReplaceInstUsesWith(EI
, UndefValue::get(EI
.getType()));
12103 // This instruction only demands the single element from the input vector.
12104 // If the input vector has a single use, simplify it based on this use
12106 if (EI
.getOperand(0)->hasOneUse() && VectorWidth
!= 1) {
12107 APInt
UndefElts(VectorWidth
, 0);
12108 APInt
DemandedMask(VectorWidth
, 1 << IndexVal
);
12109 if (Value
*V
= SimplifyDemandedVectorElts(EI
.getOperand(0),
12110 DemandedMask
, UndefElts
)) {
12111 EI
.setOperand(0, V
);
12116 if (Value
*Elt
= FindScalarElement(EI
.getOperand(0), IndexVal
, Context
))
12117 return ReplaceInstUsesWith(EI
, Elt
);
12119 // If the this extractelement is directly using a bitcast from a vector of
12120 // the same number of elements, see if we can find the source element from
12121 // it. In this case, we will end up needing to bitcast the scalars.
12122 if (BitCastInst
*BCI
= dyn_cast
<BitCastInst
>(EI
.getOperand(0))) {
12123 if (const VectorType
*VT
=
12124 dyn_cast
<VectorType
>(BCI
->getOperand(0)->getType()))
12125 if (VT
->getNumElements() == VectorWidth
)
12126 if (Value
*Elt
= FindScalarElement(BCI
->getOperand(0),
12127 IndexVal
, Context
))
12128 return new BitCastInst(Elt
, EI
.getType());
12132 if (Instruction
*I
= dyn_cast
<Instruction
>(EI
.getOperand(0))) {
12133 // Push extractelement into predecessor operation if legal and
12134 // profitable to do so
12135 if (BinaryOperator
*BO
= dyn_cast
<BinaryOperator
>(I
)) {
12136 if (I
->hasOneUse() &&
12137 CheapToScalarize(BO
, isa
<ConstantInt
>(EI
.getOperand(1)))) {
12139 Builder
->CreateExtractElement(BO
->getOperand(0), EI
.getOperand(1),
12140 EI
.getName()+".lhs");
12142 Builder
->CreateExtractElement(BO
->getOperand(1), EI
.getOperand(1),
12143 EI
.getName()+".rhs");
12144 return BinaryOperator::Create(BO
->getOpcode(), newEI0
, newEI1
);
12146 } else if (InsertElementInst
*IE
= dyn_cast
<InsertElementInst
>(I
)) {
12147 // Extracting the inserted element?
12148 if (IE
->getOperand(2) == EI
.getOperand(1))
12149 return ReplaceInstUsesWith(EI
, IE
->getOperand(1));
12150 // If the inserted and extracted elements are constants, they must not
12151 // be the same value, extract from the pre-inserted value instead.
12152 if (isa
<Constant
>(IE
->getOperand(2)) && isa
<Constant
>(EI
.getOperand(1))) {
12153 Worklist
.AddValue(EI
.getOperand(0));
12154 EI
.setOperand(0, IE
->getOperand(0));
12157 } else if (ShuffleVectorInst
*SVI
= dyn_cast
<ShuffleVectorInst
>(I
)) {
12158 // If this is extracting an element from a shufflevector, figure out where
12159 // it came from and extract from the appropriate input element instead.
12160 if (ConstantInt
*Elt
= dyn_cast
<ConstantInt
>(EI
.getOperand(1))) {
12161 unsigned SrcIdx
= getShuffleMask(SVI
)[Elt
->getZExtValue()];
12163 unsigned LHSWidth
=
12164 cast
<VectorType
>(SVI
->getOperand(0)->getType())->getNumElements();
12166 if (SrcIdx
< LHSWidth
)
12167 Src
= SVI
->getOperand(0);
12168 else if (SrcIdx
< LHSWidth
*2) {
12169 SrcIdx
-= LHSWidth
;
12170 Src
= SVI
->getOperand(1);
12172 return ReplaceInstUsesWith(EI
, UndefValue::get(EI
.getType()));
12174 return ExtractElementInst::Create(Src
,
12175 ConstantInt::get(Type::getInt32Ty(*Context
), SrcIdx
,
12179 // FIXME: Canonicalize extractelement(bitcast) -> bitcast(extractelement)
/// CollectSingleShuffleElements - If V is a shuffle of values that ONLY returns
/// elements from either LHS or RHS, return the shuffle mask and true.
/// Otherwise, return false.
static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
                                         std::vector<Constant*> &Mask,
                                         LLVMContext *Context) {
  assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
         "Invalid CollectSingleShuffleElements");
  unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();

  if (isa<UndefValue>(V)) {
    Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(*Context)));
    return true;
  } else if (V == LHS) {
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i));
    return true;
  } else if (V == RHS) {
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i+NumElts));
    return true;
  } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert of an extract from some other vector, include it.
    Value *VecOp    = IEI->getOperand(0);
    Value *ScalarOp = IEI->getOperand(1);
    Value *IdxOp    = IEI->getOperand(2);

    if (!isa<ConstantInt>(IdxOp))
      return false;
    unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

    if (isa<UndefValue>(ScalarOp)) {  // inserting undef into vector.
      // Okay, we can handle this if the vector we are inserting into is
      // transitively ok.
      if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
        // If so, update the mask to reflect the inserted undef.
        Mask[InsertedIdx] = UndefValue::get(Type::getInt32Ty(*Context));
        return true;
      }
    } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)){
      if (isa<ConstantInt>(EI->getOperand(1)) &&
          EI->getOperand(0)->getType() == V->getType()) {
        unsigned ExtractedIdx =
          cast<ConstantInt>(EI->getOperand(1))->getZExtValue();

        // This must be extracting from either LHS or RHS.
        if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
          // Okay, we can handle this if the vector we are inserting into is
          // transitively ok.
          if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
            // If so, update the mask to reflect the inserted value.
            if (EI->getOperand(0) == LHS) {
              Mask[InsertedIdx % NumElts] =
                ConstantInt::get(Type::getInt32Ty(*Context), ExtractedIdx);
            } else {
              assert(EI->getOperand(0) == RHS);
              Mask[InsertedIdx % NumElts] =
                ConstantInt::get(Type::getInt32Ty(*Context), ExtractedIdx+NumElts);
            }
            return true;
          }
        }
      }
    }
  }
  // TODO: Handle shufflevector here!

  return false;
}
/// CollectShuffleElements - We are building a shuffle of V, using RHS as the
/// RHS of the shuffle instruction, if it is not null.  Return a shuffle mask
/// that computes V and the LHS value of the shuffle.
static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
                                     Value *&RHS, LLVMContext *Context) {
  assert(isa<VectorType>(V->getType()) &&
         (RHS == 0 || V->getType() == RHS->getType()) &&
         "Invalid shuffle!");
  unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();

  if (isa<UndefValue>(V)) {
    Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(*Context)));
    return V;
  } else if (isa<ConstantAggregateZero>(V)) {
    Mask.assign(NumElts, ConstantInt::get(Type::getInt32Ty(*Context), 0));
    return V;
  } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert of an extract from some other vector, include it.
    Value *VecOp    = IEI->getOperand(0);
    Value *ScalarOp = IEI->getOperand(1);
    Value *IdxOp    = IEI->getOperand(2);

    if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
      if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
          EI->getOperand(0)->getType() == V->getType()) {
        unsigned ExtractedIdx =
          cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
        unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

        // Either the extracted from or inserted into vector must be RHSVec,
        // otherwise we'd end up with a shuffle of three inputs.
        if (EI->getOperand(0) == RHS || RHS == 0) {
          RHS = EI->getOperand(0);
          Value *V = CollectShuffleElements(VecOp, Mask, RHS, Context);
          Mask[InsertedIdx % NumElts] =
            ConstantInt::get(Type::getInt32Ty(*Context), NumElts+ExtractedIdx);
          return V;
        }

        if (VecOp == RHS) {
          Value *V = CollectShuffleElements(EI->getOperand(0), Mask,
                                            RHS, Context);
          // Everything but the extracted element is replaced with the RHS.
          for (unsigned i = 0; i != NumElts; ++i) {
            if (i != InsertedIdx)
              Mask[i] = ConstantInt::get(Type::getInt32Ty(*Context), NumElts+i);
          }
          return V;
        }

        // If this insertelement is a chain that comes from exactly these two
        // vectors, return the vector and the effective shuffle.
        if (CollectSingleShuffleElements(IEI, EI->getOperand(0), RHS, Mask,
                                         Context))
          return EI->getOperand(0);
      }
    }
  }
  // TODO: Handle shufflevector here!

  // Otherwise, can't do anything fancy.  Return an identity vector.
  for (unsigned i = 0; i != NumElts; ++i)
    Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i));
  return V;
}

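// Worked example for CollectShuffleElements (assumed IR, for illustration):
//   %a  = extractelement <2 x float> %X, i32 0
//   %b  = extractelement <2 x float> %Y, i32 0
//   %v1 = insertelement <2 x float> undef, float %a, i32 0
//   %v2 = insertelement <2 x float> %v1, float %b, i32 1
// CollectShuffleElements(%v2, Mask, RHS) returns %X, sets RHS to %Y, and
// fills Mask = <0, 2>, i.e. %v2 == shufflevector %X, %Y, <i32 0, i32 2>.
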
Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
  Value *VecOp    = IE.getOperand(0);
  Value *ScalarOp = IE.getOperand(1);
  Value *IdxOp    = IE.getOperand(2);

  // Inserting an undef value, or inserting into an undef index: the input
  // vector serves as the result.
  if (isa<UndefValue>(ScalarOp) || isa<UndefValue>(IdxOp))
    ReplaceInstUsesWith(IE, VecOp);

  // If the inserted element was extracted from some other vector, and if the
  // indexes are constant, try to turn this into a shufflevector operation.
  if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
    if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
        EI->getOperand(0)->getType() == IE.getType()) {
      unsigned NumVectorElts = IE.getType()->getNumElements();
      unsigned ExtractedIdx =
        cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
      unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

      if (ExtractedIdx >= NumVectorElts) // Out of range extract.
        return ReplaceInstUsesWith(IE, VecOp);

      if (InsertedIdx >= NumVectorElts)  // Out of range insert.
        return ReplaceInstUsesWith(IE, UndefValue::get(IE.getType()));

      // If we are extracting a value from a vector, then inserting it right
      // back into the same place, just use the input vector.
      if (EI->getOperand(0) == VecOp && ExtractedIdx == InsertedIdx)
        return ReplaceInstUsesWith(IE, VecOp);

      // We could theoretically do this for ANY input.  However, doing so could
      // turn chains of insertelement instructions into a chain of shufflevector
      // instructions, and right now we do not merge shufflevectors.  As such,
      // only do this in a situation where it is clear that there is benefit.
      if (isa<UndefValue>(VecOp) || isa<ConstantAggregateZero>(VecOp)) {
        // Turn this into shuffle(EIOp0, VecOp, Mask).  The result has all of
        // the values of VecOp, except the one read from EIOp0.
        // Build a new shuffle mask.
        std::vector<Constant*> Mask;
        if (isa<UndefValue>(VecOp))
          Mask.assign(NumVectorElts,
                      UndefValue::get(Type::getInt32Ty(*Context)));
        else {
          assert(isa<ConstantAggregateZero>(VecOp) && "Unknown thing");
          Mask.assign(NumVectorElts,
                      ConstantInt::get(Type::getInt32Ty(*Context), 0));
        }
        Mask[InsertedIdx] =
          ConstantInt::get(Type::getInt32Ty(*Context), ExtractedIdx);
        return new ShuffleVectorInst(EI->getOperand(0), VecOp,
                                     ConstantVector::get(Mask));
      }

      // If this insertelement isn't used by some other insertelement, turn it
      // (and any insertelements it points to) into one big shuffle.
      if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.use_back())) {
        std::vector<Constant*> Mask;
        Value *RHS = 0;
        Value *LHS = CollectShuffleElements(&IE, Mask, RHS, Context);
        if (RHS == 0) RHS = UndefValue::get(LHS->getType());
        // We now have a shuffle of LHS, RHS, Mask.
        return new ShuffleVectorInst(LHS, RHS, ConstantVector::get(Mask));
      }
    }
  }

  unsigned VWidth = cast<VectorType>(VecOp->getType())->getNumElements();
  APInt UndefElts(VWidth, 0);
  APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
  if (SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts))
    return &IE;

  return 0;
}

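// Example of the insertelement -> shufflevector canonicalization performed
// above (assumed IR, for illustration only):
//   %E = extractelement <4 x i32> %X, i32 2
//   %I = insertelement <4 x i32> undef, i32 %E, i32 0
// becomes
//   %I = shufflevector <4 x i32> %X, <4 x i32> undef,
//                      <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef>
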
Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  Value *LHS = SVI.getOperand(0);
  Value *RHS = SVI.getOperand(1);
  std::vector<unsigned> Mask = getShuffleMask(&SVI);

  bool MadeChange = false;

  // Undefined shuffle mask -> undefined value.
  if (isa<UndefValue>(SVI.getOperand(2)))
    return ReplaceInstUsesWith(SVI, UndefValue::get(SVI.getType()));

  unsigned VWidth = cast<VectorType>(SVI.getType())->getNumElements();

  if (VWidth != cast<VectorType>(LHS->getType())->getNumElements())
    return 0;

  APInt UndefElts(VWidth, 0);
  APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
  if (SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
    LHS = SVI.getOperand(0);
    RHS = SVI.getOperand(1);
    MadeChange = true;
  }

  // Canonicalize shuffle(x    ,x,mask) -> shuffle(x, undef,mask')
  // Canonicalize shuffle(undef,x,mask) -> shuffle(x, undef,mask').
  if (LHS == RHS || isa<UndefValue>(LHS)) {
    if (isa<UndefValue>(LHS) && LHS == RHS) {
      // shuffle(undef,undef,mask) -> undef.
      return ReplaceInstUsesWith(SVI, LHS);
    }

    // Remap any references to RHS to use LHS.
    std::vector<Constant*> Elts;
    for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
      if (Mask[i] >= 2*e)
        Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
      else {
        if ((Mask[i] >= e && isa<UndefValue>(RHS)) ||
            (Mask[i] <  e && isa<UndefValue>(LHS))) {
          Mask[i] = 2*e;          // Turn into undef.
          Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
        } else {
          Mask[i] = Mask[i] % e;  // Force to LHS.
          Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context), Mask[i]));
        }
      }
    }
    SVI.setOperand(0, SVI.getOperand(1));
    SVI.setOperand(1, UndefValue::get(RHS->getType()));
    SVI.setOperand(2, ConstantVector::get(Elts));
    LHS = SVI.getOperand(0);
    RHS = SVI.getOperand(1);
    MadeChange = true;
  }

  // Analyze the shuffle: is the LHS or the RHS an identity shuffle?
  bool isLHSID = true, isRHSID = true;

  for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
    if (Mask[i] >= e*2) continue;  // Ignore undef values.
    // Is this an identity shuffle of the LHS value?
    isLHSID &= (Mask[i] == i);

    // Is this an identity shuffle of the RHS value?
    isRHSID &= (Mask[i]-e == i);
  }

  // Eliminate identity shuffles.
  if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
  if (isRHSID) return ReplaceInstUsesWith(SVI, RHS);

  // If the LHS is a shufflevector itself, see if we can combine it with this
  // one without producing an unusual shuffle.  Here we are really conservative:
  // we are absolutely afraid of producing a shuffle mask not in the input
  // program, because the code gen may not be smart enough to turn a merged
  // shuffle into two specific shuffles: it may produce worse code.  As such,
  // we only merge two shuffles if the result is one of the two input shuffle
  // masks.  In this case, merging the shuffles just removes one instruction,
  // which we know is safe.  This is good for things like turning:
  // (splat(splat)) -> splat.
  if (ShuffleVectorInst *LHSSVI = dyn_cast<ShuffleVectorInst>(LHS)) {
    if (isa<UndefValue>(RHS)) {
      std::vector<unsigned> LHSMask = getShuffleMask(LHSSVI);

      std::vector<unsigned> NewMask;
      for (unsigned i = 0, e = Mask.size(); i != e; ++i)
        if (Mask[i] >= 2*e)
          NewMask.push_back(2*e);
        else
          NewMask.push_back(LHSMask[Mask[i]]);

      // If the result mask is equal to the src shuffle or this shuffle mask, do
      // the replacement.
      if (NewMask == LHSMask || NewMask == Mask) {
        unsigned LHSInNElts =
          cast<VectorType>(LHSSVI->getOperand(0)->getType())->getNumElements();
        std::vector<Constant*> Elts;
        for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
          if (NewMask[i] >= LHSInNElts*2) {
            Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
          } else {
            Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context),
                                            NewMask[i]));
          }
        }
        return new ShuffleVectorInst(LHSSVI->getOperand(0),
                                     LHSSVI->getOperand(1),
                                     ConstantVector::get(Elts));
      }
    }
  }

  return MadeChange ? &SVI : 0;
}

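// Example of the conservative shuffle merging above (assumed IR, for
// illustration): a splat of a splat collapses because the merged mask equals
// one of the two input masks.
//   %A = shufflevector <4 x float> %X, <4 x float> undef, <4 x i32> zeroinitializer
//   %B = shufflevector <4 x float> %A, <4 x float> undef, <4 x i32> zeroinitializer
// becomes
//   %B = shufflevector <4 x float> %X, <4 x float> undef, <4 x i32> zeroinitializer
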
/// TryToSinkInstruction - Try to move the specified instruction from its
/// current block into the beginning of DestBlock, which can only happen if it's
/// safe to move the instruction past all of the instructions between it and the
/// end of its block.
static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
  assert(I->hasOneUse() && "Invariants didn't hold!");

  // Cannot move control-flow-involving, volatile loads, vaarg, etc.
  if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I))
    return false;

  // Do not sink alloca instructions out of the entry block.
  if (isa<AllocaInst>(I) && I->getParent() ==
        &DestBlock->getParent()->getEntryBlock())
    return false;

  // We can only sink load instructions if there is nothing between the load and
  // the end of block that could change the value.
  if (I->mayReadFromMemory()) {
    for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
         Scan != E; ++Scan)
      if (Scan->mayWriteToMemory())
        return false;
  }

  BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI();

  CopyPrecedingStopPoint(I, InsertPos);
  I->moveBefore(InsertPos);
  ++NumSunkInst;
  return true;
}

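// Sketch of the effect (assumed IR, for illustration only): an instruction
// whose single use lives in a sole-predecessor successor is moved there, e.g.
//   entry:
//     %sum = add i32 %a, %b
//     br i1 %c, label %then, label %else
//   then:                               ; preds = %entry
//     call void @use(i32 %sum)
// sinks %sum to the top of %then, so it is not computed on the %else path.
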
/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
/// all reachable code to the worklist.
///
/// This has a couple of tricks to make the code faster and more powerful.  In
/// particular, we constant fold and DCE instructions as we go, to avoid adding
/// them to the worklist (this significantly speeds up instcombine on code where
/// many instructions are dead or constant).  Additionally, if we find a branch
/// whose condition is a known constant, we only visit the reachable successors.
///
static void AddReachableCodeToWorklist(BasicBlock *BB,
                                       SmallPtrSet<BasicBlock*, 64> &Visited,
                                       InstCombiner &IC,
                                       const TargetData *TD) {
  SmallVector<BasicBlock*, 256> Worklist;
  Worklist.push_back(BB);

  while (!Worklist.empty()) {
    BB = Worklist.back();
    Worklist.pop_back();

    // We have now visited this block!  If we've already been here, ignore it.
    if (!Visited.insert(BB)) continue;

    DbgInfoIntrinsic *DBI_Prev = NULL;
    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
      Instruction *Inst = BBI++;

      // DCE instruction if trivially dead.
      if (isInstructionTriviallyDead(Inst)) {
        ++NumDeadInst;
        DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
        Inst->eraseFromParent();
        continue;
      }

      // ConstantProp instruction if trivially constant.
      if (Constant *C = ConstantFoldInstruction(Inst, BB->getContext(), TD)) {
        DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
                     << *Inst << '\n');
        Inst->replaceAllUsesWith(C);
        ++NumConstProp;
        Inst->eraseFromParent();
        continue;
      }

      // If there are two consecutive llvm.dbg.stoppoint calls then
      // it is likely that the optimizer deleted code in between these
      // two intrinsics.
      DbgInfoIntrinsic *DBI_Next = dyn_cast<DbgInfoIntrinsic>(Inst);
      if (DBI_Next) {
        if (DBI_Prev
            && DBI_Prev->getIntrinsicID() == llvm::Intrinsic::dbg_stoppoint
            && DBI_Next->getIntrinsicID() == llvm::Intrinsic::dbg_stoppoint) {
          IC.Worklist.Remove(DBI_Prev);
          DBI_Prev->eraseFromParent();
        }
        DBI_Prev = DBI_Next;
      } else {
        DBI_Prev = 0;
      }

      IC.Worklist.Add(Inst);
    }

    // Recursively visit successors.  If this is a branch or switch on a
    // constant, only visit the reachable successor.
    TerminatorInst *TI = BB->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
        bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
        Worklist.push_back(ReachableBB);
        continue;
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
        // See if this is an explicit destination.
        for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
          if (SI->getCaseValue(i) == Cond) {
            BasicBlock *ReachableBB = SI->getSuccessor(i);
            Worklist.push_back(ReachableBB);
            continue;
          }

        // Otherwise it is the default destination.
        Worklist.push_back(SI->getSuccessor(0));
        continue;
      }
    }

    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      Worklist.push_back(TI->getSuccessor(i));
  }
}

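// Example of the constant-branch pruning above (assumed IR, for illustration):
// for
//   br i1 true, label %live, label %dead
// only %live is pushed onto the worklist; instructions in %dead are never
// visited here and are removed by the unreachable-block sweep in
// DoOneIteration below.
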
bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
  MadeIRChange = false;
  TD = getAnalysisIfAvailable<TargetData>();

  DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
               << F.getNameStr() << "\n");

  // Do a depth-first traversal of the function, populate the worklist with
  // the reachable instructions.  Ignore blocks that are not reachable.  Keep
  // track of which blocks we visit.
  SmallPtrSet<BasicBlock*, 64> Visited;
  AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);

  // Do a quick scan over the function.  If we find any blocks that are
  // unreachable, remove any instructions inside of them.  This prevents
  // the instcombine code from having to deal with some bad special cases.
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (!Visited.count(BB)) {
      Instruction *Term = BB->getTerminator();
      while (Term != BB->begin()) {   // Remove instrs bottom-up
        BasicBlock::iterator I = Term; --I;

        DEBUG(errs() << "IC: DCE: " << *I << '\n');
        // A debug intrinsic shouldn't force another iteration if we weren't
        // going to do one without it.
        if (!isa<DbgInfoIntrinsic>(I)) {
          ++NumDeadInst;
          MadeIRChange = true;
        }
        if (!I->use_empty())
          I->replaceAllUsesWith(UndefValue::get(I->getType()));
        I->eraseFromParent();
      }
    }

  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.RemoveOne();
    if (I == 0) continue;  // skip null values.

    // Check to see if we can DCE the instruction.
    if (isInstructionTriviallyDead(I)) {
      DEBUG(errs() << "IC: DCE: " << *I << '\n');
      EraseInstFromFunction(*I);
      ++NumDeadInst;
      MadeIRChange = true;
      continue;
    }

    // Instruction isn't dead, see if we can constant propagate it.
    if (Constant *C = ConstantFoldInstruction(I, F.getContext(), TD)) {
      DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');

      // Add operands to the worklist.
      ReplaceInstUsesWith(*I, C);
      ++NumConstProp;
      EraseInstFromFunction(*I);
      MadeIRChange = true;
      continue;
    }

    if (TD) {
      // See if we can constant fold its operands.
      for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
        if (ConstantExpr *CE = dyn_cast<ConstantExpr>(i))
          if (Constant *NewC = ConstantFoldConstantExpression(CE,
                                                              F.getContext(),
                                                              TD))
            if (NewC != CE) {
              i->set(NewC);
              MadeIRChange = true;
            }
    }

    // See if we can trivially sink this instruction to a successor basic block.
    if (I->hasOneUse()) {
      BasicBlock *BB = I->getParent();
      BasicBlock *UserParent = cast<Instruction>(I->use_back())->getParent();
      if (UserParent != BB) {
        bool UserIsSuccessor = false;
        // See if the user is one of our successors.
        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
          if (*SI == UserParent) {
            UserIsSuccessor = true;
            break;
          }

        // If the user is one of our immediate successors, and if that successor
        // only has us as a predecessor (we'd have to split the critical edge
        // otherwise), we can keep going.
        if (UserIsSuccessor && !isa<PHINode>(I->use_back()) &&
            next(pred_begin(UserParent)) == pred_end(UserParent))
          // Okay, the CFG is simple enough, try to sink this instruction.
          MadeIRChange |= TryToSinkInstruction(I, UserParent);
      }
    }

    // Now that we have an instruction, try combining it to simplify it.
    Builder->SetInsertPoint(I->getParent(), I);

#ifndef NDEBUG
    std::string OrigI;
#endif
    DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););

    if (Instruction *Result = visit(*I)) {
      ++NumCombined;
      // Should we replace the old instruction with a new one?
      if (Result != I) {
        DEBUG(errs() << "IC: Old = " << *I << '\n'
                     << "    New = " << *Result << '\n');

        // Everything uses the new instruction now.
        I->replaceAllUsesWith(Result);

        // Push the new instruction and any users onto the worklist.
        Worklist.Add(Result);
        Worklist.AddUsersToWorkList(*Result);

        // Move the name to the new instruction first.
        Result->takeName(I);

        // Insert the new instruction into the basic block...
        BasicBlock *InstParent = I->getParent();
        BasicBlock::iterator InsertPos = I;

        if (!isa<PHINode>(Result))        // If combining a PHI, don't insert
          while (isa<PHINode>(InsertPos)) // middle of a block of PHIs.
            ++InsertPos;

        InstParent->getInstList().insert(InsertPos, Result);

        EraseInstFromFunction(*I);
      } else {
        DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
                     << "    New = " << *I << '\n');

        // If the instruction was modified, it's possible that it is now dead.
        // If so, remove it.
        if (isInstructionTriviallyDead(I)) {
          EraseInstFromFunction(*I);
        } else {
          Worklist.Add(I);
          Worklist.AddUsersToWorkList(*I);
        }
      }
      MadeIRChange = true;
    }
  }

  Worklist.Zap();
  return MadeIRChange;
}

bool InstCombiner::runOnFunction(Function &F) {
  MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
  Context = &F.getContext();

  /// Builder - This is an IRBuilder that automatically inserts new
  /// instructions into the worklist when they are created.
  IRBuilder<true, ConstantFolder, InstCombineIRInserter>
    TheBuilder(F.getContext(), ConstantFolder(F.getContext()),
               InstCombineIRInserter(Worklist));
  Builder = &TheBuilder;

  bool EverMadeChange = false;

  // Iterate while there is work to do.
  unsigned Iteration = 0;
  while (DoOneIteration(F, Iteration++))
    EverMadeChange = true;

  Builder = 0;
  return EverMadeChange;
}


FunctionPass *llvm::createInstructionCombiningPass() {
  return new InstCombiner();
}
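
// Usage sketch (assumed client code, not part of this file): a caller would
// typically schedule the pass through the PassManager of this LLVM version,
// for example:
//
//   #include "llvm/PassManager.h"
//   #include "llvm/Transforms/Scalar.h"
//
//   void runInstCombine(llvm::Module &M) {
//     llvm::PassManager PM;
//     PM.add(llvm::createInstructionCombiningPass());
//     PM.run(M);
//   }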