//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
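//
// For example (illustrative only), canonicalizations 5 and 6 rewrite:
//    %a = add i32 %X, %X
//    %b = mul i32 %Y, 8
// into:
//    %a = shl i32 %X, 1
//    %b = shl i32 %Y, 3
//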
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
using namespace llvm::PatternMatch;
STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
namespace {
  class VISIBILITY_HIDDEN InstCombiner
    : public FunctionPass,
      public InstVisitor<InstCombiner, Instruction*> {
    // Worklist of all of the instructions that need to be simplified.
    SmallVector<Instruction*, 256> Worklist;
    DenseMap<Instruction*, unsigned> WorklistMap;
    TargetData *TD;
    LLVMContext *Context;
    bool MustPreserveLCSSA;
  public:
    static char ID; // Pass identification, replacement for typeid
    InstCombiner() : FunctionPass(&ID) {}

    LLVMContext *getContext() const { return Context; }
    /// AddToWorkList - Add the specified instruction to the worklist if it
    /// isn't already in it.
    void AddToWorkList(Instruction *I) {
      if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second)
        Worklist.push_back(I);
    }
    // RemoveFromWorkList - remove I from the worklist if it exists.
    void RemoveFromWorkList(Instruction *I) {
      DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
      if (It == WorklistMap.end()) return; // Not in worklist.

      // Don't bother moving everything down, just null out the slot.
      Worklist[It->second] = 0;

      WorklistMap.erase(It);
    }
    Instruction *RemoveOneFromWorkList() {
      Instruction *I = Worklist.back();
      Worklist.pop_back();
      WorklistMap.erase(I);
      return I;
    }
    /// AddUsersToWorkList - When an instruction is simplified, add all users of
    /// the instruction to the work lists because they might get more simplified
    /// now.
    ///
    void AddUsersToWorkList(Value &I) {
      for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
           UI != UE; ++UI)
        AddToWorkList(cast<Instruction>(*UI));
    }
    /// AddUsesToWorkList - When an instruction is simplified, add operands to
    /// the work lists because they might get more simplified now.
    ///
    void AddUsesToWorkList(Instruction &I) {
      for (User::op_iterator i = I.op_begin(), e = I.op_end(); i != e; ++i)
        if (Instruction *Op = dyn_cast<Instruction>(*i))
          AddToWorkList(Op);
    }
    /// AddSoonDeadInstToWorklist - The specified instruction is about to become
    /// dead.  Add all of its operands to the worklist, turning them into
    /// undef's to reduce the number of uses of those instructions.
    ///
    /// Return the specified operand before it is turned into an undef.
    ///
    Value *AddSoonDeadInstToWorklist(Instruction &I, unsigned op) {
      Value *R = I.getOperand(op);

      for (User::op_iterator i = I.op_begin(), e = I.op_end(); i != e; ++i)
        if (Instruction *Op = dyn_cast<Instruction>(*i)) {
          AddToWorkList(Op);
          // Set the operand to undef to drop the use.
          *i = UndefValue::get(Op->getType());
        }

      return R;
    }
    virtual bool runOnFunction(Function &F);

    bool DoOneIteration(Function &F, unsigned ItNum);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addPreservedID(LCSSAID);
      AU.setPreservesCFG();
    }

    TargetData *getTargetData() const { return TD; }
    // Visitation implementation - Implement instruction combining for different
    // instruction types.  The semantics are as follows:
    //
    //     null        - No change was made
    //     I           - Change was made, I is still valid, I may be dead though
    //     otherwise   - Change was made, replace I with returned instruction
    //
    Instruction *visitAdd(BinaryOperator &I);
    Instruction *visitFAdd(BinaryOperator &I);
    Instruction *visitSub(BinaryOperator &I);
    Instruction *visitFSub(BinaryOperator &I);
    Instruction *visitMul(BinaryOperator &I);
    Instruction *visitFMul(BinaryOperator &I);
    Instruction *visitURem(BinaryOperator &I);
    Instruction *visitSRem(BinaryOperator &I);
    Instruction *visitFRem(BinaryOperator &I);
    bool SimplifyDivRemOfSelect(BinaryOperator &I);
    Instruction *commonRemTransforms(BinaryOperator &I);
    Instruction *commonIRemTransforms(BinaryOperator &I);
    Instruction *commonDivTransforms(BinaryOperator &I);
    Instruction *commonIDivTransforms(BinaryOperator &I);
    Instruction *visitUDiv(BinaryOperator &I);
    Instruction *visitSDiv(BinaryOperator &I);
    Instruction *visitFDiv(BinaryOperator &I);
    Instruction *FoldAndOfICmps(Instruction &I, ICmpInst *LHS, ICmpInst *RHS);
    Instruction *FoldAndOfFCmps(Instruction &I, FCmpInst *LHS, FCmpInst *RHS);
    Instruction *visitAnd(BinaryOperator &I);
    Instruction *FoldOrOfICmps(Instruction &I, ICmpInst *LHS, ICmpInst *RHS);
    Instruction *FoldOrOfFCmps(Instruction &I, FCmpInst *LHS, FCmpInst *RHS);
    Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op,
                                     Value *A, Value *B, Value *C);
    Instruction *visitOr (BinaryOperator &I);
    Instruction *visitXor(BinaryOperator &I);
    Instruction *visitShl(BinaryOperator &I);
    Instruction *visitAShr(BinaryOperator &I);
    Instruction *visitLShr(BinaryOperator &I);
    Instruction *commonShiftTransforms(BinaryOperator &I);
    Instruction *FoldFCmp_IntToFP_Cst(FCmpInst &I, Instruction *LHSI,
                                      Constant *RHSC);
    Instruction *visitFCmpInst(FCmpInst &I);
    Instruction *visitICmpInst(ICmpInst &I);
    Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
    Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
                                                Instruction *LHS,
                                                ConstantInt *RHS);
    Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
                                ConstantInt *DivRHS);

    Instruction *FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                             ICmpInst::Predicate Cond, Instruction &I);
    Instruction *FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
                                     BinaryOperator &I);
    Instruction *commonCastTransforms(CastInst &CI);
    Instruction *commonIntCastTransforms(CastInst &CI);
    Instruction *commonPointerCastTransforms(CastInst &CI);
    Instruction *visitTrunc(TruncInst &CI);
    Instruction *visitZExt(ZExtInst &CI);
    Instruction *visitSExt(SExtInst &CI);
    Instruction *visitFPTrunc(FPTruncInst &CI);
    Instruction *visitFPExt(CastInst &CI);
    Instruction *visitFPToUI(FPToUIInst &FI);
    Instruction *visitFPToSI(FPToSIInst &FI);
    Instruction *visitUIToFP(CastInst &CI);
    Instruction *visitSIToFP(CastInst &CI);
    Instruction *visitPtrToInt(PtrToIntInst &CI);
    Instruction *visitIntToPtr(IntToPtrInst &CI);
    Instruction *visitBitCast(BitCastInst &CI);
    Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI,
                                Instruction *FI);
    Instruction *FoldSelectIntoOp(SelectInst &SI, Value*, Value*);
    Instruction *visitSelectInst(SelectInst &SI);
    Instruction *visitSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
    Instruction *visitCallInst(CallInst &CI);
    Instruction *visitInvokeInst(InvokeInst &II);
    Instruction *visitPHINode(PHINode &PN);
    Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
    Instruction *visitAllocationInst(AllocationInst &AI);
    Instruction *visitFreeInst(FreeInst &FI);
    Instruction *visitLoadInst(LoadInst &LI);
    Instruction *visitStoreInst(StoreInst &SI);
    Instruction *visitBranchInst(BranchInst &BI);
    Instruction *visitSwitchInst(SwitchInst &SI);
    Instruction *visitInsertElementInst(InsertElementInst &IE);
    Instruction *visitExtractElementInst(ExtractElementInst &EI);
    Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
    Instruction *visitExtractValueInst(ExtractValueInst &EV);

    // visitInstruction - Specify what to return for unhandled instructions...
    Instruction *visitInstruction(Instruction &I) { return 0; }

  private:
    Instruction *visitCallSite(CallSite CS);
    bool transformConstExprCastCall(CallSite CS);
    Instruction *transformCallThroughTrampoline(CallSite CS);
    Instruction *transformZExtICmp(ICmpInst *ICI, Instruction &CI,
                                   bool DoXform = true);
    bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS);
    DbgDeclareInst *hasOneUsePlusDeclare(Value *V);

  public:
    // InsertNewInstBefore - insert an instruction New before instruction Old
    // in the program.  Add the new instruction to the worklist.
    //
    Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
      assert(New && New->getParent() == 0 &&
             "New instruction already inserted into a basic block!");
      BasicBlock *BB = Old.getParent();
      BB->getInstList().insert(&Old, New);  // Insert inst
      AddToWorkList(New);
      return New;
    }
    /// InsertCastBefore - Insert a cast of V to TY before the instruction POS.
    /// This also adds the cast to the worklist.  Finally, this returns the
    /// cast.
    Value *InsertCastBefore(Instruction::CastOps opc, Value *V, const Type *Ty,
                            Instruction &Pos) {
      if (V->getType() == Ty) return V;

      if (Constant *CV = dyn_cast<Constant>(V))
        return ConstantExpr::getCast(opc, CV, Ty);

      Instruction *C = CastInst::Create(opc, V, Ty, V->getName(), &Pos);
      AddToWorkList(C);
      return C;
    }

    Value *InsertBitCastBefore(Value *V, const Type *Ty, Instruction &Pos) {
      return InsertCastBefore(Instruction::BitCast, V, Ty, Pos);
    }
    // ReplaceInstUsesWith - This method is to be used when an instruction is
    // found to be dead, replaceable with another preexisting expression.  Here
    // we add all uses of I to the worklist, replace all uses of I with the new
    // value, then return I, so that the inst combiner will know that I was
    // modified.
    //
    Instruction *ReplaceInstUsesWith(Instruction &I, Value *V) {
      AddUsersToWorkList(I);         // Add all modified instrs to worklist
      if (&I != V) {
        I.replaceAllUsesWith(V);
        return &I;
      } else {
        // If we are replacing the instruction with itself, this must be in a
        // segment of unreachable code, so just clobber the instruction.
        I.replaceAllUsesWith(UndefValue::get(I.getType()));
        return &I;
      }
    }
    // EraseInstFromFunction - When dealing with an instruction that has side
    // effects or produces a void value, we can't rely on DCE to delete the
    // instruction.  Instead, visit methods should return the value returned by
    // this function.
    Instruction *EraseInstFromFunction(Instruction &I) {
      assert(I.use_empty() && "Cannot erase instruction that is used!");
      AddUsesToWorkList(I);
      RemoveFromWorkList(&I);
      I.eraseFromParent();
      return 0;  // Don't do anything with FI
    }
    void ComputeMaskedBits(Value *V, const APInt &Mask, APInt &KnownZero,
                           APInt &KnownOne, unsigned Depth = 0) const {
      return llvm::ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
    }

    bool MaskedValueIsZero(Value *V, const APInt &Mask,
                           unsigned Depth = 0) const {
      return llvm::MaskedValueIsZero(V, Mask, TD, Depth);
    }
    unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0) const {
      return llvm::ComputeNumSignBits(Op, TD, Depth);
    }
  private:
    /// SimplifyCommutative - This performs a few simplifications for
    /// commutative operators.
    bool SimplifyCommutative(BinaryOperator &I);

    /// SimplifyCompare - This reorders the operands of a CmpInst to get them in
    /// most-complex to least-complex order.
    bool SimplifyCompare(CmpInst &I);

    /// SimplifyDemandedUseBits - Attempts to replace V with a simpler value
    /// based on the demanded bits.
    Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                   APInt &KnownZero, APInt &KnownOne,
                                   unsigned Depth);
    bool SimplifyDemandedBits(Use &U, APInt DemandedMask,
                              APInt &KnownZero, APInt &KnownOne,
                              unsigned Depth = 0);

    /// SimplifyDemandedInstructionBits - Inst is an integer instruction that
    /// SimplifyDemandedBits knows about.  See if the instruction has any
    /// properties that allow us to simplify its operands.
    bool SimplifyDemandedInstructionBits(Instruction &Inst);

    Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                      APInt &UndefElts, unsigned Depth = 0);

    // FoldOpIntoPhi - Given a binary operator or cast instruction which has a
    // PHI node as operand #0, see if we can fold the instruction into the PHI
    // (which is only possible if all operands to the PHI are constants).
    Instruction *FoldOpIntoPhi(Instruction &I);

    // FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
    // operator and they all are only used by the PHI, PHI together their
    // inputs, and do the operation once, to the result of the PHI.
    Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
    Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
    Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);

    Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
                          ConstantInt *AndRHS, BinaryOperator &TheAnd);

    Value *FoldLogicalPlusAnd(Value *LHS, Value *RHS, ConstantInt *Mask,
                              bool isSub, Instruction &I);
    Instruction *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
                                 bool isSigned, bool Inside, Instruction &IB);
    Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocationInst &AI);
    Instruction *MatchBSwap(BinaryOperator &I);
    bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
    Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
    Instruction *SimplifyMemSet(MemSetInst *MI);

    Value *EvaluateInDifferentType(Value *V, const Type *Ty, bool isSigned);

    bool CanEvaluateInDifferentType(Value *V, const Type *Ty,
                                    unsigned CastOpc, int &NumCastsRemoved);
    unsigned GetOrEnforceKnownAlignment(Value *V,
                                        unsigned PrefAlign = 0);
  };
} // end anonymous namespace
char InstCombiner::ID = 0;
static RegisterPass<InstCombiner>
X("instcombine", "Combine redundant instructions");
// getComplexity:  Assign a complexity or rank value to LLVM Values...
//   0 -> undef, 1 -> Const, 2 -> Other, 3 -> Arg, 3 -> Unary, 4 -> OtherInst
static unsigned getComplexity(LLVMContext *Context, Value *V) {
  if (isa<Instruction>(V)) {
    if (BinaryOperator::isNeg(V) ||
        BinaryOperator::isFNeg(V) ||
        BinaryOperator::isNot(V))
      return 3;
    return 4;
  }
  if (isa<Argument>(V)) return 3;
  return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
}
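
// Illustrative ordering under the ranks above: undef (0) < any constant (1)
// < other non-instruction values (2) < a function argument or a neg/not/fneg
// instruction (3) < any other instruction (4).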
// isOnlyUse - Return true if this instruction will be deleted if we stop using
// it.
static bool isOnlyUse(Value *V) {
  return V->hasOneUse() || isa<Constant>(V);
}
// getPromotedType - Return the specified type promoted as it would be to pass
// through a va_arg area...
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::Int32Ty;
  }
  return Ty;
}
/// getBitCastOperand - If the specified operand is a CastInst, a constant
/// expression bitcast, or a GetElementPtrInst with all zero indices, return the
/// operand value, otherwise return null.
static Value *getBitCastOperand(Value *V) {
  if (Operator *O = dyn_cast<Operator>(V)) {
    if (O->getOpcode() == Instruction::BitCast)
      return O->getOperand(0);
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      if (GEP->hasAllZeroIndices())
        return GEP->getPointerOperand();
  }
  return 0;
}
/// This function is a wrapper around CastInst::isEliminableCastPair. It
/// simply extracts arguments and returns what that function returns.
static Instruction::CastOps
isEliminableCastPair(
  const CastInst *CI, ///< The first cast instruction
  unsigned opcode,    ///< The opcode of the second cast instruction
  const Type *DstTy,  ///< The target type for the second cast instruction
  TargetData *TD      ///< The target data for pointer size
) {
  const Type *SrcTy = CI->getOperand(0)->getType();   // A from above
  const Type *MidTy = CI->getType();                  // B from above

  // Get the opcodes of the two Cast instructions
  Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
  Instruction::CastOps secondOp = Instruction::CastOps(opcode);

  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy,
                                                TD ? TD->getIntPtrType() : 0);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr && SrcTy != TD->getIntPtrType()) ||
      (Res == Instruction::PtrToInt && DstTy != TD->getIntPtrType()))
    Res = 0;

  return Instruction::CastOps(Res);
}
/// ValueRequiresCast - Return true if the cast from "V to Ty" actually results
/// in any code being generated.  It does not require codegen if V is simple
/// enough or if the cast can be folded into other casts.
static bool ValueRequiresCast(Instruction::CastOps opcode, const Value *V,
                              const Type *Ty, TargetData *TD) {
  if (V->getType() == Ty || isa<Constant>(V)) return false;

  // If this is another cast that can be eliminated, it isn't codegen either.
  if (const CastInst *CI = dyn_cast<CastInst>(V))
    if (isEliminableCastPair(CI, opcode, Ty, TD))
      return false;
  return true;
}
// SimplifyCommutative - This performs a few simplifications for commutative
// operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex).  This puts constants before unary operators before
//     binary operators.
//
//  2. Transform: (op (op V, C1), C2) ==> (op V, (op C1, C2))
//  3. Transform: (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
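//
// For instance (illustrative), rule 2 turns (add (add %X, 3), 5) into
// (add %X, 8) by folding the two constants, and rule 3 turns
// (add (add %A, 1), (add %B, 2)) into (add (add %A, %B), 3).
//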
bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
  bool Changed = false;
  if (getComplexity(Context, I.getOperand(0)) <
      getComplexity(Context, I.getOperand(1)))
    Changed = !I.swapOperands();

  if (!I.isAssociative()) return Changed;
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (BinaryOperator *Op = dyn_cast<BinaryOperator>(I.getOperand(0)))
    if (Op->getOpcode() == Opcode && isa<Constant>(Op->getOperand(1))) {
      if (isa<Constant>(I.getOperand(1))) {
        Constant *Folded = ConstantExpr::get(I.getOpcode(),
                                             cast<Constant>(I.getOperand(1)),
                                             cast<Constant>(Op->getOperand(1)));
        I.setOperand(0, Op->getOperand(0));
        I.setOperand(1, Folded);
        return true;
      } else if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1)))
        if (Op1->getOpcode() == Opcode && isa<Constant>(Op1->getOperand(1)) &&
            isOnlyUse(Op) && isOnlyUse(Op1)) {
          Constant *C1 = cast<Constant>(Op->getOperand(1));
          Constant *C2 = cast<Constant>(Op1->getOperand(1));

          // Fold (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
          Constant *Folded = ConstantExpr::get(I.getOpcode(), C1, C2);
          Instruction *New = BinaryOperator::Create(Opcode, Op->getOperand(0),
                                                    Op1->getOperand(0),
                                                    Op1->getName(), &I);
          AddToWorkList(New);
          I.setOperand(0, New);
          I.setOperand(1, Folded);
          return true;
        }
    }
  return Changed;
}
/// SimplifyCompare - For a CmpInst this function just orders the operands
/// so that they are listed from right (least complex) to left (most complex).
/// This puts constants before unary operators before binary operators.
bool InstCombiner::SimplifyCompare(CmpInst &I) {
  if (getComplexity(Context, I.getOperand(0)) >=
      getComplexity(Context, I.getOperand(1)))
    return false;
  I.swapOperands();
  // Compare instructions are not associative so there's nothing else we can do.
  return true;
}
// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
// if the LHS is a constant zero (which is the 'negate' form).
//
static inline Value *dyn_castNegVal(Value *V, LLVMContext *Context) {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isInteger())
      return ConstantExpr::getNeg(C);

  return 0;
}
// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
//
static inline Value *dyn_castFNegVal(Value *V, LLVMContext *Context) {
  if (BinaryOperator::isFNeg(V))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isFloatingPoint())
      return ConstantExpr::getFNeg(C);

  return 0;
}
static inline Value *dyn_castNotVal(Value *V, LLVMContext *Context) {
  if (BinaryOperator::isNot(V))
    return BinaryOperator::getNotArgument(V);

  // Constants can be considered to be not'ed values...
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantInt::get(*Context, ~C->getValue());
  return 0;
}
// dyn_castFoldableMul - If this value is a multiply that can be folded into
// other computations (because it has a constant operand), return the
// non-constant operand of the multiply, and set CST to point to the multiplier.
// Otherwise, return null.
//
static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST,
                                         LLVMContext *Context) {
  if (V->hasOneUse() && V->getType()->isInteger())
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      if (I->getOpcode() == Instruction::Mul)
        if ((CST = dyn_cast<ConstantInt>(I->getOperand(1))))
          return I->getOperand(0);
      if (I->getOpcode() == Instruction::Shl)
        if ((CST = dyn_cast<ConstantInt>(I->getOperand(1)))) {
          // The multiplier is really 1 << CST.
          uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
          uint32_t CSTVal = CST->getLimitedValue(BitWidth);
          CST = ConstantInt::get(*Context, APInt(BitWidth, 1).shl(CSTVal));
          return I->getOperand(0);
        }
    }
  return 0;
}
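
// For example (illustrative): for a single-use %t = shl i32 %X, 3, this
// returns %X and sets CST to 8, since the shift is equivalent to %X * 8.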
/// AddOne - Add one to a ConstantInt
static Constant *AddOne(Constant *C, LLVMContext *Context) {
  return ConstantExpr::getAdd(C,
                              ConstantInt::get(C->getType(), 1));
}
/// SubOne - Subtract one from a ConstantInt
static Constant *SubOne(ConstantInt *C, LLVMContext *Context) {
  return ConstantExpr::getSub(C,
                              ConstantInt::get(C->getType(), 1));
}
/// MultiplyOverflows - True if the multiply can not be expressed in an int
/// this size.
static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign,
                              LLVMContext *Context) {
  uint32_t W = C1->getBitWidth();
  APInt LHSExt = C1->getValue(), RHSExt = C2->getValue();
  if (sign) {
    LHSExt.sext(W * 2);
    RHSExt.sext(W * 2);
  } else {
    LHSExt.zext(W * 2);
    RHSExt.zext(W * 2);
  }

  APInt MulExt = LHSExt * RHSExt;

  if (sign) {
    APInt Min = APInt::getSignedMinValue(W).sext(W * 2);
    APInt Max = APInt::getSignedMaxValue(W).sext(W * 2);
    return MulExt.slt(Min) || MulExt.sgt(Max);
  }
  return MulExt.ugt(APInt::getLowBitsSet(W * 2, W));
}
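
// Worked example (illustrative): for i8 with sign == true, 100 * 3 = 300,
// which exceeds the signed i8 maximum of 127, so this returns true;
// 10 * 3 = 30 fits in i8 and returns false.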
/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer.  If so, check to see if there
/// are any bits set in the constant that are not demanded.  If so, shrink the
/// constant and return true.
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
                                   APInt Demanded, LLVMContext *Context) {
  assert(I && "No instruction?");
  assert(OpNo < I->getNumOperands() && "Operand index too large");

  // If the operand is not a constant integer, nothing to do.
  ConstantInt *OpC = dyn_cast<ConstantInt>(I->getOperand(OpNo));
  if (!OpC) return false;

  // If there are no bits set that aren't demanded, nothing to do.
  Demanded.zextOrTrunc(OpC->getValue().getBitWidth());
  if ((~Demanded & OpC->getValue()) == 0)
    return false;

  // This instruction is producing bits that are not demanded. Shrink the RHS.
  Demanded &= OpC->getValue();
  I->setOperand(OpNo, ConstantInt::get(*Context, Demanded));
  return true;
}
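
// Example (illustrative): if only the low 8 bits of (or %X, 0x1F0) are
// demanded, the constant is shrunk to 0xF0, because bit 8 of the constant can
// never be observed by any user.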
// ComputeSignedMinMaxValuesFromKnownBits - Given a signed integer type and a
// set of known zero and one bits, compute the maximum and minimum values that
// could have the specified known zero and known one bits, returning them in
// Min and Max.
static void ComputeSignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
                                                   const APInt &KnownOne,
                                                   APInt &Min, APInt &Max) {
  assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
         KnownZero.getBitWidth() == Min.getBitWidth() &&
         KnownZero.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(KnownZero|KnownOne);

  // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
  // bit if it is unknown.
  Min = KnownOne;
  Max = KnownOne|UnknownBits;

  if (UnknownBits.isNegative()) { // Sign bit is unknown
    Min.set(Min.getBitWidth()-1);
    Max.clear(Max.getBitWidth()-1);
  }
}
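
// Worked example (illustrative), for an i4 value with KnownZero = 0b0100 and
// KnownOne = 0b0001: the unknown bits are 0b1010, so Min = 0b1001 (-7, sign
// bit forced on) and Max = 0b0011 (3, sign bit forced off).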
// ComputeUnsignedMinMaxValuesFromKnownBits - Given an unsigned integer type and
// a set of known zero and one bits, compute the maximum and minimum values that
// could have the specified known zero and known one bits, returning them in
// Min and Max.
static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
                                                     const APInt &KnownOne,
                                                     APInt &Min, APInt &Max) {
  assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
         KnownZero.getBitWidth() == Min.getBitWidth() &&
         KnownZero.getBitWidth() == Max.getBitWidth() &&
         "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(KnownZero|KnownOne);

  // The minimum value is when the unknown bits are all zeros.
  Min = KnownOne;
  // The maximum value is when the unknown bits are all ones.
  Max = KnownOne|UnknownBits;
}
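
// Worked example (illustrative), same i4 value with KnownZero = 0b0100 and
// KnownOne = 0b0001: interpreted as unsigned, Min = 0b0001 (1) and
// Max = 0b1011 (11).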
/// SimplifyDemandedInstructionBits - Inst is an integer instruction that
/// SimplifyDemandedBits knows about.  See if the instruction has any
/// properties that allow us to simplify its operands.
bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
  unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  APInt DemandedMask(APInt::getAllOnesValue(BitWidth));

  Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask,
                                     KnownZero, KnownOne, 0);
  if (V == 0) return false;
  if (V == &Inst) return true;
  ReplaceInstUsesWith(Inst, V);
  return true;
}
/// SimplifyDemandedBits - This form of SimplifyDemandedBits simplifies the
/// specified instruction operand if possible, updating it in place.  It returns
/// true if it made any change and false otherwise.
bool InstCombiner::SimplifyDemandedBits(Use &U, APInt DemandedMask,
                                        APInt &KnownZero, APInt &KnownOne,
                                        unsigned Depth) {
  Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask,
                                          KnownZero, KnownOne, Depth);
  if (NewVal == 0) return false;
  U = NewVal;
  return true;
}
/// SimplifyDemandedUseBits - This function attempts to replace V with a simpler
/// value based on the demanded bits. When this function is called, it is known
/// that only the bits set in DemandedMask of the result of V are ever used
/// downstream. Consequently, depending on the mask and V, it may be possible
/// to replace V with a constant or one of its operands. In such cases, this
/// function does the replacement and returns true. In all other cases, it
/// returns false after analyzing the expression; KnownOne then contains all the
/// bits that are known to be one in the expression, and KnownZero contains all
/// the bits that are known to be zero. These are provided to potentially allow
/// the caller (which might recursively be SimplifyDemandedBits itself) to
/// simplify the expression. KnownOne and KnownZero always follow the invariant
/// that KnownOne & KnownZero == 0. That is, a bit can't be both 1 and 0. Note
/// that the bits in KnownOne and KnownZero may only be accurate for those bits
/// set in DemandedMask. Note also that the bitwidth of V, DemandedMask,
/// KnownZero and KnownOne must all be the same.
///
/// This returns null if it did not change anything and it permits no
/// simplification. This returns V itself if it did some simplification of V's
/// operands based on the information about what bits are demanded. This returns
/// some other non-null value if it found out that V is equal to another value
/// in the context where the specified bits are demanded, but not for all users.
Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                             APInt &KnownZero, APInt &KnownOne,
                                             unsigned Depth) {
  assert(V != 0 && "Null pointer of Value???");
  assert(Depth <= 6 && "Limit Search Depth");
  uint32_t BitWidth = DemandedMask.getBitWidth();
  const Type *VTy = V->getType();
  assert((TD || !isa<PointerType>(VTy)) &&
         "SimplifyDemandedBits needs to know bit widths!");
  assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
         (!VTy->isIntOrIntVector() ||
          VTy->getScalarSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "Value *V, DemandedMask, KnownZero and KnownOne "
         "must have same BitWidth");
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // We know all of the bits for a constant!
    KnownOne = CI->getValue() & DemandedMask;
    KnownZero = ~KnownOne & DemandedMask;
    return 0;
  }
  if (isa<ConstantPointerNull>(V)) {
    // We know all of the bits for a constant!
    KnownOne.clear();
    KnownZero = DemandedMask;
    return 0;
  }

  KnownZero.clear();
  KnownOne.clear();
  if (DemandedMask == 0) {     // Not demanding any bits from V.
    if (isa<UndefValue>(V))
      return 0;
    return UndefValue::get(VTy);
  }

  if (Depth == 6)        // Limit search depth.
    return 0;

  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  APInt &RHSKnownZero = KnownZero, &RHSKnownOne = KnownOne;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) {
    ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
    return 0;        // Only analyze instructions.
  }
  // If there are multiple uses of this value and we aren't at the root, then
  // we can't do any simplifications of the operands, because DemandedMask
  // only reflects the bits demanded by *one* of the users.
  if (Depth != 0 && !I->hasOneUse()) {
    // Despite the fact that we can't simplify this instruction in all User's
    // context, we can at least compute the knownzero/knownone bits, and we can
    // do simplifications that apply to *just* the one user if we know that
    // this instruction has a simpler value in that context.
    if (I->getOpcode() == Instruction::And) {
      // If either the LHS or the RHS are Zero, the result is zero.
      ComputeMaskedBits(I->getOperand(1), DemandedMask,
                        RHSKnownZero, RHSKnownOne, Depth+1);
      ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownZero,
                        LHSKnownZero, LHSKnownOne, Depth+1);

      // If all of the demanded bits are known 1 on one side, return the other.
      // These bits cannot contribute to the result of the 'and' in this
      // context.
      if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
          (DemandedMask & ~LHSKnownZero))
        return I->getOperand(0);
      if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
          (DemandedMask & ~RHSKnownZero))
        return I->getOperand(1);

      // If all of the demanded bits in the inputs are known zeros, return zero.
      if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
        return Constant::getNullValue(VTy);

    } else if (I->getOpcode() == Instruction::Or) {
      // We can simplify (X|Y) -> X or Y in the user's context if we know that
      // only bits from X or Y are demanded.

      // If either the LHS or the RHS are One, the result is One.
      ComputeMaskedBits(I->getOperand(1), DemandedMask,
                        RHSKnownZero, RHSKnownOne, Depth+1);
      ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownOne,
                        LHSKnownZero, LHSKnownOne, Depth+1);

      // If all of the demanded bits are known zero on one side, return the
      // other.  These bits cannot contribute to the result of the 'or' in this
      // context.
      if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
          (DemandedMask & ~LHSKnownOne))
        return I->getOperand(0);
      if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
          (DemandedMask & ~RHSKnownOne))
        return I->getOperand(1);

      // If all of the potentially set bits on one side are known to be set on
      // the other side, just use the 'other' side.
      if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
          (DemandedMask & (~RHSKnownZero)))
        return I->getOperand(0);
      if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
          (DemandedMask & (~LHSKnownZero)))
        return I->getOperand(1);
    }

    // Compute the KnownZero/KnownOne bits to simplify things downstream.
    ComputeMaskedBits(I, DemandedMask, KnownZero, KnownOne, Depth);
    return 0;
  }
  // If this is the root being simplified, allow it to have multiple uses,
  // just set the DemandedMask to all bits so that we can try to simplify the
  // operands.  This allows visitTruncInst (for example) to simplify the
  // operand of a trunc without duplicating all the logic below.
  if (Depth == 0 && !V->hasOneUse())
    DemandedMask = APInt::getAllOnesValue(BitWidth);

  switch (I->getOpcode()) {
  default:
    ComputeMaskedBits(I, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
    break;
  case Instruction::And:
    // If either the LHS or the RHS are Zero, the result is zero.
    if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1) ||
        SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownZero,
                             LHSKnownZero, LHSKnownOne, Depth+1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
        (DemandedMask & ~LHSKnownZero))
      return I->getOperand(0);
    if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
        (DemandedMask & ~RHSKnownZero))
      return I->getOperand(1);

    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
      return Constant::getNullValue(VTy);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnownZero, Context))
      return I;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    RHSKnownOne &= LHSKnownOne;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    RHSKnownZero |= LHSKnownZero;
    break;
  case Instruction::Or:
    // If either the LHS or the RHS are One, the result is One.
    if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1) ||
        SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownOne,
                             LHSKnownZero, LHSKnownOne, Depth+1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
        (DemandedMask & ~LHSKnownOne))
      return I->getOperand(0);
    if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
        (DemandedMask & ~RHSKnownOne))
      return I->getOperand(1);

    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
        (DemandedMask & (~RHSKnownZero)))
      return I->getOperand(0);
    if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
        (DemandedMask & (~LHSKnownZero)))
      return I->getOperand(1);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask, Context))
      return I;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    RHSKnownZero &= LHSKnownZero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    RHSKnownOne |= LHSKnownOne;
    break;
: {
978 if (SimplifyDemandedBits(I
->getOperandUse(1), DemandedMask
,
979 RHSKnownZero
, RHSKnownOne
, Depth
+1) ||
980 SimplifyDemandedBits(I
->getOperandUse(0), DemandedMask
,
981 LHSKnownZero
, LHSKnownOne
, Depth
+1))
983 assert(!(RHSKnownZero
& RHSKnownOne
) && "Bits known to be one AND zero?");
984 assert(!(LHSKnownZero
& LHSKnownOne
) && "Bits known to be one AND zero?");
986 // If all of the demanded bits are known zero on one side, return the other.
987 // These bits cannot contribute to the result of the 'xor'.
988 if ((DemandedMask
& RHSKnownZero
) == DemandedMask
)
989 return I
->getOperand(0);
990 if ((DemandedMask
& LHSKnownZero
) == DemandedMask
)
991 return I
->getOperand(1);
993 // Output known-0 bits are known if clear or set in both the LHS & RHS.
994 APInt KnownZeroOut
= (RHSKnownZero
& LHSKnownZero
) |
995 (RHSKnownOne
& LHSKnownOne
);
996 // Output known-1 are known to be set if set in only one of the LHS, RHS.
997 APInt KnownOneOut
= (RHSKnownZero
& LHSKnownOne
) |
998 (RHSKnownOne
& LHSKnownZero
);
1000 // If all of the demanded bits are known to be zero on one side or the
1001 // other, turn this into an *inclusive* or.
1002 // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
1003 if ((DemandedMask
& ~RHSKnownZero
& ~LHSKnownZero
) == 0) {
1005 BinaryOperator::CreateOr(I
->getOperand(0), I
->getOperand(1),
1007 return InsertNewInstBefore(Or
, *I
);
1010 // If all of the demanded bits on one side are known, and all of the set
1011 // bits on that side are also known to be set on the other side, turn this
1012 // into an AND, as we know the bits will be cleared.
1013 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
1014 if ((DemandedMask
& (RHSKnownZero
|RHSKnownOne
)) == DemandedMask
) {
1016 if ((RHSKnownOne
& LHSKnownOne
) == RHSKnownOne
) {
1017 Constant
*AndC
= Constant::getIntegerValue(VTy
,
1018 ~RHSKnownOne
& DemandedMask
);
1020 BinaryOperator::CreateAnd(I
->getOperand(0), AndC
, "tmp");
1021 return InsertNewInstBefore(And
, *I
);
1025 // If the RHS is a constant, see if we can simplify it.
1026 // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
1027 if (ShrinkDemandedConstant(I
, 1, DemandedMask
, Context
))
1030 RHSKnownZero
= KnownZeroOut
;
1031 RHSKnownOne
= KnownOneOut
;
  case Instruction::Select:
    if (SimplifyDemandedBits(I->getOperandUse(2), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1) ||
        SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
                             LHSKnownZero, LHSKnownOne, Depth+1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(I, 1, DemandedMask, Context) ||
        ShrinkDemandedConstant(I, 2, DemandedMask, Context))
      return I;

    // Only known if known in both the LHS and RHS.
    RHSKnownOne &= LHSKnownOne;
    RHSKnownZero &= LHSKnownZero;
    break;
: {
1053 unsigned truncBf
= I
->getOperand(0)->getType()->getScalarSizeInBits();
1054 DemandedMask
.zext(truncBf
);
1055 RHSKnownZero
.zext(truncBf
);
1056 RHSKnownOne
.zext(truncBf
);
1057 if (SimplifyDemandedBits(I
->getOperandUse(0), DemandedMask
,
1058 RHSKnownZero
, RHSKnownOne
, Depth
+1))
1060 DemandedMask
.trunc(BitWidth
);
1061 RHSKnownZero
.trunc(BitWidth
);
1062 RHSKnownOne
.trunc(BitWidth
);
1063 assert(!(RHSKnownZero
& RHSKnownOne
) && "Bits known to be one AND zero?");
  case Instruction::BitCast:
    if (!I->getOperand(0)->getType()->isIntOrIntVector())
      return false;  // vector->int or fp->int?

    if (const VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
      if (const VectorType *SrcVTy =
            dyn_cast<VectorType>(I->getOperand(0)->getType())) {
        if (DstVTy->getNumElements() != SrcVTy->getNumElements())
          // Don't touch a bitcast between vectors of different element counts.
          return false;
      } else
        // Don't touch a scalar-to-vector bitcast.
        return false;
    } else if (isa<VectorType>(I->getOperand(0)->getType()))
      // Don't touch a vector-to-scalar bitcast.
      return false;

    if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    break;
  case Instruction::ZExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    DemandedMask.trunc(SrcBitWidth);
    RHSKnownZero.trunc(SrcBitWidth);
    RHSKnownOne.trunc(SrcBitWidth);
    if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1))
      return I;
    DemandedMask.zext(BitWidth);
    RHSKnownZero.zext(BitWidth);
    RHSKnownOne.zext(BitWidth);
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    // The top bits are known to be zero.
    RHSKnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    break;
  }
: {
1107 // Compute the bits in the result that are not present in the input.
1108 unsigned SrcBitWidth
=I
->getOperand(0)->getType()->getScalarSizeInBits();
1110 APInt InputDemandedBits
= DemandedMask
&
1111 APInt::getLowBitsSet(BitWidth
, SrcBitWidth
);
1113 APInt
NewBits(APInt::getHighBitsSet(BitWidth
, BitWidth
- SrcBitWidth
));
1114 // If any of the sign extended bits are demanded, we know that the sign
1116 if ((NewBits
& DemandedMask
) != 0)
1117 InputDemandedBits
.set(SrcBitWidth
-1);
1119 InputDemandedBits
.trunc(SrcBitWidth
);
1120 RHSKnownZero
.trunc(SrcBitWidth
);
1121 RHSKnownOne
.trunc(SrcBitWidth
);
1122 if (SimplifyDemandedBits(I
->getOperandUse(0), InputDemandedBits
,
1123 RHSKnownZero
, RHSKnownOne
, Depth
+1))
1125 InputDemandedBits
.zext(BitWidth
);
1126 RHSKnownZero
.zext(BitWidth
);
1127 RHSKnownOne
.zext(BitWidth
);
1128 assert(!(RHSKnownZero
& RHSKnownOne
) && "Bits known to be one AND zero?");
1130 // If the sign bit of the input is known set or clear, then we know the
1131 // top bits of the result.
1133 // If the input sign bit is known zero, or if the NewBits are not demanded
1134 // convert this into a zero extension.
1135 if (RHSKnownZero
[SrcBitWidth
-1] || (NewBits
& ~DemandedMask
) == NewBits
) {
1136 // Convert to ZExt cast
1137 CastInst
*NewCast
= new ZExtInst(I
->getOperand(0), VTy
, I
->getName());
1138 return InsertNewInstBefore(NewCast
, *I
);
1139 } else if (RHSKnownOne
[SrcBitWidth
-1]) { // Input sign bit known set
1140 RHSKnownOne
|= NewBits
;
1144 case Instruction::Add
: {
1145 // Figure out what the input bits are. If the top bits of the and result
1146 // are not demanded, then the add doesn't demand them from its input
1148 unsigned NLZ
= DemandedMask
.countLeadingZeros();
1150 // If there is a constant on the RHS, there are a variety of xformations
1152 if (ConstantInt
*RHS
= dyn_cast
<ConstantInt
>(I
->getOperand(1))) {
1153 // If null, this should be simplified elsewhere. Some of the xforms here
1154 // won't work if the RHS is zero.
1158 // If the top bit of the output is demanded, demand everything from the
1159 // input. Otherwise, we demand all the input bits except NLZ top bits.
1160 APInt
InDemandedBits(APInt::getLowBitsSet(BitWidth
, BitWidth
- NLZ
));
1162 // Find information about known zero/one bits in the input.
1163 if (SimplifyDemandedBits(I
->getOperandUse(0), InDemandedBits
,
1164 LHSKnownZero
, LHSKnownOne
, Depth
+1))
1167 // If the RHS of the add has bits set that can't affect the input, reduce
1169 if (ShrinkDemandedConstant(I
, 1, InDemandedBits
, Context
))
1172 // Avoid excess work.
1173 if (LHSKnownZero
== 0 && LHSKnownOne
== 0)
1176 // Turn it into OR if input bits are zero.
1177 if ((LHSKnownZero
& RHS
->getValue()) == RHS
->getValue()) {
1179 BinaryOperator::CreateOr(I
->getOperand(0), I
->getOperand(1),
1181 return InsertNewInstBefore(Or
, *I
);
1184 // We can say something about the output known-zero and known-one bits,
1185 // depending on potential carries from the input constant and the
1186 // unknowns. For example if the LHS is known to have at most the 0x0F0F0
1187 // bits set and the RHS constant is 0x01001, then we know we have a known
1188 // one mask of 0x00001 and a known zero mask of 0xE0F0E.
1190 // To compute this, we first compute the potential carry bits. These are
1191 // the bits which may be modified. I'm not aware of a better way to do
1193 const APInt
&RHSVal
= RHS
->getValue();
1194 APInt
CarryBits((~LHSKnownZero
+ RHSVal
) ^ (~LHSKnownZero
^ RHSVal
));
1196 // Now that we know which bits have carries, compute the known-1/0 sets.
1198 // Bits are known one if they are known zero in one operand and one in the
1199 // other, and there is no input carry.
1200 RHSKnownOne
= ((LHSKnownZero
& RHSVal
) |
1201 (LHSKnownOne
& ~RHSVal
)) & ~CarryBits
;
1203 // Bits are known zero if they are known zero in both operands and there
1204 // is no input carry.
1205 RHSKnownZero
= LHSKnownZero
& ~RHSVal
& ~CarryBits
;
1207 // If the high-bits of this ADD are not demanded, then it does not demand
1208 // the high bits of its LHS or RHS.
1209 if (DemandedMask
[BitWidth
-1] == 0) {
1210 // Right fill the mask of bits for this ADD to demand the most
1211 // significant bit and all those below it.
1212 APInt
DemandedFromOps(APInt::getLowBitsSet(BitWidth
, BitWidth
-NLZ
));
1213 if (SimplifyDemandedBits(I
->getOperandUse(0), DemandedFromOps
,
1214 LHSKnownZero
, LHSKnownOne
, Depth
+1) ||
1215 SimplifyDemandedBits(I
->getOperandUse(1), DemandedFromOps
,
1216 LHSKnownZero
, LHSKnownOne
, Depth
+1))
  case Instruction::Sub:
    // If the high-bits of this SUB are not demanded, then it does not demand
    // the high bits of its LHS or RHS.
    if (DemandedMask[BitWidth-1] == 0) {
      // Right fill the mask of bits for this SUB to demand the most
      // significant bit and all those below it.
      uint32_t NLZ = DemandedMask.countLeadingZeros();
      APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
      if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
                               LHSKnownZero, LHSKnownOne, Depth+1) ||
          SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
                               LHSKnownZero, LHSKnownOne, Depth+1))
        return I;
    }
    // Otherwise just hand the sub off to ComputeMaskedBits to fill in
    // the known zeros and ones.
    ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
    break;
  case Instruction::Shl:
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
      APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
      if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
                               RHSKnownZero, RHSKnownOne, Depth+1))
        return I;
      assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
      RHSKnownZero <<= ShiftAmt;
      RHSKnownOne  <<= ShiftAmt;
      // low bits known zero.
      if (ShiftAmt)
        RHSKnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
    }
    break;
  case Instruction::LShr:
    // For a logical shift right
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Unsigned shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
      if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
                               RHSKnownZero, RHSKnownOne, Depth+1))
        return I;
      assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
      RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
      RHSKnownOne  = APIntOps::lshr(RHSKnownOne, ShiftAmt);
      if (ShiftAmt) {
        // Compute the new bits that are at the top now.
        APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
        RHSKnownZero |= HighBits;  // high bits known zero.
      }
    }
    break;
  case Instruction::AShr:
    // If this is an arithmetic shift right and only the low-bit is set, we can
    // always convert this into a logical shr, even if the shift amount is
    // variable.  The low bit of the shift cannot be an input sign bit unless
    // the shift amount is >= the size of the datatype, which is undefined.
    if (DemandedMask == 1) {
      // Perform the logical shift right.
      Instruction *NewVal = BinaryOperator::CreateLShr(
                        I->getOperand(0), I->getOperand(1), I->getName());
      return InsertNewInstBefore(NewVal, *I);
    }

    // If the sign bit is the only bit demanded by this ashr, then there is no
    // need to do it, the shift doesn't change the high bit.
    if (DemandedMask.isSignBit())
      return I->getOperand(0);

    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint32_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Signed shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
      // If any of the "high bits" are demanded, we should set the sign bit as
      // demanded.
      if (DemandedMask.countLeadingZeros() <= ShiftAmt)
        DemandedMaskIn.set(BitWidth-1);
      if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
                               RHSKnownZero, RHSKnownOne, Depth+1))
        return I;
      assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
      // Compute the new bits that are at the top now.
      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
      RHSKnownOne  = APIntOps::lshr(RHSKnownOne, ShiftAmt);

      // Handle the sign bits.
      APInt SignBit(APInt::getSignBit(BitWidth));
      // Adjust to where it is now in the mask.
      SignBit = APIntOps::lshr(SignBit, ShiftAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (BitWidth <= ShiftAmt || RHSKnownZero[BitWidth-ShiftAmt-1] ||
          (HighBits & ~DemandedMask) == HighBits) {
        // Perform the logical shift right.
        Instruction *NewVal = BinaryOperator::CreateLShr(
                          I->getOperand(0), SA, I->getName());
        return InsertNewInstBefore(NewVal, *I);
      } else if ((RHSKnownOne & SignBit) != 0) { // New bits are known one.
        RHSKnownOne |= HighBits;
      }
    }
    break;
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        if (DemandedMask.ult(RA))    // srem won't affect demanded bits
          return I->getOperand(0);

        APInt LowBits = RA - 1;
        APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
        if (SimplifyDemandedBits(I->getOperandUse(0), Mask2,
                                 LHSKnownZero, LHSKnownOne, Depth+1))
          return I;

        if (LHSKnownZero[BitWidth-1] || ((LHSKnownZero & LowBits) == LowBits))
          LHSKnownZero |= ~LowBits;

        KnownZero |= LHSKnownZero & DemandedMask;

        assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
      }
    }
    break;
  case Instruction::URem: {
    APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    if (SimplifyDemandedBits(I->getOperandUse(0), AllOnes,
                             KnownZero2, KnownOne2, Depth+1) ||
        SimplifyDemandedBits(I->getOperandUse(1), AllOnes,
                             KnownZero2, KnownOne2, Depth+1))
      return I;

    unsigned Leaders = KnownZero2.countLeadingOnes();
    Leaders = std::max(Leaders,
                       KnownZero2.countLeadingOnes());
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bswap: {
        // If the only bits demanded come from one byte of the bswap result,
        // just shift the input byte into position to eliminate the bswap.
        unsigned NLZ = DemandedMask.countLeadingZeros();
        unsigned NTZ = DemandedMask.countTrailingZeros();

        // Round NTZ down to the next byte.  If we have 11 trailing zeros, then
        // we need all the bits down to bit 8.  Likewise, round NLZ.  If we
        // have 14 leading zeros, round to 8.
        NLZ &= ~7;
        NTZ &= ~7;
        // If we need exactly one byte, we can do this transformation.
        if (BitWidth-NLZ-NTZ == 8) {
          unsigned ResultBit = NTZ;
          unsigned InputBit = BitWidth-NTZ-8;

          // Replace this with either a left or right shift to get the byte into
          // the right place.
          Instruction *NewVal;
          if (InputBit > ResultBit)
            NewVal = BinaryOperator::CreateLShr(I->getOperand(1),
                     ConstantInt::get(I->getType(), InputBit-ResultBit));
          else
            NewVal = BinaryOperator::CreateShl(I->getOperand(1),
                     ConstantInt::get(I->getType(), ResultBit-InputBit));
          NewVal->takeName(I);
          return InsertNewInstBefore(NewVal, *I);
        }

        // TODO: Could compute known zero/one bits based on the input.
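        // Worked example (illustrative): for a 32-bit bswap where only bits
        // 16..23 of the result are demanded, NLZ rounds to 8 and NTZ to 16, so
        // exactly one byte is needed; InputBit = 32-16-8 = 8 and ResultBit = 16,
        // so the byte is moved into place with a shl by 8.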
        break;
      }
      }
    }
    ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
    break;
  }

  // If the client is only demanding bits that we know, return the known
  // constant.
  if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask)
    return Constant::getIntegerValue(VTy, RHSKnownOne);
  return 0;
}
/// SimplifyDemandedVectorElts - The specified value produces a vector with
/// any number of elements. DemandedElts contains the set of elements that are
/// actually used by the caller.  This method analyzes which elements of the
/// operand are undef and returns that information in UndefElts.
///
/// If the information about demanded elements can be used to simplify the
/// operation, the operation is simplified, then the resultant value is
/// returned.  This returns null if no change was made.
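///
/// For example (illustrative): if only element 0 of a shufflevector result is
/// ever read, input elements that the shuffle mask never selects need not be
/// demanded, and undemanded elements of a constant-vector operand are replaced
/// with undef.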
Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                                APInt &UndefElts,
                                                unsigned Depth) {
  unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
  APInt EltMask(APInt::getAllOnesValue(VWidth));
  assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");

  if (isa<UndefValue>(V)) {
    // If the entire vector is undefined, just return this info.
    UndefElts = EltMask;
    return 0;
  } else if (DemandedElts == 0) { // If nothing is demanded, provide undef.
    UndefElts = EltMask;
    return UndefValue::get(V->getType());
  }

  UndefElts = 0;
  if (ConstantVector *CP = dyn_cast<ConstantVector>(V)) {
    const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
    Constant *Undef = UndefValue::get(EltTy);

    std::vector<Constant*> Elts;
    for (unsigned i = 0; i != VWidth; ++i)
      if (!DemandedElts[i]) {   // If not demanded, set to undef.
        Elts.push_back(Undef);
        UndefElts.set(i);
      } else if (isa<UndefValue>(CP->getOperand(i))) {   // Already undef.
        Elts.push_back(Undef);
        UndefElts.set(i);
      } else {                               // Otherwise, defined.
        Elts.push_back(CP->getOperand(i));
      }

    // If we changed the constant, return it.
    Constant *NewCP = ConstantVector::get(Elts);
    return NewCP != CP ? NewCP : 0;
  } else if (isa<ConstantAggregateZero>(V)) {
    // Simplify the CAZ to a ConstantVector where the non-demanded elements are
    // set to undef.

    // Check if this is identity. If so, return 0 since we are not simplifying
    // as requested.
    if (DemandedElts == ((1ULL << VWidth) -1))
      return 0;

    const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
    Constant *Zero = Constant::getNullValue(EltTy);
    Constant *Undef = UndefValue::get(EltTy);
    std::vector<Constant*> Elts;
    for (unsigned i = 0; i != VWidth; ++i) {
      Constant *Elt = DemandedElts[i] ? Zero : Undef;
      Elts.push_back(Elt);
    }
    UndefElts = DemandedElts ^ EltMask;
    return ConstantVector::get(Elts);
  }
  // Limit search depth.
  if (Depth == 10)
    return 0;

  // If multiple users are using the root value, procede with
  // simplification conservatively assuming that all elements
  // are needed.
  if (!V->hasOneUse()) {
    // Quit if we find multiple users of a non-root value though.
    // They'll be handled when it's their turn to be visited by
    // the main instcombine process.
    if (Depth != 0)
      // TODO: Just compute the UndefElts information recursively.
      return 0;

    // Conservatively assume that all elements are needed.
    DemandedElts = EltMask;
  }

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return 0;        // Only analyze instructions.

  bool MadeChange = false;
  APInt UndefElts2(VWidth, 0);
  Value *TmpV;
  switch (I->getOpcode()) {
  default: break;

  case Instruction::InsertElement: {
    // If this is a variable index, we don't know which element it overwrites.
    // demand exactly the same input as we produce.
    ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
    if (Idx == 0) {
      // Note that we can't propagate undef elt info, because we don't know
      // which elt is getting updated.
      TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
                                        UndefElts2, Depth+1);
      if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
      break;
    }

    // If this is inserting an element that isn't demanded, remove this
    // insertelement.
    unsigned IdxNo = Idx->getZExtValue();
    if (IdxNo >= VWidth || !DemandedElts[IdxNo])
      return AddSoonDeadInstToWorklist(*I, 0);

    // Otherwise, the element inserted overwrites whatever was there, so the
    // input demanded set is simpler than the output set.
    APInt DemandedElts2 = DemandedElts;
    DemandedElts2.clear(IdxNo);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts2,
                                      UndefElts, Depth+1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }

    // The inserted element is defined.
    UndefElts.clear(IdxNo);
    break;
  }
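  // Added illustrative comment (not from the original source; %a, %x, %v, %e
  // are hypothetical values): for
  //   %v = insertelement <4 x i32> %a, i32 %x, i32 3
  //   %e = extractelement <4 x i32> %v, i32 0
  // only element 0 of %v is demanded, so lane 3 is not in DemandedElts and
  // the insertelement is queued for deletion, with %a used in its place.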
1539 case Instruction::ShuffleVector
: {
1540 ShuffleVectorInst
*Shuffle
= cast
<ShuffleVectorInst
>(I
);
1541 uint64_t LHSVWidth
=
1542 cast
<VectorType
>(Shuffle
->getOperand(0)->getType())->getNumElements();
1543 APInt
LeftDemanded(LHSVWidth
, 0), RightDemanded(LHSVWidth
, 0);
1544 for (unsigned i
= 0; i
< VWidth
; i
++) {
1545 if (DemandedElts
[i
]) {
1546 unsigned MaskVal
= Shuffle
->getMaskValue(i
);
1547 if (MaskVal
!= -1u) {
1548 assert(MaskVal
< LHSVWidth
* 2 &&
1549 "shufflevector mask index out of range!");
1550 if (MaskVal
< LHSVWidth
)
1551 LeftDemanded
.set(MaskVal
);
1553 RightDemanded
.set(MaskVal
- LHSVWidth
);
1558 APInt
UndefElts4(LHSVWidth
, 0);
1559 TmpV
= SimplifyDemandedVectorElts(I
->getOperand(0), LeftDemanded
,
1560 UndefElts4
, Depth
+1);
1561 if (TmpV
) { I
->setOperand(0, TmpV
); MadeChange
= true; }
1563 APInt
UndefElts3(LHSVWidth
, 0);
1564 TmpV
= SimplifyDemandedVectorElts(I
->getOperand(1), RightDemanded
,
1565 UndefElts3
, Depth
+1);
1566 if (TmpV
) { I
->setOperand(1, TmpV
); MadeChange
= true; }
1568 bool NewUndefElts
= false;
1569 for (unsigned i
= 0; i
< VWidth
; i
++) {
1570 unsigned MaskVal
= Shuffle
->getMaskValue(i
);
1571 if (MaskVal
== -1u) {
1573 } else if (MaskVal
< LHSVWidth
) {
1574 if (UndefElts4
[MaskVal
]) {
1575 NewUndefElts
= true;
1579 if (UndefElts3
[MaskVal
- LHSVWidth
]) {
1580 NewUndefElts
= true;
1587 // Add additional discovered undefs.
1588 std::vector
<Constant
*> Elts
;
1589 for (unsigned i
= 0; i
< VWidth
; ++i
) {
1591 Elts
.push_back(UndefValue::get(Type::Int32Ty
));
1593 Elts
.push_back(ConstantInt::get(Type::Int32Ty
,
1594 Shuffle
->getMaskValue(i
)));
1596 I
->setOperand(2, ConstantVector::get(Elts
));
1601 case Instruction::BitCast
: {
1602 // Vector->vector casts only.
1603 const VectorType
*VTy
= dyn_cast
<VectorType
>(I
->getOperand(0)->getType());
1605 unsigned InVWidth
= VTy
->getNumElements();
1606 APInt
InputDemandedElts(InVWidth
, 0);
1609 if (VWidth
== InVWidth
) {
1610 // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
1611 // elements as are demanded of us.
1613 InputDemandedElts
= DemandedElts
;
1614 } else if (VWidth
> InVWidth
) {
1618 // If there are more elements in the result than there are in the source,
1619 // then an input element is live if any of the corresponding output
1620 // elements are live.
1621 Ratio
= VWidth
/InVWidth
;
1622 for (unsigned OutIdx
= 0; OutIdx
!= VWidth
; ++OutIdx
) {
1623 if (DemandedElts
[OutIdx
])
1624 InputDemandedElts
.set(OutIdx
/Ratio
);
1630 // If there are more elements in the source than there are in the result,
1631 // then an input element is live if the corresponding output element is
1633 Ratio
= InVWidth
/VWidth
;
1634 for (unsigned InIdx
= 0; InIdx
!= InVWidth
; ++InIdx
)
1635 if (DemandedElts
[InIdx
/Ratio
])
1636 InputDemandedElts
.set(InIdx
);
1639 // div/rem demand all inputs, because they don't want divide by zero.
1640 TmpV
= SimplifyDemandedVectorElts(I
->getOperand(0), InputDemandedElts
,
1641 UndefElts2
, Depth
+1);
1643 I
->setOperand(0, TmpV
);
1647 UndefElts
= UndefElts2
;
1648 if (VWidth
> InVWidth
) {
1649 llvm_unreachable("Unimp");
1650 // If there are more elements in the result than there are in the source,
1651 // then an output element is undef if the corresponding input element is
1653 for (unsigned OutIdx
= 0; OutIdx
!= VWidth
; ++OutIdx
)
1654 if (UndefElts2
[OutIdx
/Ratio
])
1655 UndefElts
.set(OutIdx
);
1656 } else if (VWidth
< InVWidth
) {
1657 llvm_unreachable("Unimp");
1658 // If there are more elements in the source than there are in the result,
1659 // then a result element is undef if all of the corresponding input
1660 // elements are undef.
1661 UndefElts
= ~0ULL >> (64-VWidth
); // Start out all undef.
1662 for (unsigned InIdx
= 0; InIdx
!= InVWidth
; ++InIdx
)
1663 if (!UndefElts2
[InIdx
]) // Not undef?
1664 UndefElts
.clear(InIdx
/Ratio
); // Clear undef bit.
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // div/rem demand all inputs, because they don't want divide by zero.
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
                                      UndefElts, Depth+1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
                                      UndefElts2, Depth+1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    // Output elements are undefined if both are undefined.  Consider things
    // like undef&0.  The result is known zero, not undef.
    UndefElts &= UndefElts2;
    break;
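    // Added illustrative comment (not from the original source): for
    //   %r = and <2 x i32> <i32 undef, i32 undef>, <i32 0, i32 undef>
    // lane 0 is 'undef & 0', which is known zero rather than undef, so only
    // lane 1 (undef in both operands) may be reported in UndefElts.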
1687 case Instruction::Call
: {
1688 IntrinsicInst
*II
= dyn_cast
<IntrinsicInst
>(I
);
1690 switch (II
->getIntrinsicID()) {
1693 // Binary vector operations that work column-wise. A dest element is a
1694 // function of the corresponding input elements from the two inputs.
1695 case Intrinsic::x86_sse_sub_ss
:
1696 case Intrinsic::x86_sse_mul_ss
:
1697 case Intrinsic::x86_sse_min_ss
:
1698 case Intrinsic::x86_sse_max_ss
:
1699 case Intrinsic::x86_sse2_sub_sd
:
1700 case Intrinsic::x86_sse2_mul_sd
:
1701 case Intrinsic::x86_sse2_min_sd
:
1702 case Intrinsic::x86_sse2_max_sd
:
1703 TmpV
= SimplifyDemandedVectorElts(II
->getOperand(1), DemandedElts
,
1704 UndefElts
, Depth
+1);
1705 if (TmpV
) { II
->setOperand(1, TmpV
); MadeChange
= true; }
1706 TmpV
= SimplifyDemandedVectorElts(II
->getOperand(2), DemandedElts
,
1707 UndefElts2
, Depth
+1);
1708 if (TmpV
) { II
->setOperand(2, TmpV
); MadeChange
= true; }
1710 // If only the low elt is demanded and this is a scalarizable intrinsic,
1711 // scalarize it now.
1712 if (DemandedElts
== 1) {
1713 switch (II
->getIntrinsicID()) {
1715 case Intrinsic::x86_sse_sub_ss
:
1716 case Intrinsic::x86_sse_mul_ss
:
1717 case Intrinsic::x86_sse2_sub_sd
:
1718 case Intrinsic::x86_sse2_mul_sd
:
1719 // TODO: Lower MIN/MAX/ABS/etc
1720 Value
*LHS
= II
->getOperand(1);
1721 Value
*RHS
= II
->getOperand(2);
1722 // Extract the element as scalars.
1723 LHS
= InsertNewInstBefore(ExtractElementInst::Create(LHS
,
1724 ConstantInt::get(Type::Int32Ty
, 0U, false), "tmp"), *II
);
1725 RHS
= InsertNewInstBefore(ExtractElementInst::Create(RHS
,
1726 ConstantInt::get(Type::Int32Ty
, 0U, false), "tmp"), *II
);
1728 switch (II
->getIntrinsicID()) {
1729 default: llvm_unreachable("Case stmts out of sync!");
1730 case Intrinsic::x86_sse_sub_ss
:
1731 case Intrinsic::x86_sse2_sub_sd
:
1732 TmpV
= InsertNewInstBefore(BinaryOperator::CreateFSub(LHS
, RHS
,
1733 II
->getName()), *II
);
1735 case Intrinsic::x86_sse_mul_ss
:
1736 case Intrinsic::x86_sse2_mul_sd
:
1737 TmpV
= InsertNewInstBefore(BinaryOperator::CreateFMul(LHS
, RHS
,
1738 II
->getName()), *II
);
1743 InsertElementInst::Create(
1744 UndefValue::get(II
->getType()), TmpV
,
1745 ConstantInt::get(Type::Int32Ty
, 0U, false), II
->getName());
1746 InsertNewInstBefore(New
, *II
);
1747 AddSoonDeadInstToWorklist(*II
, 0);
1752 // Output elements are undefined if both are undefined. Consider things
1753 // like undef&0. The result is known zero, not undef.
1754 UndefElts
&= UndefElts2
;
  return MadeChange ? I : 0;
}

/// AssociativeOpt - Perform an optimization on an associative operator.  This
/// function is designed to check a chain of associative operators for a
/// potential to apply a certain optimization.  Since the optimization may be
/// applicable if the expression was reassociated, this checks the chain, then
/// reassociates the expression as necessary to expose the optimization
/// opportunity.  This makes use of a special Functor, which must define
/// 'shouldApply' and 'apply' methods.
///
template<typename Functor>
static Instruction *AssociativeOpt(BinaryOperator &Root, const Functor &F,
                                   LLVMContext *Context) {
  unsigned Opcode = Root.getOpcode();
  Value *LHS = Root.getOperand(0);

  // Quick check, see if the immediate LHS matches...
  if (F.shouldApply(LHS))
    return F.apply(Root);

  // Otherwise, if the LHS is not of the same opcode as the root, return.
  Instruction *LHSI = dyn_cast<Instruction>(LHS);
  while (LHSI && LHSI->getOpcode() == Opcode && LHSI->hasOneUse()) {
    // Should we apply this transform to the RHS?
    bool ShouldApply = F.shouldApply(LHSI->getOperand(1));

    // If not to the RHS, check to see if we should apply to the LHS...
    if (!ShouldApply && F.shouldApply(LHSI->getOperand(0))) {
      cast<BinaryOperator>(LHSI)->swapOperands();   // Make the LHS the RHS
      ShouldApply = true;
    }

    // If the functor wants to apply the optimization to the RHS of LHSI,
    // reassociate the expression from ((? op A) op B) to (? op (A op B))
    if (ShouldApply) {
      // Now all of the instructions are in the current basic block, go ahead
      // and perform the reassociation.
      Instruction *TmpLHSI = cast<Instruction>(Root.getOperand(0));

      // First move the selected RHS to the LHS of the root...
      Root.setOperand(0, LHSI->getOperand(1));

      // Make what used to be the LHS of the root be the user of the root...
      Value *ExtraOperand = TmpLHSI->getOperand(1);
      if (&Root == TmpLHSI) {
        Root.replaceAllUsesWith(Constant::getNullValue(TmpLHSI->getType()));
        return 0;
      }
      Root.replaceAllUsesWith(TmpLHSI);          // Users now use TmpLHSI
      TmpLHSI->setOperand(1, &Root);             // TmpLHSI now uses the root
      BasicBlock::iterator ARI = &Root; ++ARI;
      TmpLHSI->moveBefore(ARI);                  // Move TmpLHSI to after Root

      // Now propagate the ExtraOperand down the chain of instructions until we
      // get to LHSI.
      while (TmpLHSI != LHSI) {
        Instruction *NextLHSI = cast<Instruction>(TmpLHSI->getOperand(0));
        // Move the instruction to immediately before the chain we are
        // constructing to avoid breaking dominance properties.
        NextLHSI->moveBefore(ARI);
        ARI = NextLHSI;

        Value *NextOp = NextLHSI->getOperand(1);
        NextLHSI->setOperand(1, ExtraOperand);
        TmpLHSI = NextLHSI;
        ExtraOperand = NextOp;
      }

      // Now that the instructions are reassociated, have the functor perform
      // the transformation...
      return F.apply(Root);
    }

    LHSI = dyn_cast<Instruction>(LHSI->getOperand(0));
  }
  return 0;
}
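// Added illustrative comment (not from the original source): with the AddRHS
// functor defined below, a chain like (A + X) + X fails the quick check (its
// immediate LHS is A + X, not X); the loop then finds X as the RHS of the
// inner add, reassociates the expression to A + (X + X), and AddRHS::apply
// rewrites the exposed X + X as X << 1.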
// AddRHS - Implements: X + X --> X << 1
struct AddRHS {
  Value *RHS;
  LLVMContext *Context;
  AddRHS(Value *rhs, LLVMContext *C) : RHS(rhs), Context(C) {}
  bool shouldApply(Value *LHS) const { return LHS == RHS; }
  Instruction *apply(BinaryOperator &Add) const {
    return BinaryOperator::CreateShl(Add.getOperand(0),
                                     ConstantInt::get(Add.getType(), 1));
  }
};
// AddMaskingAnd - Implements (A & C1)+(B & C2) --> (A & C1)|(B & C2)
//                 iff C1&C2 == 0
struct AddMaskingAnd {
  Constant *C2;
  LLVMContext *Context;
  AddMaskingAnd(Constant *c, LLVMContext *C) : C2(c), Context(C) {}
  bool shouldApply(Value *LHS) const {
    ConstantInt *C1;
    return match(LHS, m_And(m_Value(), m_ConstantInt(C1)), *Context) &&
           ConstantExpr::getAnd(C1, C2)->isNullValue();
  }
  Instruction *apply(BinaryOperator &Add) const {
    return BinaryOperator::CreateOr(Add.getOperand(0), Add.getOperand(1));
  }
};
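// Added illustrative comment (not from the original source; %x, %y are
// hypothetical values): with disjoint masks no column of the addition can
// carry, so
//   %a = and i32 %x, 240        ; C1 = 0xF0
//   %b = and i32 %y, 15         ; C2 = 0x0F
//   %s = add i32 %a, %b
// is rewritten by this functor (via AssociativeOpt) as
//   %s = or i32 %a, %b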
static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner *IC) {
  LLVMContext *Context = IC->getContext();

  if (CastInst *CI = dyn_cast<CastInst>(&I)) {
    return IC->InsertCastBefore(CI->getOpcode(), SO, I.getType(), I);
  }

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);
  Instruction *New;
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
    New = BinaryOperator::Create(BO->getOpcode(), Op0, Op1, SO->getName()+".op");
  else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
    New = CmpInst::Create(*Context, CI->getOpcode(), CI->getPredicate(),
                          Op0, Op1, SO->getName()+".cmp");
  else
    llvm_unreachable("Unknown binary instruction type!");

  return IC->InsertNewInstBefore(New, I);
}
// FoldOpIntoSelect - Given an instruction with a select as one operand and a
// constant as the other operand, try to fold the binary operator into the
// select arguments.  This also works for Cast instructions, which obviously do
// not have a second operand.
static Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
                                     InstCombiner *IC) {
  // Don't modify shared select instructions
  if (!SI->hasOneUse()) return 0;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType() == Type::Int1Ty) return 0;

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, IC);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, IC);

    return SelectInst::Create(SI->getCondition(), SelectTrueVal,
                              SelectFalseVal);
  }
  return 0;
}
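// Added illustrative comment (not from the original source; %c, %y are
// hypothetical values): given
//   %s = select i1 %c, i32 4, i32 %y
//   %r = add i32 %s, 16
// one select arm is a constant, so the add is folded into both arms:
//   %y.op = add i32 %y, 16
//   %r = select i1 %c, i32 20, i32 %y.op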
/// FoldOpIntoPhi - Given a binary operator or cast instruction which has a PHI
/// node as operand #0, see if we can fold the instruction into the PHI (which
/// is only possible if all operands to the PHI are constants).
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (!PN->hasOneUse() || NumPHIValues == 0) return 0;
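  // Added illustrative comment (not from the original source): the typical
  // case this targets is
  //   %p = phi i32 [ 1, %bb0 ], [ 2, %bb1 ]
  //   %r = add i32 %p, 10
  // where every incoming value is a constant, so the add folds into a new phi:
  //   %r = phi i32 [ 11, %bb0 ], [ 12, %bb1 ]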
1939 // Check to see if all of the operands of the PHI are constants. If there is
1940 // one non-constant value, remember the BB it is. If there is more than one
1941 // or if *it* is a PHI, bail out.
1942 BasicBlock
*NonConstBB
= 0;
1943 for (unsigned i
= 0; i
!= NumPHIValues
; ++i
)
1944 if (!isa
<Constant
>(PN
->getIncomingValue(i
))) {
1945 if (NonConstBB
) return 0; // More than one non-const value.
1946 if (isa
<PHINode
>(PN
->getIncomingValue(i
))) return 0; // Itself a phi.
1947 NonConstBB
= PN
->getIncomingBlock(i
);
1949 // If the incoming non-constant value is in I's block, we have an infinite
1951 if (NonConstBB
== I
.getParent())
1955 // If there is exactly one non-constant value, we can insert a copy of the
1956 // operation in that block. However, if this is a critical edge, we would be
1957 // inserting the computation one some other paths (e.g. inside a loop). Only
1958 // do this if the pred block is unconditionally branching into the phi block.
1960 BranchInst
*BI
= dyn_cast
<BranchInst
>(NonConstBB
->getTerminator());
1961 if (!BI
|| !BI
->isUnconditional()) return 0;
1964 // Okay, we can do the transformation: create the new PHI node.
1965 PHINode
*NewPN
= PHINode::Create(I
.getType(), "");
1966 NewPN
->reserveOperandSpace(PN
->getNumOperands()/2);
1967 InsertNewInstBefore(NewPN
, *PN
);
1968 NewPN
->takeName(PN
);
1970 // Next, add all of the operands to the PHI.
1971 if (I
.getNumOperands() == 2) {
1972 Constant
*C
= cast
<Constant
>(I
.getOperand(1));
1973 for (unsigned i
= 0; i
!= NumPHIValues
; ++i
) {
1975 if (Constant
*InC
= dyn_cast
<Constant
>(PN
->getIncomingValue(i
))) {
1976 if (CmpInst
*CI
= dyn_cast
<CmpInst
>(&I
))
1977 InV
= ConstantExpr::getCompare(CI
->getPredicate(), InC
, C
);
1979 InV
= ConstantExpr::get(I
.getOpcode(), InC
, C
);
1981 assert(PN
->getIncomingBlock(i
) == NonConstBB
);
1982 if (BinaryOperator
*BO
= dyn_cast
<BinaryOperator
>(&I
))
1983 InV
= BinaryOperator::Create(BO
->getOpcode(),
1984 PN
->getIncomingValue(i
), C
, "phitmp",
1985 NonConstBB
->getTerminator());
1986 else if (CmpInst
*CI
= dyn_cast
<CmpInst
>(&I
))
1987 InV
= CmpInst::Create(*Context
, CI
->getOpcode(),
1989 PN
->getIncomingValue(i
), C
, "phitmp",
1990 NonConstBB
->getTerminator());
1992 llvm_unreachable("Unknown binop!");
1994 AddToWorkList(cast
<Instruction
>(InV
));
1996 NewPN
->addIncoming(InV
, PN
->getIncomingBlock(i
));
1999 CastInst
*CI
= cast
<CastInst
>(&I
);
2000 const Type
*RetTy
= CI
->getType();
2001 for (unsigned i
= 0; i
!= NumPHIValues
; ++i
) {
2003 if (Constant
*InC
= dyn_cast
<Constant
>(PN
->getIncomingValue(i
))) {
2004 InV
= ConstantExpr::getCast(CI
->getOpcode(), InC
, RetTy
);
2006 assert(PN
->getIncomingBlock(i
) == NonConstBB
);
2007 InV
= CastInst::Create(CI
->getOpcode(), PN
->getIncomingValue(i
),
2008 I
.getType(), "phitmp",
2009 NonConstBB
->getTerminator());
2010 AddToWorkList(cast
<Instruction
>(InV
));
2012 NewPN
->addIncoming(InV
, PN
->getIncomingBlock(i
));
2015 return ReplaceInstUsesWith(I
, NewPN
);
/// WillNotOverflowSignedAdd - Return true if we can prove that:
///    (sext (add LHS, RHS))  === (add (sext LHS), (sext RHS))
/// This basically requires proving that the add in the original type would not
/// overflow to change the sign bit or have a carry out.
bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
  // There are different heuristics we can use for this.  Here are some simple
  // ones.

  // Add has the property that adding any two 2's complement numbers can only
  // have one carry bit which can change a sign.  As such, if LHS and RHS each
  // have at least two sign bits, we know that the addition of the two values will
  // sign extend fine.
  if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1)
    return true;

  // If one of the operands only has one non-zero bit, and if the other operand
  // has a known-zero bit in a more significant place than it (not including the
  // sign bit) the ripple may go up to and fill the zero, but won't change the
  // sign.  For example, (X & ~4) + 1.

  return false;
}
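// Added illustrative comment (not from the original source): for the first
// heuristic above, if both operands are i8 values masked with 63
// (e.g. and i8 %x, 63), their top two bits are known zero, so each has at
// least two sign bits; the sum is at most 126 and can never overflow i8.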
2046 Instruction
*InstCombiner::visitAdd(BinaryOperator
&I
) {
2047 bool Changed
= SimplifyCommutative(I
);
2048 Value
*LHS
= I
.getOperand(0), *RHS
= I
.getOperand(1);
2050 if (Constant
*RHSC
= dyn_cast
<Constant
>(RHS
)) {
2051 // X + undef -> undef
2052 if (isa
<UndefValue
>(RHS
))
2053 return ReplaceInstUsesWith(I
, RHS
);
2056 if (RHSC
->isNullValue())
2057 return ReplaceInstUsesWith(I
, LHS
);
2059 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(RHSC
)) {
2060 // X + (signbit) --> X ^ signbit
2061 const APInt
& Val
= CI
->getValue();
2062 uint32_t BitWidth
= Val
.getBitWidth();
2063 if (Val
== APInt::getSignBit(BitWidth
))
2064 return BinaryOperator::CreateXor(LHS
, RHS
);
2066 // See if SimplifyDemandedBits can simplify this. This handles stuff like
2067 // (X & 254)+1 -> (X&254)|1
2068 if (SimplifyDemandedInstructionBits(I
))
2071 // zext(bool) + C -> bool ? C + 1 : C
2072 if (ZExtInst
*ZI
= dyn_cast
<ZExtInst
>(LHS
))
2073 if (ZI
->getSrcTy() == Type::Int1Ty
)
2074 return SelectInst::Create(ZI
->getOperand(0), AddOne(CI
, Context
), CI
);
2077 if (isa
<PHINode
>(LHS
))
2078 if (Instruction
*NV
= FoldOpIntoPhi(I
))
2081 ConstantInt
*XorRHS
= 0;
2083 if (isa
<ConstantInt
>(RHSC
) &&
2084 match(LHS
, m_Xor(m_Value(XorLHS
), m_ConstantInt(XorRHS
)), *Context
)) {
2085 uint32_t TySizeBits
= I
.getType()->getScalarSizeInBits();
2086 const APInt
& RHSVal
= cast
<ConstantInt
>(RHSC
)->getValue();
2088 uint32_t Size
= TySizeBits
/ 2;
2089 APInt
C0080Val(APInt(TySizeBits
, 1ULL).shl(Size
- 1));
2090 APInt
CFF80Val(-C0080Val
);
2092 if (TySizeBits
> Size
) {
2093 // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
2094 // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
2095 if ((RHSVal
== CFF80Val
&& XorRHS
->getValue() == C0080Val
) ||
2096 (RHSVal
== C0080Val
&& XorRHS
->getValue() == CFF80Val
)) {
2097 // This is a sign extend if the top bits are known zero.
2098 if (!MaskedValueIsZero(XorLHS
,
2099 APInt::getHighBitsSet(TySizeBits
, TySizeBits
- Size
)))
2100 Size
= 0; // Not a sign ext, but can't be any others either.
2105 C0080Val
= APIntOps::lshr(C0080Val
, Size
);
2106 CFF80Val
= APIntOps::ashr(CFF80Val
, Size
);
2107 } while (Size
>= 1);
2109 // FIXME: This shouldn't be necessary. When the backends can handle types
2110 // with funny bit widths then this switch statement should be removed. It
2111 // is just here to get the size of the "middle" type back up to something
2112 // that the back ends can handle.
2113 const Type
*MiddleType
= 0;
2116 case 32: MiddleType
= Type::Int32Ty
; break;
2117 case 16: MiddleType
= Type::Int16Ty
; break;
2118 case 8: MiddleType
= Type::Int8Ty
; break;
2121 Instruction
*NewTrunc
= new TruncInst(XorLHS
, MiddleType
, "sext");
2122 InsertNewInstBefore(NewTrunc
, I
);
2123 return new SExtInst(NewTrunc
, I
.getType(), I
.getName());
2128 if (I
.getType() == Type::Int1Ty
)
2129 return BinaryOperator::CreateXor(LHS
, RHS
);
2132 if (I
.getType()->isInteger()) {
2133 if (Instruction
*Result
= AssociativeOpt(I
, AddRHS(RHS
, Context
), Context
))
2136 if (Instruction
*RHSI
= dyn_cast
<Instruction
>(RHS
)) {
2137 if (RHSI
->getOpcode() == Instruction::Sub
)
2138 if (LHS
== RHSI
->getOperand(1)) // A + (B - A) --> B
2139 return ReplaceInstUsesWith(I
, RHSI
->getOperand(0));
2141 if (Instruction
*LHSI
= dyn_cast
<Instruction
>(LHS
)) {
2142 if (LHSI
->getOpcode() == Instruction::Sub
)
2143 if (RHS
== LHSI
->getOperand(1)) // (B - A) + A --> B
2144 return ReplaceInstUsesWith(I
, LHSI
->getOperand(0));
2149 // -A + -B --> -(A + B)
2150 if (Value
*LHSV
= dyn_castNegVal(LHS
, Context
)) {
2151 if (LHS
->getType()->isIntOrIntVector()) {
2152 if (Value
*RHSV
= dyn_castNegVal(RHS
, Context
)) {
2153 Instruction
*NewAdd
= BinaryOperator::CreateAdd(LHSV
, RHSV
, "sum");
2154 InsertNewInstBefore(NewAdd
, I
);
2155 return BinaryOperator::CreateNeg(*Context
, NewAdd
);
2159 return BinaryOperator::CreateSub(RHS
, LHSV
);
2163 if (!isa
<Constant
>(RHS
))
2164 if (Value
*V
= dyn_castNegVal(RHS
, Context
))
2165 return BinaryOperator::CreateSub(LHS
, V
);
2169 if (Value
*X
= dyn_castFoldableMul(LHS
, C2
, Context
)) {
2170 if (X
== RHS
) // X*C + X --> X * (C+1)
2171 return BinaryOperator::CreateMul(RHS
, AddOne(C2
, Context
));
2173 // X*C1 + X*C2 --> X * (C1+C2)
2175 if (X
== dyn_castFoldableMul(RHS
, C1
, Context
))
2176 return BinaryOperator::CreateMul(X
, ConstantExpr::getAdd(C1
, C2
));
2179 // X + X*C --> X * (C+1)
2180 if (dyn_castFoldableMul(RHS
, C2
, Context
) == LHS
)
2181 return BinaryOperator::CreateMul(LHS
, AddOne(C2
, Context
));
2183 // X + ~X --> -1 since ~X = -X-1
2184 if (dyn_castNotVal(LHS
, Context
) == RHS
||
2185 dyn_castNotVal(RHS
, Context
) == LHS
)
2186 return ReplaceInstUsesWith(I
, Constant::getAllOnesValue(I
.getType()));
2189 // (A & C1)+(B & C2) --> (A & C1)|(B & C2) iff C1&C2 == 0
2190 if (match(RHS
, m_And(m_Value(), m_ConstantInt(C2
)), *Context
))
2191 if (Instruction
*R
= AssociativeOpt(I
, AddMaskingAnd(C2
, Context
), Context
))
2194 // A+B --> A|B iff A and B have no bits set in common.
2195 if (const IntegerType
*IT
= dyn_cast
<IntegerType
>(I
.getType())) {
2196 APInt Mask
= APInt::getAllOnesValue(IT
->getBitWidth());
2197 APInt
LHSKnownOne(IT
->getBitWidth(), 0);
2198 APInt
LHSKnownZero(IT
->getBitWidth(), 0);
2199 ComputeMaskedBits(LHS
, Mask
, LHSKnownZero
, LHSKnownOne
);
2200 if (LHSKnownZero
!= 0) {
2201 APInt
RHSKnownOne(IT
->getBitWidth(), 0);
2202 APInt
RHSKnownZero(IT
->getBitWidth(), 0);
2203 ComputeMaskedBits(RHS
, Mask
, RHSKnownZero
, RHSKnownOne
);
2205 // No bits in common -> bitwise or.
2206 if ((LHSKnownZero
|RHSKnownZero
).isAllOnesValue())
2207 return BinaryOperator::CreateOr(LHS
, RHS
);
2211 // W*X + Y*Z --> W * (X+Z) iff W == Y
2212 if (I
.getType()->isIntOrIntVector()) {
2213 Value
*W
, *X
, *Y
, *Z
;
2214 if (match(LHS
, m_Mul(m_Value(W
), m_Value(X
)), *Context
) &&
2215 match(RHS
, m_Mul(m_Value(Y
), m_Value(Z
)), *Context
)) {
2219 } else if (Y
== X
) {
2221 } else if (X
== Z
) {
2228 Value
*NewAdd
= InsertNewInstBefore(BinaryOperator::CreateAdd(X
, Z
,
2229 LHS
->getName()), I
);
2230 return BinaryOperator::CreateMul(W
, NewAdd
);
2235 if (ConstantInt
*CRHS
= dyn_cast
<ConstantInt
>(RHS
)) {
2237 if (match(LHS
, m_Not(m_Value(X
)), *Context
)) // ~X + C --> (C-1) - X
2238 return BinaryOperator::CreateSub(SubOne(CRHS
, Context
), X
);
2240 // (X & FF00) + xx00 -> (X+xx00) & FF00
2241 if (LHS
->hasOneUse() &&
2242 match(LHS
, m_And(m_Value(X
), m_ConstantInt(C2
)), *Context
)) {
2243 Constant
*Anded
= ConstantExpr::getAnd(CRHS
, C2
);
2244 if (Anded
== CRHS
) {
2245 // See if all bits from the first bit set in the Add RHS up are included
2246 // in the mask. First, get the rightmost bit.
2247 const APInt
& AddRHSV
= CRHS
->getValue();
2249 // Form a mask of all bits from the lowest bit added through the top.
2250 APInt
AddRHSHighBits(~((AddRHSV
& -AddRHSV
)-1));
2252 // See if the and mask includes all of these bits.
2253 APInt
AddRHSHighBitsAnd(AddRHSHighBits
& C2
->getValue());
2255 if (AddRHSHighBits
== AddRHSHighBitsAnd
) {
2256 // Okay, the xform is safe. Insert the new add pronto.
2257 Value
*NewAdd
= InsertNewInstBefore(BinaryOperator::CreateAdd(X
, CRHS
,
2258 LHS
->getName()), I
);
2259 return BinaryOperator::CreateAnd(NewAdd
, C2
);
2264 // Try to fold constant add into select arguments.
2265 if (SelectInst
*SI
= dyn_cast
<SelectInst
>(LHS
))
2266 if (Instruction
*R
= FoldOpIntoSelect(I
, SI
, this))
2270 // add (select X 0 (sub n A)) A --> select X A n
2272 SelectInst
*SI
= dyn_cast
<SelectInst
>(LHS
);
2275 SI
= dyn_cast
<SelectInst
>(RHS
);
2278 if (SI
&& SI
->hasOneUse()) {
2279 Value
*TV
= SI
->getTrueValue();
2280 Value
*FV
= SI
->getFalseValue();
2283 // Can we fold the add into the argument of the select?
2284 // We check both true and false select arguments for a matching subtract.
2285 if (match(FV
, m_Zero(), *Context
) &&
2286 match(TV
, m_Sub(m_Value(N
), m_Specific(A
)), *Context
))
2287 // Fold the add into the true select value.
2288 return SelectInst::Create(SI
->getCondition(), N
, A
);
2289 if (match(TV
, m_Zero(), *Context
) &&
2290 match(FV
, m_Sub(m_Value(N
), m_Specific(A
)), *Context
))
2291 // Fold the add into the false select value.
2292 return SelectInst::Create(SI
->getCondition(), A
, N
);
2296 // Check for (add (sext x), y), see if we can merge this into an
2297 // integer add followed by a sext.
2298 if (SExtInst
*LHSConv
= dyn_cast
<SExtInst
>(LHS
)) {
2299 // (add (sext x), cst) --> (sext (add x, cst'))
2300 if (ConstantInt
*RHSC
= dyn_cast
<ConstantInt
>(RHS
)) {
2302 ConstantExpr::getTrunc(RHSC
, LHSConv
->getOperand(0)->getType());
2303 if (LHSConv
->hasOneUse() &&
2304 ConstantExpr::getSExt(CI
, I
.getType()) == RHSC
&&
2305 WillNotOverflowSignedAdd(LHSConv
->getOperand(0), CI
)) {
2306 // Insert the new, smaller add.
2307 Instruction
*NewAdd
= BinaryOperator::CreateAdd(LHSConv
->getOperand(0),
2309 InsertNewInstBefore(NewAdd
, I
);
2310 return new SExtInst(NewAdd
, I
.getType());
2314 // (add (sext x), (sext y)) --> (sext (add int x, y))
2315 if (SExtInst
*RHSConv
= dyn_cast
<SExtInst
>(RHS
)) {
2316 // Only do this if x/y have the same type, if at last one of them has a
2317 // single use (so we don't increase the number of sexts), and if the
2318 // integer add will not overflow.
2319 if (LHSConv
->getOperand(0)->getType()==RHSConv
->getOperand(0)->getType()&&
2320 (LHSConv
->hasOneUse() || RHSConv
->hasOneUse()) &&
2321 WillNotOverflowSignedAdd(LHSConv
->getOperand(0),
2322 RHSConv
->getOperand(0))) {
2323 // Insert the new integer add.
2324 Instruction
*NewAdd
= BinaryOperator::CreateAdd(LHSConv
->getOperand(0),
2325 RHSConv
->getOperand(0),
2327 InsertNewInstBefore(NewAdd
, I
);
2328 return new SExtInst(NewAdd
, I
.getType());
2333 return Changed
? &I
: 0;
2336 Instruction
*InstCombiner::visitFAdd(BinaryOperator
&I
) {
2337 bool Changed
= SimplifyCommutative(I
);
2338 Value
*LHS
= I
.getOperand(0), *RHS
= I
.getOperand(1);
2340 if (Constant
*RHSC
= dyn_cast
<Constant
>(RHS
)) {
2342 if (ConstantFP
*CFP
= dyn_cast
<ConstantFP
>(RHSC
)) {
2343 if (CFP
->isExactlyValue(ConstantFP::getNegativeZero
2344 (I
.getType())->getValueAPF()))
2345 return ReplaceInstUsesWith(I
, LHS
);
2348 if (isa
<PHINode
>(LHS
))
2349 if (Instruction
*NV
= FoldOpIntoPhi(I
))
2354 // -A + -B --> -(A + B)
2355 if (Value
*LHSV
= dyn_castFNegVal(LHS
, Context
))
2356 return BinaryOperator::CreateFSub(RHS
, LHSV
);
2359 if (!isa
<Constant
>(RHS
))
2360 if (Value
*V
= dyn_castFNegVal(RHS
, Context
))
2361 return BinaryOperator::CreateFSub(LHS
, V
);
2363 // Check for X+0.0. Simplify it to X if we know X is not -0.0.
2364 if (ConstantFP
*CFP
= dyn_cast
<ConstantFP
>(RHS
))
2365 if (CFP
->getValueAPF().isPosZero() && CannotBeNegativeZero(LHS
))
2366 return ReplaceInstUsesWith(I
, LHS
);
2368 // Check for (add double (sitofp x), y), see if we can merge this into an
2369 // integer add followed by a promotion.
2370 if (SIToFPInst
*LHSConv
= dyn_cast
<SIToFPInst
>(LHS
)) {
2371 // (add double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
2372 // ... if the constant fits in the integer value. This is useful for things
2373 // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
2374 // requires a constant pool load, and generally allows the add to be better
2376 if (ConstantFP
*CFP
= dyn_cast
<ConstantFP
>(RHS
)) {
2378 ConstantExpr::getFPToSI(CFP
, LHSConv
->getOperand(0)->getType());
2379 if (LHSConv
->hasOneUse() &&
2380 ConstantExpr::getSIToFP(CI
, I
.getType()) == CFP
&&
2381 WillNotOverflowSignedAdd(LHSConv
->getOperand(0), CI
)) {
2382 // Insert the new integer add.
2383 Instruction
*NewAdd
= BinaryOperator::CreateAdd(LHSConv
->getOperand(0),
2385 InsertNewInstBefore(NewAdd
, I
);
2386 return new SIToFPInst(NewAdd
, I
.getType());
2390 // (add double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
2391 if (SIToFPInst
*RHSConv
= dyn_cast
<SIToFPInst
>(RHS
)) {
2392 // Only do this if x/y have the same type, if at last one of them has a
2393 // single use (so we don't increase the number of int->fp conversions),
2394 // and if the integer add will not overflow.
2395 if (LHSConv
->getOperand(0)->getType()==RHSConv
->getOperand(0)->getType()&&
2396 (LHSConv
->hasOneUse() || RHSConv
->hasOneUse()) &&
2397 WillNotOverflowSignedAdd(LHSConv
->getOperand(0),
2398 RHSConv
->getOperand(0))) {
2399 // Insert the new integer add.
2400 Instruction
*NewAdd
= BinaryOperator::CreateAdd(LHSConv
->getOperand(0),
2401 RHSConv
->getOperand(0),
2403 InsertNewInstBefore(NewAdd
, I
);
2404 return new SIToFPInst(NewAdd
, I
.getType());
2409 return Changed
? &I
: 0;
2412 Instruction
*InstCombiner::visitSub(BinaryOperator
&I
) {
2413 Value
*Op0
= I
.getOperand(0), *Op1
= I
.getOperand(1);
2415 if (Op0
== Op1
) // sub X, X -> 0
2416 return ReplaceInstUsesWith(I
, Constant::getNullValue(I
.getType()));
2418 // If this is a 'B = x-(-A)', change to B = x+A...
2419 if (Value
*V
= dyn_castNegVal(Op1
, Context
))
2420 return BinaryOperator::CreateAdd(Op0
, V
);
2422 if (isa
<UndefValue
>(Op0
))
2423 return ReplaceInstUsesWith(I
, Op0
); // undef - X -> undef
2424 if (isa
<UndefValue
>(Op1
))
2425 return ReplaceInstUsesWith(I
, Op1
); // X - undef -> undef
2427 if (ConstantInt
*C
= dyn_cast
<ConstantInt
>(Op0
)) {
2428 // Replace (-1 - A) with (~A)...
2429 if (C
->isAllOnesValue())
2430 return BinaryOperator::CreateNot(*Context
, Op1
);
2432 // C - ~X == X + (1+C)
2434 if (match(Op1
, m_Not(m_Value(X
)), *Context
))
2435 return BinaryOperator::CreateAdd(X
, AddOne(C
, Context
));
2437 // -(X >>u 31) -> (X >>s 31)
2438 // -(X >>s 31) -> (X >>u 31)
2440 if (BinaryOperator
*SI
= dyn_cast
<BinaryOperator
>(Op1
)) {
2441 if (SI
->getOpcode() == Instruction::LShr
) {
2442 if (ConstantInt
*CU
= dyn_cast
<ConstantInt
>(SI
->getOperand(1))) {
2443 // Check to see if we are shifting out everything but the sign bit.
2444 if (CU
->getLimitedValue(SI
->getType()->getPrimitiveSizeInBits()) ==
2445 SI
->getType()->getPrimitiveSizeInBits()-1) {
2446 // Ok, the transformation is safe. Insert AShr.
2447 return BinaryOperator::Create(Instruction::AShr
,
2448 SI
->getOperand(0), CU
, SI
->getName());
2452 else if (SI
->getOpcode() == Instruction::AShr
) {
2453 if (ConstantInt
*CU
= dyn_cast
<ConstantInt
>(SI
->getOperand(1))) {
2454 // Check to see if we are shifting out everything but the sign bit.
2455 if (CU
->getLimitedValue(SI
->getType()->getPrimitiveSizeInBits()) ==
2456 SI
->getType()->getPrimitiveSizeInBits()-1) {
2457 // Ok, the transformation is safe. Insert LShr.
2458 return BinaryOperator::CreateLShr(
2459 SI
->getOperand(0), CU
, SI
->getName());
2466 // Try to fold constant sub into select arguments.
2467 if (SelectInst
*SI
= dyn_cast
<SelectInst
>(Op1
))
2468 if (Instruction
*R
= FoldOpIntoSelect(I
, SI
, this))
2471 // C - zext(bool) -> bool ? C - 1 : C
2472 if (ZExtInst
*ZI
= dyn_cast
<ZExtInst
>(Op1
))
2473 if (ZI
->getSrcTy() == Type::Int1Ty
)
2474 return SelectInst::Create(ZI
->getOperand(0), SubOne(C
, Context
), C
);
2477 if (I
.getType() == Type::Int1Ty
)
2478 return BinaryOperator::CreateXor(Op0
, Op1
);
2480 if (BinaryOperator
*Op1I
= dyn_cast
<BinaryOperator
>(Op1
)) {
2481 if (Op1I
->getOpcode() == Instruction::Add
) {
2482 if (Op1I
->getOperand(0) == Op0
) // X-(X+Y) == -Y
2483 return BinaryOperator::CreateNeg(*Context
, Op1I
->getOperand(1),
2485 else if (Op1I
->getOperand(1) == Op0
) // X-(Y+X) == -Y
2486 return BinaryOperator::CreateNeg(*Context
, Op1I
->getOperand(0),
2488 else if (ConstantInt
*CI1
= dyn_cast
<ConstantInt
>(I
.getOperand(0))) {
2489 if (ConstantInt
*CI2
= dyn_cast
<ConstantInt
>(Op1I
->getOperand(1)))
2490 // C1-(X+C2) --> (C1-C2)-X
2491 return BinaryOperator::CreateSub(
2492 ConstantExpr::getSub(CI1
, CI2
), Op1I
->getOperand(0));
2496 if (Op1I
->hasOneUse()) {
2497 // Replace (x - (y - z)) with (x + (z - y)) if the (y - z) subexpression
2498 // is not used by anyone else...
2500 if (Op1I
->getOpcode() == Instruction::Sub
) {
2501 // Swap the two operands of the subexpr...
2502 Value
*IIOp0
= Op1I
->getOperand(0), *IIOp1
= Op1I
->getOperand(1);
2503 Op1I
->setOperand(0, IIOp1
);
2504 Op1I
->setOperand(1, IIOp0
);
2506 // Create the new top level add instruction...
2507 return BinaryOperator::CreateAdd(Op0
, Op1
);
2510 // Replace (A - (A & B)) with (A & ~B) if this is the only use of (A&B)...
2512 if (Op1I
->getOpcode() == Instruction::And
&&
2513 (Op1I
->getOperand(0) == Op0
|| Op1I
->getOperand(1) == Op0
)) {
2514 Value
*OtherOp
= Op1I
->getOperand(Op1I
->getOperand(0) == Op0
);
2517 InsertNewInstBefore(BinaryOperator::CreateNot(*Context
,
2518 OtherOp
, "B.not"), I
);
2519 return BinaryOperator::CreateAnd(Op0
, NewNot
);
2522 // 0 - (X sdiv C) -> (X sdiv -C)
2523 if (Op1I
->getOpcode() == Instruction::SDiv
)
2524 if (ConstantInt
*CSI
= dyn_cast
<ConstantInt
>(Op0
))
2526 if (Constant
*DivRHS
= dyn_cast
<Constant
>(Op1I
->getOperand(1)))
2527 return BinaryOperator::CreateSDiv(Op1I
->getOperand(0),
2528 ConstantExpr::getNeg(DivRHS
));
2530 // X - X*C --> X * (1-C)
2531 ConstantInt
*C2
= 0;
2532 if (dyn_castFoldableMul(Op1I
, C2
, Context
) == Op0
) {
2534 ConstantExpr::getSub(ConstantInt::get(I
.getType(), 1),
2536 return BinaryOperator::CreateMul(Op0
, CP1
);
2541 if (BinaryOperator
*Op0I
= dyn_cast
<BinaryOperator
>(Op0
)) {
2542 if (Op0I
->getOpcode() == Instruction::Add
) {
2543 if (Op0I
->getOperand(0) == Op1
) // (Y+X)-Y == X
2544 return ReplaceInstUsesWith(I
, Op0I
->getOperand(1));
2545 else if (Op0I
->getOperand(1) == Op1
) // (X+Y)-Y == X
2546 return ReplaceInstUsesWith(I
, Op0I
->getOperand(0));
2547 } else if (Op0I
->getOpcode() == Instruction::Sub
) {
2548 if (Op0I
->getOperand(0) == Op1
) // (X-Y)-X == -Y
2549 return BinaryOperator::CreateNeg(*Context
, Op0I
->getOperand(1),
2555 if (Value
*X
= dyn_castFoldableMul(Op0
, C1
, Context
)) {
2556 if (X
== Op1
) // X*C - X --> X * (C-1)
2557 return BinaryOperator::CreateMul(Op1
, SubOne(C1
, Context
));
2559 ConstantInt
*C2
; // X*C1 - X*C2 -> X * (C1-C2)
2560 if (X
== dyn_castFoldableMul(Op1
, C2
, Context
))
2561 return BinaryOperator::CreateMul(X
, ConstantExpr::getSub(C1
, C2
));
Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // If this is a 'B = x-(-A)', change to B = x+A...
  if (Value *V = dyn_castFNegVal(Op1, Context))
    return BinaryOperator::CreateFAdd(Op0, V);

  if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
    if (Op1I->getOpcode() == Instruction::FAdd) {
      if (Op1I->getOperand(0) == Op0)        // X-(X+Y) == -Y
        return BinaryOperator::CreateFNeg(*Context, Op1I->getOperand(1),
                                          I.getName());
      else if (Op1I->getOperand(1) == Op0)   // X-(Y+X) == -Y
        return BinaryOperator::CreateFNeg(*Context, Op1I->getOperand(0),
                                          I.getName());
    }
  }

  return 0;
}
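// Added illustrative comment (not from the original source; %x, %y are
// hypothetical values): the X-(X+Y) case above turns
//   %t = fadd double %x, %y
//   %r = fsub double %x, %t
// into a negation of %y, i.e. %r = fsub double -0.000000e+00, %y.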
/// isSignBitCheck - Given an exploded icmp instruction, return true if the
/// comparison only checks the sign bit.  If it only checks the sign bit, set
/// TrueIfSigned if the result of the comparison is true when the input value
/// is signed.
static bool isSignBitCheck(ICmpInst::Predicate pred, ConstantInt *RHS,
                           bool &TrueIfSigned) {
  switch (pred) {
  case ICmpInst::ICMP_SLT:   // True if LHS s< 0
    TrueIfSigned = true;
    return RHS->isZero();
  case ICmpInst::ICMP_SLE:   // True if LHS s<= RHS and RHS == -1
    TrueIfSigned = true;
    return RHS->isAllOnesValue();
  case ICmpInst::ICMP_SGT:   // True if LHS s> -1
    TrueIfSigned = false;
    return RHS->isAllOnesValue();
  case ICmpInst::ICMP_UGT:
    // True if LHS u> RHS and RHS == high-bit-mask - 1
    TrueIfSigned = true;
    return RHS->getValue() ==
      APInt::getSignedMaxValue(RHS->getType()->getPrimitiveSizeInBits());
  case ICmpInst::ICMP_UGE:
    // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
    TrueIfSigned = true;
    return RHS->getValue().isSignBit();
  default:
    return false;
  }
}
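// Added illustrative comment (not from the original source): for i8,
// "icmp ugt i8 %x, 127" matches the ICMP_UGT case above; it is true exactly
// when %x is 128 or greater, i.e. when the sign bit of %x is set, so
// TrueIfSigned is reported as true.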
2617 Instruction
*InstCombiner::visitMul(BinaryOperator
&I
) {
2618 bool Changed
= SimplifyCommutative(I
);
2619 Value
*Op0
= I
.getOperand(0);
2621 if (isa
<UndefValue
>(I
.getOperand(1))) // undef * X -> 0
2622 return ReplaceInstUsesWith(I
, Constant::getNullValue(I
.getType()));
2624 // Simplify mul instructions with a constant RHS...
2625 if (Constant
*Op1
= dyn_cast
<Constant
>(I
.getOperand(1))) {
2626 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Op1
)) {
2628 // ((X << C1)*C2) == (X * (C2 << C1))
2629 if (BinaryOperator
*SI
= dyn_cast
<BinaryOperator
>(Op0
))
2630 if (SI
->getOpcode() == Instruction::Shl
)
2631 if (Constant
*ShOp
= dyn_cast
<Constant
>(SI
->getOperand(1)))
2632 return BinaryOperator::CreateMul(SI
->getOperand(0),
2633 ConstantExpr::getShl(CI
, ShOp
));
2636 return ReplaceInstUsesWith(I
, Op1
); // X * 0 == 0
2637 if (CI
->equalsInt(1)) // X * 1 == X
2638 return ReplaceInstUsesWith(I
, Op0
);
2639 if (CI
->isAllOnesValue()) // X * -1 == 0 - X
2640 return BinaryOperator::CreateNeg(*Context
, Op0
, I
.getName());
2642 const APInt
& Val
= cast
<ConstantInt
>(CI
)->getValue();
2643 if (Val
.isPowerOf2()) { // Replace X*(2^C) with X << C
2644 return BinaryOperator::CreateShl(Op0
,
2645 ConstantInt::get(Op0
->getType(), Val
.logBase2()));
2647 } else if (isa
<VectorType
>(Op1
->getType())) {
2648 if (Op1
->isNullValue())
2649 return ReplaceInstUsesWith(I
, Op1
);
2651 if (ConstantVector
*Op1V
= dyn_cast
<ConstantVector
>(Op1
)) {
2652 if (Op1V
->isAllOnesValue()) // X * -1 == 0 - X
2653 return BinaryOperator::CreateNeg(*Context
, Op0
, I
.getName());
2655 // As above, vector X*splat(1.0) -> X in all defined cases.
2656 if (Constant
*Splat
= Op1V
->getSplatValue()) {
2657 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Splat
))
2658 if (CI
->equalsInt(1))
2659 return ReplaceInstUsesWith(I
, Op0
);
2664 if (BinaryOperator
*Op0I
= dyn_cast
<BinaryOperator
>(Op0
))
2665 if (Op0I
->getOpcode() == Instruction::Add
&& Op0I
->hasOneUse() &&
2666 isa
<ConstantInt
>(Op0I
->getOperand(1)) && isa
<ConstantInt
>(Op1
)) {
2667 // Canonicalize (X+C1)*C2 -> X*C2+C1*C2.
2668 Instruction
*Add
= BinaryOperator::CreateMul(Op0I
->getOperand(0),
2670 InsertNewInstBefore(Add
, I
);
2671 Value
*C1C2
= ConstantExpr::getMul(Op1
,
2672 cast
<Constant
>(Op0I
->getOperand(1)));
2673 return BinaryOperator::CreateAdd(Add
, C1C2
);
2677 // Try to fold constant mul into select arguments.
2678 if (SelectInst
*SI
= dyn_cast
<SelectInst
>(Op0
))
2679 if (Instruction
*R
= FoldOpIntoSelect(I
, SI
, this))
2682 if (isa
<PHINode
>(Op0
))
2683 if (Instruction
*NV
= FoldOpIntoPhi(I
))
2687 if (Value
*Op0v
= dyn_castNegVal(Op0
, Context
)) // -X * -Y = X*Y
2688 if (Value
*Op1v
= dyn_castNegVal(I
.getOperand(1), Context
))
2689 return BinaryOperator::CreateMul(Op0v
, Op1v
);
2691 // (X / Y) * Y = X - (X % Y)
2692 // (X / Y) * -Y = (X % Y) - X
2694 Value
*Op1
= I
.getOperand(1);
2695 BinaryOperator
*BO
= dyn_cast
<BinaryOperator
>(Op0
);
2697 (BO
->getOpcode() != Instruction::UDiv
&&
2698 BO
->getOpcode() != Instruction::SDiv
)) {
2700 BO
= dyn_cast
<BinaryOperator
>(I
.getOperand(1));
2702 Value
*Neg
= dyn_castNegVal(Op1
, Context
);
2703 if (BO
&& BO
->hasOneUse() &&
2704 (BO
->getOperand(1) == Op1
|| BO
->getOperand(1) == Neg
) &&
2705 (BO
->getOpcode() == Instruction::UDiv
||
2706 BO
->getOpcode() == Instruction::SDiv
)) {
2707 Value
*Op0BO
= BO
->getOperand(0), *Op1BO
= BO
->getOperand(1);
2710 if (BO
->getOpcode() == Instruction::UDiv
)
2711 Rem
= BinaryOperator::CreateURem(Op0BO
, Op1BO
);
2713 Rem
= BinaryOperator::CreateSRem(Op0BO
, Op1BO
);
2715 InsertNewInstBefore(Rem
, I
);
2719 return BinaryOperator::CreateSub(Op0BO
, Rem
);
2721 return BinaryOperator::CreateSub(Rem
, Op0BO
);
2725 if (I
.getType() == Type::Int1Ty
)
2726 return BinaryOperator::CreateAnd(Op0
, I
.getOperand(1));
2728 // If one of the operands of the multiply is a cast from a boolean value, then
2729 // we know the bool is either zero or one, so this is a 'masking' multiply.
2730 // See if we can simplify things based on how the boolean was originally
2732 CastInst
*BoolCast
= 0;
2733 if (ZExtInst
*CI
= dyn_cast
<ZExtInst
>(Op0
))
2734 if (CI
->getOperand(0)->getType() == Type::Int1Ty
)
2737 if (ZExtInst
*CI
= dyn_cast
<ZExtInst
>(I
.getOperand(1)))
2738 if (CI
->getOperand(0)->getType() == Type::Int1Ty
)
2741 if (ICmpInst
*SCI
= dyn_cast
<ICmpInst
>(BoolCast
->getOperand(0))) {
2742 Value
*SCIOp0
= SCI
->getOperand(0), *SCIOp1
= SCI
->getOperand(1);
2743 const Type
*SCOpTy
= SCIOp0
->getType();
2746 // If the icmp is true iff the sign bit of X is set, then convert this
2747 // multiply into a shift/and combination.
2748 if (isa
<ConstantInt
>(SCIOp1
) &&
2749 isSignBitCheck(SCI
->getPredicate(), cast
<ConstantInt
>(SCIOp1
), TIS
) &&
2751 // Shift the X value right to turn it into "all signbits".
2752 Constant
*Amt
= ConstantInt::get(SCIOp0
->getType(),
2753 SCOpTy
->getPrimitiveSizeInBits()-1);
2755 InsertNewInstBefore(
2756 BinaryOperator::Create(Instruction::AShr
, SCIOp0
, Amt
,
2757 BoolCast
->getOperand(0)->getName()+
2760 // If the multiply type is not the same as the source type, sign extend
2761 // or truncate to the multiply type.
2762 if (I
.getType() != V
->getType()) {
2763 uint32_t SrcBits
= V
->getType()->getPrimitiveSizeInBits();
2764 uint32_t DstBits
= I
.getType()->getPrimitiveSizeInBits();
2765 Instruction::CastOps opcode
=
2766 (SrcBits
== DstBits
? Instruction::BitCast
:
2767 (SrcBits
< DstBits
? Instruction::SExt
: Instruction::Trunc
));
2768 V
= InsertCastBefore(opcode
, V
, I
.getType(), I
);
2771 Value
*OtherOp
= Op0
== BoolCast
? I
.getOperand(1) : Op0
;
2772 return BinaryOperator::CreateAnd(V
, OtherOp
);
2777 return Changed
? &I
: 0;
2780 Instruction
*InstCombiner::visitFMul(BinaryOperator
&I
) {
2781 bool Changed
= SimplifyCommutative(I
);
2782 Value
*Op0
= I
.getOperand(0);
2784 // Simplify mul instructions with a constant RHS...
2785 if (Constant
*Op1
= dyn_cast
<Constant
>(I
.getOperand(1))) {
2786 if (ConstantFP
*Op1F
= dyn_cast
<ConstantFP
>(Op1
)) {
2787 // "In IEEE floating point, x*1 is not equivalent to x for nans. However,
2788 // ANSI says we can drop signals, so we can do this anyway." (from GCC)
2789 if (Op1F
->isExactlyValue(1.0))
2790 return ReplaceInstUsesWith(I
, Op0
); // Eliminate 'mul double %X, 1.0'
2791 } else if (isa
<VectorType
>(Op1
->getType())) {
2792 if (ConstantVector
*Op1V
= dyn_cast
<ConstantVector
>(Op1
)) {
2793 // As above, vector X*splat(1.0) -> X in all defined cases.
2794 if (Constant
*Splat
= Op1V
->getSplatValue()) {
2795 if (ConstantFP
*F
= dyn_cast
<ConstantFP
>(Splat
))
2796 if (F
->isExactlyValue(1.0))
2797 return ReplaceInstUsesWith(I
, Op0
);
2802 // Try to fold constant mul into select arguments.
2803 if (SelectInst
*SI
= dyn_cast
<SelectInst
>(Op0
))
2804 if (Instruction
*R
= FoldOpIntoSelect(I
, SI
, this))
2807 if (isa
<PHINode
>(Op0
))
2808 if (Instruction
*NV
= FoldOpIntoPhi(I
))
2812 if (Value
*Op0v
= dyn_castFNegVal(Op0
, Context
)) // -X * -Y = X*Y
2813 if (Value
*Op1v
= dyn_castFNegVal(I
.getOperand(1), Context
))
2814 return BinaryOperator::CreateFMul(Op0v
, Op1v
);
2816 return Changed
? &I
: 0;
2819 /// SimplifyDivRemOfSelect - Try to fold a divide or remainder of a select
2821 bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator
&I
) {
2822 SelectInst
*SI
= cast
<SelectInst
>(I
.getOperand(1));
2824 // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
2825 int NonNullOperand
= -1;
2826 if (Constant
*ST
= dyn_cast
<Constant
>(SI
->getOperand(1)))
2827 if (ST
->isNullValue())
2829 // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
2830 if (Constant
*ST
= dyn_cast
<Constant
>(SI
->getOperand(2)))
2831 if (ST
->isNullValue())
2834 if (NonNullOperand
== -1)
2837 Value
*SelectCond
= SI
->getOperand(0);
2839 // Change the div/rem to use 'Y' instead of the select.
2840 I
.setOperand(1, SI
->getOperand(NonNullOperand
));
2842 // Okay, we know we replace the operand of the div/rem with 'Y' with no
2843 // problem. However, the select, or the condition of the select may have
2844 // multiple uses. Based on our knowledge that the operand must be non-zero,
2845 // propagate the known value for the select into other uses of it, and
2846 // propagate a known value of the condition into its other users.
2848 // If the select and condition only have a single use, don't bother with this,
2850 if (SI
->use_empty() && SelectCond
->hasOneUse())
2853 // Scan the current block backward, looking for other uses of SI.
2854 BasicBlock::iterator BBI
= &I
, BBFront
= I
.getParent()->begin();
2856 while (BBI
!= BBFront
) {
2858 // If we found a call to a function, we can't assume it will return, so
2859 // information from below it cannot be propagated above it.
2860 if (isa
<CallInst
>(BBI
) && !isa
<IntrinsicInst
>(BBI
))
2863 // Replace uses of the select or its condition with the known values.
2864 for (Instruction::op_iterator I
= BBI
->op_begin(), E
= BBI
->op_end();
2867 *I
= SI
->getOperand(NonNullOperand
);
2869 } else if (*I
== SelectCond
) {
2870 *I
= NonNullOperand
== 1 ? ConstantInt::getTrue(*Context
) :
2871 ConstantInt::getFalse(*Context
);
2876 // If we past the instruction, quit looking for it.
2879 if (&*BBI
== SelectCond
)
2882 // If we ran out of things to eliminate, break out of the loop.
2883 if (SelectCond
== 0 && SI
== 0)
2891 /// This function implements the transforms on div instructions that work
2892 /// regardless of the kind of div instruction it is (udiv, sdiv, or fdiv). It is
2893 /// used by the visitors to those instructions.
2894 /// @brief Transforms common to all three div instructions
2895 Instruction
*InstCombiner::commonDivTransforms(BinaryOperator
&I
) {
2896 Value
*Op0
= I
.getOperand(0), *Op1
= I
.getOperand(1);
2898 // undef / X -> 0 for integer.
2899 // undef / X -> undef for FP (the undef could be a snan).
2900 if (isa
<UndefValue
>(Op0
)) {
2901 if (Op0
->getType()->isFPOrFPVector())
2902 return ReplaceInstUsesWith(I
, Op0
);
2903 return ReplaceInstUsesWith(I
, Constant::getNullValue(I
.getType()));
2906 // X / undef -> undef
2907 if (isa
<UndefValue
>(Op1
))
2908 return ReplaceInstUsesWith(I
, Op1
);
2913 /// This function implements the transforms common to both integer division
2914 /// instructions (udiv and sdiv). It is called by the visitors to those integer
2915 /// division instructions.
2916 /// @brief Common integer divide transforms
2917 Instruction
*InstCombiner::commonIDivTransforms(BinaryOperator
&I
) {
2918 Value
*Op0
= I
.getOperand(0), *Op1
= I
.getOperand(1);
2920 // (sdiv X, X) --> 1 (udiv X, X) --> 1
2922 if (const VectorType
*Ty
= dyn_cast
<VectorType
>(I
.getType())) {
2923 Constant
*CI
= ConstantInt::get(Ty
->getElementType(), 1);
2924 std::vector
<Constant
*> Elts(Ty
->getNumElements(), CI
);
2925 return ReplaceInstUsesWith(I
, ConstantVector::get(Elts
));
2928 Constant
*CI
= ConstantInt::get(I
.getType(), 1);
2929 return ReplaceInstUsesWith(I
, CI
);
2932 if (Instruction
*Common
= commonDivTransforms(I
))
2935 // Handle cases involving: [su]div X, (select Cond, Y, Z)
2936 // This does not apply for fdiv.
2937 if (isa
<SelectInst
>(Op1
) && SimplifyDivRemOfSelect(I
))
2940 if (ConstantInt
*RHS
= dyn_cast
<ConstantInt
>(Op1
)) {
2942 if (RHS
->equalsInt(1))
2943 return ReplaceInstUsesWith(I
, Op0
);
2945 // (X / C1) / C2 -> X / (C1*C2)
2946 if (Instruction
*LHS
= dyn_cast
<Instruction
>(Op0
))
2947 if (Instruction::BinaryOps(LHS
->getOpcode()) == I
.getOpcode())
2948 if (ConstantInt
*LHSRHS
= dyn_cast
<ConstantInt
>(LHS
->getOperand(1))) {
2949 if (MultiplyOverflows(RHS
, LHSRHS
,
2950 I
.getOpcode()==Instruction::SDiv
, Context
))
2951 return ReplaceInstUsesWith(I
, Constant::getNullValue(I
.getType()));
2953 return BinaryOperator::Create(I
.getOpcode(), LHS
->getOperand(0),
2954 ConstantExpr::getMul(RHS
, LHSRHS
));
2957 if (!RHS
->isZero()) { // avoid X udiv 0
2958 if (SelectInst
*SI
= dyn_cast
<SelectInst
>(Op0
))
2959 if (Instruction
*R
= FoldOpIntoSelect(I
, SI
, this))
2961 if (isa
<PHINode
>(Op0
))
2962 if (Instruction
*NV
= FoldOpIntoPhi(I
))
2967 // 0 / X == 0, we don't need to preserve faults!
2968 if (ConstantInt
*LHS
= dyn_cast
<ConstantInt
>(Op0
))
2969 if (LHS
->equalsInt(0))
2970 return ReplaceInstUsesWith(I
, Constant::getNullValue(I
.getType()));
2972 // It can't be division by zero, hence it must be division by one.
2973 if (I
.getType() == Type::Int1Ty
)
2974 return ReplaceInstUsesWith(I
, Op0
);
2976 if (ConstantVector
*Op1V
= dyn_cast
<ConstantVector
>(Op1
)) {
2977 if (ConstantInt
*X
= cast_or_null
<ConstantInt
>(Op1V
->getSplatValue()))
2980 return ReplaceInstUsesWith(I
, Op0
);
2986 Instruction
*InstCombiner::visitUDiv(BinaryOperator
&I
) {
2987 Value
*Op0
= I
.getOperand(0), *Op1
= I
.getOperand(1);
2989 // Handle the integer div common cases
2990 if (Instruction
*Common
= commonIDivTransforms(I
))
2993 if (ConstantInt
*C
= dyn_cast
<ConstantInt
>(Op1
)) {
2994 // X udiv C^2 -> X >> C
2995 // Check to see if this is an unsigned division with an exact power of 2,
2996 // if so, convert to a right shift.
2997 if (C
->getValue().isPowerOf2()) // 0 not included in isPowerOf2
2998 return BinaryOperator::CreateLShr(Op0
,
2999 ConstantInt::get(Op0
->getType(), C
->getValue().logBase2()));
3001 // X udiv C, where C >= signbit
3002 if (C
->getValue().isNegative()) {
3003 Value
*IC
= InsertNewInstBefore(new ICmpInst(*Context
,
3004 ICmpInst::ICMP_ULT
, Op0
, C
),
3006 return SelectInst::Create(IC
, Constant::getNullValue(I
.getType()),
3007 ConstantInt::get(I
.getType(), 1));
3011 // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
3012 if (BinaryOperator
*RHSI
= dyn_cast
<BinaryOperator
>(I
.getOperand(1))) {
3013 if (RHSI
->getOpcode() == Instruction::Shl
&&
3014 isa
<ConstantInt
>(RHSI
->getOperand(0))) {
3015 const APInt
& C1
= cast
<ConstantInt
>(RHSI
->getOperand(0))->getValue();
3016 if (C1
.isPowerOf2()) {
3017 Value
*N
= RHSI
->getOperand(1);
3018 const Type
*NTy
= N
->getType();
3019 if (uint32_t C2
= C1
.logBase2()) {
3020 Constant
*C2V
= ConstantInt::get(NTy
, C2
);
3021 N
= InsertNewInstBefore(BinaryOperator::CreateAdd(N
, C2V
, "tmp"), I
);
3023 return BinaryOperator::CreateLShr(Op0
, N
);
3028 // udiv X, (Select Cond, C1, C2) --> Select Cond, (shr X, C1), (shr X, C2)
3029 // where C1&C2 are powers of two.
3030 if (SelectInst
*SI
= dyn_cast
<SelectInst
>(Op1
))
3031 if (ConstantInt
*STO
= dyn_cast
<ConstantInt
>(SI
->getOperand(1)))
3032 if (ConstantInt
*SFO
= dyn_cast
<ConstantInt
>(SI
->getOperand(2))) {
3033 const APInt
&TVA
= STO
->getValue(), &FVA
= SFO
->getValue();
3034 if (TVA
.isPowerOf2() && FVA
.isPowerOf2()) {
3035 // Compute the shift amounts
3036 uint32_t TSA
= TVA
.logBase2(), FSA
= FVA
.logBase2();
3037 // Construct the "on true" case of the select
3038 Constant
*TC
= ConstantInt::get(Op0
->getType(), TSA
);
3039 Instruction
*TSI
= BinaryOperator::CreateLShr(
3040 Op0
, TC
, SI
->getName()+".t");
3041 TSI
= InsertNewInstBefore(TSI
, I
);
3043 // Construct the "on false" case of the select
3044 Constant
*FC
= ConstantInt::get(Op0
->getType(), FSA
);
3045 Instruction
*FSI
= BinaryOperator::CreateLShr(
3046 Op0
, FC
, SI
->getName()+".f");
3047 FSI
= InsertNewInstBefore(FSI
, I
);
3049 // construct the select instruction and return it.
3050 return SelectInst::Create(SI
->getOperand(0), TSI
, FSI
, SI
->getName());
Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // Handle the integer div common cases.
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    // sdiv X, -1 == -X
    if (RHS->isAllOnesValue())
      return BinaryOperator::CreateNeg(*Context, Op0);
  }

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a udiv.
  if (I.getType()->isInteger()) {
    APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
    if (MaskedValueIsZero(Op0, Mask)) {
      if (MaskedValueIsZero(Op1, Mask)) {
        // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
        return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      }
      ConstantInt *ShiftedInt;
      if (match(Op1, m_Shl(m_ConstantInt(ShiftedInt), m_Value()), *Context) &&
          ShiftedInt->getValue().isPowerOf2()) {
        // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
        // Safe because the only negative value (1 << Y) can take on is
        // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
        // the sign bit set.
        return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      }
    }
  }

  return 0;
}
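// For example, "sdiv i32 %x, -1" becomes a negation of %x, and when the sign
// bits of both operands are known zero, "sdiv i32 %x, %y" becomes
// "udiv i32 %x, %y".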
Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
  return commonDivTransforms(I);
}
/// This function implements the transforms on rem instructions that work
/// regardless of the kind of rem instruction it is (urem, srem, or frem). It
/// is used by the visitors to those instructions.
/// @brief Transforms common to all three rem instructions
Instruction *InstCombiner::commonRemTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (isa<UndefValue>(Op0)) {              // undef % X -> 0
    if (I.getType()->isFPOrFPVector())
      return ReplaceInstUsesWith(I, Op0);  // undef % X -> undef (could be SNaN)
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
  }
  if (isa<UndefValue>(Op1))
    return ReplaceInstUsesWith(I, Op1);    // X % undef -> undef

  // Handle cases involving: rem X, (select Cond, Y, Z)
  if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
    return &I;

  return 0;
}
/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those integer
/// remainder instructions.
/// @brief Common integer remainder transforms
Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Instruction *common = commonRemTransforms(I))
    return common;

  // 0 % X == 0 for integer; we don't need to preserve faults!
  if (Constant *LHS = dyn_cast<Constant>(Op0))
    if (LHS->isNullValue())
      return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    // X % 0 == undef; we don't need to preserve faults!
    if (RHS->equalsInt(0))
      return ReplaceInstUsesWith(I, UndefValue::get(I.getType()));

    if (RHS->equalsInt(1))  // X % 1 == 0
      return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

    if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
        if (Instruction *R = FoldOpIntoSelect(I, SI, this))
          return R;
      } else if (isa<PHINode>(Op0I)) {
        if (Instruction *NV = FoldOpIntoPhi(I))
          return NV;
      }

      // See if we can fold away this rem instruction.
      if (SimplifyDemandedInstructionBits(I))
        return &I;
    }
  }

  return 0;
}
Instruction *InstCombiner::visitURem(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Instruction *common = commonIRemTransforms(I))
    return common;

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    // X urem C^2 -> X & (C-1)
    // Check to see if this is an unsigned remainder with an exact power of 2;
    // if so, convert it to a bitwise and.
    if (ConstantInt *C = dyn_cast<ConstantInt>(RHS))
      if (C->getValue().isPowerOf2())
        return BinaryOperator::CreateAnd(Op0, SubOne(C, Context));
  }

  if (Instruction *RHSI = dyn_cast<Instruction>(I.getOperand(1))) {
    // Turn A % (C << N), where C is 2^k, into A & ((C << N)-1)
    if (RHSI->getOpcode() == Instruction::Shl &&
        isa<ConstantInt>(RHSI->getOperand(0))) {
      if (cast<ConstantInt>(RHSI->getOperand(0))->getValue().isPowerOf2()) {
        Constant *N1 = Constant::getAllOnesValue(I.getType());
        Value *Add = InsertNewInstBefore(BinaryOperator::CreateAdd(RHSI, N1,
                                                                   "tmp"), I);
        return BinaryOperator::CreateAnd(Op0, Add);
      }
    }
  }

  // urem X, (select Cond, C1, C2) --> select Cond, (and X, C1-1), (and X, C2-1)
  // where C1 and C2 are powers of two.
  if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) {
    if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
      if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
        // STO == 0 and SFO == 0 handled above.
        if ((STO->getValue().isPowerOf2()) &&
            (SFO->getValue().isPowerOf2())) {
          Value *TrueAnd = InsertNewInstBefore(
            BinaryOperator::CreateAnd(Op0, SubOne(STO, Context),
                                      SI->getName()+".t"), I);
          Value *FalseAnd = InsertNewInstBefore(
            BinaryOperator::CreateAnd(Op0, SubOne(SFO, Context),
                                      SI->getName()+".f"), I);
          return SelectInst::Create(SI->getOperand(0), TrueAnd, FalseAnd);
        }
      }
  }

  return 0;
}
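// For example, the urem transforms above turn:
//   urem i32 %x, 8            -->  and i32 %x, 7
//   urem i32 %x, (shl 2, %n)  -->  and i32 %x, ((shl 2, %n) - 1)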
Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // Handle the integer rem common cases.
  if (Instruction *common = commonIRemTransforms(I))
    return common;

  // X % -Y -> X % Y
  if (Value *RHSNeg = dyn_castNegVal(Op1, Context))
    if (!isa<Constant>(RHSNeg) ||
        (isa<ConstantInt>(RHSNeg) &&
         cast<ConstantInt>(RHSNeg)->getValue().isStrictlyPositive())) {
      AddUsesToWorkList(I);
      I.setOperand(1, RHSNeg);
      return &I;
    }

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a urem.
  if (I.getType()->isInteger()) {
    APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
    if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
      // X srem Y -> X urem Y, iff X and Y don't have sign bit set
      return BinaryOperator::CreateURem(Op0, Op1, I.getName());
    }
  }

  // If it's a constant vector, flip any negative values positive.
  if (ConstantVector *RHSV = dyn_cast<ConstantVector>(Op1)) {
    unsigned VWidth = RHSV->getNumOperands();

    bool hasNegative = false;
    for (unsigned i = 0; !hasNegative && i != VWidth; ++i)
      if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i)))
        if (RHS->getValue().isNegative())
          hasNegative = true;

    if (hasNegative) {
      std::vector<Constant *> Elts(VWidth);
      for (unsigned i = 0; i != VWidth; ++i) {
        if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i))) {
          if (RHS->getValue().isNegative())
            Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
          else
            Elts[i] = RHS;
        }
      }

      Constant *NewRHSV = ConstantVector::get(Elts);
      if (NewRHSV != RHSV) {
        AddUsesToWorkList(I);
        I.setOperand(1, NewRHSV);
        return &I;
      }
    }
  }

  return 0;
}
Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
  return commonRemTransforms(I);
}
// isOneBitSet - Return true if there is exactly one bit set in the specified
// constant.
static bool isOneBitSet(const ConstantInt *CI) {
  return CI->getValue().isPowerOf2();
}

// isHighOnes - Return true if the constant is of the form 1+0+.
// This is the same as lowones(~X).
static bool isHighOnes(const ConstantInt *CI) {
  return (~CI->getValue() + 1).isPowerOf2();
}
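// For example, isHighOnes(0xFF000000) and isHighOnes(0x80000000) are true for
// i32 constants, since ~0xFF000000 + 1 == 0x01000000 is a power of two, while
// isHighOnes(0x00FF0000) is false.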
/// getICmpCode - Encode an icmp predicate into a three bit mask. These bits
/// are carefully arranged to allow folding of expressions such as:
///
///      (A < B) | (A > B) --> (A != B)
///
/// Note that this is only valid if the first and second predicates have the
/// same sign. It is illegal to do: (A u< B) | (A s> B)
///
/// Three bits are used to represent the condition, as follows:
///
/// <=>  Value  Definition
/// 000     0   Always false
/// 001     1   A >  B
/// 010     2   A == B
/// 011     3   A >= B
/// 100     4   A <  B
/// 101     5   A != B
/// 110     6   A <= B
/// 111     7   Always true
///
static unsigned getICmpCode(const ICmpInst *ICI) {
  switch (ICI->getPredicate()) {
    // False -> 0
  case ICmpInst::ICMP_UGT: return 1;  // 001
  case ICmpInst::ICMP_SGT: return 1;  // 001
  case ICmpInst::ICMP_EQ:  return 2;  // 010
  case ICmpInst::ICMP_UGE: return 3;  // 011
  case ICmpInst::ICMP_SGE: return 3;  // 011
  case ICmpInst::ICMP_ULT: return 4;  // 100
  case ICmpInst::ICMP_SLT: return 4;  // 100
  case ICmpInst::ICMP_NE:  return 5;  // 101
  case ICmpInst::ICMP_ULE: return 6;  // 110
  case ICmpInst::ICMP_SLE: return 6;  // 110
    // True -> 7
  default:
    llvm_unreachable("Invalid ICmp predicate!");
    return 0;
  }
}
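// For example, getICmpCode returns 4 (100) for ICMP_SLT and 1 (001) for
// ICMP_SGT, so or'ing the codes yields 5 (101), which decodes to ICMP_NE:
//   (A s< B) | (A s> B)  -->  (A != B)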
/// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
/// predicate into a three bit mask. It also returns whether it is an ordered
/// predicate by reference.
static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
  isOrdered = false;
  switch (CC) {
  case FCmpInst::FCMP_ORD: isOrdered = true; return 0;  // 000
  case FCmpInst::FCMP_UNO:                   return 0;  // 000
  case FCmpInst::FCMP_OGT: isOrdered = true; return 1;  // 001
  case FCmpInst::FCMP_UGT:                   return 1;  // 001
  case FCmpInst::FCMP_OEQ: isOrdered = true; return 2;  // 010
  case FCmpInst::FCMP_UEQ:                   return 2;  // 010
  case FCmpInst::FCMP_OGE: isOrdered = true; return 3;  // 011
  case FCmpInst::FCMP_UGE:                   return 3;  // 011
  case FCmpInst::FCMP_OLT: isOrdered = true; return 4;  // 100
  case FCmpInst::FCMP_ULT:                   return 4;  // 100
  case FCmpInst::FCMP_ONE: isOrdered = true; return 5;  // 101
  case FCmpInst::FCMP_UNE:                   return 5;  // 101
  case FCmpInst::FCMP_OLE: isOrdered = true; return 6;  // 110
  case FCmpInst::FCMP_ULE:                   return 6;  // 110
    // True -> 7
  default:
    // Not expecting FCMP_FALSE and FCMP_TRUE;
    llvm_unreachable("Unexpected FCmp predicate!");
    return 0;
  }
}
/// getICmpValue - This is the complement of getICmpCode, which turns an
/// opcode and two operands into either a constant true or false, or a brand
/// new ICmp instruction. The sign is passed in to determine which kind
/// of predicate to use in the new icmp instruction.
static Value *getICmpValue(bool sign, unsigned code, Value *LHS, Value *RHS,
                           LLVMContext *Context) {
  switch (code) {
  default: llvm_unreachable("Illegal ICmp code!");
  case 0: return ConstantInt::getFalse(*Context);
  case 1:
    if (sign)
      return new ICmpInst(*Context, ICmpInst::ICMP_SGT, LHS, RHS);
    else
      return new ICmpInst(*Context, ICmpInst::ICMP_UGT, LHS, RHS);
  case 2: return new ICmpInst(*Context, ICmpInst::ICMP_EQ,  LHS, RHS);
  case 3:
    if (sign)
      return new ICmpInst(*Context, ICmpInst::ICMP_SGE, LHS, RHS);
    else
      return new ICmpInst(*Context, ICmpInst::ICMP_UGE, LHS, RHS);
  case 4:
    if (sign)
      return new ICmpInst(*Context, ICmpInst::ICMP_SLT, LHS, RHS);
    else
      return new ICmpInst(*Context, ICmpInst::ICMP_ULT, LHS, RHS);
  case 5: return new ICmpInst(*Context, ICmpInst::ICMP_NE,  LHS, RHS);
  case 6:
    if (sign)
      return new ICmpInst(*Context, ICmpInst::ICMP_SLE, LHS, RHS);
    else
      return new ICmpInst(*Context, ICmpInst::ICMP_ULE, LHS, RHS);
  case 7: return ConstantInt::getTrue(*Context);
  }
}
/// getFCmpValue - This is the complement of getFCmpCode, which turns an
/// opcode and two operands into either a constant true or a new FCmp
/// instruction. isordered is passed in to determine which kind of predicate
/// to use in the new fcmp instruction.
static Value *getFCmpValue(bool isordered, unsigned code,
                           Value *LHS, Value *RHS, LLVMContext *Context) {
  switch (code) {
  default: llvm_unreachable("Illegal FCmp code!");
  case 0:
    if (isordered)
      return new FCmpInst(*Context, FCmpInst::FCMP_ORD, LHS, RHS);
    else
      return new FCmpInst(*Context, FCmpInst::FCMP_UNO, LHS, RHS);
  case 1:
    if (isordered)
      return new FCmpInst(*Context, FCmpInst::FCMP_OGT, LHS, RHS);
    else
      return new FCmpInst(*Context, FCmpInst::FCMP_UGT, LHS, RHS);
  case 2:
    if (isordered)
      return new FCmpInst(*Context, FCmpInst::FCMP_OEQ, LHS, RHS);
    else
      return new FCmpInst(*Context, FCmpInst::FCMP_UEQ, LHS, RHS);
  case 3:
    if (isordered)
      return new FCmpInst(*Context, FCmpInst::FCMP_OGE, LHS, RHS);
    else
      return new FCmpInst(*Context, FCmpInst::FCMP_UGE, LHS, RHS);
  case 4:
    if (isordered)
      return new FCmpInst(*Context, FCmpInst::FCMP_OLT, LHS, RHS);
    else
      return new FCmpInst(*Context, FCmpInst::FCMP_ULT, LHS, RHS);
  case 5:
    if (isordered)
      return new FCmpInst(*Context, FCmpInst::FCMP_ONE, LHS, RHS);
    else
      return new FCmpInst(*Context, FCmpInst::FCMP_UNE, LHS, RHS);
  case 6:
    if (isordered)
      return new FCmpInst(*Context, FCmpInst::FCMP_OLE, LHS, RHS);
    else
      return new FCmpInst(*Context, FCmpInst::FCMP_ULE, LHS, RHS);
  case 7: return ConstantInt::getTrue(*Context);
  }
}
/// PredicatesFoldable - Return true if both predicates match sign or if at
/// least one of them is an equality comparison (which is signless).
static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
  return (ICmpInst::isSignedPredicate(p1) == ICmpInst::isSignedPredicate(p2)) ||
         (ICmpInst::isSignedPredicate(p1) && ICmpInst::isEquality(p2)) ||
         (ICmpInst::isSignedPredicate(p2) && ICmpInst::isEquality(p1));
}
namespace {

// FoldICmpLogical - Implements (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
struct FoldICmpLogical {
  InstCombiner &IC;
  Value *LHS, *RHS;
  ICmpInst::Predicate pred;
  FoldICmpLogical(InstCombiner &ic, ICmpInst *ICI)
    : IC(ic), LHS(ICI->getOperand(0)), RHS(ICI->getOperand(1)),
      pred(ICI->getPredicate()) {}
  bool shouldApply(Value *V) const {
    if (ICmpInst *ICI = dyn_cast<ICmpInst>(V))
      if (PredicatesFoldable(pred, ICI->getPredicate()))
        return ((ICI->getOperand(0) == LHS && ICI->getOperand(1) == RHS) ||
                (ICI->getOperand(0) == RHS && ICI->getOperand(1) == LHS));
    return false;
  }
  Instruction *apply(Instruction &Log) const {
    ICmpInst *ICI = cast<ICmpInst>(Log.getOperand(0));
    if (ICI->getOperand(0) != LHS) {
      assert(ICI->getOperand(1) == LHS);
      ICI->swapOperands();  // Swap the LHS and RHS of the ICmp
    }

    ICmpInst *RHSICI = cast<ICmpInst>(Log.getOperand(1));
    unsigned LHSCode = getICmpCode(ICI);
    unsigned RHSCode = getICmpCode(RHSICI);
    unsigned Code;
    switch (Log.getOpcode()) {
    case Instruction::And: Code = LHSCode & RHSCode; break;
    case Instruction::Or:  Code = LHSCode | RHSCode; break;
    case Instruction::Xor: Code = LHSCode ^ RHSCode; break;
    default: llvm_unreachable("Illegal logical opcode!"); return 0;
    }

    bool isSigned = ICmpInst::isSignedPredicate(RHSICI->getPredicate()) ||
                    ICmpInst::isSignedPredicate(ICI->getPredicate());

    Value *RV = getICmpValue(isSigned, Code, LHS, RHS, IC.getContext());
    if (Instruction *I = dyn_cast<Instruction>(RV))
      return I;
    // Otherwise, it's a constant boolean value...
    return IC.ReplaceInstUsesWith(Log, RV);
  }
};
} // end anonymous namespace
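// For example, FoldICmpLogical turns
//   (icmp eq %a, %b) | (icmp slt %a, %b)  -->  icmp sle %a, %b
// because code(eq) == 2 and code(slt) == 4, and 2|4 == 6 decodes to "sle".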
// OptAndOp - This handles expressions of the form ((val OP C1) & C2). Where
// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
// guaranteed to be a binary operator.
Instruction *InstCombiner::OptAndOp(Instruction *Op,
                                    ConstantInt *OpRHS,
                                    ConstantInt *AndRHS,
                                    BinaryOperator &TheAnd) {
  Value *X = Op->getOperand(0);
  Constant *Together = 0;
  if (!Op->isShift())
    Together = ConstantExpr::getAnd(AndRHS, OpRHS);

  switch (Op->getOpcode()) {
  case Instruction::Xor:
    if (Op->hasOneUse()) {
      // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
      Instruction *And = BinaryOperator::CreateAnd(X, AndRHS);
      InsertNewInstBefore(And, TheAnd);
      And->takeName(Op);
      return BinaryOperator::CreateXor(And, Together);
    }
    break;
  case Instruction::Or:
    if (Together == AndRHS) // (X | C) & C --> C
      return ReplaceInstUsesWith(TheAnd, AndRHS);

    if (Op->hasOneUse() && Together != OpRHS) {
      // (X | C1) & C2 --> (X | (C1&C2)) & C2
      Instruction *Or = BinaryOperator::CreateOr(X, Together);
      InsertNewInstBefore(Or, TheAnd);
      Or->takeName(Op);
      return BinaryOperator::CreateAnd(Or, AndRHS);
    }
    break;
  case Instruction::Add:
    if (Op->hasOneUse()) {
      // Adding a one to a single bit bit-field should be turned into an XOR
      // of the bit. First thing to check is to see if this AND is with a
      // single bit constant.
      const APInt& AndRHSV = cast<ConstantInt>(AndRHS)->getValue();

      // If there is only one bit set...
      if (isOneBitSet(cast<ConstantInt>(AndRHS))) {
        // Ok, at this point, we know that we are masking the result of the
        // ADD down to exactly one bit. If the constant we are adding has
        // no bits set below this bit, then we can eliminate the ADD.
        const APInt& AddRHS = cast<ConstantInt>(OpRHS)->getValue();

        // Check to see if any bits below the one bit set in AndRHSV are set.
        if ((AddRHS & (AndRHSV-1)) == 0) {
          // If not, the only thing that can effect the output of the AND is
          // the bit specified by AndRHSV. If that bit is set, the effect of
          // the XOR is to toggle the bit. If it is clear, then the ADD has
          // no effect.
          if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
            TheAnd.setOperand(0, X);
            return &TheAnd;
          } else {
            // Pull the XOR out of the AND.
            Instruction *NewAnd = BinaryOperator::CreateAnd(X, AndRHS);
            InsertNewInstBefore(NewAnd, TheAnd);
            NewAnd->takeName(Op);
            return BinaryOperator::CreateXor(NewAnd, AndRHS);
          }
        }
      }
    }
    break;

  case Instruction::Shl: {
    // We know that the AND will not produce any of the bits shifted in, so if
    // the anded constant includes them, clear them now!
    uint32_t BitWidth = AndRHS->getType()->getBitWidth();
    uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
    APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
    ConstantInt *CI = ConstantInt::get(*Context, AndRHS->getValue() & ShlMask);

    if (CI->getValue() == ShlMask) {
      // Masking out bits that the shift already masks.
      return ReplaceInstUsesWith(TheAnd, Op);   // No need for the and.
    } else if (CI != AndRHS) {                  // Reducing bits set in and.
      TheAnd.setOperand(1, CI);
      return &TheAnd;
    }
    break;
  }
  case Instruction::LShr: {
    // We know that the AND will not produce any of the bits shifted in, so if
    // the anded constant includes them, clear them now! This only applies to
    // unsigned shifts, because a signed shr may bring in set bits!
    uint32_t BitWidth = AndRHS->getType()->getBitWidth();
    uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
    APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
    ConstantInt *CI = ConstantInt::get(*Context, AndRHS->getValue() & ShrMask);

    if (CI->getValue() == ShrMask) {
      // Masking out bits that the shift already masks.
      return ReplaceInstUsesWith(TheAnd, Op);
    } else if (CI != AndRHS) {
      TheAnd.setOperand(1, CI);  // Reduce bits set in and cst.
      return &TheAnd;
    }
    break;
  }
  case Instruction::AShr:
    // Signed shr.
    // See if this is shifting in some sign extension, then masking it out
    // with an and.
    if (Op->hasOneUse()) {
      uint32_t BitWidth = AndRHS->getType()->getBitWidth();
      uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
      APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
      Constant *C = ConstantInt::get(*Context, AndRHS->getValue() & ShrMask);
      if (C == AndRHS) {          // Masking out bits shifted in.
        // (Val ashr C1) & C2 -> (Val lshr C1) & C2
        // Make the argument unsigned.
        Value *ShVal = Op->getOperand(0);
        ShVal = InsertNewInstBefore(
            BinaryOperator::CreateLShr(ShVal, OpRHS,
                                       Op->getName()), TheAnd);
        return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
      }
    }
    break;
  }
  return 0;
}
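// For example, OptAndOp rewrites:
//   ((%x ^ 5) & 12)  -->  ((%x & 12) ^ 4)
//   ((%x + 8) & 8)   -->  ((%x & 8) ^ 8)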
/// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is
/// true, otherwise (V < Lo || V >= Hi). In practice, we emit the more efficient
/// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates
/// whether to treat V, Lo, and Hi as signed or not. IB is the location to
/// insert new instructions.
Instruction *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
                                           bool isSigned, bool Inside,
                                           Instruction &IB) {
  assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
            ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
         "Lo is not <= Hi in range emission code!");

  if (Inside) {
    if (Lo == Hi)  // Trivially false.
      return new ICmpInst(*Context, ICmpInst::ICMP_NE, V, V);

    // V >= Min && V < Hi --> V < Hi
    if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
      ICmpInst::Predicate pred = (isSigned ?
        ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
      return new ICmpInst(*Context, pred, V, Hi);
    }

    // Emit V-Lo <u Hi-Lo
    Constant *NegLo = ConstantExpr::getNeg(Lo);
    Instruction *Add = BinaryOperator::CreateAdd(V, NegLo, V->getName()+".off");
    InsertNewInstBefore(Add, IB);
    Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
    return new ICmpInst(*Context, ICmpInst::ICMP_ULT, Add, UpperBound);
  }

  if (Lo == Hi)  // Trivially true.
    return new ICmpInst(*Context, ICmpInst::ICMP_EQ, V, V);

  // V < Min || V >= Hi -> V > Hi-1
  Hi = SubOne(cast<ConstantInt>(Hi), Context);
  if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
    ICmpInst::Predicate pred = (isSigned ?
        ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
    return new ICmpInst(*Context, pred, V, Hi);
  }

  // Emit V-Lo >u Hi-1-Lo
  // Note that Hi has already had one subtracted from it, above.
  ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
  Instruction *Add = BinaryOperator::CreateAdd(V, NegLo, V->getName()+".off");
  InsertNewInstBefore(Add, IB);
  Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
  return new ICmpInst(*Context, ICmpInst::ICMP_UGT, Add, LowerBound);
}
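// For example, with Inside == true, Lo == 5 and Hi == 10, InsertRangeTest emits
//   %off = add i32 %V, -5
//   %cmp = icmp ult i32 %off, 5
// which is true exactly when 5 <= %V and %V < 10 (unsigned).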
// isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
// any number of 0s on either side. The 1s are allowed to wrap from LSB to
// MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
// not, since all 1s are not contiguous.
static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
  const APInt& V = Val->getValue();
  uint32_t BitWidth = Val->getType()->getBitWidth();
  if (!APIntOps::isShiftedMask(BitWidth, V)) return false;

  // Look for the first zero bit after the run of ones.
  MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
  // Look for the first non-zero bit.
  ME = V.getActiveBits();
  return true;
}
/// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
/// where isSub determines whether the operator is a sub. If we can fold one of
/// the following xforms:
///
/// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
/// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
/// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
///
/// return (A +/- B).
///
Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
                                        ConstantInt *Mask, bool isSub,
                                        Instruction &I) {
  Instruction *LHSI = dyn_cast<Instruction>(LHS);
  if (!LHSI || LHSI->getNumOperands() != 2 ||
      !isa<ConstantInt>(LHSI->getOperand(1))) return 0;

  ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));

  switch (LHSI->getOpcode()) {
  default: return 0;
  case Instruction::And:
    if (ConstantExpr::getAnd(N, Mask) == Mask) {
      // If the AndRHS is a power of two minus one (0+1+), this is simple.
      if ((Mask->getValue().countLeadingZeros() +
           Mask->getValue().countPopulation()) ==
          Mask->getValue().getBitWidth())
        break;

      // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
      // part, we don't need any explicit masks to take them out of A. If that
      // is all N is, ignore it.
      uint32_t MB = 0, ME = 0;
      if (isRunOfOnes(Mask, MB, ME)) {  // begin/end bit of run, inclusive
        uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
        APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
        if (MaskedValueIsZero(RHS, Mask))
          break;
      }
    }
    return 0;
  case Instruction::Or:
  case Instruction::Xor:
    // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
    if ((Mask->getValue().countLeadingZeros() +
         Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
        && ConstantExpr::getAnd(N, Mask)->isNullValue())
      break;
    return 0;
  }

  Instruction *New;
  if (isSub)
    New = BinaryOperator::CreateSub(LHSI->getOperand(0), RHS, "fold");
  else
    New = BinaryOperator::CreateAdd(LHSI->getOperand(0), RHS, "fold");
  return InsertNewInstBefore(New, I);
}
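// For example, with Mask == 0xFF, FoldLogicalPlusAnd returns (A + B) for the
// expression ((A | 0x100) + B) & 0xFF, since 0x100 & 0xFF == 0 means the or'd
// bit can never reach the masked result.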
3750 /// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
3751 Instruction
*InstCombiner::FoldAndOfICmps(Instruction
&I
,
3752 ICmpInst
*LHS
, ICmpInst
*RHS
) {
3754 ConstantInt
*LHSCst
, *RHSCst
;
3755 ICmpInst::Predicate LHSCC
, RHSCC
;
3757 // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
3758 if (!match(LHS
, m_ICmp(LHSCC
, m_Value(Val
),
3759 m_ConstantInt(LHSCst
)), *Context
) ||
3760 !match(RHS
, m_ICmp(RHSCC
, m_Value(Val2
),
3761 m_ConstantInt(RHSCst
)), *Context
))
3764 // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
3765 // where C is a power of 2
3766 if (LHSCst
== RHSCst
&& LHSCC
== RHSCC
&& LHSCC
== ICmpInst::ICMP_ULT
&&
3767 LHSCst
->getValue().isPowerOf2()) {
3768 Instruction
*NewOr
= BinaryOperator::CreateOr(Val
, Val2
);
3769 InsertNewInstBefore(NewOr
, I
);
3770 return new ICmpInst(*Context
, LHSCC
, NewOr
, LHSCst
);
3773 // From here on, we only handle:
3774 // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
3775 if (Val
!= Val2
) return 0;
3777 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
3778 if (LHSCC
== ICmpInst::ICMP_UGE
|| LHSCC
== ICmpInst::ICMP_ULE
||
3779 RHSCC
== ICmpInst::ICMP_UGE
|| RHSCC
== ICmpInst::ICMP_ULE
||
3780 LHSCC
== ICmpInst::ICMP_SGE
|| LHSCC
== ICmpInst::ICMP_SLE
||
3781 RHSCC
== ICmpInst::ICMP_SGE
|| RHSCC
== ICmpInst::ICMP_SLE
)
3784 // We can't fold (ugt x, C) & (sgt x, C2).
3785 if (!PredicatesFoldable(LHSCC
, RHSCC
))
3788 // Ensure that the larger constant is on the RHS.
3790 if (ICmpInst::isSignedPredicate(LHSCC
) ||
3791 (ICmpInst::isEquality(LHSCC
) &&
3792 ICmpInst::isSignedPredicate(RHSCC
)))
3793 ShouldSwap
= LHSCst
->getValue().sgt(RHSCst
->getValue());
3795 ShouldSwap
= LHSCst
->getValue().ugt(RHSCst
->getValue());
3798 std::swap(LHS
, RHS
);
3799 std::swap(LHSCst
, RHSCst
);
3800 std::swap(LHSCC
, RHSCC
);
  // At this point, we know we have two icmp instructions
  // comparing a value against two constants and and'ing the result
  // together.  Because of the above check, we know that we only have
  // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here.  We also know
  // (from the FoldICmpLogical check above) that the two constants
  // are not equal and that the larger constant is on the RHS.
  assert(LHSCst != RHSCst && "Compares not folded above?");
3812 default: llvm_unreachable("Unknown integer condition code!");
3813 case ICmpInst::ICMP_EQ
:
3815 default: llvm_unreachable("Unknown integer condition code!");
3816 case ICmpInst::ICMP_EQ
: // (X == 13 & X == 15) -> false
3817 case ICmpInst::ICMP_UGT
: // (X == 13 & X > 15) -> false
3818 case ICmpInst::ICMP_SGT
: // (X == 13 & X > 15) -> false
3819 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(*Context
));
3820 case ICmpInst::ICMP_NE
: // (X == 13 & X != 15) -> X == 13
3821 case ICmpInst::ICMP_ULT
: // (X == 13 & X < 15) -> X == 13
3822 case ICmpInst::ICMP_SLT
: // (X == 13 & X < 15) -> X == 13
3823 return ReplaceInstUsesWith(I
, LHS
);
3825 case ICmpInst::ICMP_NE
:
3827 default: llvm_unreachable("Unknown integer condition code!");
3828 case ICmpInst::ICMP_ULT
:
3829 if (LHSCst
== SubOne(RHSCst
, Context
)) // (X != 13 & X u< 14) -> X < 13
3830 return new ICmpInst(*Context
, ICmpInst::ICMP_ULT
, Val
, LHSCst
);
3831 break; // (X != 13 & X u< 15) -> no change
3832 case ICmpInst::ICMP_SLT
:
3833 if (LHSCst
== SubOne(RHSCst
, Context
)) // (X != 13 & X s< 14) -> X < 13
3834 return new ICmpInst(*Context
, ICmpInst::ICMP_SLT
, Val
, LHSCst
);
3835 break; // (X != 13 & X s< 15) -> no change
3836 case ICmpInst::ICMP_EQ
: // (X != 13 & X == 15) -> X == 15
3837 case ICmpInst::ICMP_UGT
: // (X != 13 & X u> 15) -> X u> 15
3838 case ICmpInst::ICMP_SGT
: // (X != 13 & X s> 15) -> X s> 15
3839 return ReplaceInstUsesWith(I
, RHS
);
3840 case ICmpInst::ICMP_NE
:
3841 if (LHSCst
== SubOne(RHSCst
, Context
)){// (X != 13 & X != 14) -> X-13 >u 1
3842 Constant
*AddCST
= ConstantExpr::getNeg(LHSCst
);
3843 Instruction
*Add
= BinaryOperator::CreateAdd(Val
, AddCST
,
3844 Val
->getName()+".off");
3845 InsertNewInstBefore(Add
, I
);
3846 return new ICmpInst(*Context
, ICmpInst::ICMP_UGT
, Add
,
3847 ConstantInt::get(Add
->getType(), 1));
3849 break; // (X != 13 & X != 15) -> no change
3852 case ICmpInst::ICMP_ULT
:
3854 default: llvm_unreachable("Unknown integer condition code!");
3855 case ICmpInst::ICMP_EQ
: // (X u< 13 & X == 15) -> false
3856 case ICmpInst::ICMP_UGT
: // (X u< 13 & X u> 15) -> false
3857 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(*Context
));
3858 case ICmpInst::ICMP_SGT
: // (X u< 13 & X s> 15) -> no change
3860 case ICmpInst::ICMP_NE
: // (X u< 13 & X != 15) -> X u< 13
3861 case ICmpInst::ICMP_ULT
: // (X u< 13 & X u< 15) -> X u< 13
3862 return ReplaceInstUsesWith(I
, LHS
);
3863 case ICmpInst::ICMP_SLT
: // (X u< 13 & X s< 15) -> no change
3867 case ICmpInst::ICMP_SLT
:
3869 default: llvm_unreachable("Unknown integer condition code!");
3870 case ICmpInst::ICMP_EQ
: // (X s< 13 & X == 15) -> false
3871 case ICmpInst::ICMP_SGT
: // (X s< 13 & X s> 15) -> false
3872 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(*Context
));
3873 case ICmpInst::ICMP_UGT
: // (X s< 13 & X u> 15) -> no change
3875 case ICmpInst::ICMP_NE
: // (X s< 13 & X != 15) -> X < 13
3876 case ICmpInst::ICMP_SLT
: // (X s< 13 & X s< 15) -> X < 13
3877 return ReplaceInstUsesWith(I
, LHS
);
3878 case ICmpInst::ICMP_ULT
: // (X s< 13 & X u< 15) -> no change
3882 case ICmpInst::ICMP_UGT
:
3884 default: llvm_unreachable("Unknown integer condition code!");
3885 case ICmpInst::ICMP_EQ
: // (X u> 13 & X == 15) -> X == 15
3886 case ICmpInst::ICMP_UGT
: // (X u> 13 & X u> 15) -> X u> 15
3887 return ReplaceInstUsesWith(I
, RHS
);
3888 case ICmpInst::ICMP_SGT
: // (X u> 13 & X s> 15) -> no change
3890 case ICmpInst::ICMP_NE
:
3891 if (RHSCst
== AddOne(LHSCst
, Context
)) // (X u> 13 & X != 14) -> X u> 14
3892 return new ICmpInst(*Context
, LHSCC
, Val
, RHSCst
);
3893 break; // (X u> 13 & X != 15) -> no change
3894 case ICmpInst::ICMP_ULT
: // (X u> 13 & X u< 15) -> (X-14) <u 1
3895 return InsertRangeTest(Val
, AddOne(LHSCst
, Context
),
3896 RHSCst
, false, true, I
);
3897 case ICmpInst::ICMP_SLT
: // (X u> 13 & X s< 15) -> no change
3901 case ICmpInst::ICMP_SGT
:
3903 default: llvm_unreachable("Unknown integer condition code!");
3904 case ICmpInst::ICMP_EQ
: // (X s> 13 & X == 15) -> X == 15
3905 case ICmpInst::ICMP_SGT
: // (X s> 13 & X s> 15) -> X s> 15
3906 return ReplaceInstUsesWith(I
, RHS
);
3907 case ICmpInst::ICMP_UGT
: // (X s> 13 & X u> 15) -> no change
3909 case ICmpInst::ICMP_NE
:
3910 if (RHSCst
== AddOne(LHSCst
, Context
)) // (X s> 13 & X != 14) -> X s> 14
3911 return new ICmpInst(*Context
, LHSCC
, Val
, RHSCst
);
3912 break; // (X s> 13 & X != 15) -> no change
3913 case ICmpInst::ICMP_SLT
: // (X s> 13 & X s< 15) -> (X-14) s< 1
3914 return InsertRangeTest(Val
, AddOne(LHSCst
, Context
),
3915 RHSCst
, true, true, I
);
3916 case ICmpInst::ICMP_ULT
: // (X s> 13 & X u< 15) -> no change
3925 Instruction
*InstCombiner::FoldAndOfFCmps(Instruction
&I
, FCmpInst
*LHS
,
3928 if (LHS
->getPredicate() == FCmpInst::FCMP_ORD
&&
3929 RHS
->getPredicate() == FCmpInst::FCMP_ORD
) {
3930 // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
3931 if (ConstantFP
*LHSC
= dyn_cast
<ConstantFP
>(LHS
->getOperand(1)))
3932 if (ConstantFP
*RHSC
= dyn_cast
<ConstantFP
>(RHS
->getOperand(1))) {
3933 // If either of the constants are nans, then the whole thing returns
3935 if (LHSC
->getValueAPF().isNaN() || RHSC
->getValueAPF().isNaN())
3936 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(*Context
));
3937 return new FCmpInst(*Context
, FCmpInst::FCMP_ORD
,
3938 LHS
->getOperand(0), RHS
->getOperand(0));
3941 // Handle vector zeros. This occurs because the canonical form of
3942 // "fcmp ord x,x" is "fcmp ord x, 0".
3943 if (isa
<ConstantAggregateZero
>(LHS
->getOperand(1)) &&
3944 isa
<ConstantAggregateZero
>(RHS
->getOperand(1)))
3945 return new FCmpInst(*Context
, FCmpInst::FCMP_ORD
,
3946 LHS
->getOperand(0), RHS
->getOperand(0));
3950 Value
*Op0LHS
= LHS
->getOperand(0), *Op0RHS
= LHS
->getOperand(1);
3951 Value
*Op1LHS
= RHS
->getOperand(0), *Op1RHS
= RHS
->getOperand(1);
3952 FCmpInst::Predicate Op0CC
= LHS
->getPredicate(), Op1CC
= RHS
->getPredicate();
3955 if (Op0LHS
== Op1RHS
&& Op0RHS
== Op1LHS
) {
3956 // Swap RHS operands to match LHS.
3957 Op1CC
= FCmpInst::getSwappedPredicate(Op1CC
);
3958 std::swap(Op1LHS
, Op1RHS
);
3961 if (Op0LHS
== Op1LHS
&& Op0RHS
== Op1RHS
) {
3962 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
3964 return new FCmpInst(*Context
, (FCmpInst::Predicate
)Op0CC
, Op0LHS
, Op0RHS
);
3966 if (Op0CC
== FCmpInst::FCMP_FALSE
|| Op1CC
== FCmpInst::FCMP_FALSE
)
3967 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(*Context
));
3968 if (Op0CC
== FCmpInst::FCMP_TRUE
)
3969 return ReplaceInstUsesWith(I
, RHS
);
3970 if (Op1CC
== FCmpInst::FCMP_TRUE
)
3971 return ReplaceInstUsesWith(I
, LHS
);
3975 unsigned Op0Pred
= getFCmpCode(Op0CC
, Op0Ordered
);
3976 unsigned Op1Pred
= getFCmpCode(Op1CC
, Op1Ordered
);
3978 std::swap(LHS
, RHS
);
3979 std::swap(Op0Pred
, Op1Pred
);
3980 std::swap(Op0Ordered
, Op1Ordered
);
3983 // uno && ueq -> uno && (uno || eq) -> ueq
3984 // ord && olt -> ord && (ord && lt) -> olt
3985 if (Op0Ordered
== Op1Ordered
)
3986 return ReplaceInstUsesWith(I
, RHS
);
3988 // uno && oeq -> uno && (ord && eq) -> false
3989 // uno && ord -> false
3991 return ReplaceInstUsesWith(I
, ConstantInt::getFalse(*Context
));
3992 // ord && ueq -> ord && (uno || eq) -> oeq
3993 return cast
<Instruction
>(getFCmpValue(true, Op1Pred
,
3994 Op0LHS
, Op0RHS
, Context
));
4002 Instruction
*InstCombiner::visitAnd(BinaryOperator
&I
) {
4003 bool Changed
= SimplifyCommutative(I
);
4004 Value
*Op0
= I
.getOperand(0), *Op1
= I
.getOperand(1);
4006 if (isa
<UndefValue
>(Op1
)) // X & undef -> 0
4007 return ReplaceInstUsesWith(I
, Constant::getNullValue(I
.getType()));
4011 return ReplaceInstUsesWith(I
, Op1
);
4013 // See if we can simplify any instructions used by the instruction whose sole
4014 // purpose is to compute bits we don't care about.
4015 if (SimplifyDemandedInstructionBits(I
))
4017 if (isa
<VectorType
>(I
.getType())) {
4018 if (ConstantVector
*CP
= dyn_cast
<ConstantVector
>(Op1
)) {
4019 if (CP
->isAllOnesValue()) // X & <-1,-1> -> X
4020 return ReplaceInstUsesWith(I
, I
.getOperand(0));
4021 } else if (isa
<ConstantAggregateZero
>(Op1
)) {
4022 return ReplaceInstUsesWith(I
, Op1
); // X & <0,0> -> <0,0>
4026 if (ConstantInt
*AndRHS
= dyn_cast
<ConstantInt
>(Op1
)) {
4027 const APInt
& AndRHSMask
= AndRHS
->getValue();
4028 APInt
NotAndRHS(~AndRHSMask
);
4030 // Optimize a variety of ((val OP C1) & C2) combinations...
4031 if (isa
<BinaryOperator
>(Op0
)) {
4032 Instruction
*Op0I
= cast
<Instruction
>(Op0
);
4033 Value
*Op0LHS
= Op0I
->getOperand(0);
4034 Value
*Op0RHS
= Op0I
->getOperand(1);
4035 switch (Op0I
->getOpcode()) {
4036 case Instruction::Xor
:
4037 case Instruction::Or
:
4038 // If the mask is only needed on one incoming arm, push it up.
4039 if (Op0I
->hasOneUse()) {
4040 if (MaskedValueIsZero(Op0LHS
, NotAndRHS
)) {
4041 // Not masking anything out for the LHS, move to RHS.
4042 Instruction
*NewRHS
= BinaryOperator::CreateAnd(Op0RHS
, AndRHS
,
4043 Op0RHS
->getName()+".masked");
4044 InsertNewInstBefore(NewRHS
, I
);
4045 return BinaryOperator::Create(
4046 cast
<BinaryOperator
>(Op0I
)->getOpcode(), Op0LHS
, NewRHS
);
4048 if (!isa
<Constant
>(Op0RHS
) &&
4049 MaskedValueIsZero(Op0RHS
, NotAndRHS
)) {
4050 // Not masking anything out for the RHS, move to LHS.
4051 Instruction
*NewLHS
= BinaryOperator::CreateAnd(Op0LHS
, AndRHS
,
4052 Op0LHS
->getName()+".masked");
4053 InsertNewInstBefore(NewLHS
, I
);
4054 return BinaryOperator::Create(
4055 cast
<BinaryOperator
>(Op0I
)->getOpcode(), NewLHS
, Op0RHS
);
4060 case Instruction::Add
:
4061 // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
4062 // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
4063 // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
4064 if (Value
*V
= FoldLogicalPlusAnd(Op0LHS
, Op0RHS
, AndRHS
, false, I
))
4065 return BinaryOperator::CreateAnd(V
, AndRHS
);
4066 if (Value
*V
= FoldLogicalPlusAnd(Op0RHS
, Op0LHS
, AndRHS
, false, I
))
4067 return BinaryOperator::CreateAnd(V
, AndRHS
); // Add commutes
4070 case Instruction::Sub
:
4071 // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
4072 // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
4073 // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
4074 if (Value
*V
= FoldLogicalPlusAnd(Op0LHS
, Op0RHS
, AndRHS
, true, I
))
4075 return BinaryOperator::CreateAnd(V
, AndRHS
);
4077 // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
4078 // has 1's for all bits that the subtraction with A might affect.
4079 if (Op0I
->hasOneUse()) {
4080 uint32_t BitWidth
= AndRHSMask
.getBitWidth();
4081 uint32_t Zeros
= AndRHSMask
.countLeadingZeros();
4082 APInt Mask
= APInt::getLowBitsSet(BitWidth
, BitWidth
- Zeros
);
4084 ConstantInt
*A
= dyn_cast
<ConstantInt
>(Op0LHS
);
4085 if (!(A
&& A
->isZero()) && // avoid infinite recursion.
4086 MaskedValueIsZero(Op0LHS
, Mask
)) {
4087 Instruction
*NewNeg
= BinaryOperator::CreateNeg(*Context
, Op0RHS
);
4088 InsertNewInstBefore(NewNeg
, I
);
4089 return BinaryOperator::CreateAnd(NewNeg
, AndRHS
);
4094 case Instruction::Shl
:
4095 case Instruction::LShr
:
4096 // (1 << x) & 1 --> zext(x == 0)
4097 // (1 >> x) & 1 --> zext(x == 0)
4098 if (AndRHSMask
== 1 && Op0LHS
== AndRHS
) {
4099 Instruction
*NewICmp
= new ICmpInst(*Context
, ICmpInst::ICMP_EQ
,
4100 Op0RHS
, Constant::getNullValue(I
.getType()));
4101 InsertNewInstBefore(NewICmp
, I
);
4102 return new ZExtInst(NewICmp
, I
.getType());
4107 if (ConstantInt
*Op0CI
= dyn_cast
<ConstantInt
>(Op0I
->getOperand(1)))
4108 if (Instruction
*Res
= OptAndOp(Op0I
, Op0CI
, AndRHS
, I
))
4110 } else if (CastInst
*CI
= dyn_cast
<CastInst
>(Op0
)) {
4111 // If this is an integer truncation or change from signed-to-unsigned, and
4112 // if the source is an and/or with immediate, transform it. This
4113 // frequently occurs for bitfield accesses.
4114 if (Instruction
*CastOp
= dyn_cast
<Instruction
>(CI
->getOperand(0))) {
4115 if ((isa
<TruncInst
>(CI
) || isa
<BitCastInst
>(CI
)) &&
4116 CastOp
->getNumOperands() == 2)
4117 if (ConstantInt
*AndCI
= dyn_cast
<ConstantInt
>(CastOp
->getOperand(1))) {
4118 if (CastOp
->getOpcode() == Instruction::And
) {
4119 // Change: and (cast (and X, C1) to T), C2
4120 // into : and (cast X to T), trunc_or_bitcast(C1)&C2
4121 // This will fold the two constants together, which may allow
4122 // other simplifications.
4123 Instruction
*NewCast
= CastInst::CreateTruncOrBitCast(
4124 CastOp
->getOperand(0), I
.getType(),
4125 CastOp
->getName()+".shrunk");
4126 NewCast
= InsertNewInstBefore(NewCast
, I
);
4127 // trunc_or_bitcast(C1)&C2
4129 ConstantExpr::getTruncOrBitCast(AndCI
,I
.getType());
4130 C3
= ConstantExpr::getAnd(C3
, AndRHS
);
4131 return BinaryOperator::CreateAnd(NewCast
, C3
);
4132 } else if (CastOp
->getOpcode() == Instruction::Or
) {
4133 // Change: and (cast (or X, C1) to T), C2
4134 // into : trunc(C1)&C2 iff trunc(C1)&C2 == C2
4136 ConstantExpr::getTruncOrBitCast(AndCI
,I
.getType());
4137 if (ConstantExpr::getAnd(C3
, AndRHS
) == AndRHS
)
4139 return ReplaceInstUsesWith(I
, AndRHS
);
4145 // Try to fold constant and into select arguments.
4146 if (SelectInst
*SI
= dyn_cast
<SelectInst
>(Op0
))
4147 if (Instruction
*R
= FoldOpIntoSelect(I
, SI
, this))
4149 if (isa
<PHINode
>(Op0
))
4150 if (Instruction
*NV
= FoldOpIntoPhi(I
))
4154 Value
*Op0NotVal
= dyn_castNotVal(Op0
, Context
);
4155 Value
*Op1NotVal
= dyn_castNotVal(Op1
, Context
);
4157 if (Op0NotVal
== Op1
|| Op1NotVal
== Op0
) // A & ~A == ~A & A == 0
4158 return ReplaceInstUsesWith(I
, Constant::getNullValue(I
.getType()));
4160 // (~A & ~B) == (~(A | B)) - De Morgan's Law
4161 if (Op0NotVal
&& Op1NotVal
&& isOnlyUse(Op0
) && isOnlyUse(Op1
)) {
4162 Instruction
*Or
= BinaryOperator::CreateOr(Op0NotVal
, Op1NotVal
,
4163 I
.getName()+".demorgan");
4164 InsertNewInstBefore(Or
, I
);
4165 return BinaryOperator::CreateNot(*Context
, Or
);
4169 Value
*A
= 0, *B
= 0, *C
= 0, *D
= 0;
4170 if (match(Op0
, m_Or(m_Value(A
), m_Value(B
)), *Context
)) {
4171 if (A
== Op1
|| B
== Op1
) // (A | ?) & A --> A
4172 return ReplaceInstUsesWith(I
, Op1
);
4174 // (A|B) & ~(A&B) -> A^B
4175 if (match(Op1
, m_Not(m_And(m_Value(C
), m_Value(D
))), *Context
)) {
4176 if ((A
== C
&& B
== D
) || (A
== D
&& B
== C
))
4177 return BinaryOperator::CreateXor(A
, B
);
4181 if (match(Op1
, m_Or(m_Value(A
), m_Value(B
)), *Context
)) {
4182 if (A
== Op0
|| B
== Op0
) // A & (A | ?) --> A
4183 return ReplaceInstUsesWith(I
, Op0
);
4185 // ~(A&B) & (A|B) -> A^B
4186 if (match(Op0
, m_Not(m_And(m_Value(C
), m_Value(D
))), *Context
)) {
4187 if ((A
== C
&& B
== D
) || (A
== D
&& B
== C
))
4188 return BinaryOperator::CreateXor(A
, B
);
4192 if (Op0
->hasOneUse() &&
4193 match(Op0
, m_Xor(m_Value(A
), m_Value(B
)), *Context
)) {
4194 if (A
== Op1
) { // (A^B)&A -> A&(A^B)
4195 I
.swapOperands(); // Simplify below
4196 std::swap(Op0
, Op1
);
4197 } else if (B
== Op1
) { // (A^B)&B -> B&(B^A)
4198 cast
<BinaryOperator
>(Op0
)->swapOperands();
4199 I
.swapOperands(); // Simplify below
4200 std::swap(Op0
, Op1
);
4204 if (Op1
->hasOneUse() &&
4205 match(Op1
, m_Xor(m_Value(A
), m_Value(B
)), *Context
)) {
4206 if (B
== Op0
) { // B&(A^B) -> B&(B^A)
4207 cast
<BinaryOperator
>(Op1
)->swapOperands();
4210 if (A
== Op0
) { // A&(A^B) -> A & ~B
4211 Instruction
*NotB
= BinaryOperator::CreateNot(*Context
, B
, "tmp");
4212 InsertNewInstBefore(NotB
, I
);
4213 return BinaryOperator::CreateAnd(A
, NotB
);
4217 // (A&((~A)|B)) -> A&B
4218 if (match(Op0
, m_Or(m_Not(m_Specific(Op1
)), m_Value(A
)), *Context
) ||
4219 match(Op0
, m_Or(m_Value(A
), m_Not(m_Specific(Op1
))), *Context
))
4220 return BinaryOperator::CreateAnd(A
, Op1
);
4221 if (match(Op1
, m_Or(m_Not(m_Specific(Op0
)), m_Value(A
)), *Context
) ||
4222 match(Op1
, m_Or(m_Value(A
), m_Not(m_Specific(Op0
))), *Context
))
4223 return BinaryOperator::CreateAnd(A
, Op0
);
4226 if (ICmpInst
*RHS
= dyn_cast
<ICmpInst
>(Op1
)) {
4227 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
4228 if (Instruction
*R
= AssociativeOpt(I
, FoldICmpLogical(*this, RHS
),Context
))
4231 if (ICmpInst
*LHS
= dyn_cast
<ICmpInst
>(Op0
))
4232 if (Instruction
*Res
= FoldAndOfICmps(I
, LHS
, RHS
))
4236 // fold (and (cast A), (cast B)) -> (cast (and A, B))
4237 if (CastInst
*Op0C
= dyn_cast
<CastInst
>(Op0
))
4238 if (CastInst
*Op1C
= dyn_cast
<CastInst
>(Op1
))
4239 if (Op0C
->getOpcode() == Op1C
->getOpcode()) { // same cast kind ?
4240 const Type
*SrcTy
= Op0C
->getOperand(0)->getType();
4241 if (SrcTy
== Op1C
->getOperand(0)->getType() &&
4242 SrcTy
->isIntOrIntVector() &&
4243 // Only do this if the casts both really cause code to be generated.
4244 ValueRequiresCast(Op0C
->getOpcode(), Op0C
->getOperand(0),
4246 ValueRequiresCast(Op1C
->getOpcode(), Op1C
->getOperand(0),
4248 Instruction
*NewOp
= BinaryOperator::CreateAnd(Op0C
->getOperand(0),
4249 Op1C
->getOperand(0),
4251 InsertNewInstBefore(NewOp
, I
);
4252 return CastInst::Create(Op0C
->getOpcode(), NewOp
, I
.getType());
4256 // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
4257 if (BinaryOperator
*SI1
= dyn_cast
<BinaryOperator
>(Op1
)) {
4258 if (BinaryOperator
*SI0
= dyn_cast
<BinaryOperator
>(Op0
))
4259 if (SI0
->isShift() && SI0
->getOpcode() == SI1
->getOpcode() &&
4260 SI0
->getOperand(1) == SI1
->getOperand(1) &&
4261 (SI0
->hasOneUse() || SI1
->hasOneUse())) {
4262 Instruction
*NewOp
=
4263 InsertNewInstBefore(BinaryOperator::CreateAnd(SI0
->getOperand(0),
4265 SI0
->getName()), I
);
4266 return BinaryOperator::Create(SI1
->getOpcode(), NewOp
,
4267 SI1
->getOperand(1));
4271 // If and'ing two fcmp, try combine them into one.
4272 if (FCmpInst
*LHS
= dyn_cast
<FCmpInst
>(I
.getOperand(0))) {
4273 if (FCmpInst
*RHS
= dyn_cast
<FCmpInst
>(I
.getOperand(1)))
4274 if (Instruction
*Res
= FoldAndOfFCmps(I
, LHS
, RHS
))
4278 return Changed
? &I
: 0;
/// CollectBSwapParts - Analyze the specified subexpression and see if it is
/// capable of providing pieces of a bswap. The subexpression provides pieces
/// of a bswap if it is proven that each of the non-zero bytes in the output of
/// the expression came from the corresponding "byte swapped" byte in some other
/// value. For example, if the current subexpression is "(shl i32 %X, 24)" then
/// we know that the expression deposits the low byte of %X into the high byte
/// of the bswap result and that all other bytes are zero. This expression is
/// accepted, and the high byte of ByteValues is set to X to indicate a correct
/// match.
///
/// This function returns true if the match was unsuccessful and false if it
/// succeeded. On entry to the function the "OverallLeftShift" is a signed
/// integer value indicating the number of bytes that the subexpression is
/// later shifted. For example, if the expression is later right shifted by
/// 16 bits, the OverallLeftShift value would be -2 on entry. This is used to
/// specify which byte of ByteValues is actually being set.
///
/// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
/// byte is masked to zero by a user. For example, in (X & 255), X will be
/// processed with a bytemask of 1. Because bytemask is 32-bits, this limits
/// this function to working on up to 32-byte (256 bit) values. ByteMask is
/// always in the local (OverallLeftShift) coordinate space.
///
4304 static bool CollectBSwapParts(Value
*V
, int OverallLeftShift
, uint32_t ByteMask
,
4305 SmallVector
<Value
*, 8> &ByteValues
) {
4306 if (Instruction
*I
= dyn_cast
<Instruction
>(V
)) {
4307 // If this is an or instruction, it may be an inner node of the bswap.
4308 if (I
->getOpcode() == Instruction::Or
) {
4309 return CollectBSwapParts(I
->getOperand(0), OverallLeftShift
, ByteMask
,
4311 CollectBSwapParts(I
->getOperand(1), OverallLeftShift
, ByteMask
,
4315 // If this is a logical shift by a constant multiple of 8, recurse with
4316 // OverallLeftShift and ByteMask adjusted.
4317 if (I
->isLogicalShift() && isa
<ConstantInt
>(I
->getOperand(1))) {
4319 cast
<ConstantInt
>(I
->getOperand(1))->getLimitedValue(~0U);
4320 // Ensure the shift amount is defined and of a byte value.
4321 if ((ShAmt
& 7) || (ShAmt
> 8*ByteValues
.size()))
4324 unsigned ByteShift
= ShAmt
>> 3;
4325 if (I
->getOpcode() == Instruction::Shl
) {
4326 // X << 2 -> collect(X, +2)
4327 OverallLeftShift
+= ByteShift
;
4328 ByteMask
>>= ByteShift
;
4330 // X >>u 2 -> collect(X, -2)
4331 OverallLeftShift
-= ByteShift
;
4332 ByteMask
<<= ByteShift
;
4333 ByteMask
&= (~0U >> (32-ByteValues
.size()));
4336 if (OverallLeftShift
>= (int)ByteValues
.size()) return true;
4337 if (OverallLeftShift
<= -(int)ByteValues
.size()) return true;
4339 return CollectBSwapParts(I
->getOperand(0), OverallLeftShift
, ByteMask
,
4343 // If this is a logical 'and' with a mask that clears bytes, clear the
4344 // corresponding bytes in ByteMask.
4345 if (I
->getOpcode() == Instruction::And
&&
4346 isa
<ConstantInt
>(I
->getOperand(1))) {
4347 // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
4348 unsigned NumBytes
= ByteValues
.size();
4349 APInt
Byte(I
->getType()->getPrimitiveSizeInBits(), 255);
4350 const APInt
&AndMask
= cast
<ConstantInt
>(I
->getOperand(1))->getValue();
4352 for (unsigned i
= 0; i
!= NumBytes
; ++i
, Byte
<<= 8) {
4353 // If this byte is masked out by a later operation, we don't care what
4355 if ((ByteMask
& (1 << i
)) == 0)
4358 // If the AndMask is all zeros for this byte, clear the bit.
4359 APInt MaskB
= AndMask
& Byte
;
4361 ByteMask
&= ~(1U << i
);
4365 // If the AndMask is not all ones for this byte, it's not a bytezap.
4369 // Otherwise, this byte is kept.
4372 return CollectBSwapParts(I
->getOperand(0), OverallLeftShift
, ByteMask
,
4377 // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
4378 // the input value to the bswap. Some observations: 1) if more than one byte
4379 // is demanded from this input, then it could not be successfully assembled
4380 // into a byteswap. At least one of the two bytes would not be aligned with
4381 // their ultimate destination.
4382 if (!isPowerOf2_32(ByteMask
)) return true;
4383 unsigned InputByteNo
= CountTrailingZeros_32(ByteMask
);
4385 // 2) The input and ultimate destinations must line up: if byte 3 of an i32
4386 // is demanded, it needs to go into byte 0 of the result. This means that the
4387 // byte needs to be shifted until it lands in the right byte bucket. The
4388 // shift amount depends on the position: if the byte is coming from the high
4389 // part of the value (e.g. byte 3) then it must be shifted right. If from the
4390 // low part, it must be shifted left.
4391 unsigned DestByteNo
= InputByteNo
+ OverallLeftShift
;
4392 if (InputByteNo
< ByteValues
.size()/2) {
4393 if (ByteValues
.size()-1-DestByteNo
!= InputByteNo
)
4396 if (ByteValues
.size()-1-DestByteNo
!= InputByteNo
)
4400 // If the destination byte value is already defined, the values are or'd
4401 // together, which isn't a bswap (unless it's an or of the same bits).
4402 if (ByteValues
[DestByteNo
] && ByteValues
[DestByteNo
] != V
)
4404 ByteValues
[DestByteNo
] = V
;
/// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
/// If so, insert the new bswap intrinsic and return it.
Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
  const IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
  if (!ITy || ITy->getBitWidth() % 16 ||
      // ByteMask only allows up to 32-byte values.
      ITy->getBitWidth() > 32*8)
    return 0;   // Can only bswap pairs of bytes.  Can't do vectors.

  /// ByteValues - For each byte of the result, we keep track of which value
  /// defines each byte.
  SmallVector<Value *, 8> ByteValues;
  ByteValues.resize(ITy->getBitWidth()/8);

  // Try to find all the pieces corresponding to the bswap.
  uint32_t ByteMask = ~0U >> (32-ByteValues.size());
  if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
    return 0;

  // Check to see if all of the bytes come from the same value.
  Value *V = ByteValues[0];
  if (V == 0) return 0;  // Didn't find a byte?  Must be zero.

  // Check to make sure that all of the bytes come from the same value.
  for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
    if (ByteValues[i] != V)
      return 0;
  const Type *Tys[] = { ITy };
  Module *M = I.getParent()->getParent()->getParent();
  Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
  return CallInst::Create(F, V);
}
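// For example, MatchBSwap recognizes the i32 idiom
//   (%x << 24) | ((%x & 0xFF00) << 8) | ((%x >> 8) & 0xFF00) | (%x >> 24)
// and replaces it with a single call to llvm.bswap.i32.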
/// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D). Check
/// to see if A is (cond?-1:0) and either B or D is ~(cond?-1:0) or (cond?0:-1);
/// if so, we can simplify this expression to "cond ? C : D or B".
static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
                                         Value *C, Value *D,
                                         LLVMContext *Context) {
  // If A is not a select of -1/0, this cannot match.
  Value *Cond = 0;
  if (!match(A, m_SelectCst<-1, 0>(m_Value(Cond)), *Context))
    return 0;

  // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
  if (match(D, m_SelectCst<0, -1>(m_Specific(Cond)), *Context))
    return SelectInst::Create(Cond, C, B);
  if (match(D, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond))), *Context))
    return SelectInst::Create(Cond, C, B);
  // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
  if (match(B, m_SelectCst<0, -1>(m_Specific(Cond)), *Context))
    return SelectInst::Create(Cond, C, D);
  if (match(B, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond))), *Context))
    return SelectInst::Create(Cond, C, D);
  return 0;
}
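// For example, if A == (select %cond, -1, 0) and D == (select %cond, 0, -1),
// then (A & C) | (B & D) simplifies to (select %cond, C, B).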
4465 /// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
4466 Instruction
*InstCombiner::FoldOrOfICmps(Instruction
&I
,
4467 ICmpInst
*LHS
, ICmpInst
*RHS
) {
4469 ConstantInt
*LHSCst
, *RHSCst
;
4470 ICmpInst::Predicate LHSCC
, RHSCC
;
4472 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
4473 if (!match(LHS
, m_ICmp(LHSCC
, m_Value(Val
),
4474 m_ConstantInt(LHSCst
)), *Context
) ||
4475 !match(RHS
, m_ICmp(RHSCC
, m_Value(Val2
),
4476 m_ConstantInt(RHSCst
)), *Context
))
4479 // From here on, we only handle:
4480 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
4481 if (Val
!= Val2
) return 0;
4483 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
4484 if (LHSCC
== ICmpInst::ICMP_UGE
|| LHSCC
== ICmpInst::ICMP_ULE
||
4485 RHSCC
== ICmpInst::ICMP_UGE
|| RHSCC
== ICmpInst::ICMP_ULE
||
4486 LHSCC
== ICmpInst::ICMP_SGE
|| LHSCC
== ICmpInst::ICMP_SLE
||
4487 RHSCC
== ICmpInst::ICMP_SGE
|| RHSCC
== ICmpInst::ICMP_SLE
)
4490 // We can't fold (ugt x, C) | (sgt x, C2).
4491 if (!PredicatesFoldable(LHSCC
, RHSCC
))
4494 // Ensure that the larger constant is on the RHS.
4496 if (ICmpInst::isSignedPredicate(LHSCC
) ||
4497 (ICmpInst::isEquality(LHSCC
) &&
4498 ICmpInst::isSignedPredicate(RHSCC
)))
4499 ShouldSwap
= LHSCst
->getValue().sgt(RHSCst
->getValue());
4501 ShouldSwap
= LHSCst
->getValue().ugt(RHSCst
->getValue());
4504 std::swap(LHS
, RHS
);
4505 std::swap(LHSCst
, RHSCst
);
4506 std::swap(LHSCC
, RHSCC
);
  // At this point, we know we have two icmp instructions
  // comparing a value against two constants and or'ing the result
  // together.  Because of the above check, we know that we only have
  // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here.  We also know (from the
  // FoldICmpLogical check above) that the two constants are not equal
  // and that the larger constant is on the RHS.
  assert(LHSCst != RHSCst && "Compares not folded above?");
  default: llvm_unreachable("Unknown integer condition code!");
  case ICmpInst::ICMP_EQ:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:
      if (LHSCst == SubOne(RHSCst, Context)) {
        // (X == 13 | X == 14) -> X-13 <u 2
        Constant *AddCST = ConstantExpr::getNeg(LHSCst);
        Instruction *Add = BinaryOperator::CreateAdd(Val, AddCST,
                                                     Val->getName()+".off");
        InsertNewInstBefore(Add, I);
        AddCST = ConstantExpr::getSub(AddOne(RHSCst, Context), LHSCst);
        return new ICmpInst(*Context, ICmpInst::ICMP_ULT, Add, AddCST);
      }
      break;                  // (X == 13 | X == 15) -> no change
    case ICmpInst::ICMP_UGT:  // (X == 13 | X u> 14) -> no change
    case ICmpInst::ICMP_SGT:  // (X == 13 | X s> 14) -> no change
      break;
    case ICmpInst::ICMP_NE:   // (X == 13 | X != 15) -> X != 15
    case ICmpInst::ICMP_ULT:  // (X == 13 | X u< 15) -> X u< 15
    case ICmpInst::ICMP_SLT:  // (X == 13 | X s< 15) -> X s< 15
      return ReplaceInstUsesWith(I, RHS);
    }
    break;
  case ICmpInst::ICMP_NE:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:   // (X != 13 | X == 15) -> X != 13
    case ICmpInst::ICMP_UGT:  // (X != 13 | X u> 15) -> X != 13
    case ICmpInst::ICMP_SGT:  // (X != 13 | X s> 15) -> X != 13
      return ReplaceInstUsesWith(I, LHS);
    case ICmpInst::ICMP_NE:   // (X != 13 | X != 15) -> true
    case ICmpInst::ICMP_ULT:  // (X != 13 | X u< 15) -> true
    case ICmpInst::ICMP_SLT:  // (X != 13 | X s< 15) -> true
      return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
    }
    break;
  case ICmpInst::ICMP_ULT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:   // (X u< 13 | X == 14) -> no change
      break;
    case ICmpInst::ICMP_UGT:  // (X u< 13 | X u> 15) -> (X-13) u> 2
      // If RHSCst is [us]MAXINT, it is always false.  Not handling
      // this can cause overflow.
      if (RHSCst->isMaxValue(false))
        return ReplaceInstUsesWith(I, LHS);
      return InsertRangeTest(Val, LHSCst, AddOne(RHSCst, Context),
                             false, false, I);
    case ICmpInst::ICMP_SGT:  // (X u< 13 | X s> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:   // (X u< 13 | X != 15) -> X != 15
    case ICmpInst::ICMP_ULT:  // (X u< 13 | X u< 15) -> X u< 15
      return ReplaceInstUsesWith(I, RHS);
    case ICmpInst::ICMP_SLT:  // (X u< 13 | X s< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_SLT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:   // (X s< 13 | X == 14) -> no change
      break;
    case ICmpInst::ICMP_SGT:  // (X s< 13 | X s> 15) -> (X-13) s> 2
      // If RHSCst is [us]MAXINT, it is always false.  Not handling
      // this can cause overflow.
      if (RHSCst->isMaxValue(true))
        return ReplaceInstUsesWith(I, LHS);
      return InsertRangeTest(Val, LHSCst, AddOne(RHSCst, Context),
                             true, false, I);
    case ICmpInst::ICMP_UGT:  // (X s< 13 | X u> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:   // (X s< 13 | X != 15) -> X != 15
    case ICmpInst::ICMP_SLT:  // (X s< 13 | X s< 15) -> X s< 15
      return ReplaceInstUsesWith(I, RHS);
    case ICmpInst::ICMP_ULT:  // (X s< 13 | X u< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_UGT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:   // (X u> 13 | X == 15) -> X u> 13
    case ICmpInst::ICMP_UGT:  // (X u> 13 | X u> 15) -> X u> 13
      return ReplaceInstUsesWith(I, LHS);
    case ICmpInst::ICMP_SGT:  // (X u> 13 | X s> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:   // (X u> 13 | X != 15) -> true
    case ICmpInst::ICMP_ULT:  // (X u> 13 | X u< 15) -> true
      return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
    case ICmpInst::ICMP_SLT:  // (X u> 13 | X s< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_SGT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:   // (X s> 13 | X == 15) -> X > 13
    case ICmpInst::ICMP_SGT:  // (X s> 13 | X s> 15) -> X > 13
      return ReplaceInstUsesWith(I, LHS);
    case ICmpInst::ICMP_UGT:  // (X s> 13 | X u> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:   // (X s> 13 | X != 15) -> true
    case ICmpInst::ICMP_SLT:  // (X s> 13 | X s< 15) -> true
      return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
    case ICmpInst::ICMP_ULT:  // (X s> 13 | X u< 15) -> no change
      break;
    }
    break;
  }
  return 0;
}
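// Illustrative IR sketch of the adjacent-constant merge handled above (not
// taken from the original source or test suite): for an i32 %X,
//   %a = icmp eq i32 %X, 13
//   %b = icmp eq i32 %X, 14
//   %c = or i1 %a, %b
// is rewritten into
//   %X.off = add i32 %X, -13
//   %c     = icmp ult i32 %X.off, 2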
Instruction *InstCombiner::FoldOrOfFCmps(Instruction &I, FCmpInst *LHS,
                                         FCmpInst *RHS) {
  if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
      RHS->getPredicate() == FCmpInst::FCMP_UNO &&
      LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
    if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
      if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
        // If either of the constants are nans, then the whole thing returns
        // true.
        if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
          return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));

        // Otherwise, no need to compare the two constants, compare the
        // rest.
        return new FCmpInst(*Context, FCmpInst::FCMP_UNO,
                            LHS->getOperand(0), RHS->getOperand(0));
      }

    // Handle vector zeros.  This occurs because the canonical form of
    // "fcmp uno x,x" is "fcmp uno x, 0".
    if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
        isa<ConstantAggregateZero>(RHS->getOperand(1)))
      return new FCmpInst(*Context, FCmpInst::FCMP_UNO,
                          LHS->getOperand(0), RHS->getOperand(0));
    return 0;
  }

  Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
  Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
  FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();

  if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
    // Swap RHS operands to match LHS.
    Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
    std::swap(Op1LHS, Op1RHS);
  }
  if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
    // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
    if (Op0CC == Op1CC)
      return new FCmpInst(*Context, (FCmpInst::Predicate)Op0CC,
                          Op0LHS, Op0RHS);
    if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE)
      return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
    if (Op0CC == FCmpInst::FCMP_FALSE)
      return ReplaceInstUsesWith(I, RHS);
    if (Op1CC == FCmpInst::FCMP_FALSE)
      return ReplaceInstUsesWith(I, LHS);

    bool Op0Ordered, Op1Ordered;
    unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
    unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
    if (Op0Ordered == Op1Ordered) {
      // If both are ordered or unordered, return a new fcmp with
      // or'ed predicates.
      Value *RV = getFCmpValue(Op0Ordered, Op0Pred|Op1Pred,
                               Op0LHS, Op0RHS, Context);
      if (Instruction *I = dyn_cast<Instruction>(RV))
        return I;
      // Otherwise, it's a constant boolean value...
      return ReplaceInstUsesWith(I, RV);
    }
  }
  return 0;
}
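// Illustrative example of the uno-merging fold above (assuming %x and %y have
// the same FP type and neither constant is a NaN):
//   (fcmp uno double %x, 1.0) | (fcmp uno double %y, 2.0)
// becomes a single "fcmp uno double %x, %y", since only the NaN-ness of the
// two variable operands matters.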
/// FoldOrWithConstants - This helper function folds:
///
///     ((A | B) & C1) | (B & C2)
///
/// into:
///
///     (A & C1) | B
///
/// when the XOR of the two constants is "all ones" (-1).
Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
                                               Value *A, Value *B, Value *C) {
  ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
  if (!CI1) return 0;

  Value *V1 = 0;
  ConstantInt *CI2 = 0;
  if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)), *Context)) return 0;

  APInt Xor = CI1->getValue() ^ CI2->getValue();
  if (!Xor.isAllOnesValue()) return 0;

  if (V1 == A || V1 == B) {
    Instruction *NewOp =
      InsertNewInstBefore(BinaryOperator::CreateAnd((V1 == A) ? B : A, CI1), I);
    return BinaryOperator::CreateOr(NewOp, V1);
  }

  return 0;
}
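// Worked instance of the fold above with C1 = 1 and C2 = -2 (1 ^ -2 is all
// ones, so the two masks are complementary):
//   ((A | B) & 1) | (B & -2)  -->  (A & 1) | B
// which is the shape the two call sites in visitOr hand to this helper.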
Instruction *InstCombiner::visitOr(BinaryOperator &I) {
  bool Changed = SimplifyCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (isa<UndefValue>(Op1))                       // X | undef -> -1
    return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));

  // or X, X = X
  if (Op0 == Op1)
    return ReplaceInstUsesWith(I, Op0);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;
  if (isa<VectorType>(I.getType())) {
    if (isa<ConstantAggregateZero>(Op1)) {
      return ReplaceInstUsesWith(I, Op0);  // X | <0,0> -> X
    } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Op1)) {
      if (CP->isAllOnesValue())            // X | <-1,-1> -> <-1,-1>
        return ReplaceInstUsesWith(I, I.getOperand(1));
    }
  }

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    ConstantInt *C1 = 0; Value *X = 0;
    // (X & C1) | C2 --> (X | C2) & (C1|C2)
    if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1)), *Context) &&
        isOnlyUse(Op0)) {
      Instruction *Or = BinaryOperator::CreateOr(X, RHS);
      InsertNewInstBefore(Or, I);
      Or->takeName(Op0);
      return BinaryOperator::CreateAnd(Or,
               ConstantInt::get(*Context, RHS->getValue() | C1->getValue()));
    }

    // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
    if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1)), *Context) &&
        isOnlyUse(Op0)) {
      Instruction *Or = BinaryOperator::CreateOr(X, RHS);
      InsertNewInstBefore(Or, I);
      Or->takeName(Op0);
      return BinaryOperator::CreateXor(Or,
               ConstantInt::get(*Context, C1->getValue() & ~RHS->getValue()));
    }

    // Try to fold constant and into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI, this))
        return R;
    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  Value *A = 0, *B = 0;
  ConstantInt *C1 = 0, *C2 = 0;

  if (match(Op0, m_And(m_Value(A), m_Value(B)), *Context))
    if (A == Op1 || B == Op1)    // (A & ?) | A --> A
      return ReplaceInstUsesWith(I, Op1);
  if (match(Op1, m_And(m_Value(A), m_Value(B)), *Context))
    if (A == Op0 || B == Op0)    // A | (A & ?) --> A
      return ReplaceInstUsesWith(I, Op0);

  // (A | B) | C  and  A | (B | C)                  -> bswap if possible.
  // (A >> B) | (C << D)  and  (A << B) | (B >> C)  -> bswap if possible.
  if (match(Op0, m_Or(m_Value(), m_Value()), *Context) ||
      match(Op1, m_Or(m_Value(), m_Value()), *Context) ||
      (match(Op0, m_Shift(m_Value(), m_Value()), *Context) &&
       match(Op1, m_Shift(m_Value(), m_Value()), *Context))) {
    if (Instruction *BSwap = MatchBSwap(I))
      return BSwap;
  }

  // (X^C)|Y -> (X|Y)^C iff Y&C == 0
  if (Op0->hasOneUse() &&
      match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1)), *Context) &&
      MaskedValueIsZero(Op1, C1->getValue())) {
    Instruction *NOr = BinaryOperator::CreateOr(A, Op1);
    InsertNewInstBefore(NOr, I);
    NOr->takeName(Op0);
    return BinaryOperator::CreateXor(NOr, C1);
  }

  // Y|(X^C) -> (X|Y)^C iff Y&C == 0
  if (Op1->hasOneUse() &&
      match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1)), *Context) &&
      MaskedValueIsZero(Op0, C1->getValue())) {
    Instruction *NOr = BinaryOperator::CreateOr(A, Op0);
    InsertNewInstBefore(NOr, I);
    NOr->takeName(Op0);
    return BinaryOperator::CreateXor(NOr, C1);
  }
  // (A & C)|(B & D)
  Value *C = 0, *D = 0;
  if (match(Op0, m_And(m_Value(A), m_Value(C)), *Context) &&
      match(Op1, m_And(m_Value(B), m_Value(D)), *Context)) {
    Value *V1 = 0, *V2 = 0, *V3 = 0;
    C1 = dyn_cast<ConstantInt>(C);
    C2 = dyn_cast<ConstantInt>(D);
    if (C1 && C2) {  // (A & C1)|(B & C2)
      // If we have: ((V + N) & C1) | (V & C2)
      // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
      // replace with V+N.
      if (C1->getValue() == ~C2->getValue()) {
        if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
            match(A, m_Add(m_Value(V1), m_Value(V2)), *Context)) {
          // Add commutes, try both ways.
          if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
            return ReplaceInstUsesWith(I, A);
          if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
            return ReplaceInstUsesWith(I, A);
        }
        // Or commutes, try both ways.
        if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
            match(B, m_Add(m_Value(V1), m_Value(V2)), *Context)) {
          // Add commutes, try both ways.
          if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
            return ReplaceInstUsesWith(I, B);
          if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
            return ReplaceInstUsesWith(I, B);
        }
      }
      V1 = 0; V2 = 0; V3 = 0;
    }

    // Check to see if we have any common things being and'ed.  If so, find the
    // terms for V1 & (V2|V3).
    if (isOnlyUse(Op0) || isOnlyUse(Op1)) {
      if (A == B)      // (A & C)|(A & D) == A & (C|D)
        V1 = A, V2 = C, V3 = D;
      else if (A == D) // (A & C)|(B & A) == A & (B|C)
        V1 = A, V2 = B, V3 = C;
      else if (C == B) // (A & C)|(C & D) == C & (A|D)
        V1 = C, V2 = A, V3 = D;
      else if (C == D) // (A & C)|(B & C) == C & (A|B)
        V1 = C, V2 = A, V3 = B;

      if (V1) {
        Value *Or =
          InsertNewInstBefore(BinaryOperator::CreateOr(V2, V3, "tmp"), I);
        return BinaryOperator::CreateAnd(V1, Or);
      }
    }

    // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) ->  C0 ? A : B, and commuted variants
    if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D, Context))
      return Match;
    if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C, Context))
      return Match;
    if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D, Context))
      return Match;
    if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C, Context))
      return Match;

    // ((A&~B)|(~A&B)) -> A^B
    if ((match(C, m_Not(m_Specific(D)), *Context) &&
         match(B, m_Not(m_Specific(A)), *Context)))
      return BinaryOperator::CreateXor(A, D);
    // ((~B&A)|(~A&B)) -> A^B
    if ((match(A, m_Not(m_Specific(D)), *Context) &&
         match(B, m_Not(m_Specific(C)), *Context)))
      return BinaryOperator::CreateXor(C, D);
    // ((A&~B)|(B&~A)) -> A^B
    if ((match(C, m_Not(m_Specific(B)), *Context) &&
         match(D, m_Not(m_Specific(A)), *Context)))
      return BinaryOperator::CreateXor(A, B);
    // ((~B&A)|(B&~A)) -> A^B
    if ((match(A, m_Not(m_Specific(B)), *Context) &&
         match(D, m_Not(m_Specific(C)), *Context)))
      return BinaryOperator::CreateXor(C, B);
  }

  // (X >> Z) | (Y >> Z)  -> (X|Y) >> Z  for all shifts.
  if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
    if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
      if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
          SI0->getOperand(1) == SI1->getOperand(1) &&
          (SI0->hasOneUse() || SI1->hasOneUse())) {
        Instruction *NewOp =
          InsertNewInstBefore(BinaryOperator::CreateOr(SI0->getOperand(0),
                                                       SI1->getOperand(0),
                                                       SI0->getName()), I);
        return BinaryOperator::Create(SI1->getOpcode(), NewOp,
                                      SI1->getOperand(1));
      }
  }

  // ((A|B)&1)|(B&-2) -> (A&1) | B
  if (match(Op0, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C)), *Context) ||
      match(Op0, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))), *Context)) {
    Instruction *Ret = FoldOrWithConstants(I, Op1, A, B, C);
    if (Ret) return Ret;
  }
  // (B&-2)|((A|B)&1) -> (A&1) | B
  if (match(Op1, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C)), *Context) ||
      match(Op1, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))), *Context)) {
    Instruction *Ret = FoldOrWithConstants(I, Op0, A, B, C);
    if (Ret) return Ret;
  }

  if (match(Op0, m_Not(m_Value(A)), *Context)) {   // ~A | Op1
    if (A == Op1)   // ~A | A == -1
      return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
  } else {
    A = 0;
  }
  // Note, A is still live here!
  if (match(Op1, m_Not(m_Value(B)), *Context)) {   // Op0 | ~B
    if (Op0 == B)
      return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));

    // (~A | ~B) == (~(A & B)) - De Morgan's Law
    if (A && isOnlyUse(Op0) && isOnlyUse(Op1)) {
      Value *And = InsertNewInstBefore(BinaryOperator::CreateAnd(A, B,
                                              I.getName()+".demorgan"), I);
      return BinaryOperator::CreateNot(*Context, And);
    }
  }

  // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
  if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1))) {
    if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS),Context))
      return R;

    if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
      if (Instruction *Res = FoldOrOfICmps(I, LHS, RHS))
        return Res;
  }

  // fold (or (cast A), (cast B)) -> (cast (or A, B))
  if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
    if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
      if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
        if (!isa<ICmpInst>(Op0C->getOperand(0)) ||
            !isa<ICmpInst>(Op1C->getOperand(0))) {
          const Type *SrcTy = Op0C->getOperand(0)->getType();
          if (SrcTy == Op1C->getOperand(0)->getType() &&
              SrcTy->isIntOrIntVector() &&
              // Only do this if the casts both really cause code to be
              // generated.
              ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
                                I.getType(), TD) &&
              ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
                                I.getType(), TD)) {
            Instruction *NewOp = BinaryOperator::CreateOr(Op0C->getOperand(0),
                                                          Op1C->getOperand(0),
                                                          I.getName());
            InsertNewInstBefore(NewOp, I);
            return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
          }
        }
      }
  }

  // (fcmp uno x, c) | (fcmp uno y, c)  -> (fcmp uno x, y)
  if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
    if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
      if (Instruction *Res = FoldOrOfFCmps(I, LHS, RHS))
        return Res;
  }

  return Changed ? &I : 0;
}
namespace {
  // XorSelf - Implements: X ^ X --> 0
  struct VISIBILITY_HIDDEN XorSelf {
    Value *RHS;
    XorSelf(Value *rhs) : RHS(rhs) {}
    bool shouldApply(Value *LHS) const { return LHS == RHS; }
    Instruction *apply(BinaryOperator &Xor) const {
      return &Xor;
    }
  };
}
Instruction *InstCombiner::visitXor(BinaryOperator &I) {
  bool Changed = SimplifyCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (isa<UndefValue>(Op1)) {
    if (isa<UndefValue>(Op0))
      // Handle undef ^ undef -> 0 special case. This is a common
      // idiom (misuse).
      return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
    return ReplaceInstUsesWith(I, Op1);  // X ^ undef -> undef
  }

  // xor X, X = 0, even if X is nested in a sequence of Xor's.
  if (Instruction *Result = AssociativeOpt(I, XorSelf(Op1), Context)) {
    assert(Result == &I && "AssociativeOpt didn't work?"); Result=Result;
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
  }

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;
  if (isa<VectorType>(I.getType()))
    if (isa<ConstantAggregateZero>(Op1))
      return ReplaceInstUsesWith(I, Op0);  // X ^ <0,0> -> X

  // Is this a ~ operation?
  if (Value *NotOp = dyn_castNotVal(&I, Context)) {
    // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
    // ~(~X | Y) === (X & ~Y) - De Morgan's Law
    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
      if (Op0I->getOpcode() == Instruction::And ||
          Op0I->getOpcode() == Instruction::Or) {
        if (dyn_castNotVal(Op0I->getOperand(1), Context)) Op0I->swapOperands();
        if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0), Context)) {
          Instruction *NotY =
            BinaryOperator::CreateNot(*Context, Op0I->getOperand(1),
                                      Op0I->getOperand(1)->getName()+".not");
          InsertNewInstBefore(NotY, I);
          if (Op0I->getOpcode() == Instruction::And)
            return BinaryOperator::CreateOr(Op0NotVal, NotY);
          return BinaryOperator::CreateAnd(Op0NotVal, NotY);
        }
      }
    }
  }

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    if (RHS == ConstantInt::getTrue(*Context) && Op0->hasOneUse()) {
      // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
      if (ICmpInst *ICI = dyn_cast<ICmpInst>(Op0))
        return new ICmpInst(*Context, ICI->getInversePredicate(),
                            ICI->getOperand(0), ICI->getOperand(1));

      if (FCmpInst *FCI = dyn_cast<FCmpInst>(Op0))
        return new FCmpInst(*Context, FCI->getInversePredicate(),
                            FCI->getOperand(0), FCI->getOperand(1));
    }

    // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
    if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
      if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
        if (CI->hasOneUse() && Op0C->hasOneUse()) {
          Instruction::CastOps Opcode = Op0C->getOpcode();
          if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
            if (RHS == ConstantExpr::getCast(Opcode,
                                             ConstantInt::getTrue(*Context),
                                             Op0C->getDestTy())) {
              Instruction *NewCI = InsertNewInstBefore(CmpInst::Create(
                                       *Context,
                                       CI->getOpcode(), CI->getInversePredicate(),
                                       CI->getOperand(0), CI->getOperand(1)), I);
              NewCI->takeName(CI);
              return CastInst::Create(Opcode, NewCI, Op0C->getType());
            }
          }
        }
      }
    }

    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
      // ~(c-X) == X-c-1 == X+(-c-1)
      if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
        if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
          Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
          Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C,
                                            ConstantInt::get(I.getType(), 1));
          return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS);
        }

      if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
        if (Op0I->getOpcode() == Instruction::Add) {
          // ~(X-c) --> (-c-1)-X
          if (RHS->isAllOnesValue()) {
            Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
            return BinaryOperator::CreateSub(
                           ConstantExpr::getSub(NegOp0CI,
                                            ConstantInt::get(I.getType(), 1)),
                           Op0I->getOperand(0));
          } else if (RHS->getValue().isSignBit()) {
            // (X + C) ^ signbit -> (X + C + signbit)
            Constant *C = ConstantInt::get(*Context,
                                   RHS->getValue() + Op0CI->getValue());
            return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
          }
        } else if (Op0I->getOpcode() == Instruction::Or) {
          // (X|C1)^C2 -> X^(C1|C2) iff X&~C1 == 0
          if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
            Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
            // Anything in both C1 and C2 is known to be zero, remove it from
            // NewRHS.
            Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS);
            NewRHS = ConstantExpr::getAnd(NewRHS,
                                          ConstantExpr::getNot(CommonBits));
            AddToWorkList(Op0I);
            I.setOperand(0, Op0I->getOperand(0));
            I.setOperand(1, NewRHS);
            return &I;
          }
        }
      }
    }

    // Try to fold constant and into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI, this))
        return R;
    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  if (Value *X = dyn_castNotVal(Op0, Context))   // ~A ^ A == -1
    if (X == Op1)
      return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));

  if (Value *X = dyn_castNotVal(Op1, Context))   // A ^ ~A == -1
    if (X == Op0)
      return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
*Op1I
= dyn_cast
<BinaryOperator
>(Op1
);
5157 if (match(Op1I
, m_Or(m_Value(A
), m_Value(B
)), *Context
)) {
5158 if (A
== Op0
) { // B^(B|A) == (A|B)^B
5159 Op1I
->swapOperands();
5161 std::swap(Op0
, Op1
);
5162 } else if (B
== Op0
) { // B^(A|B) == (A|B)^B
5163 I
.swapOperands(); // Simplified below.
5164 std::swap(Op0
, Op1
);
5166 } else if (match(Op1I
, m_Xor(m_Specific(Op0
), m_Value(B
)), *Context
)) {
5167 return ReplaceInstUsesWith(I
, B
); // A^(A^B) == B
5168 } else if (match(Op1I
, m_Xor(m_Value(A
), m_Specific(Op0
)), *Context
)) {
5169 return ReplaceInstUsesWith(I
, A
); // A^(B^A) == B
5170 } else if (match(Op1I
, m_And(m_Value(A
), m_Value(B
)), *Context
) &&
5172 if (A
== Op0
) { // A^(A&B) -> A^(B&A)
5173 Op1I
->swapOperands();
5176 if (B
== Op0
) { // A^(B&A) -> (B&A)^A
5177 I
.swapOperands(); // Simplified below.
5178 std::swap(Op0
, Op1
);
5183 BinaryOperator
*Op0I
= dyn_cast
<BinaryOperator
>(Op0
);
5186 if (match(Op0I
, m_Or(m_Value(A
), m_Value(B
)), *Context
) &&
5187 Op0I
->hasOneUse()) {
5188 if (A
== Op1
) // (B|A)^B == (A|B)^B
5190 if (B
== Op1
) { // (A|B)^B == A & ~B
5192 InsertNewInstBefore(BinaryOperator::CreateNot(*Context
,
5194 return BinaryOperator::CreateAnd(A
, NotB
);
5196 } else if (match(Op0I
, m_Xor(m_Specific(Op1
), m_Value(B
)), *Context
)) {
5197 return ReplaceInstUsesWith(I
, B
); // (A^B)^A == B
5198 } else if (match(Op0I
, m_Xor(m_Value(A
), m_Specific(Op1
)), *Context
)) {
5199 return ReplaceInstUsesWith(I
, A
); // (B^A)^A == B
5200 } else if (match(Op0I
, m_And(m_Value(A
), m_Value(B
)), *Context
) &&
5202 if (A
== Op1
) // (A&B)^A -> (B&A)^A
5204 if (B
== Op1
&& // (B&A)^A == ~B & A
5205 !isa
<ConstantInt
>(Op1
)) { // Canonical form is (B&C)^C
5207 InsertNewInstBefore(BinaryOperator::CreateNot(*Context
, A
, "tmp"), I
);
5208 return BinaryOperator::CreateAnd(N
, Op1
);
5213 // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
5214 if (Op0I
&& Op1I
&& Op0I
->isShift() &&
5215 Op0I
->getOpcode() == Op1I
->getOpcode() &&
5216 Op0I
->getOperand(1) == Op1I
->getOperand(1) &&
5217 (Op1I
->hasOneUse() || Op1I
->hasOneUse())) {
5218 Instruction
*NewOp
=
5219 InsertNewInstBefore(BinaryOperator::CreateXor(Op0I
->getOperand(0),
5220 Op1I
->getOperand(0),
5221 Op0I
->getName()), I
);
5222 return BinaryOperator::Create(Op1I
->getOpcode(), NewOp
,
5223 Op1I
->getOperand(1));
5227 Value
*A
, *B
, *C
, *D
;
5228 // (A & B)^(A | B) -> A ^ B
5229 if (match(Op0I
, m_And(m_Value(A
), m_Value(B
)), *Context
) &&
5230 match(Op1I
, m_Or(m_Value(C
), m_Value(D
)), *Context
)) {
5231 if ((A
== C
&& B
== D
) || (A
== D
&& B
== C
))
5232 return BinaryOperator::CreateXor(A
, B
);
5234 // (A | B)^(A & B) -> A ^ B
5235 if (match(Op0I
, m_Or(m_Value(A
), m_Value(B
)), *Context
) &&
5236 match(Op1I
, m_And(m_Value(C
), m_Value(D
)), *Context
)) {
5237 if ((A
== C
&& B
== D
) || (A
== D
&& B
== C
))
5238 return BinaryOperator::CreateXor(A
, B
);
5242 if ((Op0I
->hasOneUse() || Op1I
->hasOneUse()) &&
5243 match(Op0I
, m_And(m_Value(A
), m_Value(B
)), *Context
) &&
5244 match(Op1I
, m_And(m_Value(C
), m_Value(D
)), *Context
)) {
5245 // (X & Y)^(X & Y) -> (Y^Z) & X
5246 Value
*X
= 0, *Y
= 0, *Z
= 0;
5248 X
= A
, Y
= B
, Z
= D
;
5250 X
= A
, Y
= B
, Z
= C
;
5252 X
= B
, Y
= A
, Z
= D
;
5254 X
= B
, Y
= A
, Z
= C
;
5257 Instruction
*NewOp
=
5258 InsertNewInstBefore(BinaryOperator::CreateXor(Y
, Z
, Op0
->getName()), I
);
5259 return BinaryOperator::CreateAnd(NewOp
, X
);
5264 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
5265 if (ICmpInst
*RHS
= dyn_cast
<ICmpInst
>(I
.getOperand(1)))
5266 if (Instruction
*R
= AssociativeOpt(I
, FoldICmpLogical(*this, RHS
),Context
))
5269 // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
5270 if (CastInst
*Op0C
= dyn_cast
<CastInst
>(Op0
)) {
5271 if (CastInst
*Op1C
= dyn_cast
<CastInst
>(Op1
))
5272 if (Op0C
->getOpcode() == Op1C
->getOpcode()) { // same cast kind?
5273 const Type
*SrcTy
= Op0C
->getOperand(0)->getType();
5274 if (SrcTy
== Op1C
->getOperand(0)->getType() && SrcTy
->isInteger() &&
5275 // Only do this if the casts both really cause code to be generated.
5276 ValueRequiresCast(Op0C
->getOpcode(), Op0C
->getOperand(0),
5278 ValueRequiresCast(Op1C
->getOpcode(), Op1C
->getOperand(0),
5280 Instruction
*NewOp
= BinaryOperator::CreateXor(Op0C
->getOperand(0),
5281 Op1C
->getOperand(0),
5283 InsertNewInstBefore(NewOp
, I
);
5284 return CastInst::Create(Op0C
->getOpcode(), NewOp
, I
.getType());
5289 return Changed
? &I
: 0;
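// A small identity worth spelling out for the "~(c-X)" case handled in
// visitXor above: xor with all-ones is bitwise negation, and
// ~(c-X) == -(c-X)-1 == X-c-1 == X+(-c-1), which is why that case adds
// (-c-1) to the sub's second operand.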
static ConstantInt *ExtractElement(Constant *V, Constant *Idx,
                                   LLVMContext *Context) {
  return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
}
static bool HasAddOverflow(ConstantInt *Result,
                           ConstantInt *In1, ConstantInt *In2,
                           bool IsSigned) {
  if (IsSigned) {
    if (In2->getValue().isNegative())
      return Result->getValue().sgt(In1->getValue());
    else
      return Result->getValue().slt(In1->getValue());
  }
  return Result->getValue().ult(In1->getValue());
}
/// AddWithOverflow - Compute Result = In1+In2, returning true if the result
/// overflowed for this type.
static bool AddWithOverflow(Constant *&Result, Constant *In1,
                            Constant *In2, LLVMContext *Context,
                            bool IsSigned = false) {
  Result = ConstantExpr::getAdd(In1, In2);

  if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
      Constant *Idx = ConstantInt::get(Type::Int32Ty, i);
      if (HasAddOverflow(ExtractElement(Result, Idx, Context),
                         ExtractElement(In1, Idx, Context),
                         ExtractElement(In2, Idx, Context),
                         IsSigned))
        return true;
    }
    return false;
  }

  return HasAddOverflow(cast<ConstantInt>(Result),
                        cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                        IsSigned);
}
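// Worked example of the signed check used by HasAddOverflow: in i8,
// 100 + 50 wraps to -106.  Since In2 (50) is non-negative, overflow is
// detected because Result (-106) is signed-less-than In1 (100).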
static bool HasSubOverflow(ConstantInt *Result,
                           ConstantInt *In1, ConstantInt *In2,
                           bool IsSigned) {
  if (IsSigned) {
    if (In2->getValue().isNegative())
      return Result->getValue().slt(In1->getValue());
    else
      return Result->getValue().sgt(In1->getValue());
  }
  return Result->getValue().ugt(In1->getValue());
}
/// SubWithOverflow - Compute Result = In1-In2, returning true if the result
/// overflowed for this type.
static bool SubWithOverflow(Constant *&Result, Constant *In1,
                            Constant *In2, LLVMContext *Context,
                            bool IsSigned = false) {
  Result = ConstantExpr::getSub(In1, In2);

  if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
      Constant *Idx = ConstantInt::get(Type::Int32Ty, i);
      if (HasSubOverflow(ExtractElement(Result, Idx, Context),
                         ExtractElement(In1, Idx, Context),
                         ExtractElement(In2, Idx, Context),
                         IsSigned))
        return true;
    }
    return false;
  }

  return HasSubOverflow(cast<ConstantInt>(Result),
                        cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                        IsSigned);
}
/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
/// code necessary to compute the offset from the base pointer (without adding
/// in the base pointer).  Return the result as a signed integer of intptr size.
static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) {
  TargetData &TD = *IC.getTargetData();
  gep_type_iterator GTI = gep_type_begin(GEP);
  const Type *IntPtrTy = TD.getIntPtrType();
  LLVMContext *Context = IC.getContext();
  Value *Result = Constant::getNullValue(IntPtrTy);

  // Build a mask for high order bits.
  unsigned IntPtrWidth = TD.getPointerSizeInBits();
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
       ++i, ++GTI) {
    Value *Op = *i;
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
      if (OpC->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());

        if (ConstantInt *RC = dyn_cast<ConstantInt>(Result))
          Result =
            ConstantInt::get(*Context,
                             RC->getValue() + APInt(IntPtrWidth, Size));
        else
          Result = IC.InsertNewInstBefore(
                     BinaryOperator::CreateAdd(Result,
                                               ConstantInt::get(IntPtrTy, Size),
                                               GEP->getName()+".offs"), I);
        continue;
      }

      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
      Constant *OC =
              ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
      Scale = ConstantExpr::getMul(OC, Scale);
      if (Constant *RC = dyn_cast<Constant>(Result))
        Result = ConstantExpr::getAdd(RC, Scale);
      else {
        // Emit an add instruction.
        Result = IC.InsertNewInstBefore(
                   BinaryOperator::CreateAdd(Result, Scale,
                                             GEP->getName()+".offs"), I);
      }
      continue;
    }
    // Convert to correct type.
    if (Op->getType() != IntPtrTy) {
      if (Constant *OpC = dyn_cast<Constant>(Op))
        Op = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true);
      else
        Op = IC.InsertNewInstBefore(CastInst::CreateIntegerCast(Op, IntPtrTy,
                                                                true,
                                                      Op->getName()+".c"), I);
    }
    if (Size != 1) {
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
      if (Constant *OpC = dyn_cast<Constant>(Op))
        Op = ConstantExpr::getMul(OpC, Scale);
      else    // We'll let instcombine(mul) convert this to a shl if possible.
        Op = IC.InsertNewInstBefore(BinaryOperator::CreateMul(Op, Scale,
                                                  GEP->getName()+".idx"), I);
    }

    // Emit an add instruction.
    if (isa<Constant>(Op) && isa<Constant>(Result))
      Result = ConstantExpr::getAdd(cast<Constant>(Op),
                                    cast<Constant>(Result));
    else
      Result = IC.InsertNewInstBefore(BinaryOperator::CreateAdd(Op, Result,
                                                  GEP->getName()+".offs"), I);
  }
  return Result;
}
/// EvaluateGEPOffsetExpression - Return a value that can be used to compare
/// the *offset* implied by a GEP to zero.  For example, if we have &A[i], we
/// want to return 'i' for "icmp ne i, 0".  Note that, in general, indices can
/// be complex, and scales are involved.  The above expression would also be
/// legal to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32).
/// This later form is less amenable to optimization though, and we are allowed
/// to generate the first by knowing that pointer arithmetic doesn't overflow.
///
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
                                          InstCombiner &IC) {
  TargetData &TD = *IC.getTargetData();
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index.  If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index.  For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
        Offset += Size*CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset, just
  // evaluate it the general way.
  if (i == e) return 0;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element.  For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());

  // Verify that there are no other variable indices.  If so, emit the hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return 0;

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += Size*CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index.  If there is no offset, life is simple, return
  // the index.
  unsigned IntPtrWidth = TD.getPointerSizeInBits();
  if (Offset == 0) {
    // Cast to intptrty in case a truncation occurs.  If an extension is needed,
    // we don't need to bother extending: the extension won't affect where the
    // computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth)
      VariableIdx = new TruncInst(VariableIdx, TD.getIntPtrType(),
                                  VariableIdx->getName(), &I);
    return VariableIdx;
  }

  // Otherwise, there is an index.  The computation we will do will be modulo
  // the pointer size, so get it.
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  Offset &= PtrSizeMask;
  VariableScale &= PtrSizeMask;

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor.  For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i.  Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs*(int64_t)VariableScale)
    return 0;

  // Okay, we can do this evaluation.  Start by converting the index to intptr.
  const Type *IntPtrTy = TD.getIntPtrType();
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = CastInst::CreateIntegerCast(VariableIdx, IntPtrTy,
                                              true /*SExt*/,
                                              VariableIdx->getName(), &I);
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return BinaryOperator::CreateAdd(VariableIdx, OffsetVal, "offset", &I);
}
/// FoldGEPICmp - Fold comparisons between a GEP instruction and something
/// else.  At this point we know that the GEP is on the LHS of the comparison.
Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                       ICmpInst::Predicate Cond,
                                       Instruction &I) {
  // Look through bitcasts.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(RHS))
    RHS = BCI->getOperand(0);

  Value *PtrBase = GEPLHS->getOperand(0);
  if (TD && PtrBase == RHS && GEPLHS->isInBounds()) {
    // ((gep Ptr, OFFSET) cmp Ptr)   ---> (OFFSET cmp 0).
    // This transformation (ignoring the base and scales) is valid because we
    // know pointers can't overflow since the gep is inbounds.  See if we can
    // output an optimized form.
    Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, I, *this);

    // If not, synthesize the offset the hard way.
    if (Offset == 0)
      Offset = EmitGEPOffset(GEPLHS, I, *this);
    return new ICmpInst(*Context, ICmpInst::getSignedPredicate(Cond), Offset,
                        Constant::getNullValue(Offset->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (PtrBase != GEPRHS->getOperand(0)) {
      bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
      IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
                        GEPRHS->getOperand(0)->getType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      if (IndicesTheSame)
        return new ICmpInst(*Context, ICmpInst::getSignedPredicate(Cond),
                            GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // Otherwise, the base pointers are different and the indices are
      // different, bail out.
      return 0;
    }

    // If one of the GEPs has all zero indices, recurse.
    bool AllZeros = true;
    for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
      if (!isa<Constant>(GEPLHS->getOperand(i)) ||
          !cast<Constant>(GEPLHS->getOperand(i))->isNullValue()) {
        AllZeros = false;
        break;
      }
    if (AllZeros)
      return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
                         ICmpInst::getSwappedPredicate(Cond), I);

    // If the other GEP has all zero indices, recurse.
    AllZeros = true;
    for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
      if (!isa<Constant>(GEPRHS->getOperand(i)) ||
          !cast<Constant>(GEPRHS->getOperand(i))->isNullValue()) {
        AllZeros = false;
        break;
      }
    if (AllZeros)
      return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);

    if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0;  // Keep track of # differences.
      unsigned DiffOperand = 0;     // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
                GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          } else {
            if (NumDifferences++) break;
            DiffOperand = i;
          }
        }

      if (NumDifferences == 0)   // SAME GEP?
        return ReplaceInstUsesWith(I, // No comparison is needed here.
                                   ConstantInt::get(Type::Int1Ty,
                                            ICmpInst::isTrueWhenEqual(Cond)));

      else if (NumDifferences == 1) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        // Make sure we do a signed comparison here.
        return new ICmpInst(*Context,
                            ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
      }
    }

    // Only lower this if the icmp is the only user of the GEP or if we expect
    // the result to fold to a constant!
    if (TD &&
        (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
        (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)  --->  (OFFSET1 cmp OFFSET2)
      Value *L = EmitGEPOffset(GEPLHS, I, *this);
      Value *R = EmitGEPOffset(GEPRHS, I, *this);
      return new ICmpInst(*Context, ICmpInst::getSignedPredicate(Cond), L, R);
    }
  }
  return 0;
}
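// Illustrative example of the inbounds case at the top of FoldGEPICmp:
//   icmp eq i32* (getelementptr inbounds i32* %A, i64 %i), %A
// becomes "icmp eq i64 %i, 0", because an inbounds gep is known not to wrap
// the pointer, so only the offset matters.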
/// FoldFCmp_IntToFP_Cst - Fold fcmp ([us]itofp x, cst) if possible.
///
Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
                                                Instruction *LHSI,
                                                Constant *RHSC) {
  if (!isa<ConstantFP>(RHSC)) return 0;
  const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();

  // Get the width of the mantissa.  We don't want to hack on conversions that
  // might lose information from the integer, e.g. "i64 -> float"
  int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
  if (MantissaWidth == -1) return 0;  // Unknown.

  // Check to see that the input is converted from an integer type that is small
  // enough that preserves all bits.  TODO: check here for "known" sign bits.
  // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
  unsigned InputSize = LHSI->getOperand(0)->getType()->getScalarSizeInBits();

  // If this is a uitofp instruction, we need an extra bit to hold the sign.
  bool LHSUnsigned = isa<UIToFPInst>(LHSI);
  if (LHSUnsigned)
    ++InputSize;

  // If the conversion would lose info, don't hack on this.
  if ((int)InputSize > MantissaWidth)
    return 0;

  // Otherwise, we can potentially simplify the comparison.  We know that it
  // will always come through as an integer value and we know the constant is
  // not a NAN (it would have been previously simplified).
  assert(!RHS.isNaN() && "NaN comparison not already folded!");

  ICmpInst::Predicate Pred;
  switch (I.getPredicate()) {
  default: llvm_unreachable("Unexpected predicate!");
  case FCmpInst::FCMP_UEQ:
  case FCmpInst::FCMP_OEQ:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case FCmpInst::FCMP_UGT:
  case FCmpInst::FCMP_OGT:
    Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
    break;
  case FCmpInst::FCMP_UGE:
  case FCmpInst::FCMP_OGE:
    Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
    break;
  case FCmpInst::FCMP_ULT:
  case FCmpInst::FCMP_OLT:
    Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
    break;
  case FCmpInst::FCMP_ULE:
  case FCmpInst::FCMP_OLE:
    Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
    break;
  case FCmpInst::FCMP_UNE:
  case FCmpInst::FCMP_ONE:
    Pred = ICmpInst::ICMP_NE;
    break;
  case FCmpInst::FCMP_ORD:
    return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
  case FCmpInst::FCMP_UNO:
    return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
  }

  const IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());

  // Now we know that the APFloat is a normal number, zero or inf.

  // See if the FP constant is too large for the integer.  For example,
  // comparing an i8 to 300.0.
  unsigned IntWidth = IntTy->getScalarSizeInBits();

  if (!LHSUnsigned) {
    // If the RHS value is > SignedMax, fold the comparison.  This handles +INF
    // and large values.
    APFloat SMax(RHS.getSemantics(), APFloat::fcZero, false);
    SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMax.compare(RHS) == APFloat::cmpLessThan) {  // smax < 13123.0
      if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_SLT ||
          Pred == ICmpInst::ICMP_SLE)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
    }
  } else {
    // If the RHS value is > UnsignedMax, fold the comparison. This handles
    // +INF and large values.
    APFloat UMax(RHS.getSemantics(), APFloat::fcZero, false);
    UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMax.compare(RHS) == APFloat::cmpLessThan) {  // umax < 13123.0
      if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_ULT ||
          Pred == ICmpInst::ICMP_ULE)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
    }
  }

  if (!LHSUnsigned) {
    // See if the RHS value is < SignedMin.
    APFloat SMin(RHS.getSemantics(), APFloat::fcZero, false);
    SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
          Pred == ICmpInst::ICMP_SGE)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
    }
  }

  // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
  // [0, UMAX], but it may still be fractional.  See if it is fractional by
  // casting the FP value to the integer value and back, checking for equality.
  // Don't do this for zero, because -0.0 is not fractional.
  Constant *RHSInt = LHSUnsigned
    ? ConstantExpr::getFPToUI(RHSC, IntTy)
    : ConstantExpr::getFPToSI(RHSC, IntTy);
  if (!RHS.isZero()) {
    bool Equal = LHSUnsigned
      ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
      : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
    if (!Equal) {
      // If we had a comparison against a fractional value, we have to adjust
      // the compare predicate and sometimes the value.  RHSC is rounded towards
      // zero at this point.
      switch (Pred) {
      default: llvm_unreachable("Unexpected integer comparison!");
      case ICmpInst::ICMP_NE:  // (float)int != 4.4   --> true
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      case ICmpInst::ICMP_EQ:  // (float)int == 4.4   --> false
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      case ICmpInst::ICMP_ULE:
        // (float)int <= 4.4   --> int <= 4
        // (float)int <= -4.4  --> false
        if (RHS.isNegative())
          return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
        break;
      case ICmpInst::ICMP_SLE:
        // (float)int <= 4.4   --> int <= 4
        // (float)int <= -4.4  --> int < -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SLT;
        break;
      case ICmpInst::ICMP_ULT:
        // (float)int < -4.4   --> false
        // (float)int < 4.4    --> int <= 4
        if (RHS.isNegative())
          return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
        Pred = ICmpInst::ICMP_ULE;
        break;
      case ICmpInst::ICMP_SLT:
        // (float)int < -4.4   --> int < -4
        // (float)int < 4.4    --> int <= 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SLE;
        break;
      case ICmpInst::ICMP_UGT:
        // (float)int > 4.4    --> int > 4
        // (float)int > -4.4   --> true
        if (RHS.isNegative())
          return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
        break;
      case ICmpInst::ICMP_SGT:
        // (float)int > 4.4    --> int > 4
        // (float)int > -4.4   --> int >= -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SGE;
        break;
      case ICmpInst::ICMP_UGE:
        // (float)int >= -4.4   --> true
        // (float)int >= 4.4    --> int > 4
        if (RHS.isNegative())
          return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
        Pred = ICmpInst::ICMP_UGT;
        break;
      case ICmpInst::ICMP_SGE:
        // (float)int >= -4.4   --> int >= -4
        // (float)int >= 4.4    --> int > 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SGT;
        break;
      }
    }
  }

  // Lower this FP comparison into an appropriate integer version of the
  // comparison.
  return new ICmpInst(*Context, Pred, LHSI->getOperand(0), RHSInt);
}
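// Illustrative example: "fcmp olt float (sitofp i8 %x to float), 3.0e+2"
// folds to true here, because the signed maximum of i8 (127) is already less
// than 300.0 and the predicate maps to ICMP_SLT.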
Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
  bool Changed = SimplifyCompare(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // Fold trivial predicates.
  if (I.getPredicate() == FCmpInst::FCMP_FALSE)
    return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
  if (I.getPredicate() == FCmpInst::FCMP_TRUE)
    return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));

  // Simplify 'fcmp pred X, X'
  if (Op0 == Op1) {
    switch (I.getPredicate()) {
    default: llvm_unreachable("Unknown predicate!");
    case FCmpInst::FCMP_UEQ:    // True if unordered or equal
    case FCmpInst::FCMP_UGE:    // True if unordered, greater than, or equal
    case FCmpInst::FCMP_ULE:    // True if unordered, less than, or equal
      return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
    case FCmpInst::FCMP_OGT:    // True if ordered and greater than
    case FCmpInst::FCMP_OLT:    // True if ordered and less than
    case FCmpInst::FCMP_ONE:    // True if ordered and operands are unequal
      return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));

    case FCmpInst::FCMP_UNO:    // True if unordered: isnan(X) | isnan(Y)
    case FCmpInst::FCMP_ULT:    // True if unordered or less than
    case FCmpInst::FCMP_UGT:    // True if unordered or greater than
    case FCmpInst::FCMP_UNE:    // True if unordered or not equal
      // Canonicalize these to be 'fcmp uno %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_UNO);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;

    case FCmpInst::FCMP_ORD:    // True if ordered (no nans)
    case FCmpInst::FCMP_OEQ:    // True if ordered and equal
    case FCmpInst::FCMP_OGE:    // True if ordered and greater than or equal
    case FCmpInst::FCMP_OLE:    // True if ordered and less than or equal
      // Canonicalize these to be 'fcmp ord %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_ORD);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;
    }
  }

  if (isa<UndefValue>(Op1))                  // fcmp pred X, undef -> undef
    return ReplaceInstUsesWith(I, UndefValue::get(Type::Int1Ty));

  // Handle fcmp with constant RHS
  if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
    // If the constant is a nan, see if we can fold the comparison based on it.
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) {
      if (CFP->getValueAPF().isNaN()) {
        if (FCmpInst::isOrdered(I.getPredicate()))   // True if ordered and...
          return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
        assert(FCmpInst::isUnordered(I.getPredicate()) &&
               "Comparison must be either ordered or unordered!");
        // True if unordered.
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      }
    }

    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      switch (LHSI->getOpcode()) {
      case Instruction::PHI:
        // Only fold fcmp into the PHI if the phi and fcmp are in the same
        // block.  If in the same block, we're encouraging jump threading.  If
        // not, we are just pessimizing the code by making an i1 phi.
        if (LHSI->getParent() == I.getParent())
          if (Instruction *NV = FoldOpIntoPhi(I))
            return NV;
        break;
      case Instruction::SIToFP:
      case Instruction::UIToFP:
        if (Instruction *NV = FoldFCmp_IntToFP_Cst(I, LHSI, RHSC))
          return NV;
        break;
      case Instruction::Select:
        // If either operand of the select is a constant, we can fold the
        // comparison into the select arms, which will cause one to be
        // constant folded and the select turned into a bitwise or.
        Value *Op1 = 0, *Op2 = 0;
        if (LHSI->hasOneUse()) {
          if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
            // Fold the known value into the constant operand.
            Op1 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
            // Insert a new FCmp of the other select operand.
            Op2 = InsertNewInstBefore(new FCmpInst(*Context, I.getPredicate(),
                                                   LHSI->getOperand(2), RHSC,
                                                   I.getName()), I);
          } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
            // Fold the known value into the constant operand.
            Op2 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
            // Insert a new FCmp of the other select operand.
            Op1 = InsertNewInstBefore(new FCmpInst(*Context, I.getPredicate(),
                                                   LHSI->getOperand(1), RHSC,
                                                   I.getName()), I);
          }
        }

        if (Op1 && Op2)
          return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
        break;
      }
  }

  return Changed ? &I : 0;
}
Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
  bool Changed = SimplifyCompare(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  const Type *Ty = Op0->getType();

  // icmp X, X
  if (Op0 == Op1)
    return ReplaceInstUsesWith(I, ConstantInt::get(Type::Int1Ty,
                                                   I.isTrueWhenEqual()));

  if (isa<UndefValue>(Op1))                  // X icmp undef -> undef
    return ReplaceInstUsesWith(I, UndefValue::get(Type::Int1Ty));

  // icmp <global/alloca*/null>, <global/alloca*/null> - Global/Stack value
  // addresses never equal each other!  We already know that Op0 != Op1.
  if ((isa<GlobalValue>(Op0) || isa<AllocaInst>(Op0) ||
       isa<ConstantPointerNull>(Op0)) &&
      (isa<GlobalValue>(Op1) || isa<AllocaInst>(Op1) ||
       isa<ConstantPointerNull>(Op1)))
    return ReplaceInstUsesWith(I, ConstantInt::get(Type::Int1Ty,
                                                   !I.isTrueWhenEqual()));

  // icmp's with boolean values can always be turned into bitwise operations
  if (Ty == Type::Int1Ty) {
    switch (I.getPredicate()) {
    default: llvm_unreachable("Invalid icmp instruction!");
    case ICmpInst::ICMP_EQ: {                // icmp eq i1 A, B -> ~(A^B)
      Instruction *Xor = BinaryOperator::CreateXor(Op0, Op1, I.getName()+"tmp");
      InsertNewInstBefore(Xor, I);
      return BinaryOperator::CreateNot(*Context, Xor);
    }
    case ICmpInst::ICMP_NE:                  // icmp ne i1 A, B -> A^B
      return BinaryOperator::CreateXor(Op0, Op1);

    case ICmpInst::ICMP_UGT:
      std::swap(Op0, Op1);                   // Change icmp ugt -> icmp ult
      // FALL THROUGH
    case ICmpInst::ICMP_ULT: {               // icmp ult i1 A, B -> ~A & B
      Instruction *Not = BinaryOperator::CreateNot(*Context,
                                                   Op0, I.getName()+"tmp");
      InsertNewInstBefore(Not, I);
      return BinaryOperator::CreateAnd(Not, Op1);
    }
    case ICmpInst::ICMP_SGT:
      std::swap(Op0, Op1);                   // Change icmp sgt -> icmp slt
      // FALL THROUGH
    case ICmpInst::ICMP_SLT: {               // icmp slt i1 A, B -> A & ~B
      Instruction *Not = BinaryOperator::CreateNot(*Context,
                                                   Op1, I.getName()+"tmp");
      InsertNewInstBefore(Not, I);
      return BinaryOperator::CreateAnd(Not, Op0);
    }
    case ICmpInst::ICMP_UGE:
      std::swap(Op0, Op1);                   // Change icmp uge -> icmp ule
      // FALL THROUGH
    case ICmpInst::ICMP_ULE: {               // icmp ule i1 A, B -> ~A | B
      Instruction *Not = BinaryOperator::CreateNot(*Context,
                                                   Op0, I.getName()+"tmp");
      InsertNewInstBefore(Not, I);
      return BinaryOperator::CreateOr(Not, Op1);
    }
    case ICmpInst::ICMP_SGE:
      std::swap(Op0, Op1);                   // Change icmp sge -> icmp sle
      // FALL THROUGH
    case ICmpInst::ICMP_SLE: {               // icmp sle i1 A, B -> A | ~B
      Instruction *Not = BinaryOperator::CreateNot(*Context,
                                                   Op1, I.getName()+"tmp");
      InsertNewInstBefore(Not, I);
      return BinaryOperator::CreateOr(Not, Op0);
    }
    }
  }
  unsigned BitWidth = 0;
  if (TD)
    BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
  else if (Ty->isIntOrIntVector())
    BitWidth = Ty->getScalarSizeInBits();

  bool isSignBit = false;

  // See if we are doing a comparison with a constant.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    Value *A = 0, *B = 0;

    // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B)
    if (I.isEquality() && CI->isNullValue() &&
        match(Op0, m_Sub(m_Value(A), m_Value(B)), *Context)) {
      // (icmp cond A B) if cond is equality
      return new ICmpInst(*Context, I.getPredicate(), A, B);
    }

    // If we have an icmp le or icmp ge instruction, turn it into the
    // appropriate icmp lt or icmp gt instruction.  This allows us to rely on
    // them being folded in the code below.
    switch (I.getPredicate()) {
    default: break;
    case ICmpInst::ICMP_ULE:
      if (CI->isMaxValue(false))                 // A <=u MAX -> TRUE
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      return new ICmpInst(*Context, ICmpInst::ICMP_ULT, Op0,
                          AddOne(CI, Context));
    case ICmpInst::ICMP_SLE:
      if (CI->isMaxValue(true))                  // A <=s MAX -> TRUE
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      return new ICmpInst(*Context, ICmpInst::ICMP_SLT, Op0,
                          AddOne(CI, Context));
    case ICmpInst::ICMP_UGE:
      if (CI->isMinValue(false))                 // A >=u MIN -> TRUE
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      return new ICmpInst(*Context, ICmpInst::ICMP_UGT, Op0,
                          SubOne(CI, Context));
    case ICmpInst::ICMP_SGE:
      if (CI->isMinValue(true))                  // A >=s MIN -> TRUE
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      return new ICmpInst(*Context, ICmpInst::ICMP_SGT, Op0,
                          SubOne(CI, Context));
    }

    // If this comparison is a normal comparison, it demands all
    // bits, if it is a sign bit comparison, it only demands the sign bit.
    bool UnusedBit;
    isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit);
  }
  // See if we can fold the comparison based on range information we can get
  // by checking whether bits are known to be zero or one in the input.
  if (BitWidth != 0) {
    APInt Op0KnownZero(BitWidth, 0), Op0KnownOne(BitWidth, 0);
    APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0);

    if (SimplifyDemandedBits(I.getOperandUse(0),
                             isSignBit ? APInt::getSignBit(BitWidth)
                                       : APInt::getAllOnesValue(BitWidth),
                             Op0KnownZero, Op0KnownOne, 0))
      return &I;
    if (SimplifyDemandedBits(I.getOperandUse(1),
                             APInt::getAllOnesValue(BitWidth),
                             Op1KnownZero, Op1KnownOne, 0))
      return &I;

    // Given the known and unknown bits, compute a range that the LHS could be
    // in. Compute the Min, Max and RHS values based on the known bits. For the
    // EQ and NE we use unsigned values.
    APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
    APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
    if (ICmpInst::isSignedPredicate(I.getPredicate())) {
      ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
                                             Op0Min, Op0Max);
      ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
                                             Op1Min, Op1Max);
    } else {
      ComputeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
                                               Op0Min, Op0Max);
      ComputeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
                                               Op1Min, Op1Max);
    }

    // If Min and Max are known to be the same, then SimplifyDemandedBits
    // figured out that the LHS is a constant. Just constant fold this now so
    // that code below can assume that Min != Max.
    if (!isa<Constant>(Op0) && Op0Min == Op0Max)
      return new ICmpInst(*Context, I.getPredicate(),
                          ConstantInt::get(*Context, Op0Min), Op1);
    if (!isa<Constant>(Op1) && Op1Min == Op1Max)
      return new ICmpInst(*Context, I.getPredicate(), Op0,
                          ConstantInt::get(*Context, Op1Min));

    // Based on the range information we know about the LHS, see if we can
    // simplify this comparison. For example, (x&4) < 8 is always true.
    switch (I.getPredicate()) {
    default: llvm_unreachable("Unknown icmp opcode!");
    case ICmpInst::ICMP_EQ:
      if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      break;
    case ICmpInst::ICMP_NE:
      if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      break;
    case ICmpInst::ICMP_ULT:
      if (Op0Max.ult(Op1Min))          // A <u B -> true if max(A) < min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Min.uge(Op1Max))          // A <u B -> false if min(A) >= max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      if (Op1Min == Op0Max)            // A <u B -> A != B if max(A) == min(B)
        return new ICmpInst(*Context, ICmpInst::ICMP_NE, Op0, Op1);
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
        if (Op1Max == Op0Min+1)        // A <u C -> A == C-1 if min(A)+1 == C
          return new ICmpInst(*Context, ICmpInst::ICMP_EQ, Op0,
                              SubOne(CI, Context));

        // (x <u 2147483648) -> (x >s -1)  -> true if sign bit clear
        if (CI->isMinValue(true))
          return new ICmpInst(*Context, ICmpInst::ICMP_SGT, Op0,
                              Constant::getAllOnesValue(Op0->getType()));
      }
      break;
    case ICmpInst::ICMP_UGT:
      if (Op0Min.ugt(Op1Max))          // A >u B -> true if min(A) > max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Max.ule(Op1Min))          // A >u B -> false if max(A) <= min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));

      if (Op1Max == Op0Min)            // A >u B -> A != B if min(A) == max(B)
        return new ICmpInst(*Context, ICmpInst::ICMP_NE, Op0, Op1);
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
        if (Op1Min == Op0Max-1)        // A >u C -> A == C+1 if max(A)-1 == C
          return new ICmpInst(*Context, ICmpInst::ICMP_EQ, Op0,
                              AddOne(CI, Context));

        // (x >u 2147483647) -> (x <s 0)  -> true if sign bit set
        if (CI->isMaxValue(true))
          return new ICmpInst(*Context, ICmpInst::ICMP_SLT, Op0,
                              Constant::getNullValue(Op0->getType()));
      }
      break;
    case ICmpInst::ICMP_SLT:
      if (Op0Max.slt(Op1Min))          // A <s B -> true if max(A) < min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Min.sge(Op1Max))          // A <s B -> false if min(A) >= max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      if (Op1Min == Op0Max)            // A <s B -> A != B if max(A) == min(B)
        return new ICmpInst(*Context, ICmpInst::ICMP_NE, Op0, Op1);
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
        if (Op1Max == Op0Min+1)        // A <s C -> A == C-1 if min(A)+1 == C
          return new ICmpInst(*Context, ICmpInst::ICMP_EQ, Op0,
                              SubOne(CI, Context));
      }
      break;
    case ICmpInst::ICMP_SGT:
      if (Op0Min.sgt(Op1Max))          // A >s B -> true if min(A) > max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Max.sle(Op1Min))          // A >s B -> false if max(A) <= min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));

      if (Op1Max == Op0Min)            // A >s B -> A != B if min(A) == max(B)
        return new ICmpInst(*Context, ICmpInst::ICMP_NE, Op0, Op1);
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
        if (Op1Min == Op0Max-1)        // A >s C -> A == C+1 if max(A)-1 == C
          return new ICmpInst(*Context, ICmpInst::ICMP_EQ, Op0,
                              AddOne(CI, Context));
      }
      break;
    case ICmpInst::ICMP_SGE:
      assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
      if (Op0Min.sge(Op1Max))          // A >=s B -> true if min(A) >= max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Max.slt(Op1Min))          // A >=s B -> false if max(A) < min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      break;
    case ICmpInst::ICMP_SLE:
      assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
      if (Op0Max.sle(Op1Min))          // A <=s B -> true if max(A) <= min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Min.sgt(Op1Max))          // A <=s B -> false if min(A) > max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      break;
    case ICmpInst::ICMP_UGE:
      assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
      if (Op0Min.uge(Op1Max))          // A >=u B -> true if min(A) >= max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Max.ult(Op1Min))          // A >=u B -> false if max(A) < min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      break;
    case ICmpInst::ICMP_ULE:
      assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
      if (Op0Max.ule(Op1Min))          // A <=u B -> true if max(A) <= min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
      if (Op0Min.ugt(Op1Max))          // A <=u B -> false if min(A) > max(B)
        return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
      break;
    }

    // Turn a signed comparison into an unsigned one if both operands
    // are known to have the same sign.
    if (I.isSignedPredicate() &&
        ((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) ||
         (Op0KnownOne.isNegative() && Op1KnownOne.isNegative())))
      return new ICmpInst(*Context, I.getUnsignedPredicate(), Op0, Op1);
  }
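  // Worked example for the fold above (hypothetical i32 operands): if both
  // operands are zexts of i8 values, SimplifyDemandedBits proves that the
  // sign bit of each is zero, so "icmp slt %a, %b" and "icmp ult %a, %b"
  // agree on every possible input and the signed predicate is rewritten as
  // the unsigned one.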
  // Test if the ICmpInst instruction is used exclusively by a select as
  // part of a minimum or maximum operation. If so, refrain from doing
  // any other folding. This helps out other analyses which understand
  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
  // and CodeGen. And in this case, at least one of the comparison
  // operands has at least one user besides the compare (the select),
  // which would often largely negate the benefit of folding anyway.
  if (I.hasOneUse())
    if (SelectInst *SI = dyn_cast<SelectInst>(*I.use_begin()))
      if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
          (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
        return 0;
  // See if we are doing a comparison between a constant and an instruction
  // that can be folded into the comparison.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    // Since the RHS is a ConstantInt (CI), if the left hand side is an
    // instruction, see if that instruction also has constants so that the
    // instruction can be folded into the icmp.
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      if (Instruction *Res = visitICmpInstWithInstAndIntCst(I, LHSI, CI))
        return Res;
  }
  // Handle icmp with constant (but not simple integer constant) RHS.
  if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      switch (LHSI->getOpcode()) {
      case Instruction::GetElementPtr:
        if (RHSC->isNullValue()) {
          // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
          bool isAllZeros = true;
          for (unsigned i = 1, e = LHSI->getNumOperands(); i != e; ++i)
            if (!isa<Constant>(LHSI->getOperand(i)) ||
                !cast<Constant>(LHSI->getOperand(i))->isNullValue()) {
              isAllZeros = false;
              break;
            }
          if (isAllZeros)
            return new ICmpInst(*Context, I.getPredicate(), LHSI->getOperand(0),
                    Constant::getNullValue(LHSI->getOperand(0)->getType()));
        }
        break;

      case Instruction::PHI:
        // Only fold icmp into the PHI if the phi and icmp are in the same
        // block. If in the same block, we're encouraging jump threading. If
        // not, we are just pessimizing the code by making an i1 phi.
        if (LHSI->getParent() == I.getParent())
          if (Instruction *NV = FoldOpIntoPhi(I))
            return NV;
        break;
      case Instruction::Select: {
        // If either operand of the select is a constant, we can fold the
        // comparison into the select arms, which will cause one to be
        // constant folded and the select turned into a bitwise or.
        Value *Op1 = 0, *Op2 = 0;
        if (LHSI->hasOneUse()) {
          if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
            // Fold the known value into the constant operand.
            Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
            // Insert a new ICmp of the other select operand.
            Op2 = InsertNewInstBefore(new ICmpInst(*Context, I.getPredicate(),
                                                   LHSI->getOperand(2), RHSC,
                                                   I.getName()), I);
          } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
            // Fold the known value into the constant operand.
            Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
            // Insert a new ICmp of the other select operand.
            Op1 = InsertNewInstBefore(new ICmpInst(*Context, I.getPredicate(),
                                                   LHSI->getOperand(1), RHSC,
                                                   I.getName()), I);
          }
        }

        if (Op1 && Op2)
          return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
        break;
      }
      case Instruction::Malloc:
        // If we have (malloc != null), and if the malloc has a single use, we
        // can assume it is successful and remove the malloc.
        if (LHSI->hasOneUse() && isa<ConstantPointerNull>(RHSC)) {
          AddToWorkList(LHSI);
          return ReplaceInstUsesWith(I, ConstantInt::get(Type::Int1Ty,
                                                         !I.isTrueWhenEqual()));
        }
        break;
      }
  }
  // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
    if (Instruction *NI = FoldGEPICmp(GEP, Op1, I.getPredicate(), I))
      return NI;
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
    if (Instruction *NI = FoldGEPICmp(GEP, Op0,
                            ICmpInst::getSwappedPredicate(I.getPredicate()), I))
      return NI;
  // Test to see if the operands of the icmp are casted versions of other
  // values. If the ptr->ptr cast can be stripped off both arguments, we do so
  // now.
  if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
    if (isa<PointerType>(Op0->getType()) &&
        (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
      // We keep moving the cast from the left operand over to the right
      // operand, where it can often be eliminated completely.
      Op0 = CI->getOperand(0);

      // If operand #1 is a bitcast instruction, it must also be a ptr->ptr
      // cast, so eliminate it as well.
      if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
        Op1 = CI2->getOperand(0);

      // If Op1 is a constant, we can fold the cast into the constant.
      if (Op0->getType() != Op1->getType()) {
        if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
          Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
        } else {
          // Otherwise, cast the RHS right before the icmp.
          Op1 = InsertBitCastBefore(Op1, Op0->getType(), I);
        }
      }
      return new ICmpInst(*Context, I.getPredicate(), Op0, Op1);
    }
  }
  if (isa<CastInst>(Op0)) {
    // Handle the special case of: icmp (cast bool to X), <cst>
    // This comes up when you have code like
    //   int X = A < B;
    //   if (X) ...
    // For generality, we handle any zero-extension of any operand comparison
    // with a constant or another cast from the same type.
    if (isa<ConstantInt>(Op1) || isa<CastInst>(Op1))
      if (Instruction *R = visitICmpInstWithCastAndCast(I))
        return R;
  }
  // See if it's the same type of instruction on the left and right.
  if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
    if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
      if (Op0I->getOpcode() == Op1I->getOpcode() && Op0I->hasOneUse() &&
          Op1I->hasOneUse() && Op0I->getOperand(1) == Op1I->getOperand(1)) {
        switch (Op0I->getOpcode()) {
        default: break;
        case Instruction::Add:
        case Instruction::Sub:
        case Instruction::Xor:
          if (I.isEquality())    // a+x icmp eq/ne b+x --> a icmp b
            return new ICmpInst(*Context, I.getPredicate(), Op0I->getOperand(0),
                                Op1I->getOperand(0));
          // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
          if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
            if (CI->getValue().isSignBit()) {
              ICmpInst::Predicate Pred = I.isSignedPredicate()
                                             ? I.getUnsignedPredicate()
                                             : I.getSignedPredicate();
              return new ICmpInst(*Context, Pred, Op0I->getOperand(0),
                                  Op1I->getOperand(0));
            }

            // icmp u/s (a ^ maxsignbit), (b ^ maxsignbit) --> swapped icmp s/u
            if (CI->getValue().isMaxSignedValue()) {
              ICmpInst::Predicate Pred = I.isSignedPredicate()
                                             ? I.getUnsignedPredicate()
                                             : I.getSignedPredicate();
              Pred = I.getSwappedPredicate(Pred);
              return new ICmpInst(*Context, Pred, Op0I->getOperand(0),
                                  Op1I->getOperand(0));
            }
          }
          break;
        case Instruction::Mul:
          if (!I.isEquality())
            break;

          if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
            // a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask
            // Mask = -1 >> count-trailing-zeros(Cst).
            if (!CI->isZero() && !CI->isOne()) {
              const APInt &AP = CI->getValue();
              ConstantInt *Mask = ConstantInt::get(*Context,
                                      APInt::getLowBitsSet(AP.getBitWidth(),
                                                           AP.getBitWidth() -
                                                      AP.countTrailingZeros()));
              Instruction *And1 = BinaryOperator::CreateAnd(Op0I->getOperand(0),
                                                            Mask);
              Instruction *And2 = BinaryOperator::CreateAnd(Op1I->getOperand(0),
                                                            Mask);
              InsertNewInstBefore(And1, I);
              InsertNewInstBefore(And2, I);
              return new ICmpInst(*Context, I.getPredicate(), And1, And2);
            }
          }
          break;
        }
      }
    }
  }
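  // Worked example for the sign-bit xor fold in the switch above
  // (hypothetical i8 values): with a = -1 (0xFF) and b = 1 (0x01),
  // a^0x80 = 0x7F and b^0x80 = 0x81, so "0x7F <u 0x81" is true exactly when
  // "-1 <s 1" is true; xoring with the sign bit maps signed order onto
  // unsigned order, which is why the predicate is flipped s<->u.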
  // ~x < ~y --> y < x
  {
    Value *A, *B;
    if (match(Op0, m_Not(m_Value(A)), *Context) &&
        match(Op1, m_Not(m_Value(B)), *Context))
      return new ICmpInst(*Context, I.getPredicate(), B, A);
  }
  if (I.isEquality()) {
    Value *A, *B, *C, *D;

    // -x == -y --> x == y
    if (match(Op0, m_Neg(m_Value(A)), *Context) &&
        match(Op1, m_Neg(m_Value(B)), *Context))
      return new ICmpInst(*Context, I.getPredicate(), A, B);

    if (match(Op0, m_Xor(m_Value(A), m_Value(B)), *Context)) {
      if (A == Op1 || B == Op1) {    // (A^B) == A  ->  B == 0
        Value *OtherVal = A == Op1 ? B : A;
        return new ICmpInst(*Context, I.getPredicate(), OtherVal,
                            Constant::getNullValue(A->getType()));
      }

      if (match(Op1, m_Xor(m_Value(C), m_Value(D)), *Context)) {
        // A^c1 == C^c2 --> A == C^(c1^c2)
        ConstantInt *C1, *C2;
        if (match(B, m_ConstantInt(C1), *Context) &&
            match(D, m_ConstantInt(C2), *Context) && Op1->hasOneUse()) {
          Constant *NC =
            ConstantInt::get(*Context, C1->getValue() ^ C2->getValue());
          Instruction *Xor = BinaryOperator::CreateXor(C, NC, "tmp");
          return new ICmpInst(*Context, I.getPredicate(), A,
                              InsertNewInstBefore(Xor, I));
        }

        // A^B == A^D -> B == D
        if (A == C) return new ICmpInst(*Context, I.getPredicate(), B, D);
        if (A == D) return new ICmpInst(*Context, I.getPredicate(), B, C);
        if (B == C) return new ICmpInst(*Context, I.getPredicate(), A, D);
        if (B == D) return new ICmpInst(*Context, I.getPredicate(), A, C);
      }
    }

    if (match(Op1, m_Xor(m_Value(A), m_Value(B)), *Context) &&
        (A == Op0 || B == Op0)) {
      // A == (A^B)  ->  B == 0
      Value *OtherVal = A == Op0 ? B : A;
      return new ICmpInst(*Context, I.getPredicate(), OtherVal,
                          Constant::getNullValue(A->getType()));
    }

    // (A-B) == A  ->  B == 0
    if (match(Op0, m_Sub(m_Specific(Op1), m_Value(B)), *Context))
      return new ICmpInst(*Context, I.getPredicate(), B,
                          Constant::getNullValue(B->getType()));

    // A == (A-B)  ->  B == 0
    if (match(Op1, m_Sub(m_Specific(Op0), m_Value(B)), *Context))
      return new ICmpInst(*Context, I.getPredicate(), B,
                          Constant::getNullValue(B->getType()));

    // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
    if (Op0->hasOneUse() && Op1->hasOneUse() &&
        match(Op0, m_And(m_Value(A), m_Value(B)), *Context) &&
        match(Op1, m_And(m_Value(C), m_Value(D)), *Context)) {
      Value *X = 0, *Y = 0, *Z = 0;

      if (A == C) {
        X = B; Y = D; Z = A;
      } else if (A == D) {
        X = B; Y = C; Z = A;
      } else if (B == C) {
        X = A; Y = D; Z = B;
      } else if (B == D) {
        X = A; Y = C; Z = B;
      }

      if (X) {   // Build (X^Y) & Z
        Op1 = InsertNewInstBefore(BinaryOperator::CreateXor(X, Y, "tmp"), I);
        Op1 = InsertNewInstBefore(BinaryOperator::CreateAnd(Op1, Z, "tmp"), I);
        I.setOperand(0, Op1);
        I.setOperand(1, Constant::getNullValue(Op1->getType()));
        return &I;
      }
    }
  }
  return Changed ? &I : 0;
}
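// Worked example for the (X&Z) == (Y&Z) fold above (hypothetical i8 values):
// X = 0b1010, Y = 0b0110, Z = 0b0011 gives X&Z == Y&Z == 0b0010, and
// (X^Y)&Z == 0b1100 & 0b0011 == 0, so both forms of the test agree; the
// rewritten form needs one xor and one and instead of two ands.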
/// FoldICmpDivCst - Fold "icmp pred, ([su]div X, DivRHS), CmpRHS" where DivRHS
/// and CmpRHS are both known to be integer constants.
Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
                                          ConstantInt *DivRHS) {
  ConstantInt *CmpRHS = cast<ConstantInt>(ICI.getOperand(1));
  const APInt &CmpRHSV = CmpRHS->getValue();

  // FIXME: If the operand types don't match the type of the divide
  // then don't attempt this transform. The code below doesn't have the
  // logic to deal with a signed divide and an unsigned compare (and
  // vice versa). This is because (x /s C1) <s C2 produces different
  // results than (x /s C1) <u C2 or (x /u C1) <s C2 or even
  // (x /u C1) <u C2. Simply casting the operands and result won't
  // work. :( The if statement below tests that condition and bails
  // if it finds it.
  bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
  if (!ICI.isEquality() && DivIsSigned != ICI.isSignedPredicate())
    return 0;
  if (DivRHS->isZero())
    return 0; // The ProdOV computation fails on divide by zero.
  if (DivIsSigned && DivRHS->isAllOnesValue())
    return 0; // The overflow computation also screws up here
  if (DivRHS->isOne())
    return 0; // Not worth bothering, and eliminates some funny cases

  // Compute Prod = CI * DivRHS. We are essentially solving an equation
  // of form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and
  // C2 (CI). By solving for X we can turn this into a range check
  // instead of computing a divide.
  Constant *Prod = ConstantExpr::getMul(CmpRHS, DivRHS);

  // Determine if the product overflows by seeing if the product is
  // not equal to the divide. Make sure we do the same kind of divide
  // as in the LHS instruction that we're folding.
  bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS) :
                 ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;

  // Get the ICmp opcode.
  ICmpInst::Predicate Pred = ICI.getPredicate();

  // Figure out the interval that is being checked. For example, a comparison
  // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
  // Compute this interval based on the constants involved and the signedness
  // of the compare/divide. This computes a half-open interval, keeping track
  // of whether either value in the interval overflows. After analysis each
  // overflow variable is set to 0 if its corresponding bound variable is
  // valid, -1 if overflowed off the bottom end, or +1 if overflowed off the
  // top end.
  int LoOverflow = 0, HiOverflow = 0;
  Constant *LoBound = 0, *HiBound = 0;

  if (!DivIsSigned) {  // udiv
    // e.g. X/5 op 3  --> [15, 20)
    LoBound = Prod;
    HiOverflow = LoOverflow = ProdOV;
    if (!HiOverflow)
      HiOverflow = AddWithOverflow(HiBound, LoBound, DivRHS, Context, false);
  } else if (DivRHS->getValue().isStrictlyPositive()) { // Divisor is > 0.
    if (CmpRHSV == 0) {       // (X / pos) op 0
      // Can't overflow.  e.g.  X/2 op 0 --> [-1, 2)
      LoBound = cast<ConstantInt>(ConstantExpr::getNeg(SubOne(DivRHS,
                                                              Context)));
      HiBound = DivRHS;
    } else if (CmpRHSV.isStrictlyPositive()) {   // (X / pos) op pos
      LoBound = Prod;     // e.g.   X/5 op 3 --> [15, 20)
      HiOverflow = LoOverflow = ProdOV;
      if (!HiOverflow)
        HiOverflow = AddWithOverflow(HiBound, Prod, DivRHS, Context, true);
    } else {                       // (X / pos) op neg
      // e.g. X/5 op -3  --> [-15-4, -15+1) --> [-19, -14)
      HiBound = AddOne(Prod, Context);
      LoOverflow = HiOverflow = ProdOV ? -1 : 0;
      if (!LoOverflow) {
        ConstantInt *DivNeg =
          cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
        LoOverflow = AddWithOverflow(LoBound, HiBound, DivNeg, Context,
                                     true) ? -1 : 0;
      }
    }
  } else if (DivRHS->getValue().isNegative()) { // Divisor is < 0.
    if (CmpRHSV == 0) {       // (X / neg) op 0
      // e.g. X/-5 op 0  --> [-4, 5)
      LoBound = AddOne(DivRHS, Context);
      HiBound = cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
      if (HiBound == DivRHS) {     // -INTMIN = INTMIN
        HiOverflow = 1;            // [INTMIN+1, overflow)
        HiBound = 0;               // e.g. X/INTMIN = 0 --> X > INTMIN
      }
    } else if (CmpRHSV.isStrictlyPositive()) {   // (X / neg) op pos
      // e.g. X/-5 op 3  --> [-19, -14)
      HiBound = AddOne(Prod, Context);
      HiOverflow = LoOverflow = ProdOV ? -1 : 0;
      if (!LoOverflow)
        LoOverflow = AddWithOverflow(LoBound, HiBound,
                                     DivRHS, Context, true) ? -1 : 0;
    } else {                       // (X / neg) op neg
      LoBound = Prod;       // e.g. X/-5 op -3  --> [15, 20)
      LoOverflow = HiOverflow = ProdOV;
      if (!HiOverflow)
        HiOverflow = SubWithOverflow(HiBound, Prod, DivRHS, Context, true);
    }

    // Dividing by a negative swaps the condition.  LT <-> GT
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  Value *X = DivI->getOperand(0);
  switch (Pred) {
  default: llvm_unreachable("Unhandled icmp opcode!");
  case ICmpInst::ICMP_EQ:
    if (LoOverflow && HiOverflow)
      return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
    else if (HiOverflow)
      return new ICmpInst(*Context, DivIsSigned ? ICmpInst::ICMP_SGE :
                          ICmpInst::ICMP_UGE, X, LoBound);
    else if (LoOverflow)
      return new ICmpInst(*Context, DivIsSigned ? ICmpInst::ICMP_SLT :
                          ICmpInst::ICMP_ULT, X, HiBound);
    else
      return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, true, ICI);
  case ICmpInst::ICMP_NE:
    if (LoOverflow && HiOverflow)
      return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
    else if (HiOverflow)
      return new ICmpInst(*Context, DivIsSigned ? ICmpInst::ICMP_SLT :
                          ICmpInst::ICMP_ULT, X, LoBound);
    else if (LoOverflow)
      return new ICmpInst(*Context, DivIsSigned ? ICmpInst::ICMP_SGE :
                          ICmpInst::ICMP_UGE, X, HiBound);
    else
      return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, false, ICI);
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_SLT:
    if (LoOverflow == +1)   // Low bound is greater than input range.
      return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
    if (LoOverflow == -1)   // Low bound is less than input range.
      return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
    return new ICmpInst(*Context, Pred, X, LoBound);
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_SGT:
    if (HiOverflow == +1)       // High bound greater than input range.
      return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
    else if (HiOverflow == -1)  // High bound less than input range.
      return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
    if (Pred == ICmpInst::ICMP_UGT)
      return new ICmpInst(*Context, ICmpInst::ICMP_UGE, X, HiBound);
    else
      return new ICmpInst(*Context, ICmpInst::ICMP_SGE, X, HiBound);
  }
}
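// Worked examples for the interval computation above (hypothetical values):
// "X /u 5 == 3" is true exactly for X in [15, 20), so it becomes the range
// test 15 <=u X <u 20; "X /s -5 == 0" is true exactly for X in [-4, 5), and
// for relational predicates dividing by a negative constant also swaps the
// direction of the comparison (LT <-> GT) before the new icmp is emitted.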
/// visitICmpInstWithInstAndIntCst - Handle "icmp (instr, intcst)".
///
Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
                                                          Instruction *LHSI,
                                                          ConstantInt *RHS) {
  const APInt &RHSV = RHS->getValue();

  switch (LHSI->getOpcode()) {
  case Instruction::Trunc:
    if (ICI.isEquality() && LHSI->hasOneUse()) {
      // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
      // of the high bits truncated out of x are known.
      unsigned DstBits = LHSI->getType()->getPrimitiveSizeInBits(),
             SrcBits = LHSI->getOperand(0)->getType()->getPrimitiveSizeInBits();
      APInt Mask(APInt::getHighBitsSet(SrcBits, SrcBits-DstBits));
      APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
      ComputeMaskedBits(LHSI->getOperand(0), Mask, KnownZero, KnownOne);

      // If all the high bits are known, we can do this xform.
      if ((KnownZero|KnownOne).countLeadingOnes() >= SrcBits-DstBits) {
        // Pull in the high bits from known-ones set.
        APInt NewRHS(RHS->getValue());
        NewRHS.zext(SrcBits);
        NewRHS |= KnownOne;
        return new ICmpInst(*Context, ICI.getPredicate(), LHSI->getOperand(0),
                            ConstantInt::get(*Context, NewRHS));
      }
    }
    break;
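  // Worked example for the Trunc case above (hypothetical i32 source): for
  // "icmp eq (trunc i32 %x to i8), 42", if the top 24 bits of %x are all
  // known to be 1, the compare becomes "icmp eq i32 %x, 0xFFFFFF2A"; the
  // known-one high bits are OR'd into the widened constant.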
  case Instruction::Xor:         // (icmp pred (xor X, XorCST), CI)
    if (ConstantInt *XorCST = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
      // If this is a comparison that tests the signbit (X < 0) or (x > -1),
      // fold the xor.
      if ((ICI.getPredicate() == ICmpInst::ICMP_SLT && RHSV == 0) ||
          (ICI.getPredicate() == ICmpInst::ICMP_SGT && RHSV.isAllOnesValue())) {
        Value *CompareVal = LHSI->getOperand(0);

        // If the sign bit of the XorCST is not set, there is no change to
        // the operation, just stop using the Xor.
        if (!XorCST->getValue().isNegative()) {
          ICI.setOperand(0, CompareVal);
          AddToWorkList(LHSI);
          return &ICI;
        }

        // Was the old condition true if the operand is positive?
        bool isTrueIfPositive = ICI.getPredicate() == ICmpInst::ICMP_SGT;

        // If so, the new one isn't.
        isTrueIfPositive ^= true;

        if (isTrueIfPositive)
          return new ICmpInst(*Context, ICmpInst::ICMP_SGT, CompareVal,
                              SubOne(RHS, Context));
        else
          return new ICmpInst(*Context, ICmpInst::ICMP_SLT, CompareVal,
                              AddOne(RHS, Context));
      }

      if (LHSI->hasOneUse()) {
        // (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
        if (!ICI.isEquality() && XorCST->getValue().isSignBit()) {
          const APInt &SignBit = XorCST->getValue();
          ICmpInst::Predicate Pred = ICI.isSignedPredicate()
                                         ? ICI.getUnsignedPredicate()
                                         : ICI.getSignedPredicate();
          return new ICmpInst(*Context, Pred, LHSI->getOperand(0),
                              ConstantInt::get(*Context, RHSV ^ SignBit));
        }

        // (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
        if (!ICI.isEquality() && XorCST->getValue().isMaxSignedValue()) {
          const APInt &NotSignBit = XorCST->getValue();
          ICmpInst::Predicate Pred = ICI.isSignedPredicate()
                                         ? ICI.getUnsignedPredicate()
                                         : ICI.getSignedPredicate();
          Pred = ICI.getSwappedPredicate(Pred);
          return new ICmpInst(*Context, Pred, LHSI->getOperand(0),
                              ConstantInt::get(*Context, RHSV ^ NotSignBit));
        }
      }
    }
    break;
  case Instruction::And:         // (icmp pred (and X, AndCST), RHS)
    if (LHSI->hasOneUse() && isa<ConstantInt>(LHSI->getOperand(1)) &&
        LHSI->getOperand(0)->hasOneUse()) {
      ConstantInt *AndCST = cast<ConstantInt>(LHSI->getOperand(1));

      // If the LHS is an AND of a truncating cast, we can widen the
      // and/compare to be the input width without changing the value
      // produced, eliminating a cast.
      if (TruncInst *Cast = dyn_cast<TruncInst>(LHSI->getOperand(0))) {
        // We can do this transformation if either the AND constant does not
        // have its sign bit set or if it is an equality comparison.
        // Extending a relational comparison when we're checking the sign
        // bit would not work.
        if (Cast->hasOneUse() &&
            (ICI.isEquality() ||
             (AndCST->getValue().isNonNegative() && RHSV.isNonNegative()))) {
          uint32_t BitWidth =
            cast<IntegerType>(Cast->getOperand(0)->getType())->getBitWidth();
          APInt NewCST = AndCST->getValue();
          NewCST.zext(BitWidth);
          APInt NewCI = RHSV;
          NewCI.zext(BitWidth);
          Instruction *NewAnd =
            BinaryOperator::CreateAnd(Cast->getOperand(0),
                           ConstantInt::get(*Context, NewCST),LHSI->getName());
          InsertNewInstBefore(NewAnd, ICI);
          return new ICmpInst(*Context, ICI.getPredicate(), NewAnd,
                              ConstantInt::get(*Context, NewCI));
        }
      }

      // If this is: (X >> C1) & C2 != C3 (where any shift and any compare
      // could exist), turn it into (X & (C2 << C1)) != (C3 << C1). This
      // happens a LOT in code produced by the C front-end, for bitfield
      // access.
      BinaryOperator *Shift = dyn_cast<BinaryOperator>(LHSI->getOperand(0));
      if (Shift && !Shift->isShift())
        Shift = 0;

      ConstantInt *ShAmt;
      ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
      const Type *Ty = Shift ? Shift->getType() : 0;  // Type of the shift.
      const Type *AndTy = AndCST->getType();          // Type of the and.

      // We can fold this as long as we can't shift unknown bits
      // into the mask.  This can only happen with signed shift
      // rights, as they sign-extend.
      if (ShAmt) {
        bool CanFold = Shift->isLogicalShift();
        if (!CanFold) {
          // To test for the bad case of the signed shr, see if any
          // of the bits shifted in could be tested after the mask.
          uint32_t TyBits = Ty->getPrimitiveSizeInBits();
          int ShAmtVal = TyBits - ShAmt->getLimitedValue(TyBits);

          uint32_t BitWidth = AndTy->getPrimitiveSizeInBits();
          if ((APInt::getHighBitsSet(BitWidth, BitWidth-ShAmtVal) &
               AndCST->getValue()) == 0)
            CanFold = true;
        }

        if (CanFold) {
          Constant *NewCst;
          if (Shift->getOpcode() == Instruction::Shl)
            NewCst = ConstantExpr::getLShr(RHS, ShAmt);
          else
            NewCst = ConstantExpr::getShl(RHS, ShAmt);

          // Check to see if we are shifting out any of the bits being
          // compared.
          if (ConstantExpr::get(Shift->getOpcode(),
                                NewCst, ShAmt) != RHS) {
            // If we shifted bits out, the fold is not going to work out.
            // As a special case, check to see if this means that the
            // result is always true or false now.
            if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
              return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
            if (ICI.getPredicate() == ICmpInst::ICMP_NE)
              return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
          } else {
            ICI.setOperand(1, NewCst);
            Constant *NewAndCST;
            if (Shift->getOpcode() == Instruction::Shl)
              NewAndCST = ConstantExpr::getLShr(AndCST, ShAmt);
            else
              NewAndCST = ConstantExpr::getShl(AndCST, ShAmt);
            LHSI->setOperand(1, NewAndCST);
            LHSI->setOperand(0, Shift->getOperand(0));
            AddToWorkList(Shift); // Shift is dead.
            AddUsesToWorkList(ICI);
            return &ICI;
          }
        }
      }

      // Turn ((X >> Y) & C) == 0  into  (X & (C << Y)) == 0.  The later is
      // preferable because it allows the C<<Y expression to be hoisted out
      // of a loop if Y is invariant and X is not.
      if (Shift && Shift->hasOneUse() && RHSV == 0 &&
          ICI.isEquality() && !Shift->isArithmeticShift() &&
          !isa<Constant>(Shift->getOperand(0))) {
        // Compute C << Y.
        Value *NS;
        if (Shift->getOpcode() == Instruction::LShr) {
          NS = BinaryOperator::CreateShl(AndCST,
                                         Shift->getOperand(1), "tmp");
        } else {
          // Insert a logical shift.
          NS = BinaryOperator::CreateLShr(AndCST,
                                          Shift->getOperand(1), "tmp");
        }
        InsertNewInstBefore(cast<Instruction>(NS), ICI);

        // Compute X & (C << Y).
        Instruction *NewAnd =
          BinaryOperator::CreateAnd(Shift->getOperand(0), NS, LHSI->getName());
        InsertNewInstBefore(NewAnd, ICI);

        ICI.setOperand(0, NewAnd);
        return &ICI;
      }
    }
    break;
  case Instruction::Shl: {       // (icmp pred (shl X, ShAmt), CI)
    ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
    if (!ShAmt) break;

    uint32_t TypeBits = RHSV.getBitWidth();

    // Check that the shift amount is in range.  If not, don't perform
    // undefined shifts.  When the shift is visited it will be
    // simplified.
    if (ShAmt->uge(TypeBits))
      break;

    if (ICI.isEquality()) {
      // If we are comparing against bits always shifted out, the
      // comparison cannot succeed.
      Constant *Comp =
        ConstantExpr::getShl(ConstantExpr::getLShr(RHS, ShAmt),
                             ShAmt);
      if (Comp != RHS) {// Comparing against a bit that we know is zero.
        bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
        Constant *Cst = ConstantInt::get(Type::Int1Ty, IsICMP_NE);
        return ReplaceInstUsesWith(ICI, Cst);
      }

      if (LHSI->hasOneUse()) {
        // Otherwise strength reduce the shift into an and.
        uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
        Constant *Mask =
          ConstantInt::get(*Context, APInt::getLowBitsSet(TypeBits,
                                                          TypeBits-ShAmtVal));

        Instruction *AndI =
          BinaryOperator::CreateAnd(LHSI->getOperand(0),
                                    Mask, LHSI->getName()+".mask");
        Value *And = InsertNewInstBefore(AndI, ICI);
        return new ICmpInst(*Context, ICI.getPredicate(), And,
                            ConstantInt::get(*Context, RHSV.lshr(ShAmtVal)));
      }
    }

    // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
    bool TrueIfSigned = false;
    if (LHSI->hasOneUse() &&
        isSignBitCheck(ICI.getPredicate(), RHS, TrueIfSigned)) {
      // (X << 31) <s 0  --> (X&1) != 0
      Constant *Mask = ConstantInt::get(*Context, APInt(TypeBits, 1) <<
                                           (TypeBits-ShAmt->getZExtValue()-1));
      Instruction *AndI =
        BinaryOperator::CreateAnd(LHSI->getOperand(0),
                                  Mask, LHSI->getName()+".mask");
      Value *And = InsertNewInstBefore(AndI, ICI);

      return new ICmpInst(*Context,
                          TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
                          And, Constant::getNullValue(And->getType()));
    }
    break;
  }
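  // Worked example for the sign-bit test above (hypothetical i32 value):
  // "(X << 31) <s 0" is true exactly when bit 0 of X is set, so it becomes
  // "(X & 1) != 0"; in general the surviving source bit is selected by the
  // mask 1 << (TypeBits - ShAmt - 1).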
  case Instruction::LShr:        // (icmp pred (shr X, ShAmt), CI)
  case Instruction::AShr: {
    // Only handle equality comparisons of shift-by-constant.
    ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
    if (!ShAmt || !ICI.isEquality()) break;

    // Check that the shift amount is in range.  If not, don't perform
    // undefined shifts.  When the shift is visited it will be
    // simplified.
    uint32_t TypeBits = RHSV.getBitWidth();
    if (ShAmt->uge(TypeBits))
      break;

    uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);

    // If we are comparing against bits always shifted out, the
    // comparison cannot succeed.
    APInt Comp = RHSV << ShAmtVal;
    if (LHSI->getOpcode() == Instruction::LShr)
      Comp = Comp.lshr(ShAmtVal);
    else
      Comp = Comp.ashr(ShAmtVal);

    if (Comp != RHSV) { // Comparing against a bit that we know is zero.
      bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
      Constant *Cst = ConstantInt::get(Type::Int1Ty, IsICMP_NE);
      return ReplaceInstUsesWith(ICI, Cst);
    }

    // Otherwise, check to see if the bits shifted out are known to be zero.
    // If so, we can compare against the unshifted value:
    //  (X & 4) >> 1 == 2  --> (X & 4) == 4.
    if (LHSI->hasOneUse() &&
        MaskedValueIsZero(LHSI->getOperand(0),
                          APInt::getLowBitsSet(Comp.getBitWidth(), ShAmtVal))) {
      return new ICmpInst(*Context, ICI.getPredicate(), LHSI->getOperand(0),
                          ConstantExpr::getShl(RHS, ShAmt));
    }

    if (LHSI->hasOneUse()) {
      // Otherwise strength reduce the shift into an and.
      APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
      Constant *Mask = ConstantInt::get(*Context, Val);

      Instruction *AndI =
        BinaryOperator::CreateAnd(LHSI->getOperand(0),
                                  Mask, LHSI->getName()+".mask");
      Value *And = InsertNewInstBefore(AndI, ICI);
      return new ICmpInst(*Context, ICI.getPredicate(), And,
                          ConstantExpr::getShl(RHS, ShAmt));
    }
    break;
  }
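  // Worked example for the shift strength reduction above (hypothetical i8
  // value): "icmp eq (lshr i8 %x, 3), 5" only constrains bits 3..7 of %x, so
  // it becomes "icmp eq (and i8 %x, 0xF8), 0x28" (5 << 3 == 0x28), which no
  // longer needs the shift.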
  case Instruction::SDiv:
  case Instruction::UDiv:
    // Fold: icmp pred ([us]div X, C1), C2 -> range test
    // Fold this div into the comparison, producing a range check.
    // Determine, based on the divide type, what the range is being
    // checked.  If there is an overflow on the low or high side, remember
    // it, otherwise compute the range [low, hi) bounding the new value.
    // See: InsertRangeTest above for the kinds of replacements possible.
    if (ConstantInt *DivRHS = dyn_cast<ConstantInt>(LHSI->getOperand(1)))
      if (Instruction *R = FoldICmpDivCst(ICI, cast<BinaryOperator>(LHSI),
                                          DivRHS))
        return R;
    break;

  case Instruction::Add:
    // Fold: icmp pred (add, X, C1), C2
    if (!ICI.isEquality()) {
      ConstantInt *LHSC = dyn_cast<ConstantInt>(LHSI->getOperand(1));
      if (!LHSC) break;
      const APInt &LHSV = LHSC->getValue();

      ConstantRange CR = ICI.makeConstantRange(ICI.getPredicate(), RHSV)
                            .subtract(LHSV);

      if (ICI.isSignedPredicate()) {
        if (CR.getLower().isSignBit()) {
          return new ICmpInst(*Context, ICmpInst::ICMP_SLT, LHSI->getOperand(0),
                              ConstantInt::get(*Context, CR.getUpper()));
        } else if (CR.getUpper().isSignBit()) {
          return new ICmpInst(*Context, ICmpInst::ICMP_SGE, LHSI->getOperand(0),
                              ConstantInt::get(*Context, CR.getLower()));
        }
      } else {
        if (CR.getLower().isMinValue()) {
          return new ICmpInst(*Context, ICmpInst::ICMP_ULT, LHSI->getOperand(0),
                              ConstantInt::get(*Context, CR.getUpper()));
        } else if (CR.getUpper().isMinValue()) {
          return new ICmpInst(*Context, ICmpInst::ICMP_UGE, LHSI->getOperand(0),
                              ConstantInt::get(*Context, CR.getLower()));
        }
      }
    }
    break;
  }
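  // Worked example for the add/range fold above (hypothetical i32 value):
  // "icmp ult (add i32 %x, 4), 4" holds exactly when %x wraps into the top
  // four unsigned values, so it is rewritten as "icmp uge i32 %x, -4"; the
  // constant range [0,4) is shifted down by the add's constant before the
  // bounds are inspected.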
  // Simplify icmp_eq and icmp_ne instructions with integer constant RHS.
  if (ICI.isEquality()) {
    bool isICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;

    // If the first operand is (add|sub|and|or|xor|rem) with a constant, and
    // the second operand is a constant, simplify a bit.
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(LHSI)) {
      switch (BO->getOpcode()) {
      case Instruction::SRem:
        // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
        if (RHSV == 0 && isa<ConstantInt>(BO->getOperand(1)) &&BO->hasOneUse()){
          const APInt &V = cast<ConstantInt>(BO->getOperand(1))->getValue();
          if (V.sgt(APInt(V.getBitWidth(), 1)) && V.isPowerOf2()) {
            Instruction *NewRem =
              BinaryOperator::CreateURem(BO->getOperand(0), BO->getOperand(1),
                                         BO->getName());
            InsertNewInstBefore(NewRem, ICI);
            return new ICmpInst(*Context, ICI.getPredicate(), NewRem,
                                Constant::getNullValue(BO->getType()));
          }
        }
        break;
      case Instruction::Add:
        // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
        if (ConstantInt *BOp1C = dyn_cast<ConstantInt>(BO->getOperand(1))) {
          if (BO->hasOneUse())
            return new ICmpInst(*Context, ICI.getPredicate(), BO->getOperand(0),
                                ConstantExpr::getSub(RHS, BOp1C));
        } else if (RHSV == 0) {
          // Replace ((add A, B) != 0) with (A != -B) if A or B is
          // efficiently invertible, or if the add has just this one use.
          Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);

          if (Value *NegVal = dyn_castNegVal(BOp1, Context))
            return new ICmpInst(*Context, ICI.getPredicate(), BOp0, NegVal);
          else if (Value *NegVal = dyn_castNegVal(BOp0, Context))
            return new ICmpInst(*Context, ICI.getPredicate(), NegVal, BOp1);
          else if (BO->hasOneUse()) {
            Instruction *Neg = BinaryOperator::CreateNeg(*Context, BOp1);
            InsertNewInstBefore(Neg, ICI);
            Neg->takeName(BO);
            return new ICmpInst(*Context, ICI.getPredicate(), BOp0, Neg);
          }
        }
        break;
      case Instruction::Xor:
        // For the xor case, we can xor two constants together, eliminating
        // the explicit xor.
        if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1)))
          return new ICmpInst(*Context, ICI.getPredicate(), BO->getOperand(0),
                              ConstantExpr::getXor(RHS, BOC));

        // FALLTHROUGH
      case Instruction::Sub:
        // Replace (([sub|xor] A, B) != 0) with (A != B)
        if (RHSV == 0)
          return new ICmpInst(*Context, ICI.getPredicate(), BO->getOperand(0),
                              BO->getOperand(1));
        break;

      case Instruction::Or:
        // If bits are being or'd in that are not present in the constant we
        // are comparing against, then the comparison could never succeed!
        if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1))) {
          Constant *NotCI = ConstantExpr::getNot(RHS);
          if (!ConstantExpr::getAnd(BOC, NotCI)->isNullValue())
            return ReplaceInstUsesWith(ICI,
                                       ConstantInt::get(Type::Int1Ty,
                                                        isICMP_NE));
        }
        break;

      case Instruction::And:
        if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
          // If bits are being compared against that are and'd out, then the
          // comparison can never succeed!
          if ((RHSV & ~BOC->getValue()) != 0)
            return ReplaceInstUsesWith(ICI,
                                       ConstantInt::get(Type::Int1Ty,
                                                        isICMP_NE));

          // If we have ((X & C) == C), turn it into ((X & C) != 0).
          if (RHS == BOC && RHSV.isPowerOf2())
            return new ICmpInst(*Context, isICMP_NE ? ICmpInst::ICMP_EQ :
                                ICmpInst::ICMP_NE, LHSI,
                                Constant::getNullValue(RHS->getType()));

          // Replace (and X, (1 << size(X)-1) != 0) with x s< 0
          if (BOC->getValue().isSignBit()) {
            Value *X = BO->getOperand(0);
            Constant *Zero = Constant::getNullValue(X->getType());
            ICmpInst::Predicate pred = isICMP_NE ?
              ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
            return new ICmpInst(*Context, pred, X, Zero);
          }

          // ((X & ~7) == 0) --> X < 8
          if (RHSV == 0 && isHighOnes(BOC)) {
            Value *X = BO->getOperand(0);
            Constant *NegX = ConstantExpr::getNeg(BOC);
            ICmpInst::Predicate pred = isICMP_NE ?
              ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
            return new ICmpInst(*Context, pred, X, NegX);
          }
        }
        break;
      default: break;
      }
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(LHSI)) {
      // Handle icmp {eq|ne} <intrinsic>, intcst.
      if (II->getIntrinsicID() == Intrinsic::bswap) {
        AddToWorkList(II);
        ICI.setOperand(0, II->getOperand(1));
        ICI.setOperand(1, ConstantInt::get(*Context, RHSV.byteSwap()));
        return &ICI;
      }
    }
  }
  return 0;
}
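// Worked example for the bswap fold above (hypothetical i32 constant):
// "icmp eq (bswap i32 %x), 0x11223344" byte-swaps the constant instead and
// becomes "icmp eq i32 %x, 0x44332211"; the bswap then has no remaining use
// and is cleaned up from the worklist.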
/// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst).
/// We only handle extending casts so far.
///
Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
  const CastInst *LHSCI = cast<CastInst>(ICI.getOperand(0));
  Value *LHSCIOp        = LHSCI->getOperand(0);
  const Type *SrcTy     = LHSCIOp->getType();
  const Type *DestTy    = LHSCI->getType();
  Value *RHSCIOp;

  // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
  // integer type is the same size as the pointer type.
  if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
      TD->getPointerSizeInBits() ==
         cast<IntegerType>(DestTy)->getBitWidth()) {
    Value *RHSOp = 0;
    if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
      RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
    } else if (PtrToIntInst *RHSC = dyn_cast<PtrToIntInst>(ICI.getOperand(1))) {
      RHSOp = RHSC->getOperand(0);
      // If the pointer types don't match, insert a bitcast.
      if (LHSCIOp->getType() != RHSOp->getType())
        RHSOp = InsertBitCastBefore(RHSOp, LHSCIOp->getType(), ICI);
    }

    if (RHSOp)
      return new ICmpInst(*Context, ICI.getPredicate(), LHSCIOp, RHSOp);
  }

  // The code below only handles extension cast instructions, so far.
  // Enforce this.
  if (LHSCI->getOpcode() != Instruction::ZExt &&
      LHSCI->getOpcode() != Instruction::SExt)
    return 0;

  bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
  bool isSignedCmp = ICI.isSignedPredicate();

  if (CastInst *CI = dyn_cast<CastInst>(ICI.getOperand(1))) {
    // Not an extension from the same type?
    RHSCIOp = CI->getOperand(0);
    if (RHSCIOp->getType() != LHSCIOp->getType())
      return 0;

    // If the signedness of the two casts doesn't agree (i.e. one is a sext
    // and the other is a zext), then we can't handle this.
    if (CI->getOpcode() != LHSCI->getOpcode())
      return 0;

    // Deal with equality cases early.
    if (ICI.isEquality())
      return new ICmpInst(*Context, ICI.getPredicate(), LHSCIOp, RHSCIOp);

    // A signed comparison of sign extended values simplifies into a
    // signed comparison.
    if (isSignedCmp && isSignedExt)
      return new ICmpInst(*Context, ICI.getPredicate(), LHSCIOp, RHSCIOp);

    // The other three cases all fold into an unsigned comparison.
    return new ICmpInst(*Context, ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
  }

  // If we aren't dealing with a constant on the RHS, exit early.
  ConstantInt *CI = dyn_cast<ConstantInt>(ICI.getOperand(1));
  if (!CI)
    return 0;

  // Compute the constant that would happen if we truncated to SrcTy then
  // reextended to DestTy.
  Constant *Res1 = ConstantExpr::getTrunc(CI, SrcTy);
  Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(),
                                         Res1, DestTy);

  // If the re-extended constant didn't change...
  if (Res2 == CI) {
    // Make sure that sign of the Cmp and the sign of the Cast are the same.
    // For example, we might have:
    //    %A = sext i16 %X to i32
    //    %B = icmp ugt i32 %A, 1330
    // It is incorrect to transform this into
    //    %B = icmp ugt i16 %X, 1330
    // because %A may have negative value.
    //
    // However, we allow this when the compare is EQ/NE, because they are
    // signless.
    if (isSignedExt == isSignedCmp || ICI.isEquality())
      return new ICmpInst(*Context, ICI.getPredicate(), LHSCIOp, Res1);
    return 0;
  }

  // The re-extended constant changed so the constant cannot be represented
  // in the shorter type. Consequently, we cannot emit a simple comparison.

  // First, handle some easy cases. We know the result cannot be equal at this
  // point so handle the ICI.isEquality() cases.
  if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
    return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
  if (ICI.getPredicate() == ICmpInst::ICMP_NE)
    return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));

  // Evaluate the comparison for LT (we invert for GT below). LE and GE cases
  // should have been folded away previously and not enter in here.
  Value *Result;
  if (isSignedCmp) {
    // We're performing a signed comparison.
    if (cast<ConstantInt>(CI)->getValue().isNegative())
      Result = ConstantInt::getFalse(*Context); // X < (small) --> false
    else
      Result = ConstantInt::getTrue(*Context);  // X < (large) --> true
  } else {
    // We're performing an unsigned comparison.
    if (isSignedExt) {
      // We're performing an unsigned comp with a sign extended value.
      // This is true if the input is >= 0. [aka >s -1]
      Constant *NegOne = Constant::getAllOnesValue(SrcTy);
      Result = InsertNewInstBefore(new ICmpInst(*Context, ICmpInst::ICMP_SGT,
                                     LHSCIOp, NegOne, ICI.getName()), ICI);
    } else {
      // Unsigned extend & unsigned compare -> always true.
      Result = ConstantInt::getTrue(*Context);
    }
  }

  // Finally, return the value computed.
  if (ICI.getPredicate() == ICmpInst::ICMP_ULT ||
      ICI.getPredicate() == ICmpInst::ICMP_SLT)
    return ReplaceInstUsesWith(ICI, Result);

  assert((ICI.getPredicate()==ICmpInst::ICMP_UGT ||
          ICI.getPredicate()==ICmpInst::ICMP_SGT) &&
         "ICmp should be folded!");
  if (Constant *CI = dyn_cast<Constant>(Result))
    return ReplaceInstUsesWith(ICI, ConstantExpr::getNot(CI));
  return BinaryOperator::CreateNot(*Context, Result);
}
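// Worked examples for the cast folds above (hypothetical i8 operands):
// "icmp ult (zext i8 %a to i32), (zext i8 %b to i32)" becomes
// "icmp ult i8 %a, %b", and "icmp ult (zext i8 %a to i32), 300" folds to
// true because the re-extended constant changes (300 does not fit in i8) and
// every zext'd i8 value is below it.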
Instruction *InstCombiner::visitShl(BinaryOperator &I) {
  return commonShiftTransforms(I);
}

Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
  return commonShiftTransforms(I);
}

Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
  if (Instruction *R = commonShiftTransforms(I))
    return R;

  Value *Op0 = I.getOperand(0);

  // ashr int -1, X = -1   (for any arithmetic shift rights of ~0)
  if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
    if (CSI->isAllOnesValue())
      return ReplaceInstUsesWith(I, CSI);

  // See if we can turn a signed shr into an unsigned shr.
  if (MaskedValueIsZero(Op0,
                      APInt::getSignBit(I.getType()->getScalarSizeInBits())))
    return BinaryOperator::CreateLShr(Op0, I.getOperand(1));

  // Arithmetic shifting an all-sign-bit value is a no-op.
  unsigned NumSignBits = ComputeNumSignBits(Op0);
  if (NumSignBits == Op0->getType()->getScalarSizeInBits())
    return ReplaceInstUsesWith(I, Op0);

  return 0;
}
Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
  assert(I.getOperand(1)->getType() == I.getOperand(0)->getType());
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // shl X, 0 == X and shr X, 0 == X
  // shl 0, X == 0 and shr 0, X == 0
  if (Op1 == Constant::getNullValue(Op1->getType()) ||
      Op0 == Constant::getNullValue(Op0->getType()))
    return ReplaceInstUsesWith(I, Op0);

  if (isa<UndefValue>(Op0)) {
    if (I.getOpcode() == Instruction::AShr) // undef >>s X -> undef
      return ReplaceInstUsesWith(I, Op0);
    else                                    // undef << X -> 0, undef >>u X -> 0
      return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
  }
  if (isa<UndefValue>(Op1)) {
    if (I.getOpcode() == Instruction::AShr)  // X >>s undef -> X
      return ReplaceInstUsesWith(I, Op0);
    else                                     // X << undef, X >>u undef -> 0
      return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
  }

  // See if we can fold away this shift.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // Try to fold constant and into select arguments.
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI, this))
        return R;

  if (ConstantInt *CUI = dyn_cast<ConstantInt>(Op1))
    if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
      return Res;
  return 0;
}
Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
                                               BinaryOperator &I) {
  bool isLeftShift = I.getOpcode() == Instruction::Shl;

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  uint32_t TypeBits = Op0->getType()->getScalarSizeInBits();

  // shl i32 X, 32 = 0 and srl i8 Y, 9 = 0, ... just don't eliminate
  // the shift for an ashr, since an arithmetic shift of a negative value
  // produces the sign bit, not zero.
  if (Op1->uge(TypeBits)) {
    if (I.getOpcode() != Instruction::AShr)
      return ReplaceInstUsesWith(I, Constant::getNullValue(Op0->getType()));
    else {
      I.setOperand(1, ConstantInt::get(I.getType(), TypeBits-1));
      return &I;
    }
  }
  // ((X*C1) << C2) == (X * (C1 << C2))
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0))
    if (BO->getOpcode() == Instruction::Mul && isLeftShift)
      if (Constant *BOOp = dyn_cast<Constant>(BO->getOperand(1)))
        return BinaryOperator::CreateMul(BO->getOperand(0),
                                         ConstantExpr::getShl(BOOp, Op1));

  // Try to fold constant and into select arguments.
  if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
    if (Instruction *R = FoldOpIntoSelect(I, SI, this))
      return R;
  if (isa<PHINode>(Op0))
    if (Instruction *NV = FoldOpIntoPhi(I))
      return NV;
  // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
  if (TruncInst *TI = dyn_cast<TruncInst>(Op0)) {
    Instruction *TrOp = dyn_cast<Instruction>(TI->getOperand(0));
    // If 'shift2' is an ashr, we would have to get the sign bit into a funny
    // place.  Don't try to do this transformation in this case.  Also, we
    // require that the input operand is a shift-by-constant so that we have
    // confidence that the shifts will get folded together.  We could do this
    // xform in more cases, but it is unlikely to be profitable.
    if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
        isa<ConstantInt>(TrOp->getOperand(1))) {
      // Okay, we'll do this xform.  Make the shift of shift.
      Constant *ShAmt = ConstantExpr::getZExt(Op1, TrOp->getType());
      Instruction *NSh = BinaryOperator::Create(I.getOpcode(), TrOp, ShAmt,
                                                I.getName());
      InsertNewInstBefore(NSh, I); // (shift2 (shift1 & 0x00FF), c2)

      // For logical shifts, the truncation has the effect of making the high
      // part of the register be zeros.  Emulate this by inserting an AND to
      // clear the top bits as needed.  This 'and' will usually be zapped by
      // other xforms later if dead.
      unsigned SrcSize = TrOp->getType()->getScalarSizeInBits();
      unsigned DstSize = TI->getType()->getScalarSizeInBits();
      APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize));

      // The mask we constructed says what the trunc would do if occurring
      // between the shifts.  We want to know the effect *after* the second
      // shift.  We know that it is a logical shift by a constant, so adjust the
      // mask as appropriate.
      if (I.getOpcode() == Instruction::Shl)
        MaskV <<= Op1->getZExtValue();
      else {
        assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
        MaskV = MaskV.lshr(Op1->getZExtValue());
      }

      Instruction *And =
        BinaryOperator::CreateAnd(NSh, ConstantInt::get(*Context, MaskV),
                                  TI->getName());
      InsertNewInstBefore(And, I); // shift1 & 0x00FF

      // Return the value truncated to the interesting size.
      return new TruncInst(And, I.getType());
    }
  }
  if (Op0->hasOneUse()) {
    if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
      // Turn ((X >> C) + Y) << C  ->  (X + (Y << C)) & (~0 << C)
      Value *V1, *V2;
      ConstantInt *CC;
      switch (Op0BO->getOpcode()) {
      default: break;
      case Instruction::Add:
      case Instruction::And:
      case Instruction::Or:
      case Instruction::Xor: {
        // These operators commute.
        // Turn (Y + (X >> C)) << C  ->  (X + (Y << C)) & (~0 << C)
        if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
            match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
                                              m_Specific(Op1)), *Context)){
          Instruction *YS = BinaryOperator::CreateShl(
                                            Op0BO->getOperand(0), Op1,
                                            Op0BO->getName());
          InsertNewInstBefore(YS, I); // (Y << C)
          Instruction *X =
            BinaryOperator::Create(Op0BO->getOpcode(), YS, V1,
                                   Op0BO->getOperand(1)->getName());
          InsertNewInstBefore(X, I);  // (X + (Y << C))
          uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
          return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context,
                     APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
        }

        // Turn (Y + ((X >> C) & CC)) << C  ->  ((X & (CC << C)) + (Y << C))
        Value *Op0BOOp1 = Op0BO->getOperand(1);
        if (isLeftShift && Op0BOOp1->hasOneUse() &&
            match(Op0BOOp1,
                  m_And(m_Shr(m_Value(V1), m_Specific(Op1)),
                        m_ConstantInt(CC)), *Context) &&
            cast<BinaryOperator>(Op0BOOp1)->getOperand(0)->hasOneUse()) {
          Instruction *YS = BinaryOperator::CreateShl(
                                            Op0BO->getOperand(0), Op1,
                                            Op0BO->getName());
          InsertNewInstBefore(YS, I); // (Y << C)
          Instruction *XM =
            BinaryOperator::CreateAnd(V1,
                                      ConstantExpr::getShl(CC, Op1),
                                      V1->getName()+".mask");
          InsertNewInstBefore(XM, I); // X & (CC << C)

          return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
        }
        break;
      }

      case Instruction::Sub: {
        // Turn ((X >> C) + Y) << C  ->  (X + (Y << C)) & (~0 << C)
        if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
            match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
                                              m_Specific(Op1)), *Context)){
          Instruction *YS = BinaryOperator::CreateShl(
                                            Op0BO->getOperand(1), Op1,
                                            Op0BO->getName());
          InsertNewInstBefore(YS, I); // (Y << C)
          Instruction *X =
            BinaryOperator::Create(Op0BO->getOpcode(), V1, YS,
                                   Op0BO->getOperand(0)->getName());
          InsertNewInstBefore(X, I);  // (X + (Y << C))
          uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
          return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context,
                     APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
        }

        // Turn (((X >> C)&CC) + Y) << C  ->  (X + (Y << C)) & (CC << C)
        if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
            match(Op0BO->getOperand(0),
                  m_And(m_Shr(m_Value(V1), m_Value(V2)),
                        m_ConstantInt(CC)), *Context) && V2 == Op1 &&
            cast<BinaryOperator>(Op0BO->getOperand(0))
                ->getOperand(0)->hasOneUse()) {
          Instruction *YS = BinaryOperator::CreateShl(
                                            Op0BO->getOperand(1), Op1,
                                            Op0BO->getName());
          InsertNewInstBefore(YS, I); // (Y << C)
          Instruction *XM =
            BinaryOperator::CreateAnd(V1,
                                      ConstantExpr::getShl(CC, Op1),
                                      V1->getName()+".mask");
          InsertNewInstBefore(XM, I); // X & (CC << C)

          return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
        }
        break;
      }
      }

      // If the operand is a bitwise operator with a constant RHS, and the
      // shift is the only use, we can pull it out of the shift.
      if (ConstantInt *Op0C = dyn_cast<ConstantInt>(Op0BO->getOperand(1))) {
        bool isValid = true;     // Valid only for And, Or, Xor
        bool highBitSet = false; // Transform if high bit of constant set?

        switch (Op0BO->getOpcode()) {
        default: isValid = false; break;   // Do not perform transform!
        case Instruction::Add:
          isValid = isLeftShift;
          break;
        case Instruction::Or:
        case Instruction::Xor:
          highBitSet = false;
          break;
        case Instruction::And:
          highBitSet = true;
          break;
        }

        // If this is a signed shift right, and the high bit is modified
        // by the logical operation, do not perform the transformation.
        // The highBitSet boolean indicates the value of the high bit of
        // the constant which would cause it to be modified for this
        // operation.
        if (isValid && I.getOpcode() == Instruction::AShr)
          isValid = Op0C->getValue()[TypeBits-1] == highBitSet;

        if (isValid) {
          Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1);

          Instruction *NewShift =
            BinaryOperator::Create(I.getOpcode(), Op0BO->getOperand(0), Op1);
          InsertNewInstBefore(NewShift, I);
          NewShift->takeName(Op0BO);

          return BinaryOperator::Create(Op0BO->getOpcode(), NewShift,
                                        NewRHS);
        }
      }
    }
  }
  // Find out if this is a shift of a shift by a constant.
  BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
  if (ShiftOp && !ShiftOp->isShift())
    ShiftOp = 0;

  if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {
    ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
    uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
    uint32_t ShiftAmt2 = Op1->getLimitedValue(TypeBits);
    assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
    if (ShiftAmt1 == 0) return 0;  // Will be simplified in the future.
    Value *X = ShiftOp->getOperand(0);

    uint32_t AmtSum = ShiftAmt1+ShiftAmt2;   // Fold into one big shift.

    const IntegerType *Ty = cast<IntegerType>(I.getType());

    // Check for (X << c1) << c2  and  (X >> c1) >> c2
    if (I.getOpcode() == ShiftOp->getOpcode()) {
      // If this is an oversized composite shift, then unsigned shifts get 0
      // and ashr saturates.
      if (AmtSum >= TypeBits) {
        if (I.getOpcode() != Instruction::AShr)
          return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
        AmtSum = TypeBits-1;  // Saturate to 31 for i32 ashr.
      }

      return BinaryOperator::Create(I.getOpcode(), X,
                                    ConstantInt::get(Ty, AmtSum));
    } else if (ShiftOp->getOpcode() == Instruction::LShr &&
               I.getOpcode() == Instruction::AShr) {
      if (AmtSum >= TypeBits)
        return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

      // ((X >>u C1) >>s C2) -> (X >>u (C1+C2))  since C1 != 0.
      return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
    } else if (ShiftOp->getOpcode() == Instruction::AShr &&
               I.getOpcode() == Instruction::LShr) {
      // ((X >>s C1) >>u C2) -> ((X >>s (C1+C2)) & mask) since C1 != 0.
      if (AmtSum >= TypeBits)
        AmtSum = TypeBits-1;

      Instruction *Shift =
        BinaryOperator::CreateAShr(X, ConstantInt::get(Ty, AmtSum));
      InsertNewInstBefore(Shift, I);

      APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
      return BinaryOperator::CreateAnd(Shift, ConstantInt::get(*Context, Mask));
    }
7655 // Okay, if we get here, one shift must be left, and the other shift must be
7656 // right. See if the amounts are equal.
7657 if (ShiftAmt1
== ShiftAmt2
) {
7658 // If we have ((X >>? C) << C), turn this into X & (-1 << C).
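// e.g. for i32 with C == 8:  (X >>u 8) << 8  -->  X & 0xFFFFFF00.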
7659 if (I
.getOpcode() == Instruction::Shl
) {
7660 APInt
Mask(APInt::getHighBitsSet(TypeBits
, TypeBits
- ShiftAmt1
));
7661 return BinaryOperator::CreateAnd(X
, ConstantInt::get(*Context
, Mask
));
7663 // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
7664 if (I
.getOpcode() == Instruction::LShr
) {
7665 APInt
Mask(APInt::getLowBitsSet(TypeBits
, TypeBits
- ShiftAmt1
));
7666 return BinaryOperator::CreateAnd(X
, ConstantInt::get(*Context
, Mask
));
7668 // We can simplify ((X << C) >>s C) into a trunc + sext.
7669 // NOTE: we could do this for any C, but that would make 'unusual' integer
7670 // types. For now, just stick to ones well-supported by the code generator.
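// e.g. for i32 with C == 24:  (X << 24) >>s 24  -->  sext (trunc X to i8) to i32.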
7672 const Type
*SExtType
= 0;
7673 switch (Ty
->getBitWidth() - ShiftAmt1
) {
7680 SExtType
= IntegerType::get(Ty
->getBitWidth() - ShiftAmt1
);
7685 Instruction
*NewTrunc
= new TruncInst(X
, SExtType
, "sext");
7686 InsertNewInstBefore(NewTrunc
, I
);
7687 return new SExtInst(NewTrunc
, Ty
);
7689 // Otherwise, we can't handle it yet.
7690 } else if (ShiftAmt1
< ShiftAmt2
) {
7691 uint32_t ShiftDiff
= ShiftAmt2
-ShiftAmt1
;
7693 // (X >>? C1) << C2 --> X << (C2-C1) & (-1 << C2)
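// e.g. for i32 with C1 == 2, C2 == 5:
//   (X >>u 2) << 5  -->  (X << 3) & 0xFFFFFFE0.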
7694 if (I
.getOpcode() == Instruction::Shl
) {
7695 assert(ShiftOp
->getOpcode() == Instruction::LShr
||
7696 ShiftOp
->getOpcode() == Instruction::AShr
);
7697 Instruction
*Shift
=
7698 BinaryOperator::CreateShl(X
, ConstantInt::get(Ty
, ShiftDiff
));
7699 InsertNewInstBefore(Shift
, I
);
7701 APInt
Mask(APInt::getHighBitsSet(TypeBits
, TypeBits
- ShiftAmt2
));
7702 return BinaryOperator::CreateAnd(Shift
,
7703 ConstantInt::get(*Context
, Mask
));
7706 // (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2)
7707 if (I
.getOpcode() == Instruction::LShr
) {
7708 assert(ShiftOp
->getOpcode() == Instruction::Shl
);
7709 Instruction
*Shift
=
7710 BinaryOperator::CreateLShr(X
, ConstantInt::get(Ty
, ShiftDiff
));
7711 InsertNewInstBefore(Shift
, I
);
7713 APInt
Mask(APInt::getLowBitsSet(TypeBits
, TypeBits
- ShiftAmt2
));
7714 return BinaryOperator::CreateAnd(Shift
,
7715 ConstantInt::get(*Context
, Mask
));
7718 // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in.
7720 assert(ShiftAmt2
< ShiftAmt1
);
7721 uint32_t ShiftDiff
= ShiftAmt1
-ShiftAmt2
;
7723 // (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)
7724 if (I
.getOpcode() == Instruction::Shl
) {
7725 assert(ShiftOp
->getOpcode() == Instruction::LShr
||
7726 ShiftOp
->getOpcode() == Instruction::AShr
);
7727 Instruction
*Shift
=
7728 BinaryOperator::Create(ShiftOp
->getOpcode(), X
,
7729 ConstantInt::get(Ty
, ShiftDiff
));
7730 InsertNewInstBefore(Shift
, I
);
7732 APInt
Mask(APInt::getHighBitsSet(TypeBits
, TypeBits
- ShiftAmt2
));
7733 return BinaryOperator::CreateAnd(Shift
,
7734 ConstantInt::get(*Context
, Mask
));
7737 // (X << C1) >>u C2 --> X << (C1-C2) & (-1 >> C2)
7738 if (I
.getOpcode() == Instruction::LShr
) {
7739 assert(ShiftOp
->getOpcode() == Instruction::Shl
);
7740 Instruction
*Shift
=
7741 BinaryOperator::CreateShl(X
, ConstantInt::get(Ty
, ShiftDiff
));
7742 InsertNewInstBefore(Shift
, I
);
7744 APInt
Mask(APInt::getLowBitsSet(TypeBits
, TypeBits
- ShiftAmt2
));
7745 return BinaryOperator::CreateAnd(Shift
,
7746 ConstantInt::get(*Context
, Mask
));
7749 // We can't handle (X << C1) >>a C2, it shifts arbitrary bits in.
/// DecomposeSimpleLinearExpr - Analyze 'Val', seeing if it is a simple linear
/// expression.  If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
                                        int &Offset, LLVMContext *Context) {
  assert(Val->getType() == Type::Int32Ty && "Unexpected allocation size type!");
7763 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Val
)) {
7764 Offset
= CI
->getZExtValue();
7766 return ConstantInt::get(Type::Int32Ty
, 0);
7767 } else if (BinaryOperator
*I
= dyn_cast
<BinaryOperator
>(Val
)) {
7768 if (ConstantInt
*RHS
= dyn_cast
<ConstantInt
>(I
->getOperand(1))) {
7769 if (I
->getOpcode() == Instruction::Shl
) {
7770 // This is a value scaled by '1 << the shift amt'.
7771 Scale
= 1U << RHS
->getZExtValue();
7773 return I
->getOperand(0);
7774 } else if (I
->getOpcode() == Instruction::Mul
) {
7775 // This value is scaled by 'RHS'.
7776 Scale
= RHS
->getZExtValue();
7778 return I
->getOperand(0);
7779 } else if (I
->getOpcode() == Instruction::Add
) {
7780 // We have X+C. Check to see if we really have (X*C2)+C1,
7781 // where C1 is divisible by C2.
7784 DecomposeSimpleLinearExpr(I
->getOperand(0), SubScale
,
7786 Offset
+= RHS
->getZExtValue();
7793 // Otherwise, we can't look past this.
7800 /// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
7801 /// try to eliminate the cast by moving the type information into the alloc.
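/// For example, a bitcast of an 'alloca [4 x i8]' to i32* can become an
/// 'alloca i32' that is then used directly through the new type.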
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
                                                   AllocationInst &AI) {
  const PointerType *PTy = cast<PointerType>(CI.getType());
  // Remove any uses of AI that are dead.
  assert(!CI.use_empty() && "Dead instructions should be removed earlier!");

  for (Value::use_iterator UI = AI.use_begin(), E = AI.use_end(); UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    if (isInstructionTriviallyDead(User)) {
      while (UI != E && *UI == User)
        ++UI; // If this instruction uses AI more than once, don't break UI.

      DOUT << "IC: DCE: " << *User;
      EraseInstFromFunction(*User);
    }
  }
7821 // This requires TargetData to get the alloca alignment and size information.
7824 // Get the type really allocated and the type casted to.
7825 const Type
*AllocElTy
= AI
.getAllocatedType();
7826 const Type
*CastElTy
= PTy
->getElementType();
7827 if (!AllocElTy
->isSized() || !CastElTy
->isSized()) return 0;
7829 unsigned AllocElTyAlign
= TD
->getABITypeAlignment(AllocElTy
);
7830 unsigned CastElTyAlign
= TD
->getABITypeAlignment(CastElTy
);
7831 if (CastElTyAlign
< AllocElTyAlign
) return 0;
7833 // If the allocation has multiple uses, only promote it if we are strictly
7834 // increasing the alignment of the resultant allocation. If we keep it the
7835 // same, we open the door to infinite loops of various kinds. (A reference
7836 // from a dbg.declare doesn't count as a use for this purpose.)
7837 if (!AI
.hasOneUse() && !hasOneUsePlusDeclare(&AI
) &&
7838 CastElTyAlign
== AllocElTyAlign
) return 0;
7840 uint64_t AllocElTySize
= TD
->getTypeAllocSize(AllocElTy
);
7841 uint64_t CastElTySize
= TD
->getTypeAllocSize(CastElTy
);
7842 if (CastElTySize
== 0 || AllocElTySize
== 0) return 0;
7844 // See if we can satisfy the modulus by pulling a scale out of the array
7846 unsigned ArraySizeScale
;
7848 Value
*NumElements
= // See if the array size is a decomposable linear expr.
7849 DecomposeSimpleLinearExpr(AI
.getOperand(0), ArraySizeScale
,
7850 ArrayOffset
, Context
);
7852 // If we can now satisfy the modulus, by using a non-1 scale, we really can
7854 if ((AllocElTySize
*ArraySizeScale
) % CastElTySize
!= 0 ||
7855 (AllocElTySize
*ArrayOffset
) % CastElTySize
!= 0) return 0;
7857 unsigned Scale
= (AllocElTySize
*ArraySizeScale
)/CastElTySize
;
7862 // If the allocation size is constant, form a constant mul expression
7863 Amt
= ConstantInt::get(Type::Int32Ty
, Scale
);
7864 if (isa
<ConstantInt
>(NumElements
))
7865 Amt
= ConstantExpr::getMul(cast
<ConstantInt
>(NumElements
),
7866 cast
<ConstantInt
>(Amt
));
7867 // otherwise multiply the amount and the number of elements
7869 Instruction
*Tmp
= BinaryOperator::CreateMul(Amt
, NumElements
, "tmp");
7870 Amt
= InsertNewInstBefore(Tmp
, AI
);
7874 if (int Offset
= (AllocElTySize
*ArrayOffset
)/CastElTySize
) {
7875 Value
*Off
= ConstantInt::get(Type::Int32Ty
, Offset
, true);
7876 Instruction
*Tmp
= BinaryOperator::CreateAdd(Amt
, Off
, "tmp");
7877 Amt
= InsertNewInstBefore(Tmp
, AI
);
7880 AllocationInst
*New
;
7881 if (isa
<MallocInst
>(AI
))
7882 New
= new MallocInst(CastElTy
, Amt
, AI
.getAlignment());
7884 New
= new AllocaInst(CastElTy
, Amt
, AI
.getAlignment());
7885 InsertNewInstBefore(New
, AI
);
7888 // If the allocation has one real use plus a dbg.declare, just remove the
7890 if (DbgDeclareInst
*DI
= hasOneUsePlusDeclare(&AI
)) {
7891 EraseInstFromFunction(*DI
);
7893 // If the allocation has multiple real uses, insert a cast and change all
7894 // things that used it to use the new cast. This will also hack on CI, but it
7896 else if (!AI
.hasOneUse()) {
7897 AddUsesToWorkList(AI
);
7898 // New is the allocation instruction, pointer typed. AI is the original
7899 // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
7900 CastInst
*NewCast
= new BitCastInst(New
, AI
.getType(), "tmpcast");
7901 InsertNewInstBefore(NewCast
, AI
);
7902 AI
.replaceAllUsesWith(NewCast
);
7904 return ReplaceInstUsesWith(CI
, New
);
/// CanEvaluateInDifferentType - Return true if we can take the specified value
/// and return it as type Ty without inserting any new casts and without
/// changing the computed value.  This is used by code that tries to decide
/// whether promoting or shrinking integer operations to wider or smaller types
/// will allow us to eliminate a truncate or extend.
///
/// This is a truncation operation if Ty is smaller than V->getType(), or an
/// extension operation if Ty is larger.
///
/// If CastOpc is a truncation, then Ty will be a type smaller than V.  We
/// should return true if trunc(V) can be computed by computing V in the
/// smaller type.  If V is an instruction, then trunc(inst(x,y)) can be
/// computed as inst(trunc(x),trunc(y)), which only makes sense if x and y can
/// be efficiently truncated.
///
/// If CastOpc is a sext or zext, we are asking if the low bits of the value
/// can be computed in a larger type, which is then and'd or sext_in_reg'd to
/// get the final result.
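/// For example, trunc (add i32 %x, %y) to i8 can be computed as
/// add i8 (trunc %x), (trunc %y) when %x and %y are cheap to truncate.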
7925 bool InstCombiner::CanEvaluateInDifferentType(Value
*V
, const Type
*Ty
,
7927 int &NumCastsRemoved
){
7928 // We can always evaluate constants in another type.
7929 if (isa
<Constant
>(V
))
7932 Instruction
*I
= dyn_cast
<Instruction
>(V
);
7933 if (!I
) return false;
7935 const Type
*OrigTy
= V
->getType();
7937 // If this is an extension or truncate, we can often eliminate it.
7938 if (isa
<TruncInst
>(I
) || isa
<ZExtInst
>(I
) || isa
<SExtInst
>(I
)) {
7939 // If this is a cast from the destination type, we can trivially eliminate
7940 // it, and this will remove a cast overall.
7941 if (I
->getOperand(0)->getType() == Ty
) {
7942 // If the first operand is itself a cast, and is eliminable, do not count
7943 // this as an eliminable cast. We would prefer to eliminate those two
7945 if (!isa
<CastInst
>(I
->getOperand(0)) && I
->hasOneUse())
7951 // We can't extend or shrink something that has multiple uses: doing so would
7952 // require duplicating the instruction in general, which isn't profitable.
7953 if (!I
->hasOneUse()) return false;
7955 unsigned Opc
= I
->getOpcode();
7957 case Instruction::Add
:
7958 case Instruction::Sub
:
7959 case Instruction::Mul
:
7960 case Instruction::And
:
7961 case Instruction::Or
:
7962 case Instruction::Xor
:
7963 // These operators can all arbitrarily be extended or truncated.
7964 return CanEvaluateInDifferentType(I
->getOperand(0), Ty
, CastOpc
,
7966 CanEvaluateInDifferentType(I
->getOperand(1), Ty
, CastOpc
,
7969 case Instruction::UDiv
:
7970 case Instruction::URem
: {
7971 // UDiv and URem can be truncated if all the truncated bits are zero.
7972 uint32_t OrigBitWidth
= OrigTy
->getScalarSizeInBits();
7973 uint32_t BitWidth
= Ty
->getScalarSizeInBits();
7974 if (BitWidth
< OrigBitWidth
) {
7975 APInt Mask
= APInt::getHighBitsSet(OrigBitWidth
, OrigBitWidth
-BitWidth
);
7976 if (MaskedValueIsZero(I
->getOperand(0), Mask
) &&
7977 MaskedValueIsZero(I
->getOperand(1), Mask
)) {
7978 return CanEvaluateInDifferentType(I
->getOperand(0), Ty
, CastOpc
,
7980 CanEvaluateInDifferentType(I
->getOperand(1), Ty
, CastOpc
,
7986 case Instruction::Shl
:
7987 // If we are truncating the result of this SHL, and if it's a shift of a
7988 // constant amount, we can always perform a SHL in a smaller type.
7989 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(I
->getOperand(1))) {
7990 uint32_t BitWidth
= Ty
->getScalarSizeInBits();
7991 if (BitWidth
< OrigTy
->getScalarSizeInBits() &&
7992 CI
->getLimitedValue(BitWidth
) < BitWidth
)
7993 return CanEvaluateInDifferentType(I
->getOperand(0), Ty
, CastOpc
,
7997 case Instruction::LShr
:
7998 // If this is a truncate of a logical shr, we can truncate it to a smaller
7999 // lshr iff we know that the bits we would otherwise be shifting in are
8001 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(I
->getOperand(1))) {
8002 uint32_t OrigBitWidth
= OrigTy
->getScalarSizeInBits();
8003 uint32_t BitWidth
= Ty
->getScalarSizeInBits();
8004 if (BitWidth
< OrigBitWidth
&&
8005 MaskedValueIsZero(I
->getOperand(0),
8006 APInt::getHighBitsSet(OrigBitWidth
, OrigBitWidth
-BitWidth
)) &&
8007 CI
->getLimitedValue(BitWidth
) < BitWidth
) {
8008 return CanEvaluateInDifferentType(I
->getOperand(0), Ty
, CastOpc
,
8013 case Instruction::ZExt
:
8014 case Instruction::SExt
:
8015 case Instruction::Trunc
:
8016 // If this is the same kind of case as our original (e.g. zext+zext), we
8017 // can safely replace it. Note that replacing it does not reduce the number
8018 // of casts in the input.
8022 // sext (zext ty1), ty2 -> zext ty2
8023 if (CastOpc
== Instruction::SExt
&& Opc
== Instruction::ZExt
)
8026 case Instruction::Select
: {
8027 SelectInst
*SI
= cast
<SelectInst
>(I
);
8028 return CanEvaluateInDifferentType(SI
->getTrueValue(), Ty
, CastOpc
,
8030 CanEvaluateInDifferentType(SI
->getFalseValue(), Ty
, CastOpc
,
8033 case Instruction::PHI
: {
8034 // We can change a phi if we can change all operands.
8035 PHINode
*PN
= cast
<PHINode
>(I
);
8036 for (unsigned i
= 0, e
= PN
->getNumIncomingValues(); i
!= e
; ++i
)
8037 if (!CanEvaluateInDifferentType(PN
->getIncomingValue(i
), Ty
, CastOpc
,
8043 // TODO: Can handle more cases here.
8050 /// EvaluateInDifferentType - Given an expression that
8051 /// CanEvaluateInDifferentType returns true for, actually insert the code to
8052 /// evaluate the expression.
8053 Value
*InstCombiner::EvaluateInDifferentType(Value
*V
, const Type
*Ty
,
8055 if (Constant
*C
= dyn_cast
<Constant
>(V
))
8056 return ConstantExpr::getIntegerCast(C
, Ty
,
8057 isSigned
/*Sext or ZExt*/);
8059 // Otherwise, it must be an instruction.
8060 Instruction
*I
= cast
<Instruction
>(V
);
8061 Instruction
*Res
= 0;
8062 unsigned Opc
= I
->getOpcode();
8064 case Instruction::Add
:
8065 case Instruction::Sub
:
8066 case Instruction::Mul
:
8067 case Instruction::And
:
8068 case Instruction::Or
:
8069 case Instruction::Xor
:
8070 case Instruction::AShr
:
8071 case Instruction::LShr
:
8072 case Instruction::Shl
:
8073 case Instruction::UDiv
:
8074 case Instruction::URem
: {
8075 Value
*LHS
= EvaluateInDifferentType(I
->getOperand(0), Ty
, isSigned
);
8076 Value
*RHS
= EvaluateInDifferentType(I
->getOperand(1), Ty
, isSigned
);
8077 Res
= BinaryOperator::Create((Instruction::BinaryOps
)Opc
, LHS
, RHS
);
8080 case Instruction::Trunc
:
8081 case Instruction::ZExt
:
8082 case Instruction::SExt
:
8083 // If the source type of the cast is the type we're trying for then we can
8084 // just return the source. There's no need to insert it because it is not
8086 if (I
->getOperand(0)->getType() == Ty
)
8087 return I
->getOperand(0);
8089 // Otherwise, must be the same type of cast, so just reinsert a new one.
8090 Res
= CastInst::Create(cast
<CastInst
>(I
)->getOpcode(), I
->getOperand(0),
8093 case Instruction::Select
: {
8094 Value
*True
= EvaluateInDifferentType(I
->getOperand(1), Ty
, isSigned
);
8095 Value
*False
= EvaluateInDifferentType(I
->getOperand(2), Ty
, isSigned
);
8096 Res
= SelectInst::Create(I
->getOperand(0), True
, False
);
8099 case Instruction::PHI
: {
8100 PHINode
*OPN
= cast
<PHINode
>(I
);
8101 PHINode
*NPN
= PHINode::Create(Ty
);
8102 for (unsigned i
= 0, e
= OPN
->getNumIncomingValues(); i
!= e
; ++i
) {
8103 Value
*V
=EvaluateInDifferentType(OPN
->getIncomingValue(i
), Ty
, isSigned
);
8104 NPN
->addIncoming(V
, OPN
->getIncomingBlock(i
));
8110 // TODO: Can handle more cases here.
8111 llvm_unreachable("Unreachable!");
8116 return InsertNewInstBefore(Res
, *I
);
8119 /// @brief Implement the transforms common to all CastInst visitors.
8120 Instruction
*InstCombiner::commonCastTransforms(CastInst
&CI
) {
8121 Value
*Src
= CI
.getOperand(0);
8123 // Many cases of "cast of a cast" are eliminable. If it's eliminable we just
8124 // eliminate it now.
8125 if (CastInst
*CSrc
= dyn_cast
<CastInst
>(Src
)) { // A->B->C cast
8126 if (Instruction::CastOps opc
=
8127 isEliminableCastPair(CSrc
, CI
.getOpcode(), CI
.getType(), TD
)) {
8128 // The first cast (CSrc) is eliminable so we need to fix up or replace
8129 // the second cast (CI). CSrc will then have a good chance of being dead.
8130 return CastInst::Create(opc
, CSrc
->getOperand(0), CI
.getType());
8134 // If we are casting a select then fold the cast into the select
8135 if (SelectInst
*SI
= dyn_cast
<SelectInst
>(Src
))
8136 if (Instruction
*NV
= FoldOpIntoSelect(CI
, SI
, this))
8139 // If we are casting a PHI then fold the cast into the PHI
8140 if (isa
<PHINode
>(Src
))
8141 if (Instruction
*NV
= FoldOpIntoPhi(CI
))
/// FindElementAtOffset - Given a type and a constant offset, determine whether
/// or not there is a sequence of GEP indices into the type that will land us at
/// the specified offset.  If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
                                       SmallVectorImpl<Value*> &NewIndices,
                                       const TargetData *TD,
                                       LLVMContext *Context) {
  if (!Ty->isSized()) return 0;
8158 // Start with the index over the outer type. Note that the type size
8159 // might be zero (even if the offset isn't zero) if the indexed type
8160 // is something like [0 x {int, int}]
8161 const Type
*IntPtrTy
= TD
->getIntPtrType();
8162 int64_t FirstIdx
= 0;
8163 if (int64_t TySize
= TD
->getTypeAllocSize(Ty
)) {
8164 FirstIdx
= Offset
/TySize
;
8165 Offset
-= FirstIdx
*TySize
;
8167 // Handle hosts where % returns negative instead of values [0..TySize).
8171 assert(Offset
>= 0);
8173 assert((uint64_t)Offset
< (uint64_t)TySize
&& "Out of range offset");
8176 NewIndices
.push_back(ConstantInt::get(IntPtrTy
, FirstIdx
));
8178 // Index into the types. If we fail, set OrigBase to null.
8180 // Indexing into tail padding between struct/array elements.
8181 if (uint64_t(Offset
*8) >= TD
->getTypeSizeInBits(Ty
))
8184 if (const StructType
*STy
= dyn_cast
<StructType
>(Ty
)) {
8185 const StructLayout
*SL
= TD
->getStructLayout(STy
);
8186 assert(Offset
< (int64_t)SL
->getSizeInBytes() &&
8187 "Offset must stay within the indexed type");
8189 unsigned Elt
= SL
->getElementContainingOffset(Offset
);
8190 NewIndices
.push_back(ConstantInt::get(Type::Int32Ty
, Elt
));
8192 Offset
-= SL
->getElementOffset(Elt
);
8193 Ty
= STy
->getElementType(Elt
);
8194 } else if (const ArrayType
*AT
= dyn_cast
<ArrayType
>(Ty
)) {
8195 uint64_t EltSize
= TD
->getTypeAllocSize(AT
->getElementType());
8196 assert(EltSize
&& "Cannot index into a zero-sized array");
8197 NewIndices
.push_back(ConstantInt::get(IntPtrTy
,Offset
/EltSize
));
8199 Ty
= AT
->getElementType();
8201 // Otherwise, we can't index into the middle of this atomic type, bail.
8209 /// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
8210 Instruction
*InstCombiner::commonPointerCastTransforms(CastInst
&CI
) {
8211 Value
*Src
= CI
.getOperand(0);
8213 if (GetElementPtrInst
*GEP
= dyn_cast
<GetElementPtrInst
>(Src
)) {
8214 // If casting the result of a getelementptr instruction with no offset, turn
8215 // this into a cast of the original pointer!
8216 if (GEP
->hasAllZeroIndices()) {
8217 // Changing the cast operand is usually not a good idea but it is safe
8218 // here because the pointer operand is being replaced with another
8219 // pointer operand so the opcode doesn't need to change.
8221 CI
.setOperand(0, GEP
->getOperand(0));
8225 // If the GEP has a single use, and the base pointer is a bitcast, and the
8226 // GEP computes a constant offset, see if we can convert these three
8227 // instructions into fewer. This typically happens with unions and other
8228 // non-type-safe code.
8229 if (TD
&& GEP
->hasOneUse() && isa
<BitCastInst
>(GEP
->getOperand(0))) {
8230 if (GEP
->hasAllConstantIndices()) {
8231 // We are guaranteed to get a constant from EmitGEPOffset.
8232 ConstantInt
*OffsetV
=
8233 cast
<ConstantInt
>(EmitGEPOffset(GEP
, CI
, *this));
8234 int64_t Offset
= OffsetV
->getSExtValue();
8236 // Get the base pointer input of the bitcast, and the type it points to.
8237 Value
*OrigBase
= cast
<BitCastInst
>(GEP
->getOperand(0))->getOperand(0);
8238 const Type
*GEPIdxTy
=
8239 cast
<PointerType
>(OrigBase
->getType())->getElementType();
8240 SmallVector
<Value
*, 8> NewIndices
;
8241 if (FindElementAtOffset(GEPIdxTy
, Offset
, NewIndices
, TD
, Context
)) {
8242 // If we were able to index down into an element, create the GEP
8243 // and bitcast the result. This eliminates one bitcast, potentially
8245 Instruction
*NGEP
= GetElementPtrInst::Create(OrigBase
,
8247 NewIndices
.end(), "");
8248 InsertNewInstBefore(NGEP
, CI
);
8249 NGEP
->takeName(GEP
);
8250 if (cast
<GEPOperator
>(GEP
)->isInBounds())
8251 cast
<GEPOperator
>(NGEP
)->setIsInBounds(true);
8253 if (isa
<BitCastInst
>(CI
))
8254 return new BitCastInst(NGEP
, CI
.getType());
8255 assert(isa
<PtrToIntInst
>(CI
));
8256 return new PtrToIntInst(NGEP
, CI
.getType());
8262 return commonCastTransforms(CI
);
8265 /// isSafeIntegerType - Return true if this is a basic integer type, not a crazy
8266 /// type like i42. We don't want to introduce operations on random non-legal
8267 /// integer types where they don't already exist in the code. In the future,
8268 /// we should consider making this based off target-data, so that 32-bit targets
8269 /// won't get i64 operations etc.
8270 static bool isSafeIntegerType(const Type
*Ty
) {
8271 switch (Ty
->getPrimitiveSizeInBits()) {
8282 /// commonIntCastTransforms - This function implements the common transforms
8283 /// for trunc, zext, and sext.
8284 Instruction
*InstCombiner::commonIntCastTransforms(CastInst
&CI
) {
8285 if (Instruction
*Result
= commonCastTransforms(CI
))
8288 Value
*Src
= CI
.getOperand(0);
8289 const Type
*SrcTy
= Src
->getType();
8290 const Type
*DestTy
= CI
.getType();
8291 uint32_t SrcBitSize
= SrcTy
->getScalarSizeInBits();
8292 uint32_t DestBitSize
= DestTy
->getScalarSizeInBits();
8294 // See if we can simplify any instructions used by the LHS whose sole
8295 // purpose is to compute bits we don't care about.
8296 if (SimplifyDemandedInstructionBits(CI
))
8299 // If the source isn't an instruction or has more than one use then we
8300 // can't do anything more.
8301 Instruction
*SrcI
= dyn_cast
<Instruction
>(Src
);
8302 if (!SrcI
|| !Src
->hasOneUse())
8305 // Attempt to propagate the cast into the instruction for int->int casts.
8306 int NumCastsRemoved
= 0;
8307 // Only do this if the dest type is a simple type, don't convert the
8308 // expression tree to something weird like i93 unless the source is also
8310 if ((isSafeIntegerType(DestTy
->getScalarType()) ||
8311 !isSafeIntegerType(SrcI
->getType()->getScalarType())) &&
8312 CanEvaluateInDifferentType(SrcI
, DestTy
,
8313 CI
.getOpcode(), NumCastsRemoved
)) {
8314 // If this cast is a truncate, evaluating in a different type always
8315 // eliminates the cast, so it is always a win. If this is a zero-extension,
8316 // we need to do an AND to maintain the clear top-part of the computation,
8317 // so we require that the input have eliminated at least one cast. If this
8318 // is a sign extension, we insert two new casts (to do the extension) so we
8319 // require that two casts have been eliminated.
8320 bool DoXForm
= false;
8321 bool JustReplace
= false;
8322 switch (CI
.getOpcode()) {
8324 // All the others use floating point so we shouldn't actually
8325 // get here because of the check above.
8326 llvm_unreachable("Unknown cast type");
8327 case Instruction::Trunc
:
8330 case Instruction::ZExt
: {
8331 DoXForm
= NumCastsRemoved
>= 1;
8332 if (!DoXForm
&& 0) {
8333 // If it's unnecessary to issue an AND to clear the high bits, it's
8334 // always profitable to do this xform.
8335 Value
*TryRes
= EvaluateInDifferentType(SrcI
, DestTy
, false);
8336 APInt
Mask(APInt::getBitsSet(DestBitSize
, SrcBitSize
, DestBitSize
));
8337 if (MaskedValueIsZero(TryRes
, Mask
))
8338 return ReplaceInstUsesWith(CI
, TryRes
);
8340 if (Instruction
*TryI
= dyn_cast
<Instruction
>(TryRes
))
8341 if (TryI
->use_empty())
8342 EraseInstFromFunction(*TryI
);
8346 case Instruction::SExt
: {
8347 DoXForm
= NumCastsRemoved
>= 2;
8348 if (!DoXForm
&& !isa
<TruncInst
>(SrcI
) && 0) {
8349 // If we do not have to emit the truncate + sext pair, then it's always
8350 // profitable to do this xform.
8352 // It's not safe to eliminate the trunc + sext pair if one of the
8353 // eliminated cast is a truncate. e.g.
8354 // t2 = trunc i32 t1 to i16
8355 // t3 = sext i16 t2 to i32
8358 Value
*TryRes
= EvaluateInDifferentType(SrcI
, DestTy
, true);
8359 unsigned NumSignBits
= ComputeNumSignBits(TryRes
);
8360 if (NumSignBits
> (DestBitSize
- SrcBitSize
))
8361 return ReplaceInstUsesWith(CI
, TryRes
);
8363 if (Instruction
*TryI
= dyn_cast
<Instruction
>(TryRes
))
8364 if (TryI
->use_empty())
8365 EraseInstFromFunction(*TryI
);
8372 DOUT
<< "ICE: EvaluateInDifferentType converting expression type to avoid"
8374 Value
*Res
= EvaluateInDifferentType(SrcI
, DestTy
,
8375 CI
.getOpcode() == Instruction::SExt
);
8377 // Just replace this cast with the result.
8378 return ReplaceInstUsesWith(CI
, Res
);
8380 assert(Res
->getType() == DestTy
);
8381 switch (CI
.getOpcode()) {
8382 default: llvm_unreachable("Unknown cast type!");
8383 case Instruction::Trunc
:
8384 // Just replace this cast with the result.
8385 return ReplaceInstUsesWith(CI
, Res
);
8386 case Instruction::ZExt
: {
8387 assert(SrcBitSize
< DestBitSize
&& "Not a zext?");
8389 // If the high bits are already zero, just replace this cast with the
8391 APInt
Mask(APInt::getBitsSet(DestBitSize
, SrcBitSize
, DestBitSize
));
8392 if (MaskedValueIsZero(Res
, Mask
))
8393 return ReplaceInstUsesWith(CI
, Res
);
8395 // We need to emit an AND to clear the high bits.
8396 Constant
*C
= ConstantInt::get(*Context
,
8397 APInt::getLowBitsSet(DestBitSize
, SrcBitSize
));
8398 return BinaryOperator::CreateAnd(Res
, C
);
8400 case Instruction::SExt
: {
8401 // If the high bits are already filled with sign bit, just replace this
8402 // cast with the result.
8403 unsigned NumSignBits
= ComputeNumSignBits(Res
);
8404 if (NumSignBits
> (DestBitSize
- SrcBitSize
))
8405 return ReplaceInstUsesWith(CI
, Res
);
8407 // We need to emit a cast to truncate, then a cast to sext.
8408 return CastInst::Create(Instruction::SExt
,
8409 InsertCastBefore(Instruction::Trunc
, Res
, Src
->getType(),
8416 Value
*Op0
= SrcI
->getNumOperands() > 0 ? SrcI
->getOperand(0) : 0;
8417 Value
*Op1
= SrcI
->getNumOperands() > 1 ? SrcI
->getOperand(1) : 0;
8419 switch (SrcI
->getOpcode()) {
8420 case Instruction::Add
:
8421 case Instruction::Mul
:
8422 case Instruction::And
:
8423 case Instruction::Or
:
8424 case Instruction::Xor
:
8425 // If we are discarding information, rewrite.
8426 if (DestBitSize
< SrcBitSize
&& DestBitSize
!= 1) {
8427 // Don't insert two casts unless at least one can be eliminated.
8428 if (!ValueRequiresCast(CI
.getOpcode(), Op1
, DestTy
, TD
) ||
8429 !ValueRequiresCast(CI
.getOpcode(), Op0
, DestTy
, TD
)) {
8430 Value
*Op0c
= InsertCastBefore(Instruction::Trunc
, Op0
, DestTy
, *SrcI
);
8431 Value
*Op1c
= InsertCastBefore(Instruction::Trunc
, Op1
, DestTy
, *SrcI
);
8432 return BinaryOperator::Create(
8433 cast
<BinaryOperator
>(SrcI
)->getOpcode(), Op0c
, Op1c
);
8437 // cast (xor bool X, true) to int --> xor (cast bool X to int), 1
8438 if (isa
<ZExtInst
>(CI
) && SrcBitSize
== 1 &&
8439 SrcI
->getOpcode() == Instruction::Xor
&&
8440 Op1
== ConstantInt::getTrue(*Context
) &&
8441 (!Op0
->hasOneUse() || !isa
<CmpInst
>(Op0
))) {
8442 Value
*New
= InsertCastBefore(Instruction::ZExt
, Op0
, DestTy
, CI
);
8443 return BinaryOperator::CreateXor(New
,
8444 ConstantInt::get(CI
.getType(), 1));
8448 case Instruction::Shl
: {
8449 // Canonicalize trunc inside shl, if we can.
8450 ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Op1
);
8451 if (CI
&& DestBitSize
< SrcBitSize
&&
8452 CI
->getLimitedValue(DestBitSize
) < DestBitSize
) {
8453 Value
*Op0c
= InsertCastBefore(Instruction::Trunc
, Op0
, DestTy
, *SrcI
);
8454 Value
*Op1c
= InsertCastBefore(Instruction::Trunc
, Op1
, DestTy
, *SrcI
);
8455 return BinaryOperator::CreateShl(Op0c
, Op1c
);
8463 Instruction
*InstCombiner::visitTrunc(TruncInst
&CI
) {
8464 if (Instruction
*Result
= commonIntCastTransforms(CI
))
8467 Value
*Src
= CI
.getOperand(0);
8468 const Type
*Ty
= CI
.getType();
8469 uint32_t DestBitWidth
= Ty
->getScalarSizeInBits();
8470 uint32_t SrcBitWidth
= Src
->getType()->getScalarSizeInBits();
8472 // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0)
8473 if (DestBitWidth
== 1) {
8474 Constant
*One
= ConstantInt::get(Src
->getType(), 1);
8475 Src
= InsertNewInstBefore(BinaryOperator::CreateAnd(Src
, One
, "tmp"), CI
);
8476 Value
*Zero
= Constant::getNullValue(Src
->getType());
8477 return new ICmpInst(*Context
, ICmpInst::ICMP_NE
, Src
, Zero
);
8480 // Optimize trunc(lshr(), c) to pull the shift through the truncate.
8481 ConstantInt
*ShAmtV
= 0;
8483 if (Src
->hasOneUse() &&
8484 match(Src
, m_LShr(m_Value(ShiftOp
), m_ConstantInt(ShAmtV
)), *Context
)) {
8485 uint32_t ShAmt
= ShAmtV
->getLimitedValue(SrcBitWidth
);
8487 // Get a mask for the bits shifting in.
8488 APInt
Mask(APInt::getLowBitsSet(SrcBitWidth
, ShAmt
).shl(DestBitWidth
));
8489 if (MaskedValueIsZero(ShiftOp
, Mask
)) {
8490 if (ShAmt
>= DestBitWidth
) // All zeros.
8491 return ReplaceInstUsesWith(CI
, Constant::getNullValue(Ty
));
8493 // Okay, we can shrink this. Truncate the input, then return a new
8495 Value
*V1
= InsertCastBefore(Instruction::Trunc
, ShiftOp
, Ty
, CI
);
8496 Value
*V2
= ConstantExpr::getTrunc(ShAmtV
, Ty
);
8497 return BinaryOperator::CreateLShr(V1
, V2
);
8504 /// transformZExtICmp - Transform (zext icmp) to bitwise / integer operations
8505 /// in order to eliminate the icmp.
8506 Instruction
*InstCombiner::transformZExtICmp(ICmpInst
*ICI
, Instruction
&CI
,
8508 // If we are just checking for a icmp eq of a single bit and zext'ing it
8509 // to an integer, then shift the bit to the appropriate place and then
8510 // cast to integer to avoid the comparison.
8511 if (ConstantInt
*Op1C
= dyn_cast
<ConstantInt
>(ICI
->getOperand(1))) {
8512 const APInt
&Op1CV
= Op1C
->getValue();
8514 // zext (x <s 0) to i32 --> x>>u31 true if signbit set.
8515 // zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear.
8516 if ((ICI
->getPredicate() == ICmpInst::ICMP_SLT
&& Op1CV
== 0) ||
8517 (ICI
->getPredicate() == ICmpInst::ICMP_SGT
&&Op1CV
.isAllOnesValue())) {
8518 if (!DoXform
) return ICI
;
8520 Value
*In
= ICI
->getOperand(0);
8521 Value
*Sh
= ConstantInt::get(In
->getType(),
8522 In
->getType()->getScalarSizeInBits()-1);
8523 In
= InsertNewInstBefore(BinaryOperator::CreateLShr(In
, Sh
,
8524 In
->getName()+".lobit"),
8526 if (In
->getType() != CI
.getType())
8527 In
= CastInst::CreateIntegerCast(In
, CI
.getType(),
8528 false/*ZExt*/, "tmp", &CI
);
8530 if (ICI
->getPredicate() == ICmpInst::ICMP_SGT
) {
8531 Constant
*One
= ConstantInt::get(In
->getType(), 1);
8532 In
= InsertNewInstBefore(BinaryOperator::CreateXor(In
, One
,
8533 In
->getName()+".not"),
8537 return ReplaceInstUsesWith(CI
, In
);
8542 // zext (X == 0) to i32 --> X^1 iff X has only the low bit set.
8543 // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
8544 // zext (X == 1) to i32 --> X iff X has only the low bit set.
8545 // zext (X == 2) to i32 --> X>>1 iff X has only the 2nd bit set.
8546 // zext (X != 0) to i32 --> X iff X has only the low bit set.
8547 // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
8548 // zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
8549 // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
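// e.g. if X is known to be either 0 or 2 (only bit 1 can be set), then
//   zext (icmp eq X, 2) to i32  -->  X >>u 1.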
8550 if ((Op1CV
== 0 || Op1CV
.isPowerOf2()) &&
8551 // This only works for EQ and NE
8552 ICI
->isEquality()) {
8553 // If Op1C some other power of two, convert:
8554 uint32_t BitWidth
= Op1C
->getType()->getBitWidth();
8555 APInt
KnownZero(BitWidth
, 0), KnownOne(BitWidth
, 0);
8556 APInt
TypeMask(APInt::getAllOnesValue(BitWidth
));
8557 ComputeMaskedBits(ICI
->getOperand(0), TypeMask
, KnownZero
, KnownOne
);
8559 APInt
KnownZeroMask(~KnownZero
);
8560 if (KnownZeroMask
.isPowerOf2()) { // Exactly 1 possible 1?
8561 if (!DoXform
) return ICI
;
8563 bool isNE
= ICI
->getPredicate() == ICmpInst::ICMP_NE
;
8564 if (Op1CV
!= 0 && (Op1CV
!= KnownZeroMask
)) {
8565 // (X&4) == 2 --> false
8566 // (X&4) != 2 --> true
8567 Constant
*Res
= ConstantInt::get(Type::Int1Ty
, isNE
);
8568 Res
= ConstantExpr::getZExt(Res
, CI
.getType());
8569 return ReplaceInstUsesWith(CI
, Res
);
8572 uint32_t ShiftAmt
= KnownZeroMask
.logBase2();
8573 Value
*In
= ICI
->getOperand(0);
8575 // Perform a logical shr by shiftamt.
8576 // Insert the shift to put the result in the low bit.
8577 In
= InsertNewInstBefore(BinaryOperator::CreateLShr(In
,
8578 ConstantInt::get(In
->getType(), ShiftAmt
),
8579 In
->getName()+".lobit"), CI
);
8582 if ((Op1CV
!= 0) == isNE
) { // Toggle the low bit.
8583 Constant
*One
= ConstantInt::get(In
->getType(), 1);
8584 In
= BinaryOperator::CreateXor(In
, One
, "tmp");
8585 InsertNewInstBefore(cast
<Instruction
>(In
), CI
);
8588 if (CI
.getType() == In
->getType())
8589 return ReplaceInstUsesWith(CI
, In
);
8591 return CastInst::CreateIntegerCast(In
, CI
.getType(), false/*ZExt*/);
8599 Instruction
*InstCombiner::visitZExt(ZExtInst
&CI
) {
8600 // If one of the common conversions will work, use it.
8601 if (Instruction
*Result
= commonIntCastTransforms(CI
))
8604 Value
*Src
= CI
.getOperand(0);
8606 // If this is a TRUNC followed by a ZEXT then we are dealing with integral
8607 // types and if the sizes are just right we can convert this into a logical
8608 // 'and' which will be much cheaper than the pair of casts.
8609 if (TruncInst
*CSrc
= dyn_cast
<TruncInst
>(Src
)) { // A->B->C cast
8610 // Get the sizes of the types involved. We know that the intermediate type
8611 // will be smaller than A or C, but don't know the relation between A and C.
8612 Value
*A
= CSrc
->getOperand(0);
8613 unsigned SrcSize
= A
->getType()->getScalarSizeInBits();
8614 unsigned MidSize
= CSrc
->getType()->getScalarSizeInBits();
8615 unsigned DstSize
= CI
.getType()->getScalarSizeInBits();
8616 // If we're actually extending zero bits, then if
8617 // SrcSize < DstSize: zext(a & mask)
8618 // SrcSize == DstSize: a & mask
8619 // SrcSize > DstSize: trunc(a) & mask
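// e.g. zext (trunc i16 %a to i8) to i32  -->  zext (and i16 %a, 255) to i32.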
8620 if (SrcSize
< DstSize
) {
8621 APInt
AndValue(APInt::getLowBitsSet(SrcSize
, MidSize
));
8622 Constant
*AndConst
= ConstantInt::get(A
->getType(), AndValue
);
8624 BinaryOperator::CreateAnd(A
, AndConst
, CSrc
->getName()+".mask");
8625 InsertNewInstBefore(And
, CI
);
8626 return new ZExtInst(And
, CI
.getType());
8627 } else if (SrcSize
== DstSize
) {
8628 APInt
AndValue(APInt::getLowBitsSet(SrcSize
, MidSize
));
8629 return BinaryOperator::CreateAnd(A
, ConstantInt::get(A
->getType(),
8631 } else if (SrcSize
> DstSize
) {
8632 Instruction
*Trunc
= new TruncInst(A
, CI
.getType(), "tmp");
8633 InsertNewInstBefore(Trunc
, CI
);
8634 APInt
AndValue(APInt::getLowBitsSet(DstSize
, MidSize
));
8635 return BinaryOperator::CreateAnd(Trunc
,
8636 ConstantInt::get(Trunc
->getType(),
8641 if (ICmpInst
*ICI
= dyn_cast
<ICmpInst
>(Src
))
8642 return transformZExtICmp(ICI
, CI
);
8644 BinaryOperator
*SrcI
= dyn_cast
<BinaryOperator
>(Src
);
8645 if (SrcI
&& SrcI
->getOpcode() == Instruction::Or
) {
8646 // zext (or icmp, icmp) --> or (zext icmp), (zext icmp) if at least one
8647 // of the (zext icmp) will be transformed.
8648 ICmpInst
*LHS
= dyn_cast
<ICmpInst
>(SrcI
->getOperand(0));
8649 ICmpInst
*RHS
= dyn_cast
<ICmpInst
>(SrcI
->getOperand(1));
8650 if (LHS
&& RHS
&& LHS
->hasOneUse() && RHS
->hasOneUse() &&
8651 (transformZExtICmp(LHS
, CI
, false) ||
8652 transformZExtICmp(RHS
, CI
, false))) {
8653 Value
*LCast
= InsertCastBefore(Instruction::ZExt
, LHS
, CI
.getType(), CI
);
8654 Value
*RCast
= InsertCastBefore(Instruction::ZExt
, RHS
, CI
.getType(), CI
);
8655 return BinaryOperator::Create(Instruction::Or
, LCast
, RCast
);
8659 // zext(trunc(t) & C) -> (t & zext(C)).
8660 if (SrcI
&& SrcI
->getOpcode() == Instruction::And
&& SrcI
->hasOneUse())
8661 if (ConstantInt
*C
= dyn_cast
<ConstantInt
>(SrcI
->getOperand(1)))
8662 if (TruncInst
*TI
= dyn_cast
<TruncInst
>(SrcI
->getOperand(0))) {
8663 Value
*TI0
= TI
->getOperand(0);
8664 if (TI0
->getType() == CI
.getType())
8666 BinaryOperator::CreateAnd(TI0
,
8667 ConstantExpr::getZExt(C
, CI
.getType()));
8670 // zext((trunc(t) & C) ^ C) -> ((t & zext(C)) ^ zext(C)).
8671 if (SrcI
&& SrcI
->getOpcode() == Instruction::Xor
&& SrcI
->hasOneUse())
8672 if (ConstantInt
*C
= dyn_cast
<ConstantInt
>(SrcI
->getOperand(1)))
8673 if (BinaryOperator
*And
= dyn_cast
<BinaryOperator
>(SrcI
->getOperand(0)))
8674 if (And
->getOpcode() == Instruction::And
&& And
->hasOneUse() &&
8675 And
->getOperand(1) == C
)
8676 if (TruncInst
*TI
= dyn_cast
<TruncInst
>(And
->getOperand(0))) {
8677 Value
*TI0
= TI
->getOperand(0);
8678 if (TI0
->getType() == CI
.getType()) {
8679 Constant
*ZC
= ConstantExpr::getZExt(C
, CI
.getType());
8680 Instruction
*NewAnd
= BinaryOperator::CreateAnd(TI0
, ZC
, "tmp");
8681 InsertNewInstBefore(NewAnd
, *And
);
8682 return BinaryOperator::CreateXor(NewAnd
, ZC
);
8689 Instruction
*InstCombiner::visitSExt(SExtInst
&CI
) {
8690 if (Instruction
*I
= commonIntCastTransforms(CI
))
8693 Value
*Src
= CI
.getOperand(0);
8695 // Canonicalize sign-extend from i1 to a select.
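// e.g. sext i1 %b to i32  -->  select i1 %b, i32 -1, i32 0.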
8696 if (Src
->getType() == Type::Int1Ty
)
8697 return SelectInst::Create(Src
,
8698 Constant::getAllOnesValue(CI
.getType()),
8699 Constant::getNullValue(CI
.getType()));
8701 // See if the value being truncated is already sign extended. If so, just
8702 // eliminate the trunc/sext pair.
8703 if (Operator::getOpcode(Src
) == Instruction::Trunc
) {
8704 Value
*Op
= cast
<User
>(Src
)->getOperand(0);
8705 unsigned OpBits
= Op
->getType()->getScalarSizeInBits();
8706 unsigned MidBits
= Src
->getType()->getScalarSizeInBits();
8707 unsigned DestBits
= CI
.getType()->getScalarSizeInBits();
8708 unsigned NumSignBits
= ComputeNumSignBits(Op
);
8710 if (OpBits
== DestBits
) {
8711 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign
8712 // bits, it is already ready.
8713 if (NumSignBits
> DestBits
-MidBits
)
8714 return ReplaceInstUsesWith(CI
, Op
);
8715 } else if (OpBits
< DestBits
) {
8716 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign
8717 // bits, just sext from i32.
8718 if (NumSignBits
> OpBits
-MidBits
)
8719 return new SExtInst(Op
, CI
.getType(), "tmp");
8721 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign
8722 // bits, just truncate to i32.
8723 if (NumSignBits
> OpBits
-MidBits
)
8724 return new TruncInst(Op
, CI
.getType(), "tmp");
8728 // If the input is a shl/ashr pair of a same constant, then this is a sign
8729 // extension from a smaller value. If we could trust arbitrary bitwidth
8730 // integers, we could turn this into a truncate to the smaller bit and then
8731 // use a sext for the whole extension. Since we don't, look deeper and check
8732 // for a truncate. If the source and dest are the same type, eliminate the
8733 // trunc and extend and just do shifts. For example, turn:
8734 // %a = trunc i32 %i to i8
8735 // %b = shl i8 %a, 6
8736 // %c = ashr i8 %b, 6
8737 // %d = sext i8 %c to i32
8739 // %a = shl i32 %i, 30
8740 // %d = ashr i32 %a, 30
8742 ConstantInt
*BA
= 0, *CA
= 0;
8743 if (match(Src
, m_AShr(m_Shl(m_Value(A
), m_ConstantInt(BA
)),
8744 m_ConstantInt(CA
)), *Context
) &&
8745 BA
== CA
&& isa
<TruncInst
>(A
)) {
8746 Value
*I
= cast
<TruncInst
>(A
)->getOperand(0);
8747 if (I
->getType() == CI
.getType()) {
8748 unsigned MidSize
= Src
->getType()->getScalarSizeInBits();
8749 unsigned SrcDstSize
= CI
.getType()->getScalarSizeInBits();
8750 unsigned ShAmt
= CA
->getZExtValue()+SrcDstSize
-MidSize
;
8751 Constant
*ShAmtV
= ConstantInt::get(CI
.getType(), ShAmt
);
8752 I
= InsertNewInstBefore(BinaryOperator::CreateShl(I
, ShAmtV
,
8754 return BinaryOperator::CreateAShr(I
, ShAmtV
);
/// FitsInFPType - Return a Constant* for the specified FP constant if it fits
/// in the specified FP type without changing its value.
static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem,
                              LLVMContext *Context) {
  bool losesInfo;
  APFloat F = CFP->getValueAPF();
  (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
  if (!losesInfo)
    return ConstantFP::get(*Context, F);
  return 0;
}

/// LookThroughFPExtensions - If this is an fp extension instruction, look
/// through it until we get the source value.
static Value *LookThroughFPExtensions(Value *V, LLVMContext *Context) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (I->getOpcode() == Instruction::FPExt)
      return LookThroughFPExtensions(I->getOperand(0), Context);

  // If this value is a constant, return the constant in the smallest FP type
  // that can accurately represent it.  This allows us to turn
  // (float)((double)X+2.0) into x+2.0f.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType() == Type::PPC_FP128Ty)
      return V;  // No constant folding of this.
    // See if the value can be truncated to float and then reextended.
    if (Value *V = FitsInFPType(CFP, APFloat::IEEEsingle, Context))
      return V;
    if (CFP->getType() == Type::DoubleTy)
      return V;  // Won't shrink.
    if (Value *V = FitsInFPType(CFP, APFloat::IEEEdouble, Context))
      return V;
    // Don't try to shrink to various long double types.
  }

  return V;
}
8799 Instruction
*InstCombiner::visitFPTrunc(FPTruncInst
&CI
) {
8800 if (Instruction
*I
= commonCastTransforms(CI
))
8803 // If we have fptrunc(fadd (fpextend x), (fpextend y)), where x and y are
8804 // smaller than the destination type, we can eliminate the truncate by doing
8805 // the add as the smaller type. This applies to fadd/fsub/fmul/fdiv as well as
8806 // many builtins (sqrt, etc).
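// e.g. fptrunc (fadd (fpext float %x to double), (fpext float %y to double))
//      to float  -->  fadd float %x, %y.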
8807 BinaryOperator
*OpI
= dyn_cast
<BinaryOperator
>(CI
.getOperand(0));
8808 if (OpI
&& OpI
->hasOneUse()) {
8809 switch (OpI
->getOpcode()) {
8811 case Instruction::FAdd
:
8812 case Instruction::FSub
:
8813 case Instruction::FMul
:
8814 case Instruction::FDiv
:
8815 case Instruction::FRem
:
8816 const Type
*SrcTy
= OpI
->getType();
8817 Value
*LHSTrunc
= LookThroughFPExtensions(OpI
->getOperand(0), Context
);
8818 Value
*RHSTrunc
= LookThroughFPExtensions(OpI
->getOperand(1), Context
);
8819 if (LHSTrunc
->getType() != SrcTy
&&
8820 RHSTrunc
->getType() != SrcTy
) {
8821 unsigned DstSize
= CI
.getType()->getScalarSizeInBits();
8822 // If the source types were both smaller than the destination type of
8823 // the cast, do this xform.
8824 if (LHSTrunc
->getType()->getScalarSizeInBits() <= DstSize
&&
8825 RHSTrunc
->getType()->getScalarSizeInBits() <= DstSize
) {
8826 LHSTrunc
= InsertCastBefore(Instruction::FPExt
, LHSTrunc
,
8828 RHSTrunc
= InsertCastBefore(Instruction::FPExt
, RHSTrunc
,
8830 return BinaryOperator::Create(OpI
->getOpcode(), LHSTrunc
, RHSTrunc
);
8839 Instruction
*InstCombiner::visitFPExt(CastInst
&CI
) {
8840 return commonCastTransforms(CI
);
Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
  Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
  if (!OpI)
    return commonCastTransforms(FI);

  // fptoui(uitofp(X)) --> X
  // fptoui(sitofp(X)) --> X
  // This is safe if the intermediate type has enough bits in its mantissa to
  // accurately represent all values of X.  For example, do not do this with
  // i64->float->i64.  This is also safe for the sitofp case, because any
  // negative 'X' value would cause an undefined result for the fptoui.
  if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
      OpI->getOperand(0)->getType() == FI.getType() &&
      (int)FI.getType()->getScalarSizeInBits() < /*extra bit for sign */
      OpI->getType()->getFPMantissaWidth())
    return ReplaceInstUsesWith(FI, OpI->getOperand(0));

  return commonCastTransforms(FI);
}

Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
  Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
  if (!OpI)
    return commonCastTransforms(FI);

  // fptosi(sitofp(X)) --> X
  // fptosi(uitofp(X)) --> X
  // This is safe if the intermediate type has enough bits in its mantissa to
  // accurately represent all values of X.  For example, do not do this with
  // i64->float->i64.  This is also safe for the sitofp case, because any
  // negative 'X' value would cause an undefined result for the fptoui.
  if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
      OpI->getOperand(0)->getType() == FI.getType() &&
      (int)FI.getType()->getScalarSizeInBits() <=
      OpI->getType()->getFPMantissaWidth())
    return ReplaceInstUsesWith(FI, OpI->getOperand(0));

  return commonCastTransforms(FI);
}

Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}
8891 Instruction
*InstCombiner::visitPtrToInt(PtrToIntInst
&CI
) {
8892 // If the destination integer type is smaller than the intptr_t type for
8893 // this target, do a ptrtoint to intptr_t then do a trunc. This allows the
8894 // trunc to be exposed to other transforms. Don't do this for extending
8895 // ptrtoint's, because we don't know if the target sign or zero extends its
8898 CI
.getType()->getScalarSizeInBits() < TD
->getPointerSizeInBits()) {
8899 Value
*P
= InsertNewInstBefore(new PtrToIntInst(CI
.getOperand(0),
8900 TD
->getIntPtrType(),
8902 return new TruncInst(P
, CI
.getType());
8905 return commonPointerCastTransforms(CI
);
8908 Instruction
*InstCombiner::visitIntToPtr(IntToPtrInst
&CI
) {
8909 // If the source integer type is larger than the intptr_t type for
8910 // this target, do a trunc to the intptr_t type, then inttoptr of it. This
8911 // allows the trunc to be exposed to other transforms. Don't do this for
8912 // extending inttoptr's, because we don't know if the target sign or zero
8913 // extends to pointers.
8915 CI
.getOperand(0)->getType()->getScalarSizeInBits() >
8916 TD
->getPointerSizeInBits()) {
8917 Value
*P
= InsertNewInstBefore(new TruncInst(CI
.getOperand(0),
8918 TD
->getIntPtrType(),
8920 return new IntToPtrInst(P
, CI
.getType());
8923 if (Instruction
*I
= commonCastTransforms(CI
))
8929 Instruction
*InstCombiner::visitBitCast(BitCastInst
&CI
) {
8930 // If the operands are integer typed then apply the integer transforms,
8931 // otherwise just apply the common ones.
8932 Value
*Src
= CI
.getOperand(0);
8933 const Type
*SrcTy
= Src
->getType();
8934 const Type
*DestTy
= CI
.getType();
8936 if (isa
<PointerType
>(SrcTy
)) {
8937 if (Instruction
*I
= commonPointerCastTransforms(CI
))
8940 if (Instruction
*Result
= commonCastTransforms(CI
))
8945 // Get rid of casts from one type to the same type. These are useless and can
8946 // be replaced by the operand.
8947 if (DestTy
== Src
->getType())
8948 return ReplaceInstUsesWith(CI
, Src
);
8950 if (const PointerType
*DstPTy
= dyn_cast
<PointerType
>(DestTy
)) {
8951 const PointerType
*SrcPTy
= cast
<PointerType
>(SrcTy
);
8952 const Type
*DstElTy
= DstPTy
->getElementType();
8953 const Type
*SrcElTy
= SrcPTy
->getElementType();
8955 // If the address spaces don't match, don't eliminate the bitcast, which is
8956 // required for changing types.
8957 if (SrcPTy
->getAddressSpace() != DstPTy
->getAddressSpace())
8960 // If we are casting a malloc or alloca to a pointer to a type of the same
8961 // size, rewrite the allocation instruction to allocate the "right" type.
8962 if (AllocationInst
*AI
= dyn_cast
<AllocationInst
>(Src
))
8963 if (Instruction
*V
= PromoteCastOfAllocation(CI
, *AI
))
8966 // If the source and destination are pointers, and this cast is equivalent
8967 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
8968 // This can enhance SROA and other transforms that want type-safe pointers.
8969 Constant
*ZeroUInt
= Constant::getNullValue(Type::Int32Ty
);
8970 unsigned NumZeros
= 0;
8971 while (SrcElTy
!= DstElTy
&&
8972 isa
<CompositeType
>(SrcElTy
) && !isa
<PointerType
>(SrcElTy
) &&
8973 SrcElTy
->getNumContainedTypes() /* not "{}" */) {
8974 SrcElTy
= cast
<CompositeType
>(SrcElTy
)->getTypeAtIndex(ZeroUInt
);
8978 // If we found a path from the src to dest, create the getelementptr now.
8979 if (SrcElTy
== DstElTy
) {
8980 SmallVector
<Value
*, 8> Idxs(NumZeros
+1, ZeroUInt
);
8981 Instruction
*GEP
= GetElementPtrInst::Create(Src
,
8982 Idxs
.begin(), Idxs
.end(), "",
8983 ((Instruction
*) NULL
));
8984 cast
<GEPOperator
>(GEP
)->setIsInBounds(true);
8989 if (const VectorType
*DestVTy
= dyn_cast
<VectorType
>(DestTy
)) {
8990 if (DestVTy
->getNumElements() == 1) {
8991 if (!isa
<VectorType
>(SrcTy
)) {
8992 Value
*Elem
= InsertCastBefore(Instruction::BitCast
, Src
,
8993 DestVTy
->getElementType(), CI
);
8994 return InsertElementInst::Create(UndefValue::get(DestTy
), Elem
,
8995 Constant::getNullValue(Type::Int32Ty
));
8997 // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
9001 if (const VectorType
*SrcVTy
= dyn_cast
<VectorType
>(SrcTy
)) {
9002 if (SrcVTy
->getNumElements() == 1) {
9003 if (!isa
<VectorType
>(DestTy
)) {
9005 ExtractElementInst::Create(Src
, Constant::getNullValue(Type::Int32Ty
));
9006 InsertNewInstBefore(Elem
, CI
);
9007 return CastInst::Create(Instruction::BitCast
, Elem
, DestTy
);
9012 if (ShuffleVectorInst
*SVI
= dyn_cast
<ShuffleVectorInst
>(Src
)) {
9013 if (SVI
->hasOneUse()) {
9014 // Okay, we have (bitconvert (shuffle ..)). Check to see if this is
9015 // a bitconvert to a vector with the same # elts.
9016 if (isa
<VectorType
>(DestTy
) &&
9017 cast
<VectorType
>(DestTy
)->getNumElements() ==
9018 SVI
->getType()->getNumElements() &&
9019 SVI
->getType()->getNumElements() ==
9020 cast
<VectorType
>(SVI
->getOperand(0)->getType())->getNumElements()) {
9022 // If either of the operands is a cast from CI.getType(), then
9023 // evaluating the shuffle in the casted destination's type will allow
9024 // us to eliminate at least one cast.
9025 if (((Tmp
= dyn_cast
<CastInst
>(SVI
->getOperand(0))) &&
9026 Tmp
->getOperand(0)->getType() == DestTy
) ||
9027 ((Tmp
= dyn_cast
<CastInst
>(SVI
->getOperand(1))) &&
9028 Tmp
->getOperand(0)->getType() == DestTy
)) {
9029 Value
*LHS
= InsertCastBefore(Instruction::BitCast
,
9030 SVI
->getOperand(0), DestTy
, CI
);
9031 Value
*RHS
= InsertCastBefore(Instruction::BitCast
,
9032 SVI
->getOperand(1), DestTy
, CI
);
9033 // Return a new shuffle vector. Use the same element ID's, as we
9034 // know the vector types match #elts.
9035 return new ShuffleVectorInst(LHS
, RHS
, SVI
->getOperand(2));
/// GetSelectFoldableOperands - We want to turn code that looks like this:
///   %C = or %A, %B
///   %D = select %cond, %C, %A
/// into:
///   %C = select %cond, %B, 0
///   %D = or %A, %C
///
/// Assuming that the specified instruction is an operand to the select, return
/// a bitmask indicating which operands of this instruction are foldable if
/// they equal the other incoming value of the select.
static unsigned GetSelectFoldableOperands(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    return 3;              // Can fold through either operand.
  case Instruction::Sub:   // Can only fold on the amount subtracted.
  case Instruction::Shl:   // Can only fold on the shift amount.
  case Instruction::LShr:
  case Instruction::AShr:
    return 1;
  default:
    return 0;              // Cannot fold
  }
}
/// GetSelectFoldableConstant - For the same transformation as the previous
/// function, return the identity constant that goes into the select.
static Constant *GetSelectFoldableConstant(Instruction *I,
                                           LLVMContext *Context) {
  switch (I->getOpcode()) {
  default: llvm_unreachable("This cannot happen!");
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return Constant::getNullValue(I->getType());
  case Instruction::And:
    return Constant::getAllOnesValue(I->getType());
  case Instruction::Mul:
    return ConstantInt::get(I->getType(), 1);
  }
}
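// For example, using the two helpers above, a select like
//   select i1 %c, (add i32 %x, %y), i32 %x
// can be folded to
//   add i32 %x, (select i1 %c, i32 %y, i32 0)
// because 0 is the identity constant for add.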
9093 /// FoldSelectOpOp - Here we have (select c, TI, FI), and we know that TI and FI
9094 /// have the same opcode and only one use each. Try to simplify this.
9095 Instruction
*InstCombiner::FoldSelectOpOp(SelectInst
&SI
, Instruction
*TI
,
9097 if (TI
->getNumOperands() == 1) {
9098 // If this is a non-volatile load or a cast from the same type,
9101 if (TI
->getOperand(0)->getType() != FI
->getOperand(0)->getType())
9104 return 0; // unknown unary op.
9107 // Fold this by inserting a select from the input values.
9108 SelectInst
*NewSI
= SelectInst::Create(SI
.getCondition(), TI
->getOperand(0),
9109 FI
->getOperand(0), SI
.getName()+".v");
9110 InsertNewInstBefore(NewSI
, SI
);
9111 return CastInst::Create(Instruction::CastOps(TI
->getOpcode()), NewSI
,
9115 // Only handle binary operators here.
9116 if (!isa
<BinaryOperator
>(TI
))
9119 // Figure out if the operations have any operands in common.
9120 Value
*MatchOp
, *OtherOpT
, *OtherOpF
;
9122 if (TI
->getOperand(0) == FI
->getOperand(0)) {
9123 MatchOp
= TI
->getOperand(0);
9124 OtherOpT
= TI
->getOperand(1);
9125 OtherOpF
= FI
->getOperand(1);
9126 MatchIsOpZero
= true;
9127 } else if (TI
->getOperand(1) == FI
->getOperand(1)) {
9128 MatchOp
= TI
->getOperand(1);
9129 OtherOpT
= TI
->getOperand(0);
9130 OtherOpF
= FI
->getOperand(0);
9131 MatchIsOpZero
= false;
9132 } else if (!TI
->isCommutative()) {
9134 } else if (TI
->getOperand(0) == FI
->getOperand(1)) {
9135 MatchOp
= TI
->getOperand(0);
9136 OtherOpT
= TI
->getOperand(1);
9137 OtherOpF
= FI
->getOperand(0);
9138 MatchIsOpZero
= true;
9139 } else if (TI
->getOperand(1) == FI
->getOperand(0)) {
9140 MatchOp
= TI
->getOperand(1);
9141 OtherOpT
= TI
->getOperand(0);
9142 OtherOpF
= FI
->getOperand(1);
9143 MatchIsOpZero
= true;
9148 // If we reach here, they do have operations in common.
9149 SelectInst
*NewSI
= SelectInst::Create(SI
.getCondition(), OtherOpT
,
9150 OtherOpF
, SI
.getName()+".v");
9151 InsertNewInstBefore(NewSI
, SI
);
9153 if (BinaryOperator
*BO
= dyn_cast
<BinaryOperator
>(TI
)) {
9155 return BinaryOperator::Create(BO
->getOpcode(), MatchOp
, NewSI
);
9157 return BinaryOperator::Create(BO
->getOpcode(), NewSI
, MatchOp
);
9159 llvm_unreachable("Shouldn't get here");
static bool isSelect01(Constant *C1, Constant *C2) {
  ConstantInt *C1I = dyn_cast<ConstantInt>(C1);
  if (!C1I)
    return false;
  ConstantInt *C2I = dyn_cast<ConstantInt>(C2);
  if (!C2I)
    return false;
  return (C1I->isZero() || C1I->isOne()) && (C2I->isZero() || C2I->isOne());
}
9173 /// FoldSelectIntoOp - Try to fold the select into one of the operands to
9174 /// facilitate further optimization.
9175 Instruction
*InstCombiner::FoldSelectIntoOp(SelectInst
&SI
, Value
*TrueVal
,
9177 // See the comment above GetSelectFoldableOperands for a description of the
9178 // transformation we are doing here.
9179 if (Instruction
*TVI
= dyn_cast
<Instruction
>(TrueVal
)) {
9180 if (TVI
->hasOneUse() && TVI
->getNumOperands() == 2 &&
9181 !isa
<Constant
>(FalseVal
)) {
9182 if (unsigned SFO
= GetSelectFoldableOperands(TVI
)) {
9183 unsigned OpToFold
= 0;
9184 if ((SFO
& 1) && FalseVal
== TVI
->getOperand(0)) {
9186 } else if ((SFO
& 2) && FalseVal
== TVI
->getOperand(1)) {
9191 Constant
*C
= GetSelectFoldableConstant(TVI
, Context
);
9192 Value
*OOp
= TVI
->getOperand(2-OpToFold
);
9193 // Avoid creating select between 2 constants unless it's selecting
9195 if (!isa
<Constant
>(OOp
) || isSelect01(C
, cast
<Constant
>(OOp
))) {
9196 Instruction
*NewSel
= SelectInst::Create(SI
.getCondition(), OOp
, C
);
9197 InsertNewInstBefore(NewSel
, SI
);
9198 NewSel
->takeName(TVI
);
9199 if (BinaryOperator
*BO
= dyn_cast
<BinaryOperator
>(TVI
))
9200 return BinaryOperator::Create(BO
->getOpcode(), FalseVal
, NewSel
);
9201 llvm_unreachable("Unknown instruction!!");
9208 if (Instruction
*FVI
= dyn_cast
<Instruction
>(FalseVal
)) {
9209 if (FVI
->hasOneUse() && FVI
->getNumOperands() == 2 &&
9210 !isa
<Constant
>(TrueVal
)) {
9211 if (unsigned SFO
= GetSelectFoldableOperands(FVI
)) {
9212 unsigned OpToFold
= 0;
9213 if ((SFO
& 1) && TrueVal
== FVI
->getOperand(0)) {
9215 } else if ((SFO
& 2) && TrueVal
== FVI
->getOperand(1)) {
9220 Constant
*C
= GetSelectFoldableConstant(FVI
, Context
);
9221 Value
*OOp
= FVI
->getOperand(2-OpToFold
);
9222 // Avoid creating select between 2 constants unless it's selecting
9224 if (!isa
<Constant
>(OOp
) || isSelect01(C
, cast
<Constant
>(OOp
))) {
9225 Instruction
*NewSel
= SelectInst::Create(SI
.getCondition(), C
, OOp
);
9226 InsertNewInstBefore(NewSel
, SI
);
9227 NewSel
->takeName(FVI
);
9228 if (BinaryOperator
*BO
= dyn_cast
<BinaryOperator
>(FVI
))
9229 return BinaryOperator::Create(BO
->getOpcode(), TrueVal
, NewSel
);
9230 llvm_unreachable("Unknown instruction!!");
/// visitSelectInstWithICmp - Visit a SelectInst that has an
/// ICmpInst as its first operand.
///
Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
                                                   ICmpInst *ICI) {
  bool Changed = false;
  ICmpInst::Predicate Pred = ICI->getPredicate();
  Value *CmpLHS = ICI->getOperand(0);
  Value *CmpRHS = ICI->getOperand(1);
  Value *TrueVal = SI.getTrueValue();
  Value *FalseVal = SI.getFalseValue();

  // Check cases where the comparison is with a constant that
  // can be adjusted to fit the min/max idiom. We may edit ICI in
  // place here, so make sure the select is the only user.
  if (ICI->hasOneUse())
    if (ConstantInt *CI = dyn_cast<ConstantInt>(CmpRHS)) {
      switch (Pred) {
      default: break;
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT: {
        // X < MIN ? T : F  -->  F
        if (CI->isMinValue(Pred == ICmpInst::ICMP_SLT))
          return ReplaceInstUsesWith(SI, FalseVal);
        // X < C ? X : C-1  -->  X > C-1 ? C-1 : X
        Constant *AdjustedRHS = SubOne(CI, Context);
        if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
            (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
          Pred = ICmpInst::getSwappedPredicate(Pred);
          CmpRHS = AdjustedRHS;
          std::swap(FalseVal, TrueVal);
          ICI->setPredicate(Pred);
          ICI->setOperand(1, CmpRHS);
          SI.setOperand(1, TrueVal);
          SI.setOperand(2, FalseVal);
          Changed = true;
        }
        break;
      }
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT: {
        // X > MAX ? T : F  -->  F
        if (CI->isMaxValue(Pred == ICmpInst::ICMP_SGT))
          return ReplaceInstUsesWith(SI, FalseVal);
        // X > C ? X : C+1  -->  X < C+1 ? C+1 : X
        Constant *AdjustedRHS = AddOne(CI, Context);
        if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
            (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
          Pred = ICmpInst::getSwappedPredicate(Pred);
          CmpRHS = AdjustedRHS;
          std::swap(FalseVal, TrueVal);
          ICI->setPredicate(Pred);
          ICI->setOperand(1, CmpRHS);
          SI.setOperand(1, TrueVal);
          SI.setOperand(2, FalseVal);
          Changed = true;
        }
        break;
      }
      }

      // (x <s 0) ? -1 : 0 -> ashr x, 31   -> all ones if signed
      // (x >s -1) ? -1 : 0 -> ashr x, 31  -> all ones if not signed
      CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
      if (match(TrueVal, m_ConstantInt<-1>(), *Context) &&
          match(FalseVal, m_ConstantInt<0>(), *Context))
        Pred = ICI->getPredicate();
      else if (match(TrueVal, m_ConstantInt<0>(), *Context) &&
               match(FalseVal, m_ConstantInt<-1>(), *Context))
        Pred = CmpInst::getInversePredicate(ICI->getPredicate());

      if (Pred != CmpInst::BAD_ICMP_PREDICATE) {
        // If we are just checking for a icmp eq of a single bit and zext'ing it
        // to an integer, then shift the bit to the appropriate place and then
        // cast to integer to avoid the comparison.
        const APInt &Op1CV = CI->getValue();

        // sext (x <s  0) to i32 --> x>>s31      true if signbit set.
        // sext (x >s -1) to i32 --> (x>>s31)^-1  true if signbit clear.
        if ((Pred == ICmpInst::ICMP_SLT && Op1CV == 0) ||
            (Pred == ICmpInst::ICMP_SGT && Op1CV.isAllOnesValue())) {
          Value *In = ICI->getOperand(0);
          Value *Sh = ConstantInt::get(In->getType(),
                                       In->getType()->getScalarSizeInBits()-1);
          In = InsertNewInstBefore(BinaryOperator::CreateAShr(In, Sh,
                                                     In->getName()+".lobit"),
                                   *ICI);
          if (In->getType() != SI.getType())
            In = CastInst::CreateIntegerCast(In, SI.getType(),
                                             true/*SExt*/, "tmp", ICI);

          if (Pred == ICmpInst::ICMP_SGT)
            In = InsertNewInstBefore(BinaryOperator::CreateNot(*Context, In,
                                               In->getName()+".not"), *ICI);

          return ReplaceInstUsesWith(SI, In);
        }
      }
    }

  if (CmpLHS == TrueVal && CmpRHS == FalseVal) {
    // Transform (X == Y) ? X : Y  -> Y
    if (Pred == ICmpInst::ICMP_EQ)
      return ReplaceInstUsesWith(SI, FalseVal);
    // Transform (X != Y) ? X : Y  -> X
    if (Pred == ICmpInst::ICMP_NE)
      return ReplaceInstUsesWith(SI, TrueVal);
    /// NOTE: if we wanted to, this is where to detect integer MIN/MAX

  } else if (CmpLHS == FalseVal && CmpRHS == TrueVal) {
    // Transform (X == Y) ? Y : X  -> X
    if (Pred == ICmpInst::ICMP_EQ)
      return ReplaceInstUsesWith(SI, FalseVal);
    // Transform (X != Y) ? Y : X  -> Y
    if (Pred == ICmpInst::ICMP_NE)
      return ReplaceInstUsesWith(SI, TrueVal);
    /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
  }

  /// NOTE: if we wanted to, this is where to detect integer ABS

  return Changed ? &SI : 0;
}
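// Illustrative example of the constant-adjustment above (pseudo-IR): with C=10,
//   %cmp = icmp slt i32 %x, 10
//   %r   = select i1 %cmp, i32 %x, i32 9
// is edited in place to the equivalent
//   %cmp = icmp sgt i32 %x, 9
//   %r   = select i1 %cmp, i32 9, i32 %x
// so that the compare and the select share operands in the min/max idiom form.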
Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
  Value *CondVal = SI.getCondition();
  Value *TrueVal = SI.getTrueValue();
  Value *FalseVal = SI.getFalseValue();

  // select true, X, Y  -> X
  // select false, X, Y -> Y
  if (ConstantInt *C = dyn_cast<ConstantInt>(CondVal))
    return ReplaceInstUsesWith(SI, C->getZExtValue() ? TrueVal : FalseVal);

  // select C, X, X -> X
  if (TrueVal == FalseVal)
    return ReplaceInstUsesWith(SI, TrueVal);

  if (isa<UndefValue>(TrueVal))   // select C, undef, X -> X
    return ReplaceInstUsesWith(SI, FalseVal);
  if (isa<UndefValue>(FalseVal))  // select C, X, undef -> X
    return ReplaceInstUsesWith(SI, TrueVal);
  if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y
    if (isa<Constant>(TrueVal))
      return ReplaceInstUsesWith(SI, TrueVal);
    else
      return ReplaceInstUsesWith(SI, FalseVal);
  }

  if (SI.getType() == Type::Int1Ty) {
    if (ConstantInt *C = dyn_cast<ConstantInt>(TrueVal)) {
      if (C->getZExtValue()) {
        // Change: A = select B, true, C --> A = or B, C
        return BinaryOperator::CreateOr(CondVal, FalseVal);
      } else {
        // Change: A = select B, false, C --> A = and !B, C
        Value *NotCond =
          InsertNewInstBefore(BinaryOperator::CreateNot(*Context, CondVal,
                                             "not."+CondVal->getName()), SI);
        return BinaryOperator::CreateAnd(NotCond, FalseVal);
      }
    } else if (ConstantInt *C = dyn_cast<ConstantInt>(FalseVal)) {
      if (C->getZExtValue() == false) {
        // Change: A = select B, C, false --> A = and B, C
        return BinaryOperator::CreateAnd(CondVal, TrueVal);
      } else {
        // Change: A = select B, C, true --> A = or !B, C
        Value *NotCond =
          InsertNewInstBefore(BinaryOperator::CreateNot(*Context, CondVal,
                                             "not."+CondVal->getName()), SI);
        return BinaryOperator::CreateOr(NotCond, TrueVal);
      }
    }

    // select a, b, a  -> a&b
    // select a, a, b  -> a|b
    if (CondVal == TrueVal)
      return BinaryOperator::CreateOr(CondVal, FalseVal);
    else if (CondVal == FalseVal)
      return BinaryOperator::CreateAnd(CondVal, TrueVal);
  }

  // Selecting between two integer constants?
  if (ConstantInt *TrueValC = dyn_cast<ConstantInt>(TrueVal))
    if (ConstantInt *FalseValC = dyn_cast<ConstantInt>(FalseVal)) {
      // select C, 1, 0 -> zext C to int
      if (FalseValC->isZero() && TrueValC->getValue() == 1) {
        return CastInst::Create(Instruction::ZExt, CondVal, SI.getType());
      } else if (TrueValC->isZero() && FalseValC->getValue() == 1) {
        // select C, 0, 1 -> zext !C to int
        Value *NotCond =
          InsertNewInstBefore(BinaryOperator::CreateNot(*Context, CondVal,
                                             "not."+CondVal->getName()), SI);
        return CastInst::Create(Instruction::ZExt, NotCond, SI.getType());
      }

      if (ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition())) {
        // If one of the constants is zero (we know they can't both be) and we
        // have an icmp instruction with zero, and we have an 'and' with the
        // non-constant value, eliminate this whole mess.  This corresponds to
        // cases like this: ((X & 27) ? 27 : 0)
        if (TrueValC->isZero() || FalseValC->isZero())
          if (IC->isEquality() && isa<ConstantInt>(IC->getOperand(1)) &&
              cast<Constant>(IC->getOperand(1))->isNullValue())
            if (Instruction *ICA = dyn_cast<Instruction>(IC->getOperand(0)))
              if (ICA->getOpcode() == Instruction::And &&
                  isa<ConstantInt>(ICA->getOperand(1)) &&
                  (ICA->getOperand(1) == TrueValC ||
                   ICA->getOperand(1) == FalseValC) &&
                  isOneBitSet(cast<ConstantInt>(ICA->getOperand(1)))) {
                // Okay, now we know that everything is set up, we just don't
                // know whether we have a icmp_ne or icmp_eq and whether the
                // true or false val is the zero.
                bool ShouldNotVal = !TrueValC->isZero();
                ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
                Value *V = ICA;
                if (ShouldNotVal)
                  V = InsertNewInstBefore(BinaryOperator::Create(
                                  Instruction::Xor, V, ICA->getOperand(1)), SI);
                return ReplaceInstUsesWith(SI, V);
              }
      }
    }

  // See if we are selecting two values based on a comparison of the two values.
  if (FCmpInst *FCI = dyn_cast<FCmpInst>(CondVal)) {
    if (FCI->getOperand(0) == TrueVal && FCI->getOperand(1) == FalseVal) {
      // Transform (X == Y) ? X : Y  -> Y
      if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
        // This is not safe in general for floating point:
        // consider X== -0, Y== +0.
        // It becomes safe if either operand is a nonzero constant.
        ConstantFP *CFPt, *CFPf;
        if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
              !CFPt->getValueAPF().isZero()) ||
            ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
              !CFPf->getValueAPF().isZero()))
          return ReplaceInstUsesWith(SI, FalseVal);
      }
      // Transform (X != Y) ? X : Y  -> X
      if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
        return ReplaceInstUsesWith(SI, TrueVal);
      // NOTE: if we wanted to, this is where to detect MIN/MAX

    } else if (FCI->getOperand(0) == FalseVal && FCI->getOperand(1) == TrueVal){
      // Transform (X == Y) ? Y : X  -> X
      if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
        // This is not safe in general for floating point:
        // consider X== -0, Y== +0.
        // It becomes safe if either operand is a nonzero constant.
        ConstantFP *CFPt, *CFPf;
        if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
              !CFPt->getValueAPF().isZero()) ||
            ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
              !CFPf->getValueAPF().isZero()))
          return ReplaceInstUsesWith(SI, FalseVal);
      }
      // Transform (X != Y) ? Y : X  -> Y
      if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
        return ReplaceInstUsesWith(SI, TrueVal);
      // NOTE: if we wanted to, this is where to detect MIN/MAX
    }
    // NOTE: if we wanted to, this is where to detect ABS
  }

  // See if we are selecting two values based on a comparison of the two values.
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(CondVal))
    if (Instruction *Result = visitSelectInstWithICmp(SI, ICI))
      return Result;

  if (Instruction *TI = dyn_cast<Instruction>(TrueVal))
    if (Instruction *FI = dyn_cast<Instruction>(FalseVal))
      if (TI->hasOneUse() && FI->hasOneUse()) {
        Instruction *AddOp = 0, *SubOp = 0;

        // Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
        if (TI->getOpcode() == FI->getOpcode())
          if (Instruction *IV = FoldSelectOpOp(SI, TI, FI))
            return IV;

        // Turn select C, (X+Y), (X-Y) --> (X+(select C, Y, (-Y))).  This is
        // even legal for FP.
        if ((TI->getOpcode() == Instruction::Sub &&
             FI->getOpcode() == Instruction::Add) ||
            (TI->getOpcode() == Instruction::FSub &&
             FI->getOpcode() == Instruction::FAdd)) {
          AddOp = FI; SubOp = TI;
        } else if ((FI->getOpcode() == Instruction::Sub &&
                    TI->getOpcode() == Instruction::Add) ||
                   (FI->getOpcode() == Instruction::FSub &&
                    TI->getOpcode() == Instruction::FAdd)) {
          AddOp = TI; SubOp = FI;
        }

        if (AddOp) {
          Value *OtherAddOp = 0;
          if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
            OtherAddOp = AddOp->getOperand(1);
          } else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
            OtherAddOp = AddOp->getOperand(0);
          }

          if (OtherAddOp) {
            // So at this point we know we have (Y -> OtherAddOp):
            //        select C, (add X, Y), (sub X, Z)
            Value *NegVal;  // Compute -Z
            if (Constant *C = dyn_cast<Constant>(SubOp->getOperand(1))) {
              NegVal = ConstantExpr::getNeg(C);
            } else {
              NegVal = InsertNewInstBefore(
                    BinaryOperator::CreateNeg(*Context, SubOp->getOperand(1),
                                              "tmp"), SI);
            }

            Value *NewTrueOp = OtherAddOp;
            Value *NewFalseOp = NegVal;
            if (AddOp != TI)
              std::swap(NewTrueOp, NewFalseOp);
            Instruction *NewSel =
              SelectInst::Create(CondVal, NewTrueOp,
                                 NewFalseOp, SI.getName() + ".p");

            NewSel = InsertNewInstBefore(NewSel, SI);
            return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
          }
        }
      }

  // See if we can fold the select into one of our operands.
  if (SI.getType()->isInteger()) {
    Instruction *FoldI = FoldSelectIntoOp(SI, TrueVal, FalseVal);
    if (FoldI)
      return FoldI;
  }

  if (BinaryOperator::isNot(CondVal)) {
    SI.setOperand(0, BinaryOperator::getNotArgument(CondVal));
    SI.setOperand(1, FalseVal);
    SI.setOperand(2, TrueVal);
    return &SI;
  }

  return 0;
}
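// Illustrative example of the final canonicalization above (pseudo-IR): a
// select on a negated condition,
//   %nc = xor i1 %c, true
//   %r  = select i1 %nc, i32 %a, i32 %b
// is rewritten in place to select on %c with the arms swapped:
//   %r  = select i1 %c, i32 %b, i32 %a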
/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign.  This isn't
/// often possible though.  If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    break;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the alignment
    // of the global.
    if (!GV->isDeclaration()) {
      if (GV->getAlignment() >= PrefAlign)
        Align = GV->getAlignment();
      else {
        GV->setAlignment(PrefAlign);
        Align = PrefAlign;
      }
    }
  } else if (AllocationInst *AI = dyn_cast<AllocationInst>(V)) {
    // If there is a requested alignment and if this is an alloca, round up.  We
    // don't do this for malloc, because some systems can't respect the request.
    if (isa<AllocaInst>(AI)) {
      if (AI->getAlignment() >= PrefAlign)
        Align = AI->getAlignment();
      else {
        AI->setAlignment(PrefAlign);
        Align = PrefAlign;
      }
    }
  }

  return Align;
}
/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0.  If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
                      sizeof(PrefAlign) * CHAR_BIT;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();
  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}
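// Illustrative example of the computation above: if ComputeMaskedBits proves
// that the low four bits of the pointer are zero, then KnownZero ends in four
// one bits, so TrailZ == 4 and the derived alignment is 1u << 4 == 16 bytes.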
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for intrinsic.  See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  const Type *NewPtrTy =
                PointerType::getUnqual(IntegerType::get(Size<<3));

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store rather than
  // an i64 load+store, here because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than the
  // integer datatype.
  if (Value *Op = getBitCastOperand(MI->getOperand(1))) {
    const Type *SrcETy = cast<PointerType>(Op->getType())->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType())
        NewPtrTy = PointerType::getUnqual(SrcETy);
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = InsertBitCastBefore(MI->getOperand(2), NewPtrTy, *MI);
  Value *Dest = InsertBitCastBefore(MI->getOperand(1), NewPtrTy, *MI);
  Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
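// Illustrative example of the transform above (pseudo-IR, names made up): an
// 8-byte memcpy whose destination was bitcast from double*,
//   memcpy(%dst, %src, 8, align 8)
// becomes, roughly,
//   %tmp = load double* %src.cast, align 8
//   store double %tmp, double* %dst.cast, align 8
// and the intrinsic's length operand is zeroed so the now-dead memcpy is
// erased on the next iteration of the worklist.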
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || FillC->getType() != Type::Int8Ty)
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = InsertBitCastBefore(Dest, PointerType::getUnqual(ITy), *MI);

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}
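// Illustrative example of the transform above (pseudo-IR):
//   memset(%p, 1, 4, align 4)
// becomes a single store of the splatted fill value through a bitcast pointer:
//   store i32 0x01010101, i32* %p.cast, align 4
// The 4-byte length and the i8 fill byte 1 give 1 * 0x0101010101010101
// truncated to i32, i.e. 0x01010101.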
/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[1];
          Tys[0] = CI.getOperand(3)->getType();
          CI.setOperand(0,
                        Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
          Changed = true;
        }

      // memmove(x,x,size) -> noop.
      if (MMI->getSource() == MMI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getOperand(1));
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx     -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      Value *Ptr = InsertBitCastBefore(II->getOperand(1),
                                       PointerType::getUnqual(II->getType()),
                                       CI);
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(1)->getType());
      Value *Ptr = InsertBitCastBefore(II->getOperand(2), OpPtrTy, CI);
      return new StoreInst(II->getOperand(1), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(2)->getType());
      Value *Ptr = InsertBitCastBefore(II->getOperand(1), OpPtrTy, CI);
      return new StoreInst(II->getOperand(2), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvttss2si: {
    // These intrinsics only demands the 0th element of its input vector.  If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
                                              UndefElts)) {
      II->setOperand(1, V);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 =InsertBitCastBefore(II->getOperand(1),Mask->getType(),CI);
        Value *Op1 =InsertBitCastBefore(II->getOperand(2),Mask->getType(),CI);
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            Instruction *Elt =
              ExtractElementInst::Create(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::Int32Ty, Idx&15, false), "tmp");
            InsertNewInstBefore(Elt, CI);
            ExtractedElts[Idx] = Elt;
          }

          // Insert this value into the result vector.
          Result = InsertElementInst::Create(Result, ExtractedElts[Idx],
                               ConstantInt::get(Type::Int32Ty, i, false),
                               "tmp");
          InsertNewInstBefore(cast<Instruction>(Result), CI);
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}
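// Illustrative example of the alignment-based folds above (pseudo-IR, names
// made up): once GetOrEnforceKnownAlignment proves the pointer is at least
// 16-byte aligned, a call to an x86 unaligned-load intrinsic such as
//   %v = call <4 x float> @loadu.ps(i8* %p)
// is replaced by an ordinary vector load through a bitcast pointer:
//   %p.cast = bitcast i8* %p to <4 x float>*
//   %v = load <4 x float>* %p.cast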
// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const unsigned ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
            cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    if (CalleeF->getCallingConv() != CS.getCallingConv()) {
      Instruction *OldCall = CS.getInstruction();
      // If the call and callee calling conventions don't match, this call must
      // be unreachable, as the call is undefined.
      new StoreInst(ConstantInt::getTrue(*Context),
                UndefValue::get(PointerType::getUnqual(Type::Int1Ty)),
                                  OldCall);
      if (!OldCall->use_empty())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))   // Not worth removing an invoke here.
        return EraseInstFromFunction(*OldCall);
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(*Context),
               UndefValue::get(PointerType::getUnqual(Type::Int1Ty)),
                  CS.getInstruction());

    if (!CS.getInstruction()->use_empty())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(*Context), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  return Changed ? CS.getInstruction() : 0;
}
// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (isa<StructType>(NewRetTy))
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or from
        // a pointer to an integer of the same size.
        !((isa<PointerType>(OldRetTy) || !TD ||
           OldRetTy == TD->getIntPtrType()) &&
          (isa<PointerType>(NewRetTy) || !TD ||
           NewRetTy == TD->getIntPtrType())))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        NewRetTy != Type::VoidTy && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used by
    // a PHI node in a successor, we cannot change the return type of the call
    // because there is no place to put the cast instruction (without breaking
    // the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((isa<PointerType>(ParamTy) || ParamTy == TD->getIntPtrType()) &&
              (isa<PointerType>(ActTy) || ActTy == TD->getIntPtrType())));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      CastInst *NewCast = CastInst::Create(opcode, *AI, ParamTy, "tmp");
      Args.push_back(InsertNewInstBefore(NewCast, *Caller));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning...
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list...
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode = CastInst::getCastOpcode(*AI, false,
                                                                PTy, false);
          Instruction *Cast = CastInst::Create(opcode, *AI, PTy, "tmp");
          InsertNewInstBefore(Cast, *Caller);
          Args.push_back(Cast);
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs =  CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy == Type::VoidTy)
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (NV->getType() != Type::VoidTy) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi, instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call instr
        InsertNewInstBefore(NC, *Caller);
      }
      AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (Caller->getType() != Type::VoidTy && !Caller->use_empty())
    Caller->replaceAllUsesWith(NV);
  Caller->eraseFromParent();
  RemoveFromWorkList(Caller);
  return true;
}
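// Illustrative example of the transform above (pseudo-IR, names made up): a
// call through a bitcast constant expression such as
//   call i32 bitcast (i32 (i8*)* @f to i32 (i32*)*)(i32* %p)
// is rewritten as a direct call with the argument cast instead:
//   %p.c = bitcast i32* %p to i8*
//   call i32 @f(i8* %p.c)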
// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getOperand(3);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (Caller->getType() != Type::VoidTy && !Caller->use_empty())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      RemoveFromWorkList(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}
/// FoldPHIArgBinOpIntoPHI - If we have something like phi [add (a,b), add(c,d)]
/// and if a/b/c/d and the add's all have a single use, turn this into two phi's
/// and a single binop.
Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
  unsigned Opc = FirstInst->getOpcode();
  Value *LHSVal = FirstInst->getOperand(0);
  Value *RHSVal = FirstInst->getOperand(1);

  const Type *LHSType = LHSVal->getType();
  const Type *RHSType = RHSVal->getType();

  // Scan to see if all operands are the same opcode, all have one use, and all
  // kill their operands (i.e. the operands have one use).
  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
        // Verify type of the LHS matches so we don't fold cmp's of different
        // types or GEP's with different index types.
        I->getOperand(0)->getType() != LHSType ||
        I->getOperand(1)->getType() != RHSType)
      return 0;

    // If they are CmpInst instructions, check their predicates
    if (Opc == Instruction::ICmp || Opc == Instruction::FCmp)
      if (cast<CmpInst>(I)->getPredicate() !=
          cast<CmpInst>(FirstInst)->getPredicate())
        return 0;

    // Keep track of which operand needs a phi node.
    if (I->getOperand(0) != LHSVal) LHSVal = 0;
    if (I->getOperand(1) != RHSVal) RHSVal = 0;
  }

  // Otherwise, this is safe to transform!

  Value *InLHS = FirstInst->getOperand(0);
  Value *InRHS = FirstInst->getOperand(1);
  PHINode *NewLHS = 0, *NewRHS = 0;
  if (LHSVal == 0) {
    NewLHS = PHINode::Create(LHSType,
                             FirstInst->getOperand(0)->getName() + ".pn");
    NewLHS->reserveOperandSpace(PN.getNumOperands()/2);
    NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewLHS, PN);
    LHSVal = NewLHS;
  }

  if (RHSVal == 0) {
    NewRHS = PHINode::Create(RHSType,
                             FirstInst->getOperand(1)->getName() + ".pn");
    NewRHS->reserveOperandSpace(PN.getNumOperands()/2);
    NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewRHS, PN);
    RHSVal = NewRHS;
  }

  // Add all operands to the new PHIs.
  if (NewLHS || NewRHS) {
    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
      Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
      if (NewLHS) {
        Value *NewInLHS = InInst->getOperand(0);
        NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
      }
      if (NewRHS) {
        Value *NewInRHS = InInst->getOperand(1);
        NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
      }
    }
  }

  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
    return BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
  CmpInst *CIOp = cast<CmpInst>(FirstInst);
  return CmpInst::Create(*Context, CIOp->getOpcode(), CIOp->getPredicate(),
                         LHSVal, RHSVal);
}
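// Illustrative example of the transform above (pseudo-IR):
//   %a1 = add i32 %x, %c1                       ; in %bb1, single use
//   %a2 = add i32 %x, %c2                       ; in %bb2, single use
//   %p  = phi i32 [ %a1, %bb1 ], [ %a2, %bb2 ]
// becomes a phi of the differing operands feeding a single add:
//   %rhs = phi i32 [ %c1, %bb1 ], [ %c2, %bb2 ]
//   %p   = add i32 %x, %rhs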
Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
  GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));

  SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
                                        FirstInst->op_end());
  // This is true if all GEP bases are allocas and if all indices into them are
  // constants.
  bool AllBasePointersAreAllocas = true;

  // Scan to see if all operands are the same opcode, all have one use, and all
  // kill their operands (i.e. the operands have one use).
  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
    if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
        GEP->getNumOperands() != FirstInst->getNumOperands())
      return 0;

    // Keep track of whether or not all GEPs are of alloca pointers.
    if (AllBasePointersAreAllocas &&
        (!isa<AllocaInst>(GEP->getOperand(0)) ||
         !GEP->hasAllConstantIndices()))
      AllBasePointersAreAllocas = false;

    // Compare the operand lists.
    for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
      if (FirstInst->getOperand(op) == GEP->getOperand(op))
        continue;

      // Don't merge two GEPs when two operands differ (introducing phi nodes)
      // if one of the PHIs has a constant for the index.  The index may be
      // substantially cheaper to compute for the constants, so making it a
      // variable index could pessimize the path.  This also handles the case
      // for struct indices, which must always be constant.
      if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
          isa<ConstantInt>(GEP->getOperand(op)))
        return 0;

      if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType())
        return 0;
      FixedOperands[op] = 0;  // Needs a PHI.
    }
  }

  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
  // bother doing this transformation.  At best, this will just save a bit of
  // offset calculation, but all the predecessors will have to materialize the
  // stack address into a register anyway.  We'd actually rather *clone* the
  // load up into the predecessors so that we have a load of a gep of an alloca,
  // which can usually all be folded into the load.
  if (AllBasePointersAreAllocas)
    return 0;

  // Otherwise, this is safe to transform.  Insert PHI nodes for each operand
  // that is variable.
  SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());

  bool HasAnyPHIs = false;
  for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
    if (FixedOperands[i]) continue;  // operand doesn't need a phi.
    Value *FirstOp = FirstInst->getOperand(i);
    PHINode *NewPN = PHINode::Create(FirstOp->getType(),
                                     FirstOp->getName()+".pn");
    InsertNewInstBefore(NewPN, PN);

    NewPN->reserveOperandSpace(e);
    NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
    OperandPhis[i] = NewPN;
    FixedOperands[i] = NewPN;
    HasAnyPHIs = true;
  }


  // Add all operands to the new PHIs.
  if (HasAnyPHIs) {
    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
      GetElementPtrInst *InGEP =cast<GetElementPtrInst>(PN.getIncomingValue(i));
      BasicBlock *InBB = PN.getIncomingBlock(i);

      for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
        if (PHINode *OpPhi = OperandPhis[op])
          OpPhi->addIncoming(InGEP->getOperand(op), InBB);
    }
  }

  Value *Base = FixedOperands[0];
  GetElementPtrInst *GEP =
    GetElementPtrInst::Create(Base, FixedOperands.begin()+1,
                              FixedOperands.end());
  if (cast<GEPOperator>(FirstInst)->isInBounds())
    cast<GEPOperator>(GEP)->setIsInBounds(true);
  return GEP;
}
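// Illustrative example of the transform above (pseudo-IR, names made up): a
// phi of two GEPs that differ only in one non-constant index,
//   %g1 = getelementptr i32* %base, i32 %i      ; in %bb1
//   %g2 = getelementptr i32* %base, i32 %j      ; in %bb2
//   %p  = phi i32* [ %g1, %bb1 ], [ %g2, %bb2 ]
// becomes a phi of the index feeding a single GEP:
//   %idx = phi i32 [ %i, %bb1 ], [ %j, %bb2 ]
//   %p   = getelementptr i32* %base, i32 %idx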
/// isSafeAndProfitableToSinkLoad - Return true if we know that it is safe to
/// sink the load out of the block that defines it.  This means that it must be
/// obvious the value of the load is not changed from the point of the load to
/// the end of the block it is in.
///
/// Finally, it is safe, but not profitable, to sink a load targetting a
/// non-address-taken alloca.  Doing so will cause us to not promote the alloca
/// to a register.
static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
  BasicBlock::iterator BBI = L, E = L->getParent()->end();

  for (++BBI; BBI != E; ++BBI)
    if (BBI->mayWriteToMemory())
      return false;

  // Check for non-address taken alloca.  If not address-taken already, it isn't
  // profitable to do this xform.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
    bool isAddressTaken = false;
    for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
         UI != E; ++UI) {
      if (isa<LoadInst>(UI)) continue;
      if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
        // If storing TO the alloca, then the address isn't taken.
        if (SI->getOperand(1) == AI) continue;
      }
      isAddressTaken = true;
      break;
    }

    if (!isAddressTaken && AI->isStaticAlloca())
      return false;
  }

  // If this load is a load from a GEP with a constant offset from an alloca,
  // then we don't want to sink it.  In its present form, it will be
  // load [constant stack offset].  Sinking it will cause us to have to
  // materialize the stack addresses in each predecessor in a register only to
  // do a shared load from register in the successor.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
    if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
      if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
        return false;

  return true;
}
// FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
// operator and they all are only used by the PHI, PHI together their
// inputs, and do the operation once, to the result of the PHI.
Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));

  // Scan the instruction, looking for input operations that can be folded away.
  // If all input operands to the phi are the same instruction (e.g. a cast from
  // the same type or "+42") we can pull the operation through the PHI, reducing
  // code size and simplifying code.
  Constant *ConstantOp = 0;
  const Type *CastSrcTy = 0;
  bool isVolatile = false;
  if (isa<CastInst>(FirstInst)) {
    CastSrcTy = FirstInst->getOperand(0)->getType();
  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
    // Can fold binop, compare or shift here if the RHS is a constant,
    // otherwise call FoldPHIArgBinOpIntoPHI.
    ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
    if (ConstantOp == 0)
      return FoldPHIArgBinOpIntoPHI(PN);
  } else if (LoadInst *LI = dyn_cast<LoadInst>(FirstInst)) {
    isVolatile = LI->isVolatile();
    // We can't sink the load if the loaded value could be modified between the
    // load and the PHI.
    if (LI->getParent() != PN.getIncomingBlock(0) ||
        !isSafeAndProfitableToSinkLoad(LI))
      return 0;

    // If the PHI is of volatile loads and the load block has multiple
    // successors, sinking it would remove a load of the volatile value from
    // the path through the other successor.
    if (isVolatile &&
        LI->getParent()->getTerminator()->getNumSuccessors() != 1)
      return 0;

  } else if (isa<GetElementPtrInst>(FirstInst)) {
    return FoldPHIArgGEPIntoPHI(PN);
  } else {
    return 0;  // Cannot fold this operation.
  }

  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    if (!isa<Instruction>(PN.getIncomingValue(i))) return 0;
    Instruction *I = cast<Instruction>(PN.getIncomingValue(i));
    if (!I->hasOneUse() || !I->isSameOperationAs(FirstInst))
      return 0;
    if (CastSrcTy) {
      if (I->getOperand(0)->getType() != CastSrcTy)
        return 0;  // Cast operation must match.
    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      // We can't sink the load if the loaded value could be modified between
      // the load and the PHI.
      if (LI->isVolatile() != isVolatile ||
          LI->getParent() != PN.getIncomingBlock(i) ||
          !isSafeAndProfitableToSinkLoad(LI))
        return 0;

      // If the PHI is of volatile loads and the load block has multiple
      // successors, sinking it would remove a load of the volatile value from
      // the path through the other successor.
      if (isVolatile &&
          LI->getParent()->getTerminator()->getNumSuccessors() != 1)
        return 0;

    } else if (I->getOperand(1) != ConstantOp) {
      return 0;
    }
  }

  // Okay, they are all the same operation.  Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
                                   PN.getName()+".in");
  NewPN->reserveOperandSpace(PN.getNumOperands()/2);

  Value *InVal = FirstInst->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
    if (NewInVal != InVal)
      InVal = 0;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together.  This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
    PhiVal = NewPN;
  }

  // Insert and return the new operation.
  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst))
    return CastInst::Create(FirstCI->getOpcode(), PhiVal, PN.getType());
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
    return BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst))
    return CmpInst::Create(*Context, CIOp->getOpcode(), CIOp->getPredicate(),
                           PhiVal, ConstantOp);
  assert(isa<LoadInst>(FirstInst) && "Unknown operation");

  // If this was a volatile load that we are merging, make sure to loop through
  // and mark all the input loads as non-volatile.  If we don't do this, we will
  // insert a new volatile load and the old ones will not be deletable.
  if (isVolatile)
    for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
      cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false);

  return new LoadInst(PhiVal, "", isVolatile);
}

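// For example (illustrative), a PHI whose inputs are all sign-extensions of
// the same source type:
//   %a = sext i16 %x to i32              ; in %pred1
//   %b = sext i16 %y to i32              ; in %pred2
//   %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
// is rewritten so the extension happens once, after the PHI:
//   %p.in = phi i16 [ %x, %pred1 ], [ %y, %pred2 ]
//   %p    = sext i16 %p.in to i32
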
/// DeadPHICycle - Return true if this PHI node is only used by a PHI node
/// cycle that is dead.
static bool DeadPHICycle(PHINode *PN,
                         SmallPtrSet<PHINode*, 16> &PotentiallyDeadPHIs) {
  if (PN->use_empty()) return true;
  if (!PN->hasOneUse()) return false;

  // Remember this node, and if we find the cycle, return.
  if (!PotentiallyDeadPHIs.insert(PN))
    return true;

  // Don't scan crazily complex things.
  if (PotentiallyDeadPHIs.size() == 16)
    return false;

  if (PHINode *PU = dyn_cast<PHINode>(PN->use_back()))
    return DeadPHICycle(PU, PotentiallyDeadPHIs);

  return false;
}

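// For example (illustrative), two PHIs that only feed each other form a dead
// cycle and can both be removed:
//   %x = phi i32 [ %y, %bb1 ], [ 0, %entry ]   ; only used by %y
//   %y = phi i32 [ %x, %bb2 ], [ 1, %entry ]   ; only used by %x
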
/// PHIsEqualValue - Return true if this phi node is always equal to
/// NonPhiInVal.  This happens with mutually cyclic phi nodes like:
///   z = some value; x = phi (y, z); y = phi (x, z)
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
                           SmallPtrSet<PHINode*, 16> &ValueEqualPHIs) {
  // See if we already saw this PHI node.
  if (!ValueEqualPHIs.insert(PN))
    return true;

  // Don't scan crazily complex things.
  if (ValueEqualPHIs.size() == 16)
    return false;

  // Scan the operands to see if they are either phi nodes or are equal to
  // the value.
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *Op = PN->getIncomingValue(i);
    if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
      if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
        return false;
    } else if (Op != NonPhiInVal)
      return false;
  }

  return true;
}

// PHINode simplification
//
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
  // If LCSSA is around, don't mess with Phi nodes
  if (MustPreserveLCSSA) return 0;

  if (Value *V = PN.hasConstantValue())
    return ReplaceInstUsesWith(PN, V);

  // If all PHI operands are the same operation, pull them through the PHI,
  // reducing code size.
  if (isa<Instruction>(PN.getIncomingValue(0)) &&
      isa<Instruction>(PN.getIncomingValue(1)) &&
      cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
      cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
      // FIXME: The hasOneUse check will fail for PHIs that use the value more
      // than once themselves, e.g. when the same value is incoming from more
      // than one predecessor.
      PN.getIncomingValue(0)->hasOneUse())
    if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
      return Result;

  // If this is a trivial cycle in the PHI node graph, remove it.  Basically, if
  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
  // PHI)... break the cycle.
  if (PN.hasOneUse()) {
    Instruction *PHIUser = cast<Instruction>(PN.use_back());
    if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
      SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
      PotentiallyDeadPHIs.insert(&PN);
      if (DeadPHICycle(PU, PotentiallyDeadPHIs))
        return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }

    // If this phi has a single use, and if that use just computes a value for
    // the next iteration of a loop, delete the phi.  This occurs with unused
    // induction variables, e.g. "for (int j = 0; ; ++j);".  Detecting this
    // common case here is good because the only other things that catch this
    // are induction variable analysis (sometimes) and ADCE, which is only run
    // late.
    if (PHIUser->hasOneUse() &&
        (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
        PHIUser->use_back() == &PN) {
      return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }
  }
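
  // For example (illustrative), the unused induction variable in
  // "for (int j = 0; ; ++j);" appears as:
  //   %j      = phi i32 [ 0, %entry ], [ %j.next, %loop ]
  //   %j.next = add i32 %j, 1            ; only used by %j
  // Replacing %j with undef leaves both instructions dead.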

  // We sometimes end up with phi cycles that non-obviously end up being the
  // same value, for example:
  //   z = some value; x = phi (y, z); y = phi (x, z)
  // where the phi nodes don't necessarily need to be in the same block.  Do a
  // quick check to see if the PHI node only contains a single non-phi value, if
  // so, scan to see if the phi cycle is actually equal to that value.
  {
    unsigned InValNo = 0, NumOperandVals = PN.getNumIncomingValues();
    // Scan for the first non-phi operand.
    while (InValNo != NumOperandVals &&
           isa<PHINode>(PN.getIncomingValue(InValNo)))
      ++InValNo;

    if (InValNo != NumOperandVals) {
      Value *NonPhiInVal = PN.getOperand(InValNo);

      // Scan the rest of the operands to see if there are any conflicts, if so
      // there is no need to recursively scan other phis.
      for (++InValNo; InValNo != NumOperandVals; ++InValNo) {
        Value *OpVal = PN.getIncomingValue(InValNo);
        if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
          break;
      }

      // If we scanned over all operands, then we have one unique value plus
      // phi values.  Scan PHI nodes to see if they all merge in each other or
      // the value.
      if (InValNo == NumOperandVals) {
        SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
        if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
          return ReplaceInstUsesWith(PN, NonPhiInVal);
      }
    }
  }
  return 0;
}

static Value *InsertCastToIntPtrTy(Value *V, const Type *DTy,
                                   Instruction *InsertPoint,
                                   InstCombiner *IC) {
  unsigned PtrSize = DTy->getScalarSizeInBits();
  unsigned VTySize = V->getType()->getScalarSizeInBits();
  // We must cast correctly to the pointer type.  Ensure that we
  // sign extend the integer value if it is smaller as this is
  // used for address computation.
  Instruction::CastOps opcode =
    (VTySize < PtrSize ? Instruction::SExt :
     (VTySize == PtrSize ? Instruction::BitCast : Instruction::Trunc));
  return IC->InsertCastBefore(opcode, V, DTy, *InsertPoint);
}

10988 Instruction
*InstCombiner::visitGetElementPtrInst(GetElementPtrInst
&GEP
) {
10989 Value
*PtrOp
= GEP
.getOperand(0);
10990 // Is it 'getelementptr %P, i32 0' or 'getelementptr %P'
10991 // If so, eliminate the noop.
10992 if (GEP
.getNumOperands() == 1)
10993 return ReplaceInstUsesWith(GEP
, PtrOp
);
10995 if (isa
<UndefValue
>(GEP
.getOperand(0)))
10996 return ReplaceInstUsesWith(GEP
, UndefValue::get(GEP
.getType()));
10998 bool HasZeroPointerIndex
= false;
10999 if (Constant
*C
= dyn_cast
<Constant
>(GEP
.getOperand(1)))
11000 HasZeroPointerIndex
= C
->isNullValue();
11002 if (GEP
.getNumOperands() == 2 && HasZeroPointerIndex
)
11003 return ReplaceInstUsesWith(GEP
, PtrOp
);
11005 // Eliminate unneeded casts for indices.
11006 bool MadeChange
= false;
11008 gep_type_iterator GTI
= gep_type_begin(GEP
);
11009 for (User::op_iterator i
= GEP
.op_begin() + 1, e
= GEP
.op_end();
11010 i
!= e
; ++i
, ++GTI
) {
11011 if (TD
&& isa
<SequentialType
>(*GTI
)) {
11012 if (CastInst
*CI
= dyn_cast
<CastInst
>(*i
)) {
11013 if (CI
->getOpcode() == Instruction::ZExt
||
11014 CI
->getOpcode() == Instruction::SExt
) {
11015 const Type
*SrcTy
= CI
->getOperand(0)->getType();
11016 // We can eliminate a cast from i32 to i64 iff the target
11017 // is a 32-bit pointer target.
11018 if (SrcTy
->getScalarSizeInBits() >= TD
->getPointerSizeInBits()) {
11020 *i
= CI
->getOperand(0);
11024 // If we are using a wider index than needed for this platform, shrink it
11025 // to what we need. If narrower, sign-extend it to what we need.
11026 // If the incoming value needs a cast instruction,
11027 // insert it. This explicit cast can make subsequent optimizations more
11030 if (TD
->getTypeSizeInBits(Op
->getType()) > TD
->getPointerSizeInBits()) {
11031 if (Constant
*C
= dyn_cast
<Constant
>(Op
)) {
11032 *i
= ConstantExpr::getTrunc(C
, TD
->getIntPtrType());
11035 Op
= InsertCastBefore(Instruction::Trunc
, Op
, TD
->getIntPtrType(),
11040 } else if (TD
->getTypeSizeInBits(Op
->getType())
11041 < TD
->getPointerSizeInBits()) {
11042 if (Constant
*C
= dyn_cast
<Constant
>(Op
)) {
11043 *i
= ConstantExpr::getSExt(C
, TD
->getIntPtrType());
11046 Op
= InsertCastBefore(Instruction::SExt
, Op
, TD
->getIntPtrType(),
11054 if (MadeChange
) return &GEP
;
11056 // Combine Indices - If the source pointer to this getelementptr instruction
11057 // is a getelementptr instruction, combine the indices of the two
11058 // getelementptr instructions into a single instruction.
11060 SmallVector
<Value
*, 8> SrcGEPOperands
;
11061 bool BothInBounds
= cast
<GEPOperator
>(&GEP
)->isInBounds();
11062 if (GEPOperator
*Src
= dyn_cast
<GEPOperator
>(PtrOp
)) {
11063 SrcGEPOperands
.append(Src
->op_begin(), Src
->op_end());
11064 if (!Src
->isInBounds())
11065 BothInBounds
= false;
11068 if (!SrcGEPOperands
.empty()) {
11069 // Note that if our source is a gep chain itself that we wait for that
11070 // chain to be resolved before we perform this transformation. This
11071 // avoids us creating a TON of code in some cases.
11073 if (isa
<GetElementPtrInst
>(SrcGEPOperands
[0]) &&
11074 cast
<Instruction
>(SrcGEPOperands
[0])->getNumOperands() == 2)
11075 return 0; // Wait until our source is folded to completion.
11077 SmallVector
<Value
*, 8> Indices
;
11079 // Find out whether the last index in the source GEP is a sequential idx.
11080 bool EndsWithSequential
= false;
11081 for (gep_type_iterator I
= gep_type_begin(*cast
<User
>(PtrOp
)),
11082 E
= gep_type_end(*cast
<User
>(PtrOp
)); I
!= E
; ++I
)
11083 EndsWithSequential
= !isa
<StructType
>(*I
);
11085 // Can we combine the two pointer arithmetics offsets?
11086 if (EndsWithSequential
) {
11087 // Replace: gep (gep %P, long B), long A, ...
11088 // With: T = long A+B; gep %P, T, ...
11090 Value
*Sum
, *SO1
= SrcGEPOperands
.back(), *GO1
= GEP
.getOperand(1);
11091 if (SO1
== Constant::getNullValue(SO1
->getType())) {
11093 } else if (GO1
== Constant::getNullValue(GO1
->getType())) {
11096 // If they aren't the same type, convert both to an integer of the
11097 // target's pointer size.
11098 if (SO1
->getType() != GO1
->getType()) {
11099 if (Constant
*SO1C
= dyn_cast
<Constant
>(SO1
)) {
11101 ConstantExpr::getIntegerCast(SO1C
, GO1
->getType(), true);
11102 } else if (Constant
*GO1C
= dyn_cast
<Constant
>(GO1
)) {
11104 ConstantExpr::getIntegerCast(GO1C
, SO1
->getType(), true);
11106 unsigned PS
= TD
->getPointerSizeInBits();
11107 if (TD
->getTypeSizeInBits(SO1
->getType()) == PS
) {
11108 // Convert GO1 to SO1's type.
11109 GO1
= InsertCastToIntPtrTy(GO1
, SO1
->getType(), &GEP
, this);
11111 } else if (TD
->getTypeSizeInBits(GO1
->getType()) == PS
) {
11112 // Convert SO1 to GO1's type.
11113 SO1
= InsertCastToIntPtrTy(SO1
, GO1
->getType(), &GEP
, this);
11115 const Type
*PT
= TD
->getIntPtrType();
11116 SO1
= InsertCastToIntPtrTy(SO1
, PT
, &GEP
, this);
11117 GO1
= InsertCastToIntPtrTy(GO1
, PT
, &GEP
, this);
11121 if (isa
<Constant
>(SO1
) && isa
<Constant
>(GO1
))
11122 Sum
= ConstantExpr::getAdd(cast
<Constant
>(SO1
),
11123 cast
<Constant
>(GO1
));
11125 Sum
= BinaryOperator::CreateAdd(SO1
, GO1
, PtrOp
->getName()+".sum");
11126 InsertNewInstBefore(cast
<Instruction
>(Sum
), GEP
);
11130 // Recycle the GEP we already have if possible.
11131 if (SrcGEPOperands
.size() == 2) {
11132 GEP
.setOperand(0, SrcGEPOperands
[0]);
11133 GEP
.setOperand(1, Sum
);
11136 Indices
.insert(Indices
.end(), SrcGEPOperands
.begin()+1,
11137 SrcGEPOperands
.end()-1);
11138 Indices
.push_back(Sum
);
11139 Indices
.insert(Indices
.end(), GEP
.op_begin()+2, GEP
.op_end());
11141 } else if (isa
<Constant
>(*GEP
.idx_begin()) &&
11142 cast
<Constant
>(*GEP
.idx_begin())->isNullValue() &&
11143 SrcGEPOperands
.size() != 1) {
11144 // Otherwise we can do the fold if the first index of the GEP is a zero
11145 Indices
.insert(Indices
.end(), SrcGEPOperands
.begin()+1,
11146 SrcGEPOperands
.end());
11147 Indices
.insert(Indices
.end(), GEP
.idx_begin()+1, GEP
.idx_end());
11150 if (!Indices
.empty()) {
11151 GetElementPtrInst
*NewGEP
= GetElementPtrInst::Create(SrcGEPOperands
[0],
11156 cast
<GEPOperator
>(NewGEP
)->setIsInBounds(true);
11160 } else if (GlobalValue
*GV
= dyn_cast
<GlobalValue
>(PtrOp
)) {
11161 // GEP of global variable. If all of the indices for this GEP are
11162 // constants, we can promote this to a constexpr instead of an instruction.
11164 // Scan for nonconstants...
11165 SmallVector
<Constant
*, 8> Indices
;
11166 User::op_iterator I
= GEP
.idx_begin(), E
= GEP
.idx_end();
11167 for (; I
!= E
&& isa
<Constant
>(*I
); ++I
)
11168 Indices
.push_back(cast
<Constant
>(*I
));
11170 if (I
== E
) { // If they are all constants...
11171 Constant
*CE
= ConstantExpr::getGetElementPtr(GV
,
11172 &Indices
[0],Indices
.size());
11174 // Replace all uses of the GEP with the new constexpr...
11175 return ReplaceInstUsesWith(GEP
, CE
);
11177 } else if (Value
*X
= getBitCastOperand(PtrOp
)) { // Is the operand a cast?
11178 if (!isa
<PointerType
>(X
->getType())) {
11179 // Not interesting. Source pointer must be a cast from pointer.
11180 } else if (HasZeroPointerIndex
) {
11181 // transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
11182 // into : GEP [10 x i8]* X, i32 0, ...
11184 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
11185 // into : GEP i8* X, ...
11187 // This occurs when the program declares an array extern like "int X[];"
11188 const PointerType
*CPTy
= cast
<PointerType
>(PtrOp
->getType());
11189 const PointerType
*XTy
= cast
<PointerType
>(X
->getType());
11190 if (const ArrayType
*CATy
=
11191 dyn_cast
<ArrayType
>(CPTy
->getElementType())) {
11192 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
11193 if (CATy
->getElementType() == XTy
->getElementType()) {
11194 // -> GEP i8* X, ...
11195 SmallVector
<Value
*, 8> Indices(GEP
.idx_begin()+1, GEP
.idx_end());
11196 GetElementPtrInst
*NewGEP
=
11197 GetElementPtrInst::Create(X
, Indices
.begin(), Indices
.end(),
11199 if (cast
<GEPOperator
>(&GEP
)->isInBounds())
11200 cast
<GEPOperator
>(NewGEP
)->setIsInBounds(true);
11202 } else if (const ArrayType
*XATy
=
11203 dyn_cast
<ArrayType
>(XTy
->getElementType())) {
11204 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
11205 if (CATy
->getElementType() == XATy
->getElementType()) {
11206 // -> GEP [10 x i8]* X, i32 0, ...
11207 // At this point, we know that the cast source type is a pointer
11208 // to an array of the same type as the destination pointer
11209 // array. Because the array type is never stepped over (there
11210 // is a leading zero) we can fold the cast into this GEP.
11211 GEP
.setOperand(0, X
);
11216 } else if (GEP
.getNumOperands() == 2) {
11217 // Transform things like:
11218 // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
11219 // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
11220 const Type
*SrcElTy
= cast
<PointerType
>(X
->getType())->getElementType();
11221 const Type
*ResElTy
=cast
<PointerType
>(PtrOp
->getType())->getElementType();
11222 if (TD
&& isa
<ArrayType
>(SrcElTy
) &&
11223 TD
->getTypeAllocSize(cast
<ArrayType
>(SrcElTy
)->getElementType()) ==
11224 TD
->getTypeAllocSize(ResElTy
)) {
11226 Idx
[0] = Constant::getNullValue(Type::Int32Ty
);
11227 Idx
[1] = GEP
.getOperand(1);
11228 GetElementPtrInst
*NewGEP
=
11229 GetElementPtrInst::Create(X
, Idx
, Idx
+ 2, GEP
.getName());
11230 if (cast
<GEPOperator
>(&GEP
)->isInBounds())
11231 cast
<GEPOperator
>(NewGEP
)->setIsInBounds(true);
11232 Value
*V
= InsertNewInstBefore(NewGEP
, GEP
);
11233 // V and GEP are both pointer types --> BitCast
11234 return new BitCastInst(V
, GEP
.getType());
11237 // Transform things like:
11238 // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
11239 // (where tmp = 8*tmp2) into:
11240 // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
11242 if (TD
&& isa
<ArrayType
>(SrcElTy
) && ResElTy
== Type::Int8Ty
) {
11243 uint64_t ArrayEltSize
=
11244 TD
->getTypeAllocSize(cast
<ArrayType
>(SrcElTy
)->getElementType());
11246 // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We
11247 // allow either a mul, shift, or constant here.
11249 ConstantInt
*Scale
= 0;
11250 if (ArrayEltSize
== 1) {
11251 NewIdx
= GEP
.getOperand(1);
11253 ConstantInt::get(cast
<IntegerType
>(NewIdx
->getType()), 1);
11254 } else if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(GEP
.getOperand(1))) {
11255 NewIdx
= ConstantInt::get(CI
->getType(), 1);
11257 } else if (Instruction
*Inst
=dyn_cast
<Instruction
>(GEP
.getOperand(1))){
11258 if (Inst
->getOpcode() == Instruction::Shl
&&
11259 isa
<ConstantInt
>(Inst
->getOperand(1))) {
11260 ConstantInt
*ShAmt
= cast
<ConstantInt
>(Inst
->getOperand(1));
11261 uint32_t ShAmtVal
= ShAmt
->getLimitedValue(64);
11262 Scale
= ConstantInt::get(cast
<IntegerType
>(Inst
->getType()),
11264 NewIdx
= Inst
->getOperand(0);
11265 } else if (Inst
->getOpcode() == Instruction::Mul
&&
11266 isa
<ConstantInt
>(Inst
->getOperand(1))) {
11267 Scale
= cast
<ConstantInt
>(Inst
->getOperand(1));
11268 NewIdx
= Inst
->getOperand(0);
11272 // If the index will be to exactly the right offset with the scale taken
11273 // out, perform the transformation. Note, we don't know whether Scale is
11274 // signed or not. We'll use unsigned version of division/modulo
11275 // operation after making sure Scale doesn't have the sign bit set.
11276 if (ArrayEltSize
&& Scale
&& Scale
->getSExtValue() >= 0LL &&
11277 Scale
->getZExtValue() % ArrayEltSize
== 0) {
11278 Scale
= ConstantInt::get(Scale
->getType(),
11279 Scale
->getZExtValue() / ArrayEltSize
);
11280 if (Scale
->getZExtValue() != 1) {
11282 ConstantExpr::getIntegerCast(Scale
, NewIdx
->getType(),
11284 Instruction
*Sc
= BinaryOperator::CreateMul(NewIdx
, C
, "idxscale");
11285 NewIdx
= InsertNewInstBefore(Sc
, GEP
);
11288 // Insert the new GEP instruction.
11290 Idx
[0] = Constant::getNullValue(Type::Int32Ty
);
11292 Instruction
*NewGEP
=
11293 GetElementPtrInst::Create(X
, Idx
, Idx
+ 2, GEP
.getName());
11294 if (cast
<GEPOperator
>(&GEP
)->isInBounds())
11295 cast
<GEPOperator
>(NewGEP
)->setIsInBounds(true);
11296 NewGEP
= InsertNewInstBefore(NewGEP
, GEP
);
11297 // The NewGEP must be pointer typed, so must the old one -> BitCast
11298 return new BitCastInst(NewGEP
, GEP
.getType());
11304 /// See if we can simplify:
11305 /// X = bitcast A to B*
11306 /// Y = gep X, <...constant indices...>
11307 /// into a gep of the original struct. This is important for SROA and alias
11308 /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
11309 if (BitCastInst
*BCI
= dyn_cast
<BitCastInst
>(PtrOp
)) {
11311 !isa
<BitCastInst
>(BCI
->getOperand(0)) && GEP
.hasAllConstantIndices()) {
11312 // Determine how much the GEP moves the pointer. We are guaranteed to get
11313 // a constant back from EmitGEPOffset.
11314 ConstantInt
*OffsetV
=
11315 cast
<ConstantInt
>(EmitGEPOffset(&GEP
, GEP
, *this));
11316 int64_t Offset
= OffsetV
->getSExtValue();
11318 // If this GEP instruction doesn't move the pointer, just replace the GEP
11319 // with a bitcast of the real input to the dest type.
11321 // If the bitcast is of an allocation, and the allocation will be
11322 // converted to match the type of the cast, don't touch this.
11323 if (isa
<AllocationInst
>(BCI
->getOperand(0))) {
11324 // See if the bitcast simplifies, if so, don't nuke this GEP yet.
11325 if (Instruction
*I
= visitBitCast(*BCI
)) {
11328 BCI
->getParent()->getInstList().insert(BCI
, I
);
11329 ReplaceInstUsesWith(*BCI
, I
);
11334 return new BitCastInst(BCI
->getOperand(0), GEP
.getType());
11337 // Otherwise, if the offset is non-zero, we need to find out if there is a
11338 // field at Offset in 'A's type. If so, we can pull the cast through the
11340 SmallVector
<Value
*, 8> NewIndices
;
11342 cast
<PointerType
>(BCI
->getOperand(0)->getType())->getElementType();
11343 if (FindElementAtOffset(InTy
, Offset
, NewIndices
, TD
, Context
)) {
11344 Instruction
*NGEP
=
11345 GetElementPtrInst::Create(BCI
->getOperand(0), NewIndices
.begin(),
11347 if (NGEP
->getType() == GEP
.getType()) return NGEP
;
11348 if (cast
<GEPOperator
>(&GEP
)->isInBounds())
11349 cast
<GEPOperator
>(NGEP
)->setIsInBounds(true);
11350 InsertNewInstBefore(NGEP
, GEP
);
11351 NGEP
->takeName(&GEP
);
11352 return new BitCastInst(NGEP
, GEP
.getType());
Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
  // Convert: malloc Ty, C - where C is a constant != 1 into: malloc [C x Ty], 1
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      const Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocationInst *New = 0;

      // Create and insert the replacement instruction...
      if (isa<MallocInst>(AI))
        New = new MallocInst(NewTy, 0, AI.getAlignment(), AI.getName());
      else {
        assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
        New = new AllocaInst(NewTy, 0, AI.getAlignment(), AI.getName());
      }

      InsertNewInstBefore(New, AI);

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      BasicBlock::iterator It = New;
      while (isa<AllocationInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that I is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      Value *NullIdx = Constant::getNullValue(Type::Int32Ty);
      Value *Idx[2];
      Idx[0] = NullIdx;
      Idx[1] = NullIdx;
      Value *V = GetElementPtrInst::Create(New, Idx, Idx + 2,
                                           New->getName()+".sub", It);
      cast<GEPOperator>(V)->setIsInBounds(true);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, V);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (TD && isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized()) {
    // If alloca'ing a zero byte object, replace the alloca with a null pointer.
    // Note that we only do this for alloca's, because malloc should allocate
    // and return a unique pointer, even for a zero byte allocation.
    if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
  }

  return 0;
}

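// For example (illustrative):
//   %p = alloca i32, i32 4
// becomes
//   %p1    = alloca [4 x i32]
//   %p.sub = getelementptr inbounds [4 x i32]* %p1, i32 0, i32 0
// and all uses of %p are replaced with %p.sub.
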
Instruction *InstCombiner::visitFreeInst(FreeInst &FI) {
  Value *Op = FI.getOperand(0);

  // free undef -> unreachable.
  if (isa<UndefValue>(Op)) {
    // Insert a new store to null because we cannot modify the CFG here.
    new StoreInst(ConstantInt::getTrue(*Context),
                  UndefValue::get(PointerType::getUnqual(Type::Int1Ty)), &FI);
    return EraseInstFromFunction(FI);
  }

  // If we have 'free null' delete the instruction.  This can happen in stl
  // code when lots of inlining happens.
  if (isa<ConstantPointerNull>(Op))
    return EraseInstFromFunction(FI);

  // Change free <ty>* (cast <ty2>* X to <ty>*) into free <ty2>* X
  if (BitCastInst *CI = dyn_cast<BitCastInst>(Op)) {
    FI.setOperand(0, CI->getOperand(0));
    return &FI;
  }

  // Change free (gep X, 0,0,0,0) into free(X)
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    if (GEPI->hasAllZeroIndices()) {
      AddToWorkList(GEPI);
      FI.setOperand(0, GEPI->getOperand(0));
      return &FI;
    }
  }

  // Change free(malloc) into nothing, if the malloc has a single use.
  if (MallocInst *MI = dyn_cast<MallocInst>(Op))
    if (MI->hasOneUse()) {
      EraseInstFromFunction(FI);
      return EraseInstFromFunction(*MI);
    }

  return 0;
}

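// For example (illustrative):
//   free i8* bitcast (i32* %p to i8*)        ->  free i32* %p
//   free i8* null                            ->  (deleted)
//   free of a malloc with no other uses      ->  (both deleted)
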
/// InstCombineLoadCast - Fold 'load (cast P)' -> cast (load P)' when possible.
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const TargetData *TD) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);
  LLVMContext *Context = IC.getContext();

  if (TD) {
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(CI)) {
      // Instead of loading constant c string, use corresponding integer value
      // directly if string length is small enough.
      std::string Str;
      if (GetConstantStringInfo(CE->getOperand(0), Str) && !Str.empty()) {
        unsigned len = Str.length();
        const Type *Ty = cast<PointerType>(CE->getType())->getElementType();
        unsigned numBits = Ty->getPrimitiveSizeInBits();
        // Replace LI with immediate integer store.
        if ((numBits >> 3) == len + 1) {
          APInt StrVal(numBits, 0);
          APInt SingleChar(numBits, 0);
          if (TD->isLittleEndian()) {
            for (signed i = len-1; i >= 0; i--) {
              SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
              StrVal = (StrVal << 8) | SingleChar;
            }
          } else {
            for (unsigned i = 0; i < len; i++) {
              SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
              StrVal = (StrVal << 8) | SingleChar;
            }
            // Append NULL at the end.
            SingleChar = 0;
            StrVal = (StrVal << 8) | SingleChar;
          }
          Value *NL = ConstantInt::get(*Context, StrVal);
          return IC.ReplaceInstUsesWith(LI, NL);
        }
      }
    }
  }

  const PointerType *DestTy = cast<PointerType>(CI->getType());
  const Type *DestPTy = DestTy->getElementType();
  if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {

    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    const Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isInteger() || isa<PointerType>(DestPTy) ||
        isa<VectorType>(DestPTy)) {
      // If the source is an array, the code below will not succeed.  Check to
      // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
      // constants.
      if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Value *Idxs[2];
            Idxs[0] = Idxs[1] = Constant::getNullValue(Type::Int32Ty);
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if (IC.getTargetData() &&
          (SrcPTy->isInteger() || isa<PointerType>(SrcPTy) ||
           isa<VectorType>(SrcPTy)) &&
          // Do not allow turning this into a load of an integer, which is then
          // casted to a pointer, this pessimizes pointer analysis a lot.
          (isa<PointerType>(SrcPTy) == isa<PointerType>(LI.getType())) &&
          IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
          IC.getTargetData()->getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size.  Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        Value *NewLoad = IC.InsertNewInstBefore(new LoadInst(CastOp,
                                                             CI->getName(),
                                                         LI.isVolatile()),LI);
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}

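// For example (illustrative), when the pointee types have the same size:
//   %v = load i32* bitcast (float* @g to i32*)
// becomes
//   %t = load float* @g
//   %v = bitcast float %t to i32
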
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      GetOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()));
    if (KnownAlign >
        (LI.getAlignment() == 0 ? TD->getABITypeAlignment(LI.getType()) :
                                  LI.getAlignment()))
      LI.setAlignment(KnownAlign);
  }

  // load (cast X) --> cast (load X) iff safe
  if (isa<CastInst>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
      return Res;

  // None of the following transforms are legal for volatile loads.
  if (LI.isVolatile()) return 0;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI,6))
    return ReplaceInstUsesWith(LI, AvailableVal);

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) &&
        cast<PointerType>(GEPI0->getType())->getAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  if (Constant *C = dyn_cast<Constant>(Op)) {
    // load null/undef -> undef
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<UndefValue>(C) || (C->isNullValue() &&
        cast<PointerType>(Op->getType())->getAddressSpace() == 0)) {
      // Insert a new store to null instruction before the load to indicate that
      // this code is not reachable.  We do this instead of inserting an
      // unreachable instruction directly because we cannot modify the CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }

    // Instcombine load (constant global) into the value loaded.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op))
      if (GV->isConstant() && GV->hasDefinitiveInitializer())
        return ReplaceInstUsesWith(LI, GV->getInitializer());

    // Instcombine load (constantexpr_GEP global, 0, ...) into the value loaded.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
          if (GV->isConstant() && GV->hasDefinitiveInitializer())
            if (Constant *V =
                ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE,
                                                       Context))
              return ReplaceInstUsesWith(LI, V);
        if (CE->getOperand(0)->isNullValue()) {
          // Insert a new store to null instruction before the load to indicate
          // that this code is not reachable.  We do this instead of inserting
          // an unreachable instruction directly because we cannot modify the
          // CFG.
          new StoreInst(UndefValue::get(LI.getType()),
                        Constant::getNullValue(Op->getType()), &LI);
          return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
        }
      } else if (CE->isCast()) {
        if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
          return Res;
      }
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op->getUnderlyingObject())){
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return ReplaceInstUsesWith(LI, Constant::getNullValue(LI.getType()));
      else if (isa<UndefValue>(GV->getInitializer()))
        return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many others simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI)) {
        Value *V1 = InsertNewInstBefore(new LoadInst(SI->getOperand(1),
                                     SI->getOperand(1)->getName()+".val"), LI);
        Value *V2 = InsertNewInstBefore(new LoadInst(SI->getOperand(2),
                                     SI->getOperand(2)->getName()+".val"), LI);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }

      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return 0;
}

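// For example (illustrative), when both pointers are safe to dereference:
//   %p = select i1 %c, i32* %a, i32* %b
//   %v = load i32* %p
// becomes
//   %a.val = load i32* %a
//   %b.val = load i32* %b
//   %v     = select i1 %c, i32 %a.val, i32 %b.val
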
/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible.  This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (SrcTy == 0) return 0;

  const Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isInteger() && !isa<PointerType>(DestPTy))
    return 0;

  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element.  This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
  /// on 32-bit hosts.
  SmallVector<Value*, 4> NewGEPIndices;

  // If the source is an array, the code below will not succeed.  Check to
  // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
  // aggregates.
  if (isa<ArrayType>(SrcPTy) || isa<StructType>(SrcPTy)) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::Int32Ty);
    NewGEPIndices.push_back(Zero);

    while (1) {
      if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          break;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }

  if (!SrcPTy->isInteger() && !isa<PointerType>(SrcPTy))
    return 0;

  // If the pointers point into different address spaces or if they point to
  // values with different sizes, we can't do the transformation.
  if (!IC.getTargetData() ||
      SrcTy->getAddressSpace() !=
        cast<PointerType>(CI->getType())->getAddressSpace() ||
      IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
      IC.getTargetData()->getTypeSizeInBits(DestPTy))
    return 0;

  // Okay, we are casting from one integer or pointer type to another of
  // the same size.  Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *NewCast;
  Value *SIOp0 = SI.getOperand(0);
  Instruction::CastOps opcode = Instruction::BitCast;
  const Type* CastSrcTy = SIOp0->getType();
  const Type* CastDstTy = SrcPTy;
  if (isa<PointerType>(CastDstTy)) {
    if (CastSrcTy->isInteger())
      opcode = Instruction::IntToPtr;
  } else if (isa<IntegerType>(CastDstTy)) {
    if (isa<PointerType>(SIOp0->getType()))
      opcode = Instruction::PtrToInt;
  }

  // SIOp0 is a pointer to aggregate and this is a store to the first field,
  // emit a GEP to index into its first field.
  if (!NewGEPIndices.empty()) {
    if (Constant *C = dyn_cast<Constant>(CastOp))
      CastOp = ConstantExpr::getGetElementPtr(C, &NewGEPIndices[0],
                                              NewGEPIndices.size());
    else {
      CastOp = IC.InsertNewInstBefore(
                 GetElementPtrInst::Create(CastOp, NewGEPIndices.begin(),
                                           NewGEPIndices.end()), SI);
      cast<GEPOperator>(CastOp)->setIsInBounds(true);
    }
  }

  if (Constant *C = dyn_cast<Constant>(SIOp0))
    NewCast = ConstantExpr::getCast(opcode, C, CastDstTy);
  else
    NewCast = IC.InsertNewInstBefore(
      CastInst::Create(opcode, SIOp0, CastDstTy, SIOp0->getName()+".c"),
      SI);

  return new StoreInst(NewCast, CastOp);
}

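// For example (illustrative), when the value and pointee sizes match:
//   store i32 %v, i32* bitcast (float* @g to i32*)
// becomes
//   %v.c = bitcast i32 %v to float
//   store float %v.c, float* @g
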
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value.  This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalTo(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

// If this instruction has two uses, one of which is a llvm.dbg.declare,
// return the llvm.dbg.declare.
DbgDeclareInst *InstCombiner::hasOneUsePlusDeclare(Value *V) {
  if (!V->hasNUses(2))
    return 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI))
      return DI;
    if (isa<BitCastInst>(UI) && UI->hasOneUse()) {
      if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI->use_begin()))
        return DI;
    }
  }
  return 0;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  if (isa<UndefValue>(Ptr)) {     // store X, undef -> noop (even if volatile)
    EraseInstFromFunction(SI);
    ++NumCombined;
    return 0;
  }

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  // If the RHS is an alloca with two uses, the other one being a
  // llvm.dbg.declare, zapify the store and the declare, making the
  // alloca dead.  We must do this to prevent declare's from affecting
  // codegen.
  if (!SI.isVolatile()) {
    if (Ptr->hasOneUse()) {
      if (isa<AllocaInst>(Ptr)) {
        EraseInstFromFunction(SI);
        ++NumCombined;
        return 0;
      }
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
        if (isa<AllocaInst>(GEP->getOperand(0))) {
          if (GEP->getOperand(0)->hasOneUse()) {
            EraseInstFromFunction(SI);
            ++NumCombined;
            return 0;
          }
          if (DbgDeclareInst *DI = hasOneUsePlusDeclare(GEP->getOperand(0))) {
            EraseInstFromFunction(*DI);
            EraseInstFromFunction(SI);
            ++NumCombined;
            return 0;
          }
        }
      }
    }
    if (DbgDeclareInst *DI = hasOneUsePlusDeclare(Ptr)) {
      EraseInstFromFunction(*DI);
      EraseInstFromFunction(SI);
      ++NumCombined;
      return 0;
    }
  }

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()));
    if (KnownAlign >
        (SI.getAlignment() == 0 ? TD->getABITypeAlignment(Val->getType()) :
                                  SI.getAlignment()))
      SI.setAlignment(KnownAlign);
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    // It is necessary for correctness to skip those that feed into a
    // llvm.dbg.declare, as these are not present when debugging is off.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (!PrevSI->isVolatile() && equivalentAddressValues(PrevSI->getOperand(1),
                                                           SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          !SI.isVolatile()) {
        EraseInstFromFunction(SI);
        ++NumCombined;
        return 0;
      }
      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }


  if (SI.isVolatile()) return 0;  // Don't hack volatile stores.

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) &&
      cast<PointerType>(Ptr->getType())->getAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        AddToWorkList(U);  // Dropped a use.
      ++NumCombined;
    }
    return 0;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val)) {
    EraseInstFromFunction(SI);
    ++NumCombined;
    return 0;
  }

  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions and the pointer bitcasts that feed
  // into them), and if the block ends with an unconditional branch, try
  // to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType())));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return 0;  // xform done!

  return 0;
}

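// For example (illustrative), the dead-store scan above deletes the first
// store in:
//   store i32 1, i32* %p
//   %x = add i32 %a, %b        ; no intervening memory access
//   store i32 2, i32* %p
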
/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // if so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *OtherBB = 0;
  if (*PI != StoreBB)
    OtherBB = *PI;
  ++PI;
  if (PI == pred_end(DestBB))
    return false;

  if (*PI != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = *PI;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop)
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case.  There is an instruction before the branch.
  StoreInst *OtherStore = 0;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
      if (BBI==OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, or isn't a store to the same location, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch.  If one of
    // the destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), "storemerge");
    PN->reserveOperandSpace(2);
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstNonPHI();
  InsertNewInstBefore(new StoreInst(MergedVal, SI.getOperand(1),
                                    OtherStore->isVolatile()), *BBI);

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  ++NumCombined;
  return true;
}

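// For example (illustrative):
//   then:  store i32 %v1, i32* %P
//          br label %merge
//   else:  store i32 %v2, i32* %P
//          br label %merge
// becomes a single store of a merged value in the successor:
//   merge: %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
//          store i32 %storemerge, i32* %P
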
Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
  // Change br (not X), label True, label False to: br X, label False, True
  Value *X = 0;
  BasicBlock *TrueDest;
  BasicBlock *FalseDest;
  if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest), *Context) &&
      !isa<Constant>(X)) {
    // Swap Destinations and condition...
    BI.setCondition(X);
    BI.setSuccessor(0, FalseDest);
    BI.setSuccessor(1, TrueDest);
    return &BI;
  }

  // Canonicalize fcmp one/ole/oge by inverting the predicate and swapping the
  // branch destinations.
  FCmpInst::Predicate FPred; Value *Y;
  if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest), *Context))
    if ((FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
         FPred == FCmpInst::FCMP_OGE) && BI.getCondition()->hasOneUse()) {
      FCmpInst *I = cast<FCmpInst>(BI.getCondition());
      FCmpInst::Predicate NewPred = FCmpInst::getInversePredicate(FPred);
      Instruction *NewSCC = new FCmpInst(I, NewPred, X, Y, "");
      NewSCC->takeName(I);
      // Swap Destinations and condition...
      BI.setCondition(NewSCC);
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);
      RemoveFromWorkList(I);
      I->eraseFromParent();
      AddToWorkList(NewSCC);
      return &BI;
    }

  // Canonicalize icmp ne/ule/sle/uge/sge the same way.
  ICmpInst::Predicate IPred;
  if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest), *Context))
    if ((IPred == ICmpInst::ICMP_NE  || IPred == ICmpInst::ICMP_ULE ||
         IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
         IPred == ICmpInst::ICMP_SGE) && BI.getCondition()->hasOneUse()) {
      ICmpInst *I = cast<ICmpInst>(BI.getCondition());
      ICmpInst::Predicate NewPred = ICmpInst::getInversePredicate(IPred);
      Instruction *NewSCC = new ICmpInst(I, NewPred, X, Y, "");
      NewSCC->takeName(I);
      // Swap Destinations and condition...
      BI.setCondition(NewSCC);
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);
      RemoveFromWorkList(I);
      I->eraseFromParent();
      AddToWorkList(NewSCC);
      return &BI;
    }

  return 0;
}

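// For example (illustrative), getInversePredicate turns FCMP_ONE into
// FCMP_UEQ, so:
//   br i1 (fcmp one double %x, %y), label %T, label %F
// becomes
//   br i1 (fcmp ueq double %x, %y), label %F, label %T
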
Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
  Value *Cond = SI.getCondition();
  if (Instruction *I = dyn_cast<Instruction>(Cond)) {
    if (I->getOpcode() == Instruction::Add)
      if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // change 'switch (X+4) case 1:' into 'switch (X) case -3'
        for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
          SI.setOperand(i,
                    ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
                                         AddRHS));
        SI.setOperand(0, I->getOperand(0));
        AddToWorkList(I);
        return &SI;
      }
  }
  return 0;
}

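// For example (illustrative):
//   %t = add i32 %x, 4
//   switch i32 %t, label %def [ i32 1, label %a ]
// becomes
//   switch i32 %x, label %def [ i32 -3, label %a ]
// (the add is left to die if it has no other uses).
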
Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
  Value *Agg = EV.getAggregateOperand();

  if (!EV.hasIndices())
    return ReplaceInstUsesWith(EV, Agg);

  if (Constant *C = dyn_cast<Constant>(Agg)) {
    if (isa<UndefValue>(C))
      return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));

    if (isa<ConstantAggregateZero>(C))
      return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));

    if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
      // Extract the element indexed by the first index out of the constant
      Value *V = C->getOperand(*EV.idx_begin());
      if (EV.getNumIndices() > 1)
        // Extract the remaining indices out of the constant indexed by the
        // first index
        return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end());
      else
        return ReplaceInstUsesWith(EV, V);
    }
    return 0; // Can't handle other constants
  }
  if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
    // We're extracting from an insertvalue instruction, compare the indices
    const unsigned *exti, *exte, *insi, *inse;
    for (exti = EV.idx_begin(), insi = IV->idx_begin(),
         exte = EV.idx_end(), inse = IV->idx_end();
         exti != exte && insi != inse;
         ++exti, ++insi) {
      if (*insi != *exti)
        // The insert and extract both reference distinctly different elements.
        // This means the extract is not influenced by the insert, and we can
        // replace the aggregate operand of the extract with the aggregate
        // operand of the insert. i.e., replace
        // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
        // %E = extractvalue { i32, { i32 } } %I, 0
        // with
        // %E = extractvalue { i32, { i32 } } %A, 0
        return ExtractValueInst::Create(IV->getAggregateOperand(),
                                        EV.idx_begin(), EV.idx_end());
    }
    if (exti == exte && insi == inse)
      // Both iterators are at the end: Index lists are identical. Replace
      // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %C = extractvalue { i32, { i32 } } %B, 1, 0
      // with "i32 42"
      return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
    if (exti == exte) {
      // The extract list is a prefix of the insert list. i.e. replace
      // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %E = extractvalue { i32, { i32 } } %I, 1
      // with
      // %X = extractvalue { i32, { i32 } } %A, 1
      // %E = insertvalue { i32 } %X, i32 42, 0
      // by switching the order of the insert and extract (though the
      // insertvalue should be left in, since it may have other uses).
      Value *NewEV = InsertNewInstBefore(
        ExtractValueInst::Create(IV->getAggregateOperand(),
                                 EV.idx_begin(), EV.idx_end()),
        EV);
      return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
                                     insi, inse);
    }
    if (insi == inse)
      // The insert list is a prefix of the extract list.
      // We can simply remove the common indices from the extract and make it
      // operate on the inserted value instead of the insertvalue result.
      // i.e., replace
      // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
      // %E = extractvalue { i32, { i32 } } %I, 1, 0
      // with
      // %E = extractvalue { i32 } { i32 42 }, 0
      return ExtractValueInst::Create(IV->getInsertedValueOperand(),
                                      exti, exte);
  }
  // Can't simplify extracts from other values. Note that nested extracts are
  // already simplified implicitly by the above (extract (extract (insert)) is
  // translated into extract (insert (extract)) first and then into just the
  // value inserted, if appropriate).
  return 0;
}
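// For example (an illustrative case of the constant handling above):
//   %E = extractvalue { i32, i32 } zeroinitializer, 1
// becomes the scalar constant "i32 0", and an extract from an undef
// aggregate becomes undef of the result type.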
/// CheapToScalarize - Return true if the value is cheaper to scalarize than it
/// is to leave as a vector operation.
static bool CheapToScalarize(Value *V, bool isConstant) {
  if (isa<ConstantAggregateZero>(V))
    return true;
  if (ConstantVector *C = dyn_cast<ConstantVector>(V)) {
    if (isConstant) return true;
    // If all elts are the same, we can extract.
    Constant *Op0 = C->getOperand(0);
    for (unsigned i = 1; i < C->getNumOperands(); ++i)
      if (C->getOperand(i) != Op0)
        return false;
    return true;
  }
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Insert element gets simplified to the inserted element or is deleted if
  // this is a constant idx extract element and it's a constant idx insertelt.
  if (I->getOpcode() == Instruction::InsertElement && isConstant &&
      isa<ConstantInt>(I->getOperand(2)))
    return true;
  if (I->getOpcode() == Instruction::Load && I->hasOneUse())
    return true;
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I))
    if (BO->hasOneUse() &&
        (CheapToScalarize(BO->getOperand(0), isConstant) ||
         CheapToScalarize(BO->getOperand(1), isConstant)))
      return true;
  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    if (CI->hasOneUse() &&
        (CheapToScalarize(CI->getOperand(0), isConstant) ||
         CheapToScalarize(CI->getOperand(1), isConstant)))
      return true;

  return false;
}
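// Illustrative example (hypothetical IR): for
//   %s = add <4 x i32> %v, %w
//   %e = extractelement <4 x i32> %s, i32 1
// where %s has one use and %v (or %w) is itself cheap to scalarize,
// CheapToScalarize(%s, true) returns true, allowing visitExtractElementInst
// below to push the extract through the add.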
/// Read and decode a shufflevector mask.
///
/// It turns undef elements into values that are larger than the number of
/// elements in the input.
static std::vector<unsigned> getShuffleMask(const ShuffleVectorInst *SVI) {
  unsigned NElts = SVI->getType()->getNumElements();
  if (isa<ConstantAggregateZero>(SVI->getOperand(2)))
    return std::vector<unsigned>(NElts, 0);
  if (isa<UndefValue>(SVI->getOperand(2)))
    return std::vector<unsigned>(NElts, 2*NElts);

  std::vector<unsigned> Result;
  const ConstantVector *CP = cast<ConstantVector>(SVI->getOperand(2));
  for (User::const_op_iterator i = CP->op_begin(), e = CP->op_end(); i!=e; ++i)
    if (isa<UndefValue>(*i))
      Result.push_back(NElts*2);  // undef -> out of range
    else
      Result.push_back(cast<ConstantInt>(*i)->getZExtValue());
  return Result;
}
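// Decoding example (illustrative): for a <4 x i32> shuffle with mask
//   <i32 0, i32 5, i32 undef, i32 2>
// this returns {0, 5, 8, 2}; the undef lane is encoded as 2*NElts (8 here),
// a value that no real input element can have.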
/// FindScalarElement - Given a vector and an element number, see if the scalar
/// value is already around as a register, for example if it were inserted then
/// extracted from the vector.
static Value *FindScalarElement(Value *V, unsigned EltNo,
                                LLVMContext *Context) {
  assert(isa<VectorType>(V->getType()) && "Not looking at a vector?");
  const VectorType *PTy = cast<VectorType>(V->getType());
  unsigned Width = PTy->getNumElements();
  if (EltNo >= Width)  // Out of range access.
    return UndefValue::get(PTy->getElementType());

  if (isa<UndefValue>(V))
    return UndefValue::get(PTy->getElementType());
  else if (isa<ConstantAggregateZero>(V))
    return Constant::getNullValue(PTy->getElementType());
  else if (ConstantVector *CP = dyn_cast<ConstantVector>(V))
    return CP->getOperand(EltNo);
  else if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return 0;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return FindScalarElement(III->getOperand(0), EltNo, Context);
  } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
    unsigned LHSWidth =
      cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
    unsigned InEl = getShuffleMask(SVI)[EltNo];
    if (InEl < LHSWidth)
      return FindScalarElement(SVI->getOperand(0), InEl, Context);
    else if (InEl < LHSWidth*2)
      return FindScalarElement(SVI->getOperand(1), InEl - LHSWidth, Context);
    else
      return UndefValue::get(PTy->getElementType());
  }

  // Otherwise, we don't know.
  return 0;
}
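// Illustrative example (hypothetical IR): given
//   %v1 = insertelement <4 x float> %v0, float %x, i32 2
// FindScalarElement(%v1, 2) returns %x, while FindScalarElement(%v1, 0)
// recurses into %v0; an out-of-range element number yields undef.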
Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
  // If vector val is undef, replace extract with scalar undef.
  if (isa<UndefValue>(EI.getOperand(0)))
    return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));

  // If vector val is constant 0, replace extract with scalar 0.
  if (isa<ConstantAggregateZero>(EI.getOperand(0)))
    return ReplaceInstUsesWith(EI, Constant::getNullValue(EI.getType()));

  if (ConstantVector *C = dyn_cast<ConstantVector>(EI.getOperand(0))) {
    // If vector val is constant with all elements the same, replace EI with
    // that element. When the elements are not identical, we cannot replace yet
    // (we do that below, but only when the index is constant).
    Constant *op0 = C->getOperand(0);
    for (unsigned i = 1; i < C->getNumOperands(); ++i)
      if (C->getOperand(i) != op0) {
        op0 = 0;
        break;
      }
    if (op0)
      return ReplaceInstUsesWith(EI, op0);
  }

  // If extracting a specified index from the vector, see if we can recursively
  // find a previously computed scalar that was inserted into the vector.
  if (ConstantInt *IdxC = dyn_cast<ConstantInt>(EI.getOperand(1))) {
    unsigned IndexVal = IdxC->getZExtValue();
    unsigned VectorWidth =
      cast<VectorType>(EI.getOperand(0)->getType())->getNumElements();

    // If this is extracting an invalid index, turn this into undef, to avoid
    // crashing the code below.
    if (IndexVal >= VectorWidth)
      return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));

    // This instruction only demands the single element from the input vector.
    // If the input vector has a single use, simplify it based on this use
    // property.
    if (EI.getOperand(0)->hasOneUse() && VectorWidth != 1) {
      APInt UndefElts(VectorWidth, 0);
      APInt DemandedMask(VectorWidth, 1 << IndexVal);
      if (Value *V = SimplifyDemandedVectorElts(EI.getOperand(0),
                                                DemandedMask, UndefElts)) {
        EI.setOperand(0, V);
        return &EI;
      }
    }

    if (Value *Elt = FindScalarElement(EI.getOperand(0), IndexVal, Context))
      return ReplaceInstUsesWith(EI, Elt);

    // If this extractelement is directly using a bitcast from a vector of
    // the same number of elements, see if we can find the source element from
    // it.  In this case, we will end up needing to bitcast the scalars.
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(EI.getOperand(0))) {
      if (const VectorType *VT =
              dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
        if (VT->getNumElements() == VectorWidth)
          if (Value *Elt = FindScalarElement(BCI->getOperand(0),
                                             IndexVal, Context))
            return new BitCastInst(Elt, EI.getType());
    }
  }

  if (Instruction *I = dyn_cast<Instruction>(EI.getOperand(0))) {
    if (I->hasOneUse()) {
      // Push extractelement into predecessor operation if legal and
      // profitable to do so
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
        bool isConstantElt = isa<ConstantInt>(EI.getOperand(1));
        if (CheapToScalarize(BO, isConstantElt)) {
          ExtractElementInst *newEI0 =
            ExtractElementInst::Create(BO->getOperand(0), EI.getOperand(1),
                                       EI.getName()+".lhs");
          ExtractElementInst *newEI1 =
            ExtractElementInst::Create(BO->getOperand(1), EI.getOperand(1),
                                       EI.getName()+".rhs");
          InsertNewInstBefore(newEI0, EI);
          InsertNewInstBefore(newEI1, EI);
          return BinaryOperator::Create(BO->getOpcode(), newEI0, newEI1);
        }
      } else if (isa<LoadInst>(I)) {
        unsigned AS =
          cast<PointerType>(I->getOperand(0)->getType())->getAddressSpace();
        Value *Ptr = InsertBitCastBefore(I->getOperand(0),
                                         PointerType::get(EI.getType(), AS), EI);
        GetElementPtrInst *GEP =
          GetElementPtrInst::Create(Ptr, EI.getOperand(1), I->getName()+".gep");
        cast<GEPOperator>(GEP)->setIsInBounds(true);
        InsertNewInstBefore(GEP, EI);
        return new LoadInst(GEP);
      }
    }
    if (InsertElementInst *IE = dyn_cast<InsertElementInst>(I)) {
      // Extracting the inserted element?
      if (IE->getOperand(2) == EI.getOperand(1))
        return ReplaceInstUsesWith(EI, IE->getOperand(1));
      // If the inserted and extracted elements are constants, they must not
      // be the same value, extract from the pre-inserted value instead.
      if (isa<Constant>(IE->getOperand(2)) &&
          isa<Constant>(EI.getOperand(1))) {
        AddUsesToWorkList(EI);
        EI.setOperand(0, IE->getOperand(0));
        return &EI;
      }
    } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) {
      // If this is extracting an element from a shufflevector, figure out where
      // it came from and extract from the appropriate input element instead.
      if (ConstantInt *Elt = dyn_cast<ConstantInt>(EI.getOperand(1))) {
        unsigned SrcIdx = getShuffleMask(SVI)[Elt->getZExtValue()];
        Value *Src;
        unsigned LHSWidth =
          cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();

        if (SrcIdx < LHSWidth)
          Src = SVI->getOperand(0);
        else if (SrcIdx < LHSWidth*2) {
          SrcIdx -= LHSWidth;
          Src = SVI->getOperand(1);
        } else {
          return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
        }
        return ExtractElementInst::Create(Src,
                         ConstantInt::get(Type::Int32Ty, SrcIdx, false));
      }
    }
    // FIXME: Canonicalize extractelement(bitcast) -> bitcast(extractelement)
  }
  return 0;
}
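// Illustrative example of the binary operator case above (hypothetical IR):
//   %s = add <4 x i32> %a, %b
//   %e = extractelement <4 x i32> %s, i32 0
// becomes, when %s has one use and the operands are cheap to scalarize:
//   %e.lhs = extractelement <4 x i32> %a, i32 0
//   %e.rhs = extractelement <4 x i32> %b, i32 0
//   %e     = add i32 %e.lhs, %e.rhs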
/// CollectSingleShuffleElements - If V is a shuffle of values that ONLY returns
/// elements from either LHS or RHS, return the shuffle mask and true.
/// Otherwise, return false.
static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
                                         std::vector<Constant*> &Mask,
                                         LLVMContext *Context) {
  assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
         "Invalid CollectSingleShuffleElements");
  unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();

  if (isa<UndefValue>(V)) {
    Mask.assign(NumElts, UndefValue::get(Type::Int32Ty));
    return true;
  } else if (V == LHS) {
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(ConstantInt::get(Type::Int32Ty, i));
    return true;
  } else if (V == RHS) {
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(ConstantInt::get(Type::Int32Ty, i+NumElts));
    return true;
  } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert of an extract from some other vector, include it.
    Value *VecOp    = IEI->getOperand(0);
    Value *ScalarOp = IEI->getOperand(1);
    Value *IdxOp    = IEI->getOperand(2);

    if (!isa<ConstantInt>(IdxOp))
      return false;
    unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

    if (isa<UndefValue>(ScalarOp)) {  // inserting undef into vector.
      // Okay, we can handle this if the vector we are inserting into is
      // transitively ok.
      if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
        // If so, update the mask to reflect the inserted undef.
        Mask[InsertedIdx] = UndefValue::get(Type::Int32Ty);
        return true;
      }
    } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)){
      if (isa<ConstantInt>(EI->getOperand(1)) &&
          EI->getOperand(0)->getType() == V->getType()) {
        unsigned ExtractedIdx =
          cast<ConstantInt>(EI->getOperand(1))->getZExtValue();

        // This must be extracting from either LHS or RHS.
        if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
          // Okay, we can handle this if the vector we are inserting into is
          // transitively ok.
          if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
            // If so, update the mask to reflect the inserted value.
            if (EI->getOperand(0) == LHS) {
              Mask[InsertedIdx % NumElts] =
                ConstantInt::get(Type::Int32Ty, ExtractedIdx);
            } else {
              assert(EI->getOperand(0) == RHS);
              Mask[InsertedIdx % NumElts] =
                ConstantInt::get(Type::Int32Ty, ExtractedIdx+NumElts);
            }
            return true;
          }
        }
      }
    }
  }
  // TODO: Handle shufflevector here!

  return false;
}
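// Illustrative example (hypothetical IR): for the chain
//   %x = extractelement <2 x i32> %RHS, i32 1
//   %t = insertelement <2 x i32> %LHS, i32 %x, i32 0
// a call with V = %t collects the mask {3, 1} (element 1 of RHS, then
// element 1 of LHS) and returns true, so a single shufflevector of LHS and
// RHS can reproduce %t.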
/// CollectShuffleElements - We are building a shuffle of V, using RHS as the
/// RHS of the shuffle instruction, if it is not null.  Return a shuffle mask
/// that computes V and the LHS value of the shuffle.
static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
                                     Value *&RHS, LLVMContext *Context) {
  assert(isa<VectorType>(V->getType()) &&
         (RHS == 0 || V->getType() == RHS->getType()) &&
         "Invalid shuffle!");
  unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();

  if (isa<UndefValue>(V)) {
    Mask.assign(NumElts, UndefValue::get(Type::Int32Ty));
    return V;
  } else if (isa<ConstantAggregateZero>(V)) {
    Mask.assign(NumElts, ConstantInt::get(Type::Int32Ty, 0));
    return V;
  } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert of an extract from some other vector, include it.
    Value *VecOp    = IEI->getOperand(0);
    Value *ScalarOp = IEI->getOperand(1);
    Value *IdxOp    = IEI->getOperand(2);

    if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
      if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
          EI->getOperand(0)->getType() == V->getType()) {
        unsigned ExtractedIdx =
          cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
        unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

        // Either the extracted from or inserted into vector must be RHSVec,
        // otherwise we'd end up with a shuffle of three inputs.
        if (EI->getOperand(0) == RHS || RHS == 0) {
          RHS = EI->getOperand(0);
          Value *V = CollectShuffleElements(VecOp, Mask, RHS, Context);
          Mask[InsertedIdx % NumElts] =
            ConstantInt::get(Type::Int32Ty, NumElts+ExtractedIdx);
          return V;
        }

        if (VecOp == RHS) {
          Value *V = CollectShuffleElements(EI->getOperand(0), Mask,
                                            RHS, Context);
          // Everything but the extracted element is replaced with the RHS.
          for (unsigned i = 0; i != NumElts; ++i) {
            if (i != InsertedIdx)
              Mask[i] = ConstantInt::get(Type::Int32Ty, NumElts+i);
          }
          return V;
        }

        // If this insertelement is a chain that comes from exactly these two
        // vectors, return the vector and the effective shuffle.
        if (CollectSingleShuffleElements(IEI, EI->getOperand(0), RHS, Mask,
                                         Context))
          return EI->getOperand(0);
      }
    }
  }
  // TODO: Handle shufflevector here!

  // Otherwise, can't do anything fancy.  Return an identity vector.
  for (unsigned i = 0; i != NumElts; ++i)
    Mask.push_back(ConstantInt::get(Type::Int32Ty, i));
  return V;
}
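// Illustrative note: for a chain of insertelements whose scalars are all
// extracted from at most one other vector, the function above returns the
// LHS of the eventual shuffle, fills in RHS with that other vector, and
// builds Mask as i32 constants; anything it cannot handle falls back to an
// identity mask of V.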
Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
  Value *VecOp    = IE.getOperand(0);
  Value *ScalarOp = IE.getOperand(1);
  Value *IdxOp    = IE.getOperand(2);

  // Inserting an undef, or inserting into an undefined place, remove this.
  if (isa<UndefValue>(ScalarOp) || isa<UndefValue>(IdxOp))
    ReplaceInstUsesWith(IE, VecOp);

  // If the inserted element was extracted from some other vector, and if the
  // indexes are constant, try to turn this into a shufflevector operation.
  if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
    if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
        EI->getOperand(0)->getType() == IE.getType()) {
      unsigned NumVectorElts = IE.getType()->getNumElements();
      unsigned ExtractedIdx =
        cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
      unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

      if (ExtractedIdx >= NumVectorElts) // Out of range extract.
        return ReplaceInstUsesWith(IE, VecOp);

      if (InsertedIdx >= NumVectorElts)  // Out of range insert.
        return ReplaceInstUsesWith(IE, UndefValue::get(IE.getType()));

      // If we are extracting a value from a vector, then inserting it right
      // back into the same place, just use the input vector.
      if (EI->getOperand(0) == VecOp && ExtractedIdx == InsertedIdx)
        return ReplaceInstUsesWith(IE, VecOp);

      // We could theoretically do this for ANY input.  However, doing so could
      // turn chains of insertelement instructions into a chain of shufflevector
      // instructions, and right now we do not merge shufflevectors.  As such,
      // only do this in a situation where it is clear that there is benefit.
      if (isa<UndefValue>(VecOp) || isa<ConstantAggregateZero>(VecOp)) {
        // Turn this into shuffle(EIOp0, VecOp, Mask).  The result has all of
        // the values of VecOp, except the one read from EIOp0.
        // Build a new shuffle mask.
        std::vector<Constant*> Mask;
        if (isa<UndefValue>(VecOp))
          Mask.assign(NumVectorElts, UndefValue::get(Type::Int32Ty));
        else {
          assert(isa<ConstantAggregateZero>(VecOp) && "Unknown thing");
          Mask.assign(NumVectorElts, ConstantInt::get(Type::Int32Ty,
                                                      NumVectorElts));
        }
        Mask[InsertedIdx] =
          ConstantInt::get(Type::Int32Ty, ExtractedIdx);
        return new ShuffleVectorInst(EI->getOperand(0), VecOp,
                                     ConstantVector::get(Mask));
      }

      // If this insertelement isn't used by some other insertelement, turn it
      // (and any insertelements it points to), into one big shuffle.
      if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.use_back())) {
        std::vector<Constant*> Mask;
        Value *RHS = 0;
        Value *LHS = CollectShuffleElements(&IE, Mask, RHS, Context);
        if (RHS == 0) RHS = UndefValue::get(LHS->getType());
        // We now have a shuffle of LHS, RHS, Mask.
        return new ShuffleVectorInst(LHS, RHS,
                                     ConstantVector::get(Mask));
      }
    }
  }

  unsigned VWidth = cast<VectorType>(VecOp->getType())->getNumElements();
  APInt UndefElts(VWidth, 0);
  APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
  if (SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts))
    return &IE;

  return 0;
}
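// Illustrative example of the shuffle formation above (hypothetical IR):
//   %x = extractelement <4 x i32> %a, i32 0
//   %v = insertelement <4 x i32> undef, i32 %x, i32 3
// becomes the single instruction
//   %v = shufflevector <4 x i32> %a, <4 x i32> undef,
//                      <4 x i32> <i32 undef, i32 undef, i32 undef, i32 0>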
Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  Value *LHS = SVI.getOperand(0);
  Value *RHS = SVI.getOperand(1);
  std::vector<unsigned> Mask = getShuffleMask(&SVI);

  bool MadeChange = false;

  // Undefined shuffle mask -> undefined value.
  if (isa<UndefValue>(SVI.getOperand(2)))
    return ReplaceInstUsesWith(SVI, UndefValue::get(SVI.getType()));

  unsigned VWidth = cast<VectorType>(SVI.getType())->getNumElements();

  if (VWidth != cast<VectorType>(LHS->getType())->getNumElements())
    return 0;

  APInt UndefElts(VWidth, 0);
  APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
  if (SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
    LHS = SVI.getOperand(0);
    RHS = SVI.getOperand(1);
    MadeChange = true;
  }

  // Canonicalize shuffle(x,    x,mask) -> shuffle(x, undef,mask')
  // Canonicalize shuffle(undef,x,mask) -> shuffle(x, undef,mask').
  if (LHS == RHS || isa<UndefValue>(LHS)) {
    if (isa<UndefValue>(LHS) && LHS == RHS) {
      // shuffle(undef,undef,mask) -> undef.
      return ReplaceInstUsesWith(SVI, LHS);
    }

    // Remap any references to RHS to use LHS.
    std::vector<Constant*> Elts;
    for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
      if (Mask[i] >= 2*e)
        Elts.push_back(UndefValue::get(Type::Int32Ty));
      else {
        if ((Mask[i] >= e && isa<UndefValue>(RHS)) ||
            (Mask[i] <  e && isa<UndefValue>(LHS))) {
          Mask[i] = 2*e;     // Turn into undef.
          Elts.push_back(UndefValue::get(Type::Int32Ty));
        } else {
          Mask[i] = Mask[i] % e;  // Force to LHS.
          Elts.push_back(ConstantInt::get(Type::Int32Ty, Mask[i]));
        }
      }
    }
    SVI.setOperand(0, SVI.getOperand(1));
    SVI.setOperand(1, UndefValue::get(RHS->getType()));
    SVI.setOperand(2, ConstantVector::get(Elts));
    LHS = SVI.getOperand(0);
    RHS = SVI.getOperand(1);
    MadeChange = true;
  }

  // Analyze the shuffle: is the LHS or the RHS an identity shuffle?
  bool isLHSID = true, isRHSID = true;

  for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
    if (Mask[i] >= e*2) continue;  // Ignore undef values.
    // Is this an identity shuffle of the LHS value?
    isLHSID &= (Mask[i] == i);

    // Is this an identity shuffle of the RHS value?
    isRHSID &= (Mask[i]-e == i);
  }

  // Eliminate identity shuffles.
  if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
  if (isRHSID) return ReplaceInstUsesWith(SVI, RHS);

  // If the LHS is a shufflevector itself, see if we can combine it with this
  // one without producing an unusual shuffle.  Here we are really conservative:
  // we are absolutely afraid of producing a shuffle mask not in the input
  // program, because the code gen may not be smart enough to turn a merged
  // shuffle into two specific shuffles: it may produce worse code.  As such,
  // we only merge two shuffles if the result is one of the two input shuffle
  // masks.  In this case, merging the shuffles just removes one instruction,
  // which we know is safe.  This is good for things like turning:
  // (splat(splat)) -> splat.
  if (ShuffleVectorInst *LHSSVI = dyn_cast<ShuffleVectorInst>(LHS)) {
    if (isa<UndefValue>(RHS)) {
      std::vector<unsigned> LHSMask = getShuffleMask(LHSSVI);

      std::vector<unsigned> NewMask;
      for (unsigned i = 0, e = Mask.size(); i != e; ++i)
        if (Mask[i] >= 2*e)
          NewMask.push_back(2*e);
        else
          NewMask.push_back(LHSMask[Mask[i]]);

      // If the result mask is equal to the src shuffle or this shuffle mask, do
      // the replacement.
      if (NewMask == LHSMask || NewMask == Mask) {
        unsigned LHSInNElts =
          cast<VectorType>(LHSSVI->getOperand(0)->getType())->getNumElements();
        std::vector<Constant*> Elts;
        for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
          if (NewMask[i] >= LHSInNElts*2) {
            Elts.push_back(UndefValue::get(Type::Int32Ty));
          } else {
            Elts.push_back(ConstantInt::get(Type::Int32Ty, NewMask[i]));
          }
        }
        return new ShuffleVectorInst(LHSSVI->getOperand(0),
                                     LHSSVI->getOperand(1),
                                     ConstantVector::get(Elts));
      }
    }
  }

  return MadeChange ? &SVI : 0;
}
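// Illustrative example of the conservative shuffle merging above
// (hypothetical IR): a splat of a splat,
//   %s1 = shufflevector <4 x i32> %v,  <4 x i32> undef, <4 x i32> zeroinitializer
//   %s2 = shufflevector <4 x i32> %s1, <4 x i32> undef, <4 x i32> zeroinitializer
// is folded so that %s2 reads directly from %v, because the merged mask is
// identical to one of the input masks.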
/// TryToSinkInstruction - Try to move the specified instruction from its
/// current block into the beginning of DestBlock, which can only happen if it's
/// safe to move the instruction past all of the instructions between it and the
/// end of its block.
static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
  assert(I->hasOneUse() && "Invariants didn't hold!");

  // Cannot move control-flow-involving, volatile loads, vaarg, etc.
  if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I))
    return false;

  // Do not sink alloca instructions out of the entry block.
  if (isa<AllocaInst>(I) && I->getParent() ==
        &DestBlock->getParent()->getEntryBlock())
    return false;

  // We can only sink load instructions if there is nothing between the load and
  // the end of block that could change the value.
  if (I->mayReadFromMemory()) {
    for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
         Scan != E; ++Scan)
      if (Scan->mayWriteToMemory())
        return false;
  }

  BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI();

  CopyPrecedingStopPoint(I, InsertPos);
  I->moveBefore(InsertPos);
  ++NumSunkInst;
  return true;
}
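// For example (illustrative): DoOneIteration below calls this for a
// single-use instruction whose only user lives in a successor block with
// exactly one predecessor; the move is refused here for PHIs, terminators,
// instructions with side effects, entry-block allocas, and loads that might
// be clobbered before the end of their block.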
/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
/// all reachable code to the worklist.
///
/// This has a couple of tricks to make the code faster and more powerful.  In
/// particular, we constant fold and DCE instructions as we go, to avoid adding
/// them to the worklist (this significantly speeds up instcombine on code where
/// many instructions are dead or constant).  Additionally, if we find a branch
/// whose condition is a known constant, we only visit the reachable successors.
///
static void AddReachableCodeToWorklist(BasicBlock *BB,
                                       SmallPtrSet<BasicBlock*, 64> &Visited,
                                       InstCombiner &IC,
                                       const TargetData *TD) {
  SmallVector<BasicBlock*, 256> Worklist;
  Worklist.push_back(BB);

  while (!Worklist.empty()) {
    BB = Worklist.back();
    Worklist.pop_back();

    // We have now visited this block!  If we've already been here, ignore it.
    if (!Visited.insert(BB)) continue;

    DbgInfoIntrinsic *DBI_Prev = NULL;
    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
      Instruction *Inst = BBI++;

      // DCE instruction if trivially dead.
      if (isInstructionTriviallyDead(Inst)) {
        ++NumDeadInst;
        DOUT << "IC: DCE: " << *Inst;
        Inst->eraseFromParent();
        continue;
      }

      // ConstantProp instruction if trivially constant.
      if (Constant *C = ConstantFoldInstruction(Inst, BB->getContext(), TD)) {
        DOUT << "IC: ConstFold to: " << *C << " from: " << *Inst;
        Inst->replaceAllUsesWith(C);
        ++NumConstProp;
        Inst->eraseFromParent();
        continue;
      }

      // If there are two consecutive llvm.dbg.stoppoint calls then
      // it is likely that the optimizer deleted code in between these
      // two intrinsics.
      DbgInfoIntrinsic *DBI_Next = dyn_cast<DbgInfoIntrinsic>(Inst);
      if (DBI_Next) {
        if (DBI_Prev
            && DBI_Prev->getIntrinsicID() == llvm::Intrinsic::dbg_stoppoint
            && DBI_Next->getIntrinsicID() == llvm::Intrinsic::dbg_stoppoint) {
          IC.RemoveFromWorkList(DBI_Prev);
          DBI_Prev->eraseFromParent();
        }
        DBI_Prev = DBI_Next;
      } else {
        DBI_Prev = 0;
      }

      IC.AddToWorkList(Inst);
    }

    // Recursively visit successors.  If this is a branch or switch on a
    // constant, only visit the reachable successor.
    TerminatorInst *TI = BB->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
        bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
        Worklist.push_back(ReachableBB);
        continue;
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
        // See if this is an explicit destination.
        for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
          if (SI->getCaseValue(i) == Cond) {
            BasicBlock *ReachableBB = SI->getSuccessor(i);
            Worklist.push_back(ReachableBB);
            continue;
          }

        // Otherwise it is the default destination.
        Worklist.push_back(SI->getSuccessor(0));
        continue;
      }
    }

    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      Worklist.push_back(TI->getSuccessor(i));
  }
}
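// For example (illustrative): given "br i1 true, label %then, label %else",
// only %then is pushed onto the block worklist, so the instructions in %else
// are never visited here; DoOneIteration below deletes them separately.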
bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
  bool Changed = false;
  TD = getAnalysisIfAvailable<TargetData>();

  DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
        << F.getNameStr() << "\n");

  // Do a depth-first traversal of the function, populate the worklist with
  // the reachable instructions.  Ignore blocks that are not reachable.  Keep
  // track of which blocks we visit.
  SmallPtrSet<BasicBlock*, 64> Visited;
  AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);

  // Do a quick scan over the function.  If we find any blocks that are
  // unreachable, remove any instructions inside of them.  This prevents
  // the instcombine code from having to deal with some bad special cases.
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (!Visited.count(BB)) {
      Instruction *Term = BB->getTerminator();
      while (Term != BB->begin()) {   // Remove instrs bottom-up
        BasicBlock::iterator I = Term; --I;

        DOUT << "IC: DCE: " << *I;
        // A debug intrinsic shouldn't force another iteration if we weren't
        // going to do one without it.
        if (!isa<DbgInfoIntrinsic>(I)) {
          ++NumDeadInst;
          Changed = true;
        }
        if (!I->use_empty())
          I->replaceAllUsesWith(UndefValue::get(I->getType()));
        I->eraseFromParent();
      }
    }

  while (!Worklist.empty()) {
    Instruction *I = RemoveOneFromWorkList();
    if (I == 0) continue;  // skip null values.

    // Check to see if we can DCE the instruction.
    if (isInstructionTriviallyDead(I)) {
      // Add operands to the worklist.
      if (I->getNumOperands() < 4)
        AddUsesToWorkList(*I);
      ++NumDeadInst;

      DOUT << "IC: DCE: " << *I;

      I->eraseFromParent();
      RemoveFromWorkList(I);
      Changed = true;
      continue;
    }

    // Instruction isn't dead, see if we can constant propagate it.
    if (Constant *C = ConstantFoldInstruction(I, F.getContext(), TD)) {
      DOUT << "IC: ConstFold to: " << *C << " from: " << *I;

      // Add operands to the worklist.
      AddUsesToWorkList(*I);
      ReplaceInstUsesWith(*I, C);

      ++NumConstProp;
      I->eraseFromParent();
      RemoveFromWorkList(I);
      Changed = true;
      continue;
    }

    // See if we can constant fold its operands.
    for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(i))
        if (Constant *NewC = ConstantFoldConstantExpression(CE,
                                                            F.getContext(), TD))
          if (NewC != CE) {
            *i = NewC;
            Changed = true;
          }

    // See if we can trivially sink this instruction to a successor basic block.
    if (I->hasOneUse()) {
      BasicBlock *BB = I->getParent();
      BasicBlock *UserParent = cast<Instruction>(I->use_back())->getParent();
      if (UserParent != BB) {
        bool UserIsSuccessor = false;
        // See if the user is one of our successors.
        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
          if (*SI == UserParent) {
            UserIsSuccessor = true;
            break;
          }

        // If the user is one of our immediate successors, and if that successor
        // only has us as a predecessor (we'd have to split the critical edge
        // otherwise), we can keep going.
        if (UserIsSuccessor && !isa<PHINode>(I->use_back()) &&
            next(pred_begin(UserParent)) == pred_end(UserParent))
          // Okay, the CFG is simple enough, try to sink this instruction.
          Changed |= TryToSinkInstruction(I, UserParent);
      }
    }

    // Now that we have an instruction, try combining it to simplify it...
#ifndef NDEBUG
    std::string OrigI;
#endif
    DEBUG(std::ostringstream SS; I->print(SS); OrigI = SS.str(););
    if (Instruction *Result = visit(*I)) {
      ++NumCombined;
      // Should we replace the old instruction with a new one?
      if (Result != I) {
        DOUT << "IC: Old = " << *I
             << "    New = " << *Result;

        // Everything uses the new instruction now.
        I->replaceAllUsesWith(Result);

        // Push the new instruction and any users onto the worklist.
        AddToWorkList(Result);
        AddUsersToWorkList(*Result);

        // Move the name to the new instruction first.
        Result->takeName(I);

        // Insert the new instruction into the basic block...
        BasicBlock *InstParent = I->getParent();
        BasicBlock::iterator InsertPos = I;

        if (!isa<PHINode>(Result))        // If combining a PHI, don't insert
          while (isa<PHINode>(InsertPos)) // middle of a block of PHIs.
            ++InsertPos;

        InstParent->getInstList().insert(InsertPos, Result);

        // Make sure that we reprocess all operands now that we reduced their
        // use counts.
        AddUsesToWorkList(*I);

        // Instructions can end up on the worklist more than once.  Make sure
        // we do not process an instruction that has been deleted.
        RemoveFromWorkList(I);

        // Erase the old instruction.
        InstParent->getInstList().erase(I);
      } else {
        DOUT << "IC: Mod = " << OrigI
             << "    New = " << *I;

        // If the instruction was modified, it's possible that it is now dead.
        // If so, remove it.
        if (isInstructionTriviallyDead(I)) {
          // Make sure we process all operands now that we are reducing their
          // use counts.
          AddUsesToWorkList(*I);

          // Instructions may end up in the worklist more than once.  Erase all
          // occurrences of this instruction.
          RemoveFromWorkList(I);
          I->eraseFromParent();
        } else {
          AddToWorkList(I);
          AddUsersToWorkList(*I);
        }
      }
      Changed = true;
    }
  }

  assert(WorklistMap.empty() && "Worklist empty, but map not?");

  // Do an explicit clear, this shrinks the map if needed.
  WorklistMap.clear();

  return Changed;
}
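// Note on the worklist discipline above (descriptive, not normative): when
// visit(*I) produces a replacement, the users of the old instruction are
// pushed so they get re-simplified against the new value, and the old
// instruction is removed from the worklist map before being erased, so stale
// entries are never processed.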
bool InstCombiner::runOnFunction(Function &F) {
  MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
  Context = &F.getContext();

  bool EverMadeChange = false;

  // Iterate while there is work to do.
  unsigned Iteration = 0;
  while (DoOneIteration(F, Iteration++))
    EverMadeChange = true;
  return EverMadeChange;
}

FunctionPass *llvm::createInstructionCombiningPass() {
  return new InstCombiner();
}
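// Example usage (an illustrative sketch, not part of this file): clients
// typically schedule the pass through a PassManager, e.g.
//   PassManager PM;
//   PM.add(new TargetData(&M));  // optional; used only if available
//   PM.add(createInstructionCombiningPass());
//   PM.run(M);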