//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic VMCore ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// TargetData information. These functions cannot go in VMCore due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include <cerrno>
#include <cmath>
using namespace llvm;
//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//
/// IsConstantOffsetFromGlobal - If this constant is actually a constant offset
/// from a global, return the global and the constant.  Because of
/// constantexprs, this function is recursive.
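/// For illustration (a hypothetical example, with @a a global [5 x i32] and a
/// 4-byte i32 alloc size): the constant expression
///   ptrtoint (i32* getelementptr ([5 x i32]* @a, i32 0, i32 3) to i64)
/// decomposes into GV == @a and Offset == 12.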
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                       int64_t &Offset, const TargetData &TD) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    Offset = 0;
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);
  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Cannot compute this if the element type of the pointer is missing size
    // info.
    if (!cast<PointerType>(CE->getOperand(0)->getType())
          ->getElementType()->isSized())
      return false;

    // If the base isn't a global+constant, we aren't either.
    if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD))
      return false;

    // Otherwise, add any offset that our operands provide.
    gep_type_iterator GTI = gep_type_begin(CE);
    for (User::const_op_iterator i = CE->op_begin() + 1, e = CE->op_end();
         i != e; ++i, ++GTI) {
      ConstantInt *CI = dyn_cast<ConstantInt>(*i);
      if (!CI) return false;  // Index isn't a simple constant?
      if (CI->getZExtValue() == 0) continue;  // Not adding anything.

      if (const StructType *ST = dyn_cast<StructType>(*GTI)) {
        Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
      } else {
        const SequentialType *SQT = cast<SequentialType>(*GTI);
        Offset += TD.getTypeAllocSize(SQT->getElementType())*CI->getSExtValue();
      }
    }
    return true;
  }

  return false;
}

/// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together.  If target data info is available, it is provided as TD,
/// otherwise TD is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
                                           Constant *Op1, const TargetData *TD,
                                           LLVMContext &Context) {
  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // to the other.

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant.  This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub && TD) {
    GlobalValue *GV1, *GV2;
    int64_t Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *TD))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) &&
          GV1 == GV2) {
        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        return ConstantInt::get(Op0->getType(), Offs1-Offs2);
      }
  }

  return 0;
}

/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
                                         const Type *ResultTy,
                                         LLVMContext &Context,
                                         const TargetData *TD) {
  Constant *Ptr = Ops[0];
  if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized())
    return 0;

  unsigned BitWidth = TD->getTypeSizeInBits(TD->getIntPtrType(Context));
  APInt BasePtr(BitWidth, 0);
  bool BaseIsInt = true;
  if (!Ptr->isNullValue()) {
    // If this is a inttoptr from a constant int, we can fold this as the base,
    // otherwise we can't.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
      if (CE->getOpcode() == Instruction::IntToPtr)
        if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0))) {
          BasePtr = Base->getValue();
          BasePtr.zextOrTrunc(BitWidth);
        }
    if (BasePtr == 0)
      BaseIsInt = false;
  }

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
  for (unsigned i = 1; i != NumOps; ++i)
    if (!isa<ConstantInt>(Ops[i]))
      return 0;

  APInt Offset = APInt(BitWidth,
                       TD->getIndexedOffset(Ptr->getType(),
                                            (Value**)Ops+1, NumOps-1));
  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  if (BaseIsInt) {
    Constant *C = ConstantInt::get(Context, Offset+BasePtr);
    return ConstantExpr::getIntToPtr(C, ResultTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
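  // For illustration (hypothetical): with i32 having a 4-byte alloc size, a
  // flat offset of 20 into [4 x [2 x i32]] is recomputed as the index pair
  // (2, 1), since 2*8 + 1*4 == 20.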
  const Type *Ty = Ptr->getType();
  SmallVector<Constant*, 32> NewIdxs;
  do {
    if (const SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
      // The only pointer indexing we'll do is on the first index of the GEP.
      if (isa<PointerType>(ATy) && !NewIdxs.empty())
        break;

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
      APInt NewIdx = Offset.udiv(ElemSize);
      Offset -= NewIdx * ElemSize;
      NewIdxs.push_back(ConstantInt::get(TD->getIntPtrType(Context), NewIdx));
      Ty = ATy->getElementType();
    } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
      // Determine which field of the struct the offset points into. The
      // getZExtValue is at least as safe as the StructLayout API because we
      // know the offset is within the struct at this point.
      const StructLayout &SL = *TD->getStructLayout(STy);
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    } else {
      // We've reached some non-indexable type.
      break;
    }
  } while (Ty != cast<PointerType>(ResultTy)->getElementType());

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return 0;

  Constant *C =
    ConstantExpr::getGetElementPtr(Ptr, &NewIdxs[0], NewIdxs.size());
  assert(cast<PointerType>(C->getType())->getElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != cast<PointerType>(ResultTy)->getElementType())
    C = ConstantExpr::getBitCast(C, ResultTy);

  return C;
}

/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// targetdata.  Return 0 if unfoldable.
static Constant *FoldBitCast(Constant *C, const Type *DestTy,
                             const TargetData &TD, LLVMContext &Context) {
  // If this is a bitcast from constant vector -> vector, fold it.
  if (ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
    if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
      // If the element types match, VMCore can fold it.
      unsigned NumDstElt = DestVTy->getNumElements();
      unsigned NumSrcElt = CV->getNumOperands();
      if (NumDstElt == NumSrcElt)
        return 0;

      const Type *SrcEltTy = CV->getType()->getElementType();
      const Type *DstEltTy = DestVTy->getElementType();

      // Otherwise, we're changing the number of elements in a vector, which
      // requires endianness information to do the right thing.  For example,
      //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
      // folds to (little endian):
      //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
      // and to (big endian):
      //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

      // First thing is first.  We only want to think about integer here, so if
      // we have something in FP form, recast it as integer.
      if (DstEltTy->isFloatingPoint()) {
        // Fold to an vector of integers with same size as our FP type.
        unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
        const Type *DestIVTy = VectorType::get(
                                 IntegerType::get(Context, FPWidth), NumDstElt);
        // Recursively handle this integer conversion, if possible.
        C = FoldBitCast(C, DestIVTy, TD, Context);
        if (!C) return 0;

        // Finally, VMCore can handle this now that #elts line up.
        return ConstantExpr::getBitCast(C, DestTy);
      }

      // Okay, we know the destination is integer, if the input is FP, convert
      // it to integer first.
      if (SrcEltTy->isFloatingPoint()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        const Type *SrcIVTy = VectorType::get(
                                IntegerType::get(Context, FPWidth), NumSrcElt);
        // Ask VMCore to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
        CV = dyn_cast<ConstantVector>(C);
        if (!CV) return 0;  // If VMCore wasn't able to fold it, bail out.
      }

      // Now we know that the input and output vectors are both integer vectors
      // of the same size, and that their #elements is not the same.  Do the
      // conversion here, which depends on whether the input or output has
      // more elements.
      bool isLittleEndian = TD.isLittleEndian();

      SmallVector<Constant*, 32> Result;
      if (NumDstElt < NumSrcElt) {
        // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
        Constant *Zero = Constant::getNullValue(DstEltTy);
        unsigned Ratio = NumSrcElt/NumDstElt;
        unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
        unsigned SrcElt = 0;
        for (unsigned i = 0; i != NumDstElt; ++i) {
          // Build each element of the result.
          Constant *Elt = Zero;
          unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
          for (unsigned j = 0; j != Ratio; ++j) {
            Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(SrcElt++));
            if (!Src) return 0;  // Reject constantexpr elements.

            // Zero extend the element to the right size.
            Src = ConstantExpr::getZExt(Src, Elt->getType());

            // Shift it to the right place, depending on endianness.
            Src = ConstantExpr::getShl(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
            ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

            // Mix it in.
            Elt = ConstantExpr::getOr(Elt, Src);
          }
          Result.push_back(Elt);
        }
      } else {
        // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
        unsigned Ratio = NumDstElt/NumSrcElt;
        unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();

        // Loop over each source value, expanding into multiple results.
        for (unsigned i = 0; i != NumSrcElt; ++i) {
          Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(i));
          if (!Src) return 0;  // Reject constantexpr elements.

          unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
          for (unsigned j = 0; j != Ratio; ++j) {
            // Shift the piece of the value into the right place, depending on
            // endianness.
            Constant *Elt = ConstantExpr::getLShr(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
            ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

            // Truncate and remember this piece.
            Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
          }
        }
      }

      return ConstantVector::get(Result.data(), Result.size());
    }
  }

  return 0;
}

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

/// ConstantFoldInstruction - Attempt to constant fold the specified
/// instruction.  If successful, the constant result is returned, if not, null
/// is returned.  Note that this function can only fail when attempting to fold
/// instructions like loads and stores, which have no constant expression form.
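//
// A minimal usage sketch (illustrative; I, Context, and TD are assumed to be
// supplied by the calling pass, and TD may be null):
//   if (Constant *C = ConstantFoldInstruction(I, Context, TD)) {
//     I->replaceAllUsesWith(C);
//     I->eraseFromParent();
//   }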
Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext &Context,
                                        const TargetData *TD) {
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    if (PN->getNumIncomingValues() == 0)
      return UndefValue::get(PN->getType());

    Constant *Result = dyn_cast<Constant>(PN->getIncomingValue(0));
    if (Result == 0) return 0;

    // Handle PHI nodes specially here...
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (PN->getIncomingValue(i) != Result && PN->getIncomingValue(i) != PN)
        return 0;  // Not all the same incoming constants...

    // If we reach here, all incoming values are the same constant.
    return Result;
  }

  // Scan the operand list, checking to see if they are all constants, if so,
  // hand off to ConstantFoldInstOperands.
  SmallVector<Constant*, 8> Ops;
  for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
    if (Constant *Op = dyn_cast<Constant>(*i))
      Ops.push_back(Op);
    else
      return 0;  // All operands not constant!

  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(),
                                           Ops.data(), Ops.size(),
                                           Context, TD);

  return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
                                  Ops.data(), Ops.size(), Context, TD);
}

/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified TargetData.  If successful, the constant result is
/// returned, if not, null is returned.
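//
// Sketch of a typical call site (illustrative; CE, Context, and TD are assumed
// to be in scope, TD possibly null):
//   if (Constant *Folded = ConstantFoldConstantExpression(CE, Context, TD))
//     /* use Folded in place of CE */;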
Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,
                                               LLVMContext &Context,
                                               const TargetData *TD) {
  SmallVector<Constant*, 8> Ops;
  for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i)
    Ops.push_back(cast<Constant>(*i));

  if (CE->isCompare())
    return ConstantFoldCompareInstOperands(CE->getPredicate(),
                                           Ops.data(), Ops.size(),
                                           Context, TD);
  return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(),
                                  Ops.data(), Ops.size(), Context, TD);
}

/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
/// specified opcode and operands.  If successful, the constant result is
/// returned, if not, null is returned.  Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
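//
// Illustrative sketch: folding "add i32 2, 3" directly through this API
// (Context is assumed to be in scope and TD may be null):
//   Constant *Ops[] = { ConstantInt::get(Type::getInt32Ty(Context), 2),
//                       ConstantInt::get(Type::getInt32Ty(Context), 3) };
//   Constant *Five = ConstantFoldInstOperands(Instruction::Add,
//                                             Type::getInt32Ty(Context),
//                                             Ops, 2, Context, TD);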
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
                                         Constant* const* Ops, unsigned NumOps,
                                         LLVMContext &Context,
                                         const TargetData *TD) {
  // Handle easy binops first.
  if (Instruction::isBinaryOp(Opcode)) {
    if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
      if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD,
                                                  Context))
        return C;

    return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
  }

  switch (Opcode) {
  default: return 0;
  case Instruction::Call:
    if (Function *F = dyn_cast<Function>(Ops[0]))
      if (canConstantFoldCallTo(F))
        return ConstantFoldCall(F, Ops+1, NumOps-1);
    return 0;
  case Instruction::ICmp:
  case Instruction::FCmp:
    llvm_unreachable("This function is invalid for compares: no predicate specified");
  case Instruction::PtrToInt:
    // If the input is a inttoptr, eliminate the pair.  This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
      if (TD && CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        if (TD->getPointerSizeInBits() < InWidth) {
          Constant *Mask =
            ConstantInt::get(Context, APInt::getLowBitsSet(InWidth,
                                                  TD->getPointerSizeInBits()));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size.  This requires knowing the width of a
    // pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
      if (TD &&
          TD->getPointerSizeInBits() <=
          CE->getType()->getScalarSizeInBits()) {
        if (CE->getOpcode() == Instruction::PtrToInt) {
          Constant *Input = CE->getOperand(0);
          Constant *C = FoldBitCast(Input, DestTy, *TD, Context);
          return C ? C : ConstantExpr::getBitCast(Input, DestTy);
        }
        // If there's a constant offset added to the integer value before
        // it is casted back to a pointer, see if the expression can be
        // converted into a GEP.
        if (CE->getOpcode() == Instruction::Add)
          if (ConstantInt *L = dyn_cast<ConstantInt>(CE->getOperand(0)))
            if (ConstantExpr *R = dyn_cast<ConstantExpr>(CE->getOperand(1)))
              if (R->getOpcode() == Instruction::PtrToInt)
                if (GlobalVariable *GV =
                      dyn_cast<GlobalVariable>(R->getOperand(0))) {
                  const PointerType *GVTy = cast<PointerType>(GV->getType());
                  if (const ArrayType *AT =
                        dyn_cast<ArrayType>(GVTy->getElementType())) {
                    const Type *ElTy = AT->getElementType();
                    uint64_t AllocSize = TD->getTypeAllocSize(ElTy);
                    APInt PSA(L->getValue().getBitWidth(), AllocSize);
                    if (ElTy == cast<PointerType>(DestTy)->getElementType() &&
                        L->getValue().urem(PSA) == 0) {
                      APInt ElemIdx = L->getValue().udiv(PSA);
                      if (ElemIdx.ult(APInt(ElemIdx.getBitWidth(),
                                            AT->getNumElements()))) {
                        Constant *Index[] = {
                          Constant::getNullValue(CE->getType()),
                          ConstantInt::get(Context, ElemIdx)
                        };
                        return
                          ConstantExpr::getGetElementPtr(GV, &Index[0], 2);
                      }
                    }
                  }
                }
      }
    }
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::BitCast:
    if (TD)
      if (Constant *C = FoldBitCast(Ops[0], DestTy, *TD, Context))
        return C;
    return ConstantExpr::getBitCast(Ops[0], DestTy);
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  case Instruction::GetElementPtr:
    if (Constant *C = SymbolicallyEvaluateGEP(Ops, NumOps, DestTy, Context, TD))
      return C;

    return ConstantExpr::getGetElementPtr(Ops[0], Ops+1, NumOps-1);
  }
}

/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands.  If it fails, it
/// returns a constant expression of the specified operands.
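//
// Sketch (illustrative; Ops holds the two compare operands, Context and TD as
// elsewhere in this file):
//   Constant *R = ConstantFoldCompareInstOperands(CmpInst::ICMP_EQ,
//                                                 Ops, 2, Context, TD);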
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *const *Ops,
                                                unsigned NumOps,
                                                LLVMContext &Context,
                                                const TargetData *TD) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // ConstantExpr::getCompare cannot do this, because it doesn't have TD
  // around to know if bit truncation is happening.
  if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops[0])) {
    if (TD && Ops[1]->isNullValue()) {
      const Type *IntPtrTy = TD->getIntPtrType(Context);
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *NewOps[] = { C, Constant::getNullValue(C->getType()) };
        return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,
                                               Context, TD);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt &&
          CE0->getType() == IntPtrTy) {
        Constant *C = CE0->getOperand(0);
        Constant *NewOps[] = { C, Constant::getNullValue(C->getType()) };
        return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,
                                               Context, TD);
      }
    }

    if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops[1])) {
      if (TD && CE0->getOpcode() == CE1->getOpcode()) {
        const Type *IntPtrTy = TD->getIntPtrType(Context);

        if (CE0->getOpcode() == Instruction::IntToPtr) {
          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          Constant *NewOps[] = { C0, C1 };
          return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,
                                                 Context, TD);
        }

        // Only do this transformation if the int is intptrty in size, otherwise
        // there is a truncation or extension that we aren't modeling.
        if ((CE0->getOpcode() == Instruction::PtrToInt &&
             CE0->getType() == IntPtrTy &&
             CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType())) {
          Constant *NewOps[] = {
            CE0->getOperand(0), CE1->getOperand(0)
          };
          return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,
                                                 Context, TD);
        }
      }
    }
  }

  return ConstantExpr::getCompare(Predicate, Ops[0], Ops[1]);
}

/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by
/// the constant expression, or null if something is funny and we can't decide.
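//
// For illustration (hypothetical): given a global
//   @A = constant [3 x i32] [i32 10, i32 20, i32 30]
// passing @A's initializer together with the constant expression
//   getelementptr ([3 x i32]* @A, i32 0, i32 2)
// returns i32 30.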
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE,
                                                       LLVMContext &Context) {
  if (CE->getOperand(1) != Constant::getNullValue(CE->getOperand(1)->getType()))
    return 0;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
  for (++I; I != E; ++I)
    if (const StructType *STy = dyn_cast<StructType>(*I)) {
      ConstantInt *CU = cast<ConstantInt>(I.getOperand());
      assert(CU->getZExtValue() < STy->getNumElements() &&
             "Struct index out of range!");
      unsigned El = (unsigned)CU->getZExtValue();
      if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
        C = CS->getOperand(El);
      } else if (isa<ConstantAggregateZero>(C)) {
        C = Constant::getNullValue(STy->getElementType(El));
      } else if (isa<UndefValue>(C)) {
        C = UndefValue::get(STy->getElementType(El));
      } else {
        return 0;
      }
    } else if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand())) {
      if (const ArrayType *ATy = dyn_cast<ArrayType>(*I)) {
        if (CI->getZExtValue() >= ATy->getNumElements())
          return 0;
        if (ConstantArray *CA = dyn_cast<ConstantArray>(C))
          C = CA->getOperand(CI->getZExtValue());
        else if (isa<ConstantAggregateZero>(C))
          C = Constant::getNullValue(ATy->getElementType());
        else if (isa<UndefValue>(C))
          C = UndefValue::get(ATy->getElementType());
        else
          return 0;
      } else if (const VectorType *PTy = dyn_cast<VectorType>(*I)) {
        if (CI->getZExtValue() >= PTy->getNumElements())
          return 0;
        if (ConstantVector *CP = dyn_cast<ConstantVector>(C))
          C = CP->getOperand(CI->getZExtValue());
        else if (isa<ConstantAggregateZero>(C))
          C = Constant::getNullValue(PTy->getElementType());
        else if (isa<UndefValue>(C))
          C = UndefValue::get(PTy->getElementType());
        else
          return 0;
      } else {
        return 0;
      }
    } else {
      return 0;
    }
  return C;
}

//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//===----------------------------------------------------------------------===//

/// canConstantFoldCallTo - Return true if it's even possible to fold a call to
/// the specified function.
bool
llvm::canConstantFoldCallTo(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::sqrt:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
    return true;
  default: break;
  }

  if (!F->hasName()) return false;
  StringRef Name = F->getName();

  // In these cases, the check of the length is required.  We don't want to
  // return true for a name like "cos\0blah" which strcmp would return equal to
  // "cos", but has length 8.
  switch (Name[0]) {
  default: return false;
  case 'a':
    return Name == "acos" || Name == "asin" ||
           Name == "atan" || Name == "atan2";
  case 'c':
    return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
  case 'e':
    return Name == "exp";
  case 'f':
    return Name == "fabs" || Name == "fmod" || Name == "floor";
  case 'l':
    return Name == "log" || Name == "log10";
  case 'p':
    return Name == "pow";
  case 's':
    return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
           Name == "sinf" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanh";
  }
}

static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
                                const Type *Ty, LLVMContext &Context) {
  errno = 0;
  V = NativeFP(V);
  if (errno != 0) {
    errno = 0;
    return 0;
  }

  if (Ty == Type::getFloatTy(Context))
    return ConstantFP::get(Context, APFloat((float)V));
  if (Ty == Type::getDoubleTy(Context))
    return ConstantFP::get(Context, APFloat(V));
  llvm_unreachable("Can only constant fold float/double");
  return 0; // dummy return to suppress warning
}

static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                                      double V, double W, const Type *Ty,
                                      LLVMContext &Context) {
  errno = 0;
  V = NativeFP(V, W);
  if (errno != 0) {
    errno = 0;
    return 0;
  }

  if (Ty == Type::getFloatTy(Context))
    return ConstantFP::get(Context, APFloat((float)V));
  if (Ty == Type::getDoubleTy(Context))
    return ConstantFP::get(Context, APFloat(V));
  llvm_unreachable("Can only constant fold float/double");
  return 0; // dummy return to suppress warning
}
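
// Illustrative behaviour of the wrappers above (Ty and Context assumed):
// ConstantFoldBinaryFP(pow, 2.0, 10.0, Ty, Context) produces a ConstantFP
// holding 1024.0, while a domain error such as log(-1.0) in ConstantFoldFP
// sets errno and makes the wrapper return null.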

/// ConstantFoldCall - Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
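//
// Sketch of a call-site fold (illustrative; F is the callee and Args is a
// hypothetical SmallVector holding the call's constant arguments):
//   if (canConstantFoldCallTo(F))
//     if (Constant *C = ConstantFoldCall(F, Args.data(), Args.size()))
//       /* replace the call's results with C */;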
Constant *
llvm::ConstantFoldCall(Function *F,
                       Constant* const* Operands, unsigned NumOperands) {
  if (!F->hasName()) return 0;
  LLVMContext &Context = F->getContext();
  StringRef Name = F->getName();

  const Type *Ty = F->getReturnType();
  if (NumOperands == 1) {
    if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
      if (Ty != Type::getFloatTy(F->getContext()) &&
          Ty != Type::getDoubleTy(Context))
        return 0;
      /// Currently APFloat versions of these functions do not exist, so we use
      /// the host native double versions.  Float versions are not called
      /// directly but for all these it is true (float)(f((double)arg)) ==
      /// f(arg).  Long double not supported yet.
      double V = Ty == Type::getFloatTy(F->getContext()) ?
                   (double)Op->getValueAPF().convertToFloat() :
                   Op->getValueAPF().convertToDouble();

      switch (Name[0]) {
      case 'a':
        if (Name == "acos")
          return ConstantFoldFP(acos, V, Ty, Context);
        else if (Name == "asin")
          return ConstantFoldFP(asin, V, Ty, Context);
        else if (Name == "atan")
          return ConstantFoldFP(atan, V, Ty, Context);
        break;
      case 'c':
        if (Name == "ceil")
          return ConstantFoldFP(ceil, V, Ty, Context);
        else if (Name == "cos")
          return ConstantFoldFP(cos, V, Ty, Context);
        else if (Name == "cosh")
          return ConstantFoldFP(cosh, V, Ty, Context);
        else if (Name == "cosf")
          return ConstantFoldFP(cos, V, Ty, Context);
        break;
      case 'e':
        if (Name == "exp")
          return ConstantFoldFP(exp, V, Ty, Context);
        break;
      case 'f':
        if (Name == "fabs")
          return ConstantFoldFP(fabs, V, Ty, Context);
        else if (Name == "floor")
          return ConstantFoldFP(floor, V, Ty, Context);
        break;
      case 'l':
        if (Name == "log" && V > 0)
          return ConstantFoldFP(log, V, Ty, Context);
        else if (Name == "log10" && V > 0)
          return ConstantFoldFP(log10, V, Ty, Context);
        else if (Name == "llvm.sqrt.f32" ||
                 Name == "llvm.sqrt.f64") {
          if (V >= -0.0)
            return ConstantFoldFP(sqrt, V, Ty, Context);
          else // Undefined
            return Constant::getNullValue(Ty);
        }
        break;
      case 's':
        if (Name == "sin")
          return ConstantFoldFP(sin, V, Ty, Context);
        else if (Name == "sinh")
          return ConstantFoldFP(sinh, V, Ty, Context);
        else if (Name == "sqrt" && V >= 0)
          return ConstantFoldFP(sqrt, V, Ty, Context);
        else if (Name == "sqrtf" && V >= 0)
          return ConstantFoldFP(sqrt, V, Ty, Context);
        else if (Name == "sinf")
          return ConstantFoldFP(sin, V, Ty, Context);
        break;
      case 't':
        if (Name == "tan")
          return ConstantFoldFP(tan, V, Ty, Context);
        else if (Name == "tanh")
          return ConstantFoldFP(tanh, V, Ty, Context);
        break;
      default:
        break;
      }
    } else if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
      if (Name.startswith("llvm.bswap"))
        return ConstantInt::get(Context, Op->getValue().byteSwap());
      else if (Name.startswith("llvm.ctpop"))
        return ConstantInt::get(Ty, Op->getValue().countPopulation());
      else if (Name.startswith("llvm.cttz"))
        return ConstantInt::get(Ty, Op->getValue().countTrailingZeros());
      else if (Name.startswith("llvm.ctlz"))
        return ConstantInt::get(Ty, Op->getValue().countLeadingZeros());
    }
  } else if (NumOperands == 2) {
    if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
      if (Ty != Type::getFloatTy(F->getContext()) &&
          Ty != Type::getDoubleTy(Context))
        return 0;
      double Op1V = Ty == Type::getFloatTy(F->getContext()) ?
                      (double)Op1->getValueAPF().convertToFloat() :
                      Op1->getValueAPF().convertToDouble();
      if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
        double Op2V = Ty == Type::getFloatTy(F->getContext()) ?
                        (double)Op2->getValueAPF().convertToFloat() :
                        Op2->getValueAPF().convertToDouble();

        if (Name == "pow") {
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty, Context);
        } else if (Name == "fmod") {
          return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty, Context);
        } else if (Name == "atan2") {
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty, Context);
        }
      } else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
        if (Name == "llvm.powi.f32") {
          return ConstantFP::get(Context, APFloat((float)std::pow((float)Op1V,
                                                  (int)Op2C->getZExtValue())));
        } else if (Name == "llvm.powi.f64") {
          return ConstantFP::get(Context, APFloat((double)std::pow((double)Op1V,
                                                  (int)Op2C->getZExtValue())));