//===-- ConstantFolding.cpp - Analyze constant folding possibilities ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions determines the possibility of performing constant
// folding.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include <cerrno>
#include <cmath>
using namespace llvm;

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

/// IsConstantOffsetFromGlobal - If this constant is actually a constant offset
/// from a global, return the global and the constant. Because of
/// constantexprs, this function is recursive.
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                       int64_t &Offset, const TargetData &TD) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    Offset = 0;
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Cannot compute this if the element type of the pointer is missing size
    // info.
    if (!cast<PointerType>(CE->getOperand(0)->getType())
            ->getElementType()->isSized())
      return false;

    // If the base isn't a global+constant, we aren't either.
    if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD))
      return false;

    // Otherwise, add any offset that our operands provide.
    gep_type_iterator GTI = gep_type_begin(CE);
    for (User::const_op_iterator i = CE->op_begin() + 1, e = CE->op_end();
         i != e; ++i, ++GTI) {
      ConstantInt *CI = dyn_cast<ConstantInt>(*i);
      if (!CI) return false;  // Index isn't a simple constant?
      if (CI->getZExtValue() == 0) continue;  // Not adding anything.

      if (const StructType *ST = dyn_cast<StructType>(*GTI)) {
        Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
      } else {
        const SequentialType *SQT = cast<SequentialType>(*GTI);
        Offset += TD.getTypeAllocSize(SQT->getElementType())*CI->getSExtValue();
      }
    }
    return true;
  }

  return false;
}

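// Worked example (hypothetical global @G; assumes i32 is 4 bytes and the
// struct has no extra padding): for the constant
//   getelementptr ({ i32, [4 x i32] }* @G, i32 0, i32 1, i32 2)
// this routine reports GV = @G and Offset = 4 + 2*4 = 12 bytes, i.e. the
// field offset of the array plus the scaled array index.
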
/// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together.  If target data info is available, it is provided as TD,
/// otherwise TD is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
                                           Constant *Op1, const TargetData *TD,
                                           LLVMContext &Context){
  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant.  This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub && TD) {
    GlobalValue *GV1, *GV2;
    int64_t Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *TD))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) &&
          GV1 == GV2) {
        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        return ConstantInt::get(Op0->getType(), Offs1-Offs2);
      }
  }

  return 0;
}

/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
                                         const Type *ResultTy,
                                         LLVMContext &Context,
                                         const TargetData *TD) {
  Constant *Ptr = Ops[0];
  if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized())
    return 0;

  unsigned BitWidth = TD->getTypeSizeInBits(TD->getIntPtrType(Context));
  APInt BasePtr(BitWidth, 0);
  bool BaseIsInt = true;
  if (!Ptr->isNullValue()) {
    // If this is a inttoptr from a constant int, we can fold this as the base,
    // otherwise we can't.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
      if (CE->getOpcode() == Instruction::IntToPtr)
        if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0))) {
          BasePtr = Base->getValue();
          BasePtr.zextOrTrunc(BitWidth);
        }
    if (BasePtr == 0)
      BaseIsInt = false;
  }

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
  for (unsigned i = 1; i != NumOps; ++i)
    if (!isa<ConstantInt>(Ops[i]))
      return 0;

  APInt Offset = APInt(BitWidth,
                       TD->getIndexedOffset(Ptr->getType(),
                                            (Value**)Ops+1, NumOps-1));
  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  if (BaseIsInt) {
    Constant *C = ConstantInt::get(Context, Offset+BasePtr);
    return ConstantExpr::getIntToPtr(C, ResultTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  const Type *Ty = Ptr->getType();
  SmallVector<Constant*, 32> NewIdxs;
  do {
    if (const SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
      // The only pointer indexing we'll do is on the first index of the GEP.
      if (isa<PointerType>(ATy) && !NewIdxs.empty())
        break;
      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
      if (ElemSize == 0)
        return 0;
      APInt NewIdx = Offset.udiv(ElemSize);
      Offset -= NewIdx * ElemSize;
      NewIdxs.push_back(ConstantInt::get(TD->getIntPtrType(Context), NewIdx));
      Ty = ATy->getElementType();
    } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
      // Determine which field of the struct the offset points into. The
      // getZExtValue is at least as safe as the StructLayout API because we
      // know the offset is within the struct at this point.
      const StructLayout &SL = *TD->getStructLayout(STy);
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    } else {
      // We've reached some non-indexable type.
      break;
    }
  } while (Ty != cast<PointerType>(ResultTy)->getElementType());

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return 0;

  // If the base is the start of a GlobalVariable and all the array indices
  // remain in their static bounds, the GEP is inbounds. We can check that
  // all indices are in bounds by just checking the first index only
  // because we've just normalized all the indices.
  Constant *C = isa<GlobalVariable>(Ptr) && NewIdxs[0]->isNullValue() ?
    ConstantExpr::getInBoundsGetElementPtr(Ptr, &NewIdxs[0], NewIdxs.size()) :
    ConstantExpr::getGetElementPtr(Ptr, &NewIdxs[0], NewIdxs.size());
  assert(cast<PointerType>(C->getType())->getElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != cast<PointerType>(ResultTy)->getElementType())
    C = ConstantExpr::getBitCast(C, ResultTy);

  return C;
}

/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// targetdata.  Return 0 if unfoldable.
static Constant *FoldBitCast(Constant *C, const Type *DestTy,
                             const TargetData &TD, LLVMContext &Context) {
  // If this is a bitcast from constant vector -> vector, fold it.
  if (ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
    if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
      // If the element types match, VMCore can fold it.
      unsigned NumDstElt = DestVTy->getNumElements();
      unsigned NumSrcElt = CV->getNumOperands();
      if (NumDstElt == NumSrcElt)
        return 0;

      const Type *SrcEltTy = CV->getType()->getElementType();
      const Type *DstEltTy = DestVTy->getElementType();

      // Otherwise, we're changing the number of elements in a vector, which
      // requires endianness information to do the right thing.  For example,
      //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
      // folds to (little endian):
      //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
      // and to (big endian):
      //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

      // First thing is first.  We only want to think about integer here, so if
      // we have something in FP form, recast it as integer.
      if (DstEltTy->isFloatingPoint()) {
        // Fold to an vector of integers with same size as our FP type.
        unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
        const Type *DestIVTy = VectorType::get(
                                 IntegerType::get(Context, FPWidth), NumDstElt);
        // Recursively handle this integer conversion, if possible.
        C = FoldBitCast(C, DestIVTy, TD, Context);
        if (!C) return 0;

        // Finally, VMCore can handle this now that #elts line up.
        return ConstantExpr::getBitCast(C, DestTy);
      }

      // Okay, we know the destination is integer, if the input is FP, convert
      // it to integer first.
      if (SrcEltTy->isFloatingPoint()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        const Type *SrcIVTy = VectorType::get(
                                 IntegerType::get(Context, FPWidth), NumSrcElt);
        // Ask VMCore to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
        CV = dyn_cast<ConstantVector>(C);
        if (!CV) return 0;  // If VMCore wasn't able to fold it, bail out.
      }

      // Now we know that the input and output vectors are both integer vectors
      // of the same size, and that their #elements is not the same.  Do the
      // conversion here, which depends on whether the input or output has
      // more elements.
      bool isLittleEndian = TD.isLittleEndian();

      SmallVector<Constant*, 32> Result;
      if (NumDstElt < NumSrcElt) {
        // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
        Constant *Zero = Constant::getNullValue(DstEltTy);
        unsigned Ratio = NumSrcElt/NumDstElt;
        unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
        unsigned SrcElt = 0;
        for (unsigned i = 0; i != NumDstElt; ++i) {
          // Build each element of the result.
          Constant *Elt = Zero;
          unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
          for (unsigned j = 0; j != Ratio; ++j) {
            Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(SrcElt++));
            if (!Src) return 0;  // Reject constantexpr elements.

            // Zero extend the element to the right size.
            Src = ConstantExpr::getZExt(Src, Elt->getType());

            // Shift it to the right place, depending on endianness.
            Src = ConstantExpr::getShl(Src,
                                       ConstantInt::get(Src->getType(), ShiftAmt));
            ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

            // Mix it in.
            Elt = ConstantExpr::getOr(Elt, Src);
          }
          Result.push_back(Elt);
        }
      } else {
        // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
        unsigned Ratio = NumDstElt/NumSrcElt;
        unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();

        // Loop over each source value, expanding into multiple results.
        for (unsigned i = 0; i != NumSrcElt; ++i) {
          Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(i));
          if (!Src) return 0;  // Reject constantexpr elements.

          unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
          for (unsigned j = 0; j != Ratio; ++j) {
            // Shift the piece of the value into the right place, depending on
            // endianness.
            Constant *Elt = ConstantExpr::getLShr(Src,
                                    ConstantInt::get(Src->getType(), ShiftAmt));
            ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

            // Truncate and remember this piece.
            Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
          }
        }
      }

      return ConstantVector::get(Result.data(), Result.size());
    }
  }

  return 0;
}

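// Worked example of the NumDstElt < NumSrcElt path above (little endian):
//   bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
// folds to
//   <2 x i64> <i64 0x0000000100000000, i64 0x0000000300000002>
// i.e. each pair of i32 elements is zero-extended, shifted into place, and
// or'd together to form one i64 result element.
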
//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

/// ConstantFoldInstruction - Attempt to constant fold the specified
/// instruction.  If successful, the constant result is returned, if not, null
/// is returned.  Note that this function can only fail when attempting to fold
/// instructions like loads and stores, which have no constant expression form.
Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext &Context,
                                        const TargetData *TD) {
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    if (PN->getNumIncomingValues() == 0)
      return UndefValue::get(PN->getType());

    Constant *Result = dyn_cast<Constant>(PN->getIncomingValue(0));
    if (Result == 0) return 0;

    // Handle PHI nodes specially here...
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (PN->getIncomingValue(i) != Result && PN->getIncomingValue(i) != PN)
        return 0;   // Not all the same incoming constants...

    // If we reach here, all incoming values are the same constant.
    return Result;
  }

  // Scan the operand list, checking to see if they are all constants, if so,
  // hand off to ConstantFoldInstOperands.
  SmallVector<Constant*, 8> Ops;
  for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
    if (Constant *Op = dyn_cast<Constant>(*i))
      Ops.push_back(Op);
    else
      return 0;  // All operands not constant!

  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(),
                                           Ops.data(), Ops.size(),
                                           Context, TD);

  return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
                                  Ops.data(), Ops.size(), Context, TD);
}

/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified TargetData.  If successful, the constant result is
/// returned, if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,
                                               LLVMContext &Context,
                                               const TargetData *TD) {
  SmallVector<Constant*, 8> Ops;
  for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i)
    Ops.push_back(cast<Constant>(*i));

  if (CE->isCompare())
    return ConstantFoldCompareInstOperands(CE->getPredicate(),
                                           Ops.data(), Ops.size(),
                                           Context, TD);

  return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(),
                                  Ops.data(), Ops.size(), Context, TD);
}

/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
/// specified opcode and operands.  If successful, the constant result is
/// returned, if not, null is returned.  Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
                                         Constant* const* Ops, unsigned NumOps,
                                         LLVMContext &Context,
                                         const TargetData *TD) {
  // Handle easy binops first.
  if (Instruction::isBinaryOp(Opcode)) {
    if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
      if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD,
                                                  Context))
        return C;

    return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
  }

  switch (Opcode) {
  default: return 0;
  case Instruction::Call:
    if (Function *F = dyn_cast<Function>(Ops[0]))
      if (canConstantFoldCallTo(F))
        return ConstantFoldCall(F, Ops+1, NumOps-1);
    return 0;
  case Instruction::ICmp:
  case Instruction::FCmp:
    llvm_unreachable("This function is invalid for compares: no predicate specified");
  case Instruction::PtrToInt:
    // If the input is a inttoptr, eliminate the pair.  This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
      if (TD && CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        if (TD->getPointerSizeInBits() < InWidth) {
          Constant *Mask =
            ConstantInt::get(Context, APInt::getLowBitsSet(InWidth,
                                                  TD->getPointerSizeInBits()));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size.  This requires knowing the width of a
    // pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
      if (TD &&
          TD->getPointerSizeInBits() <=
          CE->getType()->getScalarSizeInBits()) {
        if (CE->getOpcode() == Instruction::PtrToInt) {
          Constant *Input = CE->getOperand(0);
          Constant *C = FoldBitCast(Input, DestTy, *TD, Context);
          return C ? C : ConstantExpr::getBitCast(Input, DestTy);
        }
        // If there's a constant offset added to the integer value before
        // it is casted back to a pointer, see if the expression can be
        // converted into a GEP.
        if (CE->getOpcode() == Instruction::Add)
          if (ConstantInt *L = dyn_cast<ConstantInt>(CE->getOperand(0)))
            if (ConstantExpr *R = dyn_cast<ConstantExpr>(CE->getOperand(1)))
              if (R->getOpcode() == Instruction::PtrToInt)
                if (GlobalVariable *GV =
                      dyn_cast<GlobalVariable>(R->getOperand(0))) {
                  const PointerType *GVTy = cast<PointerType>(GV->getType());
                  if (const ArrayType *AT =
                        dyn_cast<ArrayType>(GVTy->getElementType())) {
                    const Type *ElTy = AT->getElementType();
                    uint64_t AllocSize = TD->getTypeAllocSize(ElTy);
                    APInt PSA(L->getValue().getBitWidth(), AllocSize);
                    if (ElTy == cast<PointerType>(DestTy)->getElementType() &&
                        L->getValue().urem(PSA) == 0) {
                      APInt ElemIdx = L->getValue().udiv(PSA);
                      if (ElemIdx.ult(APInt(ElemIdx.getBitWidth(),
                                            AT->getNumElements()))) {
                        Constant *Index[] = {
                          Constant::getNullValue(CE->getType()),
                          ConstantInt::get(Context, ElemIdx)
                        };
                        return
                          ConstantExpr::getGetElementPtr(GV, &Index[0], 2);
                      }
                    }
                  }
                }
      }
    }
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::BitCast:
    if (TD)
      if (Constant *C = FoldBitCast(Ops[0], DestTy, *TD, Context))
        return C;
    return ConstantExpr::getBitCast(Ops[0], DestTy);
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  case Instruction::GetElementPtr:
    if (Constant *C = SymbolicallyEvaluateGEP(Ops, NumOps, DestTy, Context, TD))
      return C;

    return ConstantExpr::getGetElementPtr(Ops[0], Ops+1, NumOps-1);
  }
}

/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands.  If it fails, it
/// returns a constant expression of the specified operands.
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *const * Ops,
                                                unsigned NumOps,
                                                LLVMContext &Context,
                                                const TargetData *TD) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // ConstantExpr::getCompare cannot do this, because it doesn't have TD
  // around to know if bit truncation is happening.
  if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops[0])) {
    if (TD && Ops[1]->isNullValue()) {
      const Type *IntPtrTy = TD->getIntPtrType(Context);
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *NewOps[] = { C, Constant::getNullValue(C->getType()) };
        return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,
                                               Context, TD);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt &&
          CE0->getType() == IntPtrTy) {
        Constant *C = CE0->getOperand(0);
        Constant *NewOps[] = { C, Constant::getNullValue(C->getType()) };
        return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,
                                               Context, TD);
      }
    }

    if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops[1])) {
      if (TD && CE0->getOpcode() == CE1->getOpcode()) {
        const Type *IntPtrTy = TD->getIntPtrType(Context);

        if (CE0->getOpcode() == Instruction::IntToPtr) {
          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          Constant *NewOps[] = { C0, C1 };
          return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,
                                                 Context, TD);
        }

        // Only do this transformation if the int is intptrty in size, otherwise
        // there is a truncation or extension that we aren't modeling.
        if ((CE0->getOpcode() == Instruction::PtrToInt &&
             CE0->getType() == IntPtrTy &&
             CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType())) {
          Constant *NewOps[] = {
            CE0->getOperand(0), CE1->getOperand(0)
          };
          return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,
                                                 Context, TD);
        }
      }
    }
  }

  return ConstantExpr::getCompare(Predicate, Ops[0], Ops[1]);
}

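// Worked example of the last fold above (hypothetical globals @A and @B;
// assumes 64-bit pointers so i64 is the intptr type):
//   icmp ult (i64 ptrtoint (i32* @A to i64)), (i64 ptrtoint (i32* @B to i64))
// is rewritten to compare the pointers directly:
//   icmp ult i32* @A, i32* @B
// which the recursive call then hands to ConstantExpr::getCompare.
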
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by
/// the constant expression, or null if something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE,
                                                       LLVMContext &Context) {
  if (CE->getOperand(1) != Constant::getNullValue(CE->getOperand(1)->getType()))
    return 0;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing...
  gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
  for (++I; I != E; ++I)
    if (const StructType *STy = dyn_cast<StructType>(*I)) {
      ConstantInt *CU = cast<ConstantInt>(I.getOperand());
      assert(CU->getZExtValue() < STy->getNumElements() &&
             "Struct index out of range!");
      unsigned El = (unsigned)CU->getZExtValue();
      if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
        C = CS->getOperand(El);
      } else if (isa<ConstantAggregateZero>(C)) {
        C = Constant::getNullValue(STy->getElementType(El));
      } else if (isa<UndefValue>(C)) {
        C = UndefValue::get(STy->getElementType(El));
      } else {
        return 0;
      }
    } else if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand())) {
      if (const ArrayType *ATy = dyn_cast<ArrayType>(*I)) {
        if (CI->getZExtValue() >= ATy->getNumElements())
          return 0;
        if (ConstantArray *CA = dyn_cast<ConstantArray>(C))
          C = CA->getOperand(CI->getZExtValue());
        else if (isa<ConstantAggregateZero>(C))
          C = Constant::getNullValue(ATy->getElementType());
        else if (isa<UndefValue>(C))
          C = UndefValue::get(ATy->getElementType());
        else
          return 0;
      } else if (const VectorType *PTy = dyn_cast<VectorType>(*I)) {
        if (CI->getZExtValue() >= PTy->getNumElements())
          return 0;
        if (ConstantVector *CP = dyn_cast<ConstantVector>(C))
          C = CP->getOperand(CI->getZExtValue());
        else if (isa<ConstantAggregateZero>(C))
          C = Constant::getNullValue(PTy->getElementType());
        else if (isa<UndefValue>(C))
          C = UndefValue::get(PTy->getElementType());
        else
          return 0;
      } else {
        return 0;
      }
    } else {
      return 0;
    }
  return C;
}

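// Worked example (hypothetical constant global): given
//   @T = constant { i32, [2 x i32] } { i32 1, [2 x i32] [i32 2, i32 3] }
// and the constant expression
//   getelementptr ({ i32, [2 x i32] }* @T, i32 0, i32 1, i32 1)
// this routine walks @T's initializer through field 1 and then element 1 and
// returns i32 3, allowing a load of that address to be folded away.
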
//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

/// canConstantFoldCallTo - Return true if it's even possible to fold a call to
/// the specified function.
bool
llvm::canConstantFoldCallTo(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::sqrt:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
    return true;
  default: break;
  }

  if (!F->hasName()) return false;
  StringRef Name = F->getName();

  // In these cases, the check of the length is required.  We don't want to
  // return true for a name like "cos\0blah" which strcmp would return equal to
  // "cos", but has length 8.
  switch (Name[0]) {
  default: return false;
  case 'a':
    return Name == "acos" || Name == "asin" ||
           Name == "atan" || Name == "atan2";
  case 'c':
    return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
  case 'e':
    return Name == "exp";
  case 'f':
    return Name == "fabs" || Name == "fmod" || Name == "floor";
  case 'l':
    return Name == "log" || Name == "log10";
  case 'p':
    return Name == "pow";
  case 's':
    return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
           Name == "sinf" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanh";
  }
}

static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
                                const Type *Ty, LLVMContext &Context) {
  errno = 0;
  V = NativeFP(V);
  if (errno != 0) {
    errno = 0;
    return 0;
  }

  if (Ty == Type::getFloatTy(Context))
    return ConstantFP::get(Context, APFloat((float)V));
  if (Ty == Type::getDoubleTy(Context))
    return ConstantFP::get(Context, APFloat(V));
  llvm_unreachable("Can only constant fold float/double");
  return 0; // dummy return to suppress warning
}

static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                                      double V, double W,
                                      const Type *Ty,
                                      LLVMContext &Context) {
  errno = 0;
  V = NativeFP(V, W);
  if (errno != 0) {
    errno = 0;
    return 0;
  }

  if (Ty == Type::getFloatTy(Context))
    return ConstantFP::get(Context, APFloat((float)V));
  if (Ty == Type::getDoubleTy(Context))
    return ConstantFP::get(Context, APFloat(V));
  llvm_unreachable("Can only constant fold float/double");
  return 0; // dummy return to suppress warning
}

/// ConstantFoldCall - Attempt to constant fold a call to the specified
/// function with the specified arguments, returning null if unsuccessful.
Constant *
llvm::ConstantFoldCall(Function *F,
                       Constant* const* Operands, unsigned NumOperands) {
  if (!F->hasName()) return 0;
  LLVMContext &Context = F->getContext();
  StringRef Name = F->getName();

  const Type *Ty = F->getReturnType();
  if (NumOperands == 1) {
    if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
      if (Ty!=Type::getFloatTy(F->getContext()) &&
          Ty!=Type::getDoubleTy(Context))
        return 0;
      /// Currently APFloat versions of these functions do not exist, so we use
      /// the host native double versions.  Float versions are not called
      /// directly but for all these it is true (float)(f((double)arg)) ==
      /// f(arg).  Long double not supported yet.
      double V = Ty==Type::getFloatTy(F->getContext()) ?
                   (double)Op->getValueAPF().convertToFloat():
                   Op->getValueAPF().convertToDouble();
      switch (Name[0]) {
      case 'a':
        if (Name == "acos")
          return ConstantFoldFP(acos, V, Ty, Context);
        else if (Name == "asin")
          return ConstantFoldFP(asin, V, Ty, Context);
        else if (Name == "atan")
          return ConstantFoldFP(atan, V, Ty, Context);
        break;
      case 'c':
        if (Name == "ceil")
          return ConstantFoldFP(ceil, V, Ty, Context);
        else if (Name == "cos")
          return ConstantFoldFP(cos, V, Ty, Context);
        else if (Name == "cosh")
          return ConstantFoldFP(cosh, V, Ty, Context);
        else if (Name == "cosf")
          return ConstantFoldFP(cos, V, Ty, Context);
        break;
      case 'e':
        if (Name == "exp")
          return ConstantFoldFP(exp, V, Ty, Context);
        break;
      case 'f':
        if (Name == "fabs")
          return ConstantFoldFP(fabs, V, Ty, Context);
        else if (Name == "floor")
          return ConstantFoldFP(floor, V, Ty, Context);
        break;
      case 'l':
        if (Name == "log" && V > 0)
          return ConstantFoldFP(log, V, Ty, Context);
        else if (Name == "log10" && V > 0)
          return ConstantFoldFP(log10, V, Ty, Context);
        else if (Name == "llvm.sqrt.f32" ||
                 Name == "llvm.sqrt.f64") {
          if (V >= -0.0)
            return ConstantFoldFP(sqrt, V, Ty, Context);
          else // Undefined
            return Constant::getNullValue(Ty);
        }
        break;
      case 's':
        if (Name == "sin")
          return ConstantFoldFP(sin, V, Ty, Context);
        else if (Name == "sinh")
          return ConstantFoldFP(sinh, V, Ty, Context);
        else if (Name == "sqrt" && V >= 0)
          return ConstantFoldFP(sqrt, V, Ty, Context);
        else if (Name == "sqrtf" && V >= 0)
          return ConstantFoldFP(sqrt, V, Ty, Context);
        else if (Name == "sinf")
          return ConstantFoldFP(sin, V, Ty, Context);
        break;
      case 't':
        if (Name == "tan")
          return ConstantFoldFP(tan, V, Ty, Context);
        else if (Name == "tanh")
          return ConstantFoldFP(tanh, V, Ty, Context);
        break;
      }
      return 0;
    } else if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
      if (Name.startswith("llvm.bswap"))
        return ConstantInt::get(Context, Op->getValue().byteSwap());
      else if (Name.startswith("llvm.ctpop"))
        return ConstantInt::get(Ty, Op->getValue().countPopulation());
      else if (Name.startswith("llvm.cttz"))
        return ConstantInt::get(Ty, Op->getValue().countTrailingZeros());
      else if (Name.startswith("llvm.ctlz"))
        return ConstantInt::get(Ty, Op->getValue().countLeadingZeros());
    }
  } else if (NumOperands == 2) {
    if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
      if (Ty!=Type::getFloatTy(F->getContext()) &&
          Ty!=Type::getDoubleTy(Context))
        return 0;
      double Op1V = Ty==Type::getFloatTy(F->getContext()) ?
                      (double)Op1->getValueAPF().convertToFloat():
                      Op1->getValueAPF().convertToDouble();
      if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
        double Op2V = Ty==Type::getFloatTy(F->getContext()) ?
                      (double)Op2->getValueAPF().convertToFloat():
                      Op2->getValueAPF().convertToDouble();

        if (Name == "pow") {
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty, Context);
        } else if (Name == "fmod") {
          return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty, Context);
        } else if (Name == "atan2") {
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty, Context);
        }
      } else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
        if (Name == "llvm.powi.f32") {
          return ConstantFP::get(Context, APFloat((float)std::pow((float)Op1V,
                                                 (int)Op2C->getZExtValue())));
        } else if (Name == "llvm.powi.f64") {
          return ConstantFP::get(Context, APFloat((double)std::pow((double)Op1V,
                                                 (int)Op2C->getZExtValue())));