//===-- ConstantFolding.cpp - Fold instructions into constants -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
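  // For example, on a little-endian target, folding
  // <2 x i16> <i16 0x1234, i16 0x5678> to i32 visits element 1 first and
  // produces 0x56781234, matching the in-memory byte order 34 12 78 56.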
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
  }

  return nullptr;
}

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = VTy->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating point, convert it to a vector
      // of integers to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        Type *SrcIVTy =
            VectorType::get(IntegerType::get(C->getContext(), FPWidth),
                            NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    Type *DestIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    Type *SrcIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(C->getType()->getVectorElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                            ConstantInt::get(Src->getType(),
                                                             ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
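/// For example, for the constant expression
/// getelementptr ([5 x i32]* @a, i32 0, i32 3), this returns @a in GV and a
/// byte offset of 12.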
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

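/// Fold a load of DestTy from the constant C: cast C directly when the type
/// sizes match, otherwise walk down through C's initial aggregate elements
/// looking for a value that can be cast.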
Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill down
    // and find a bitcastable constant.
    if (!SrcTy->isAggregateType())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    C = C->getAggregateElement(0u);
  } while (C);

  return nullptr;
}

namespace {

/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = C->getType()->getSequentialElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (auto *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = C->getType()->getVectorNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                          const DataLayout &DL) {
  auto *PTy = cast<PointerType>(C->getType());
  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64
    // load and then bitcast the result. This can be useful for union cases.
    // Note that address spaces don't matter here since we're not going to
    // result in an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16Ty(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32Ty(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64Ty(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNTy(C->getContext(),
                                     DL.getTypeAllocSizeInBits(LoadTy));
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL))
      return FoldBitCast(Res, LoadTy, DL);
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  GlobalValue *GVal;
  APInt OffsetAI;
  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
    return nullptr;

  auto *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  int64_t Offset = OffsetAI.getSExtValue();
  int64_t InitializerSize =
      DL.getTypeAllocSize(GV->getInitializer()->getType());

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset + BytesLoaded <= 0)
    return UndefValue::get(IntType);

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset >= InitializerSize)
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
    return nullptr;

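  // Reassemble the loaded bytes into an integer: e.g. the bytes
  // { 0x11, 0x22, 0x33, 0x44 } become 0x44332211 on a little-endian target
  // and 0x11223344 on a big-endian one.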
  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
                                             const DataLayout &DL) {
  auto *SrcPtr = CE->getOperand(0);
  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
  if (!SrcPtrTy)
    return nullptr;
  Type *SrcTy = SrcPtrTy->getPointerElementType();

  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
  if (!C)
    return nullptr;

  return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  if (auto *GA = dyn_cast<GlobalAlias>(C))
    if (GA->getAliasee() && !GA->isInterposable())
      return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);

  // If the loaded value isn't a constant expr, we can't handle it.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
                ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(),
                                                       CE))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
      return LoadedC;

  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
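  // For example, an i32 load from a pointer to the constant string "abc"
  // (plus its nul terminator) folds to 0x00636261 on a little-endian target.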
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    size_t StrLen = Str.size();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace load with immediate integer if the result is an integer or fp
    // value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (unsigned char C : reverse(Str.bytes())) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned char C : Str.bytes()) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append NULL at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(Ty);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(Ty);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
}

namespace {

Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
  if (LI->isVolatile()) return nullptr;

  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);

  return nullptr;
}

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together, using any DataLayout information available in DL.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0.Zero |= Known1.Zero;
    Known0.One &= Known1.One;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
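  // For example, with i32 elements, &A[3] - &A[1] folds to the constant 8.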
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntPtrTy = DL.getIntPtrType(ResultTy);
  Type *IntPtrScalarTy = IntPtrTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntPtrScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntPtrTy
                          : IntPtrTy->getScalarType();
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  if (Constant *Folded = ConstantFoldConstant(C, DL, TLI))
    C = Folded;

  return C;
}

/// Strip the pointer casts, but preserve the address space information.
Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = Ptr->stripPointerCasts();
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  ElemTy = NewPtrTy->getPointerElementType();

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// If we can symbolically evaluate the GEP constant expression, do so.
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized())
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntPtrTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResTy);
          if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
            Res = FoldedRes;
          return Res;
        }
      }
      return nullptr;
    }

  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = PTy;
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (!Ty->isStructTy()) {
      if (Ty->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        Ty = SrcElemTy;

        // Only handle pointers to sized types, not pointers to functions.
        if (!Ty->isSized())
          return nullptr;
      } else if (auto *ATy = dyn_cast<SequentialType>(Ty)) {
        Ty = ATy->getElementType();
      } else {
        // We've reached some non-indexable type.
        break;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
      if (ElemSize == 0) {
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
      } else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down) to compute the index at this level.
        bool Overflow;
        APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
        if (Overflow)
          break;
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
      }
    } else {
      auto *STy = cast<StructType>(Ty);
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    }
  } while (Ty != ResElemTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResElemTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned; if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  // Handle easy binops first.
  if (Instruction::isBinaryOp(Opcode))
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      ImmutableCallSite CS(cast<CallInst>(InstOrCE));
      if (canConstantFoldCallTo(CS, F))
        return ConstantFoldCall(CS, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

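/// Fold C, recursively folding its operands first. FoldedOps memoizes
/// constants that have already been visited so that shared subexpressions
/// are only folded once.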
Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return nullptr;

  SmallVector<Constant *, 8> Ops;
  for (const Use &NewU : C->operands()) {
    auto *NewC = cast<Constant>(&NewU);
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(NewC) || isa<ConstantExpr>(NewC)) {
      auto It = FoldedOps.find(NewC);
      if (It == FoldedOps.end()) {
        if (auto *FoldedC =
                ConstantFoldConstantImpl(NewC, DL, TLI, FoldedOps)) {
          FoldedOps.insert({NewC, FoldedC});
          NewC = FoldedC;
        } else {
          FoldedOps.insert({NewC, NewC});
        }
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                             DL, TLI);

    return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      if (auto *FoldedC = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps))
        C = FoldedC;
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    if (auto *FoldedOp = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps))
      Op = FoldedOp;

    Ops.push_back(Op);
  }

  if (const auto *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const auto *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, DL);

  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
        cast<Constant>(IVI->getAggregateOperand()),
        cast<Constant>(IVI->getInsertedValueOperand()),
        IVI->getIndices());
  }

  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
        cast<Constant>(EVI->getAggregateOperand()),
        EVI->getIndices());
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}

Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptrty in size,
        // otherwise there is a truncation or extension that we aren't
        // modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  return ConstantExpr::get(Opcode, LHS, RHS);
}

Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
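    // For example, on a target with 32-bit pointers,
    //   ptrtoint (inttoptr (i64 X to i8*) to i64)
    // folds to 'and i64 X, 0xFFFFFFFF', keeping only the bits that survive
    // the round trip through the narrower pointer type.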
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
        if (PtrWidth < InWidth) {
          Constant *Mask =
              ConstantInt::get(CE->getContext(),
                               APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }

    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
}

Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (!C)
      return nullptr;
  }
  return C;
}

Constant *
llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                        ArrayRef<Constant *> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (Constant *Index : Indices) {
    C = C->getAggregateElement(Index);
    if (!C)
      return nullptr;
  }
  return C;
}

//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

bool llvm::canConstantFoldCallTo(ImmutableCallSite CS, const Function *F) {
  if (CS.isNoBuiltin() || CS.isStrictFP())
    return false;
  switch (F->getIntrinsicID()) {
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::copysign:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::round:
  case Intrinsic::masked_load:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::bitreverse:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic: break;
  }

  if (!F->hasName())
    return false;
  StringRef Name = F->getName();

  // In these cases, the check of the length is required. We don't want to
  // return true for a name like "cos\0blah" which strcmp would return equal to
  // "cos", but has length 8.
  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "asin" || Name == "atan" ||
           Name == "atan2" || Name == "acosf" || Name == "asinf" ||
           Name == "atanf" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "cos" || Name == "cosh" ||
           Name == "ceilf" || Name == "cosf" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "exp2" || Name == "expf" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "floor" || Name == "fmod" ||
           Name == "fabsf" || Name == "floorf" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "log10" || Name == "logf" ||
           Name == "log10f";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
           Name == "sinf" || Name == "sinhf" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanh" || Name == "tanf" || Name == "tanhf";
  case '_':

    // Check for various function names that get used for the math functions
    // when the header files are preprocessed with the macro
    // __FINITE_MATH_ONLY__ enabled.
    // The '12' here is the length of the shortest name that can match.
    // We need to check the size before looking at Name[1] and Name[2]
    // so we may as well check a limit that will eliminate mismatches.
    if (Name.size() < 12 || Name[1] != '_')
      return false;
    switch (Name[2]) {
    default:
      return false;
    case 'a':
      return Name == "__acos_finite" || Name == "__acosf_finite" ||
             Name == "__asin_finite" || Name == "__asinf_finite" ||
             Name == "__atan2_finite" || Name == "__atan2f_finite";
    case 'c':
      return Name == "__cosh_finite" || Name == "__coshf_finite";
    case 'e':
      return Name == "__exp_finite" || Name == "__expf_finite" ||
             Name == "__exp2_finite" || Name == "__exp2f_finite";
    case 'l':
      return Name == "__log_finite" || Name == "__logf_finite" ||
             Name == "__log10_finite" || Name == "__log10f_finite";
    case 'p':
      return Name == "__pow_finite" || Name == "__powf_finite";
    case 's':
      return Name == "__sinh_finite" || Name == "__sinhf_finite";
    }
  }
}

namespace {

Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

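/// Evaluate NativeFP(V) on the host, rejecting the result if the evaluation
/// set errno or raised a floating-point exception other than "inexact".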
Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
                               double W, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

/// Attempt to fold an SSE floating point to integer conversion of a constant
/// floating point. If roundTowardZero is false, the default IEEE rounding is
/// used (toward nearest, ties to even). This matches the behavior of the
/// non-truncating SSE instructions in the default rounding mode. The desired
/// integer type Ty is used to select how many bits are available for the
/// result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr;
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}


double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isFloatTy())
    return Op->getValueAPF().convertToFloat();

  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}
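
// For the half values that reach the generic path above, the widening is
// lossless: every IEEE half (and float) value is exactly representable as a
// double, so the rounding mode is moot.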

Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
                                 ArrayRef<Constant *> Operands,
                                 const TargetLibraryInfo *TLI,
                                 ImmutableCallSite CS) {
  if (Operands.size() == 1) {
    if (isa<UndefValue>(Operands[0])) {
      // cos(undef) folds to 0.0: cos(x) always lies in [-1, 1], and cos of
      // an invalid argument is NaN, so 0.0 is consistent with both.
      if (IntrinsicID == Intrinsic::cos)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::bswap ||
          IntrinsicID == Intrinsic::bitreverse ||
          IntrinsicID == Intrinsic::launder_invariant_group ||
          IntrinsicID == Intrinsic::strip_invariant_group)
        return Operands[0];
    }

    if (isa<ConstantPointerNull>(Operands[0])) {
      // launder(null) == null == strip(null) iff we are in address space 0.
      if (IntrinsicID == Intrinsic::launder_invariant_group ||
          IntrinsicID == Intrinsic::strip_invariant_group) {
        // If the instruction is not yet inserted into a basic block (e.g.
        // when cloning a function during inlining), the caller may not be
        // available, so check CS's parent block before querying
        // CS.getCaller().
        const Function *Caller = CS.getParent() ? CS.getCaller() : nullptr;
        if (Caller &&
            !NullPointerIsDefined(
                Caller, Operands[0]->getType()->getPointerAddressSpace()))
          return Operands[0];
        return nullptr;
      }
    }

    if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
      if (IntrinsicID == Intrinsic::convert_to_fp16) {
        APFloat Val(Op->getValueAPF());

        bool lost = false;
        Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

        return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
      }
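
      // e.g. convert_to_fp16(1.0f) folds to i16 0x3C00, the IEEE half bit
      // pattern for 1.0; values too large for half round to nearest-even
      // and may become +/-infinity, matching the runtime conversion.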

      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return nullptr;

      if (IntrinsicID == Intrinsic::round) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::floor) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::ceil) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::trunc) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::rint) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::nearbyint) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), V);
      }
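
      // These rounding intrinsics differ only in direction and tie
      // handling; for an argument of -2.5:
      //   round          -> -3.0 (ties away from zero)
      //   floor          -> -3.0 (toward negative infinity)
      //   ceil           -> -2.0 (toward positive infinity)
      //   trunc          -> -2.0 (toward zero)
      //   rint/nearbyint -> -2.0 (ties to even)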

      // We only fold functions with finite arguments. Folding NaN and inf
      // is likely to be aborted with an exception anyway, and some host
      // libms have known errors raising exceptions.
      if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
        return nullptr;

      // APFloat versions of these functions do not exist yet, so we use the
      // host's native double versions. The float versions are never called
      // directly, but for all of these (float)(f((double)arg)) == f(arg)
      // holds, so going through double is harmless. Long double is not
      // supported yet.
      double V = getValueAsDouble(Op);

      switch (IntrinsicID) {
      default: break;
      case Intrinsic::fabs:
        return ConstantFoldFP(fabs, V, Ty);
      case Intrinsic::log2:
        return ConstantFoldFP(Log2, V, Ty);
      case Intrinsic::log:
        return ConstantFoldFP(log, V, Ty);
      case Intrinsic::log10:
        return ConstantFoldFP(log10, V, Ty);
      case Intrinsic::exp:
        return ConstantFoldFP(exp, V, Ty);
      case Intrinsic::exp2:
        return ConstantFoldFP(exp2, V, Ty);
      case Intrinsic::sin:
        return ConstantFoldFP(sin, V, Ty);
      case Intrinsic::cos:
        return ConstantFoldFP(cos, V, Ty);
      case Intrinsic::sqrt:
        return ConstantFoldFP(sqrt, V, Ty);
      }

      if (!TLI)
        return nullptr;

      char NameKeyChar = Name[0];
      if (Name[0] == '_' && Name.size() > 2 && Name[1] == '_')
        NameKeyChar = Name[2];

      switch (NameKeyChar) {
      case 'a':
        if ((Name == "acos" && TLI->has(LibFunc_acos)) ||
            (Name == "acosf" && TLI->has(LibFunc_acosf)) ||
            (Name == "__acos_finite" && TLI->has(LibFunc_acos_finite)) ||
            (Name == "__acosf_finite" && TLI->has(LibFunc_acosf_finite)))
          return ConstantFoldFP(acos, V, Ty);
        else if ((Name == "asin" && TLI->has(LibFunc_asin)) ||
                 (Name == "asinf" && TLI->has(LibFunc_asinf)) ||
                 (Name == "__asin_finite" && TLI->has(LibFunc_asin_finite)) ||
                 (Name == "__asinf_finite" && TLI->has(LibFunc_asinf_finite)))
          return ConstantFoldFP(asin, V, Ty);
        else if ((Name == "atan" && TLI->has(LibFunc_atan)) ||
                 (Name == "atanf" && TLI->has(LibFunc_atanf)))
          return ConstantFoldFP(atan, V, Ty);
        break;
      case 'c':
        if ((Name == "ceil" && TLI->has(LibFunc_ceil)) ||
            (Name == "ceilf" && TLI->has(LibFunc_ceilf)))
          return ConstantFoldFP(ceil, V, Ty);
        else if ((Name == "cos" && TLI->has(LibFunc_cos)) ||
                 (Name == "cosf" && TLI->has(LibFunc_cosf)))
          return ConstantFoldFP(cos, V, Ty);
        else if ((Name == "cosh" && TLI->has(LibFunc_cosh)) ||
                 (Name == "coshf" && TLI->has(LibFunc_coshf)) ||
                 (Name == "__cosh_finite" && TLI->has(LibFunc_cosh_finite)) ||
                 (Name == "__coshf_finite" && TLI->has(LibFunc_coshf_finite)))
          return ConstantFoldFP(cosh, V, Ty);
        break;
      case 'e':
        if ((Name == "exp" && TLI->has(LibFunc_exp)) ||
            (Name == "expf" && TLI->has(LibFunc_expf)) ||
            (Name == "__exp_finite" && TLI->has(LibFunc_exp_finite)) ||
            (Name == "__expf_finite" && TLI->has(LibFunc_expf_finite)))
          return ConstantFoldFP(exp, V, Ty);
        if ((Name == "exp2" && TLI->has(LibFunc_exp2)) ||
            (Name == "exp2f" && TLI->has(LibFunc_exp2f)) ||
            (Name == "__exp2_finite" && TLI->has(LibFunc_exp2_finite)) ||
            (Name == "__exp2f_finite" && TLI->has(LibFunc_exp2f_finite)))
          // Constant fold exp2(x) as pow(2, x) in case the host doesn't have
          // a C99 library.
          return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
        break;
      case 'f':
        if ((Name == "fabs" && TLI->has(LibFunc_fabs)) ||
            (Name == "fabsf" && TLI->has(LibFunc_fabsf)))
          return ConstantFoldFP(fabs, V, Ty);
        else if ((Name == "floor" && TLI->has(LibFunc_floor)) ||
                 (Name == "floorf" && TLI->has(LibFunc_floorf)))
          return ConstantFoldFP(floor, V, Ty);
        break;
      case 'l':
        if ((Name == "log" && V > 0 && TLI->has(LibFunc_log)) ||
            (Name == "logf" && V > 0 && TLI->has(LibFunc_logf)) ||
            (Name == "__log_finite" && V > 0 &&
             TLI->has(LibFunc_log_finite)) ||
            (Name == "__logf_finite" && V > 0 &&
             TLI->has(LibFunc_logf_finite)))
          return ConstantFoldFP(log, V, Ty);
        else if ((Name == "log10" && V > 0 && TLI->has(LibFunc_log10)) ||
                 (Name == "log10f" && V > 0 && TLI->has(LibFunc_log10f)) ||
                 (Name == "__log10_finite" && V > 0 &&
                  TLI->has(LibFunc_log10_finite)) ||
                 (Name == "__log10f_finite" && V > 0 &&
                  TLI->has(LibFunc_log10f_finite)))
          return ConstantFoldFP(log10, V, Ty);
        break;
      case 'r':
        if ((Name == "round" && TLI->has(LibFunc_round)) ||
            (Name == "roundf" && TLI->has(LibFunc_roundf)))
          return ConstantFoldFP(round, V, Ty);
        break;
      case 's':
        if ((Name == "sin" && TLI->has(LibFunc_sin)) ||
            (Name == "sinf" && TLI->has(LibFunc_sinf)))
          return ConstantFoldFP(sin, V, Ty);
        else if ((Name == "sinh" && TLI->has(LibFunc_sinh)) ||
                 (Name == "sinhf" && TLI->has(LibFunc_sinhf)) ||
                 (Name == "__sinh_finite" && TLI->has(LibFunc_sinh_finite)) ||
                 (Name == "__sinhf_finite" && TLI->has(LibFunc_sinhf_finite)))
          return ConstantFoldFP(sinh, V, Ty);
        else if ((Name == "sqrt" && V >= 0 && TLI->has(LibFunc_sqrt)) ||
                 (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc_sqrtf)))
          return ConstantFoldFP(sqrt, V, Ty);
        break;
      case 't':
        if ((Name == "tan" && TLI->has(LibFunc_tan)) ||
            (Name == "tanf" && TLI->has(LibFunc_tanf)))
          return ConstantFoldFP(tan, V, Ty);
        else if ((Name == "tanh" && TLI->has(LibFunc_tanh)) ||
                 (Name == "tanhf" && TLI->has(LibFunc_tanhf)))
          return ConstantFoldFP(tanh, V, Ty);
        break;
      default:
        break;
      }

      return nullptr;
    }

    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      switch (IntrinsicID) {
      case Intrinsic::bswap:
        return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
      case Intrinsic::ctpop:
        return ConstantInt::get(Ty, Op->getValue().countPopulation());
      case Intrinsic::bitreverse:
        return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
      case Intrinsic::convert_from_fp16: {
        APFloat Val(APFloat::IEEEhalf(), Op->getValue());

        bool lost = false;
        APFloat::opStatus status = Val.convert(
            Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);

        // Conversion is always precise.
        (void)status;
        assert(status == APFloat::opOK && !lost &&
               "Precision lost during fp16 constfolding");

        return ConstantFP::get(Ty->getContext(), Val);
      }
      default:
        return nullptr;
      }
    }
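
    // Worked examples for the integer cases above:
    //   bswap(i32 0x12345678)     -> 0x78563412 (bytes reversed)
    //   ctpop(i32 13)             -> 3 (0b1101 has three set bits)
    //   bitreverse(i8 0b00000110) -> 0b01100000 (bits reversed)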

    // Also handle ConstantVector (not just ConstantDataVector): a vector
    // constant that contains an undef element is represented as a
    // ConstantVector.
    if (isa<ConstantVector>(Operands[0]) ||
        isa<ConstantDataVector>(Operands[0])) {
      auto *Op = cast<Constant>(Operands[0]);
      // These SSE conversions read only lane 0 of the vector operand, so
      // e.g. cvttss2si(<4 x float> <7.9, ...>) folds to i32 7 whatever the
      // other lanes hold.
      switch (IntrinsicID) {
      default: break;
      case Intrinsic::x86_sse_cvtss2si:
      case Intrinsic::x86_sse_cvtss2si64:
      case Intrinsic::x86_sse2_cvtsd2si:
      case Intrinsic::x86_sse2_cvtsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                             /*roundTowardZero=*/false, Ty,
                                             /*IsSigned=*/true);
        break;
      case Intrinsic::x86_sse_cvttss2si:
      case Intrinsic::x86_sse_cvttss2si64:
      case Intrinsic::x86_sse2_cvttsd2si:
      case Intrinsic::x86_sse2_cvttsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                             /*roundTowardZero=*/true, Ty,
                                             /*IsSigned=*/true);
        break;
      }
    }

    return nullptr;
  }

  if (Operands.size() == 2) {
    if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return nullptr;
      double Op1V = getValueAsDouble(Op1);

      if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
        if (Op2->getType() != Op1->getType())
          return nullptr;

        double Op2V = getValueAsDouble(Op2);
        if (IntrinsicID == Intrinsic::pow) {
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        }

        if (IntrinsicID == Intrinsic::copysign) {
          APFloat V1 = Op1->getValueAPF();
          const APFloat &V2 = Op2->getValueAPF();
          V1.copySign(V2);
          return ConstantFP::get(Ty->getContext(), V1);
        }

        if (IntrinsicID == Intrinsic::minnum) {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
        }

        if (IntrinsicID == Intrinsic::maxnum) {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
        }
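
        // e.g. copysign(3.0, -0.5) -> -3.0 (magnitude of the first operand,
        // sign of the second). minnum/maxnum implement IEEE-754 minNum and
        // maxNum, so a single NaN operand is ignored: minnum(1.0, NaN) -> 1.0.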

        if (!TLI)
          return nullptr;
        if ((Name == "pow" && TLI->has(LibFunc_pow)) ||
            (Name == "powf" && TLI->has(LibFunc_powf)) ||
            (Name == "__pow_finite" && TLI->has(LibFunc_pow_finite)) ||
            (Name == "__powf_finite" && TLI->has(LibFunc_powf_finite)))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        if ((Name == "fmod" && TLI->has(LibFunc_fmod)) ||
            (Name == "fmodf" && TLI->has(LibFunc_fmodf)))
          return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
        if ((Name == "atan2" && TLI->has(LibFunc_atan2)) ||
            (Name == "atan2f" && TLI->has(LibFunc_atan2f)) ||
            (Name == "__atan2_finite" && TLI->has(LibFunc_atan2_finite)) ||
            (Name == "__atan2f_finite" && TLI->has(LibFunc_atan2f_finite)))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
      } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
        if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy()) {
          // Compute in float, then round the result back down to half so
          // that the folded constant has the call's half type.
          APFloat Res((float)std::pow((float)Op1V,
                                      (int)Op2C->getZExtValue()));
          bool Unused;
          Res.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven,
                      &Unused);
          return ConstantFP::get(Ty->getContext(), Res);
        }
        if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                         (int)Op2C->getZExtValue())));
        if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat(std::pow(Op1V,
                                                  (int)Op2C->getZExtValue())));
      }
      return nullptr;
    }
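
    // e.g. powi(2.0, 10) folds to 1024.0; the exponent is interpreted as a
    // signed int, so powi(2.0, -1) folds to 0.5.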

    if (auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
      if (auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::sadd_with_overflow:
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::ssub_with_overflow:
        case Intrinsic::usub_with_overflow:
        case Intrinsic::smul_with_overflow:
        case Intrinsic::umul_with_overflow: {
          APInt Res;
          bool Overflow;
          switch (IntrinsicID) {
          default: llvm_unreachable("Invalid case");
          case Intrinsic::sadd_with_overflow:
            Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::uadd_with_overflow:
            Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::ssub_with_overflow:
            Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::usub_with_overflow:
            Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::smul_with_overflow:
            Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::umul_with_overflow:
            Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
            break;
          }
          Constant *Ops[] = {
            ConstantInt::get(Ty->getContext(), Res),
            ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
          };
          return ConstantStruct::get(cast<StructType>(Ty), Ops);
        }
        case Intrinsic::cttz:
          if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
        case Intrinsic::ctlz:
          if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
        }
      }

      return nullptr;
    }
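
    // e.g. uadd_with_overflow(i8 255, i8 1) folds to {i8 0, i1 true};
    // cttz(i32 8, i1 0) folds to 3 and ctlz(i32 8, i1 0) to 28, while a
    // zero input with the is_zero_undef flag set folds to undef.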

    // Also handle ConstantVector (not just ConstantDataVector): a vector
    // constant that contains an undef element is represented as a
    // ConstantVector.
    if ((isa<ConstantVector>(Operands[0]) ||
         isa<ConstantDataVector>(Operands[0])) &&
        // Check for the default rounding mode, encoded as the immediate 4
        // (_MM_FROUND_CUR_DIRECTION).
        // FIXME: Support other rounding modes?
        isa<ConstantInt>(Operands[1]) &&
        cast<ConstantInt>(Operands[1])->getValue() == 4) {
      auto *Op = cast<Constant>(Operands[0]);
      switch (IntrinsicID) {
      default: break;
      case Intrinsic::x86_avx512_vcvtss2si32:
      case Intrinsic::x86_avx512_vcvtss2si64:
      case Intrinsic::x86_avx512_vcvtsd2si32:
      case Intrinsic::x86_avx512_vcvtsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                             /*roundTowardZero=*/false, Ty,
                                             /*IsSigned=*/true);
        break;
      case Intrinsic::x86_avx512_vcvtss2usi32:
      case Intrinsic::x86_avx512_vcvtss2usi64:
      case Intrinsic::x86_avx512_vcvtsd2usi32:
      case Intrinsic::x86_avx512_vcvtsd2usi64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                             /*roundTowardZero=*/false, Ty,
                                             /*IsSigned=*/false);
        break;
      case Intrinsic::x86_avx512_cvttss2si:
      case Intrinsic::x86_avx512_cvttss2si64:
      case Intrinsic::x86_avx512_cvttsd2si:
      case Intrinsic::x86_avx512_cvttsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                             /*roundTowardZero=*/true, Ty,
                                             /*IsSigned=*/true);
        break;
      case Intrinsic::x86_avx512_cvttss2usi:
      case Intrinsic::x86_avx512_cvttss2usi64:
      case Intrinsic::x86_avx512_cvttsd2usi:
      case Intrinsic::x86_avx512_cvttsd2usi64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                             /*roundTowardZero=*/true, Ty,
                                             /*IsSigned=*/false);
        break;
      }
    }

    return nullptr;
  }

  if (Operands.size() != 3)
    return nullptr;

  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = Op1->getValueAPF();
          APFloat::opStatus s = V.fusedMultiplyAdd(Op2->getValueAPF(),
                                                   Op3->getValueAPF(),
                                                   APFloat::rmNearestTiesToEven);
          if (s != APFloat::opInvalidOp)
            return ConstantFP::get(Ty->getContext(), V);

          return nullptr;
        }
        }
      }
    }
  }
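
  // fma/fmuladd fold with a single rounding step, matching the fused
  // semantics: e.g. fma(2.0, 3.0, 1.0) -> 7.0. Only an invalid operation,
  // such as fma(inf, 0.0, x), blocks the fold.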

  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    auto *C0 = dyn_cast<ConstantInt>(Operands[0]);
    auto *C1 = dyn_cast<ConstantInt>(Operands[1]);
    auto *C2 = dyn_cast<ConstantInt>(Operands[2]);
    if (!(C0 && C1 && C2))
      return nullptr;

    // The shift amount is interpreted modulo the bitwidth. If the effective
    // shift amount is 0, avoid UB from the oversized inverse shift below.
    unsigned BitWidth = C0->getBitWidth();
    unsigned ShAmt = C2->getValue().urem(BitWidth);
    bool IsRight = IntrinsicID == Intrinsic::fshr;
    if (!ShAmt)
      return IsRight ? C1 : C0;

    // (X << ShlAmt) | (Y >> LshrAmt)
    const APInt &X = C0->getValue();
    const APInt &Y = C1->getValue();
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    return ConstantInt::get(Ty->getContext(), X.shl(ShlAmt) | Y.lshr(LshrAmt));
  }
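
  // Worked example: fshl concatenates the operands as X:Y and shifts left,
  // keeping the high half. For fshl(i8 0x81, i8 0xF0, 3): ShAmt = 3, so
  // ShlAmt = 3 and LshrAmt = 5, giving (0x81 << 3) | (0xF0 >> 5)
  // = 0x08 | 0x07 = 0x0F. fshr mirrors this, keeping the low half.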

  return nullptr;
}

Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
                                 VectorType *VTy, ArrayRef<Constant *> Operands,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI,
                                 ImmutableCallSite CS) {
  SmallVector<Constant *, 4> Result(VTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = VTy->getElementType();

  if (IntrinsicID == Intrinsic::masked_load) {
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];

    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, VTy, DL);

    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        if (PassthruElt)
          NewElements.push_back(PassthruElt);
        else if (VecElt)
          NewElements.push_back(VecElt);
        else
          return nullptr;
        // This lane is settled; without this continue, an undef mask element
        // would fall into the checks below and needlessly defeat the fold.
        continue;
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != VTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }
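
  // Masked-load example: with Mask = <i1 1, i1 0>, lane 0 comes from the
  // loaded constant and lane 1 from the passthru operand; any lane that
  // cannot be resolved to a constant defeats the whole fold.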

  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // These intrinsics use a scalar type for their second argument.
      if (J == 1 &&
          (IntrinsicID == Intrinsic::cttz || IntrinsicID == Intrinsic::ctlz ||
           IntrinsicID == Intrinsic::powi)) {
        Lane[J] = Operands[J];
        continue;
      }

      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, CS);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}
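
// For example, ctlz on <2 x i32> <8, 1> is folded one lane at a time: the
// scalar folder runs on (8, is_zero_undef) and then (1, is_zero_undef),
// producing <2 x i32> <28, 31>.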

} // end anonymous namespace

Constant *
llvm::ConstantFoldCall(ImmutableCallSite CS, Function *F,
                       ArrayRef<Constant *> Operands,
                       const TargetLibraryInfo *TLI) {
  if (CS.isNoBuiltin() || CS.isStrictFP())
    return nullptr;
  if (!F->hasName())
    return nullptr;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();

  if (auto *VTy = dyn_cast<VectorType>(Ty))
    return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
                                  F->getParent()->getDataLayout(), TLI, CS);

  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
                                CS);
}

bool llvm::isMathLibCallNoop(CallSite CS, const TargetLibraryInfo *TLI) {
  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
  // (and to some extent ConstantFoldScalarCall).
  if (CS.isNoBuiltin() || CS.isStrictFP())
    return false;
  Function *F = CS.getCalledFunction();
  if (!F)
    return false;

  LibFunc Func;
  if (!TLI || !TLI->getLibFunc(*F, Func))
    return false;

  if (CS.getNumArgOperands() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(CS.getArgOperand(0))) {
      const APFloat &Op = OpC->getValueAPF();
      switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-745.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(709.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-103.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(88.0f)) != APFloat::cmpGreaterThan;
        break;
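
        // ln(DBL_MAX) is roughly 709.78, and exp(x) underflows to zero a
        // little below -745, so any argument within [-745, 709] cannot set
        // errno; the float bounds follow the same reasoning.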

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-1074.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(1023.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-149.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(127.0f)) != APFloat::cmpGreaterThan;
        break;

      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          double OpV = getValueAsDouble(OpC);
          return ConstantFoldFP(tan, OpV, Ty) != nullptr;
        }
        break;
      }

      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
        return Op.compare(APFloat(Op.getSemantics(), "-1")) !=
                   APFloat::cmpLessThan &&
               Op.compare(APFloat(Op.getSemantics(), "1")) !=
                   APFloat::cmpGreaterThan;

      case LibFunc_sinh:
      case LibFunc_cosh:
      case LibFunc_sinhf:
      case LibFunc_coshf:
      case LibFunc_sinhl:
      case LibFunc_coshl:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-710.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(710.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-89.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(89.0f)) != APFloat::cmpGreaterThan;
        break;

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
      // maybe others?
      default:
        break;
      }
    }
  }

  if (CS.getNumArgOperands() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(CS.getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(CS.getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          if (Ty == Op1C->getType()) {
            double Op0V = getValueAsDouble(Op0C);
            double Op1V = getValueAsDouble(Op1C);
            return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
          }
        }
        break;
      }

      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());
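
      // fmod can only set errno on a domain error, which requires an
      // infinite numerator or a zero divisor; NaN operands propagate
      // quietly, hence the conditions above.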

      default:
        break;
      }
    }
  }

  return false;
}