//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
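  // For example (little endian): folding
  //   bitcast (<2 x i16> <i16 1, i16 2> to i32)
  // visits element 1 first (Result = 0x0002), then element 0, yielding
  // Result = 0x00020001.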
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
  }

  return nullptr;
}

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = VTy->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating point, convert it to vector of int
      // to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        Type *SrcIVTy =
            VectorType::get(IntegerType::get(C->getContext(), FPWidth),
                            NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>
  //
  // First thing is first. We only want to think about integer here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    Type *DestIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer, if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    Type *SrcIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(C->getType()->getVectorElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                            ConstantInt::get(Src->getType(),
                                                             ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
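/// For example, given
///   i8* getelementptr ([10 x i8], [10 x i8]* @g, i64 0, i64 4)
/// this returns true with GV = @g and Offset = 4.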
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill down
    // and find a bitcastable constant.
    if (!SrcTy->isAggregateType())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()) == 0);
      C = ElemC;
    } else {
      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}

namespace {

/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
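/// For example, reading two bytes at ByteOffset 1 out of an i32 constant
/// holding 0x04030201 on a little-endian target stores the bytes 0x02 and
/// 0x03 into CurPtr.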
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()){
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()){
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail padding,
      // read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element, if so we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = C->getType()->getSequentialElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (auto *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = C->getType()->getVectorNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                          const DataLayout &DL) {
  auto *PTy = cast<PointerType>(C->getType());
  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64 load
    // and then bitcast the result. This can be useful for union cases. Note
    // that address spaces don't matter here since we're not going to result in
    // an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16Ty(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32Ty(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64Ty(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNTy(C->getContext(),
                                     DL.getTypeAllocSizeInBits(LoadTy));
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL))
      return FoldBitCast(Res, LoadTy, DL);
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  GlobalValue *GVal;
  APInt OffsetAI;
  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
    return nullptr;

  auto *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  int64_t Offset = OffsetAI.getSExtValue();
  int64_t InitializerSize = DL.getTypeAllocSize(GV->getInitializer()->getType());

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset + BytesLoaded <= 0)
    return UndefValue::get(IntType);

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset >= InitializerSize)
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
                                             const DataLayout &DL) {
  auto *SrcPtr = CE->getOperand(0);
  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
  if (!SrcPtrTy)
    return nullptr;
  Type *SrcTy = SrcPtrTy->getPointerElementType();

  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
  if (!C)
    return nullptr;

  return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  if (auto *GA = dyn_cast<GlobalAlias>(C))
    if (GA->getAliasee() && !GA->isInterposable())
      return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);

  // If the loaded value isn't a constant expr, we can't handle it.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
                ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
      return LoadedC;

  // Instead of loading constant c string, use corresponding integer value
  // directly if string length is small enough.
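  // For example, an i32 load through a pointer to the constant string
  // c"abc\00" folds to 0x00636261 on a little-endian target: 'a' ends up in
  // the low byte and the NUL terminator in the high byte.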
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    size_t StrLen = Str.size();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace load with immediate integer if the result is an integer or fp
    // value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (unsigned char C : reverse(Str.bytes())) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned char C : Str.bytes()) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append NULL at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(Ty);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(Ty);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
}

namespace {

Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
  if (LI->isVolatile()) return nullptr;

  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);

  return nullptr;
}

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together. If target data info is available, it is provided as DL,
/// otherwise DL is null.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.
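  // For example, in (and (shl x, 32), 0xffffffff00000000) every bit the mask
  // could clear is already known zero in the shifted value, so the 'and'
  // folds to its shl operand below.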

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0.Zero |= Known1.Zero;
    Known0.One &= Known1.One;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
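  // For example, with i32 elements, &A[3] - &A[1] resolves to offsets 12 and
  // 4 from the same global and folds to the constant 8.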
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
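/// For example, with 64-bit pointers the i16 index in
///   getelementptr i32, i32* @g, i16 3
/// is rewritten below with an equivalent pointer-sized (i64) index.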
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntPtrTy = DL.getIntPtrType(ResultTy);
  Type *IntPtrScalarTy = IntPtrTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntPtrScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntPtrTy
                          : IntPtrTy->getScalarType();
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  if (Constant *Folded = ConstantFoldConstant(C, DL, TLI))
    C = Folded;

  return C;
}

/// Strip the pointer casts, but preserve the address space information.
Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = Ptr->stripPointerCasts();
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  ElemTy = NewPtrTy->getPointerElementType();

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// If we can symbolically evaluate the GEP constant expression, do so.
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized())
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntPtrTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResTy);
          if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
            Res = FoldedRes;
          return Res;
        }
      }
      return nullptr;
    }

  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
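  // For example, a raw byte offset of 12 into [10 x i32] is rebuilt below as
  // the index pair (0, 3), i.e.
  //   getelementptr [10 x i32], [10 x i32]* @g, i64 0, i64 3.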
  Type *Ty = PTy;
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (!Ty->isStructTy()) {
      if (Ty->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        Ty = SrcElemTy;

        // Only handle pointers to sized types, not pointers to functions.
        if (!Ty->isSized())
          return nullptr;
      } else if (auto *ATy = dyn_cast<SequentialType>(Ty)) {
        Ty = ATy->getElementType();
      } else {
        // We've reached some non-indexable type.
        break;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
      if (ElemSize == 0) {
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
      } else {
        // The element size is non-zero: divide the offset by the element
        // size (rounding down), to compute the index at this level.
        bool Overflow;
        APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
        if (Overflow)
          break;
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
      }
    } else {
      auto *STy = cast<StructType>(Ty);
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    }
  } while (Ty != ResElemTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResElemTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned, if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  // Handle easy binops first.
  if (Instruction::isBinaryOp(Opcode))
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return nullptr;

  SmallVector<Constant *, 8> Ops;
  for (const Use &NewU : C->operands()) {
    auto *NewC = cast<Constant>(&NewU);
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(NewC) || isa<ConstantExpr>(NewC)) {
      auto It = FoldedOps.find(NewC);
      if (It == FoldedOps.end()) {
        if (auto *FoldedC =
                ConstantFoldConstantImpl(NewC, DL, TLI, FoldedOps)) {
          FoldedOps.insert({NewC, FoldedC});
          NewC = FoldedC;
        } else {
          FoldedOps.insert({NewC, NewC});
        }
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                             DL, TLI);

    return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      if (auto *FoldedC = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps))
        C = FoldedC;
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants, if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    if (auto *FoldedOp = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps))
      Op = FoldedOp;

    Ops.push_back(Op);
  }

  if (const auto *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const auto *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, DL);

  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
        cast<Constant>(IVI->getAggregateOperand()),
        cast<Constant>(IVI->getInsertedValueOperand()),
        IVI->getIndices());
  }

  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
        cast<Constant>(EVI->getAggregateOperand()),
        EVI->getIndices());
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}

Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptrty in size,
        // otherwise there is a truncation or extension that we aren't
        // modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  return ConstantExpr::get(Opcode, LHS, RHS);
}

Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
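    // For example, with 32-bit pointers, ptrtoint (inttoptr (i64 X to i8*))
    // to i64 folds to (and X, 0xffffffff): only the low pointer-width bits
    // survive the round trip.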
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
        if (PtrWidth < InWidth) {
          Constant *Mask =
              ConstantInt::get(CE->getContext(),
                               APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
}

Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (!C)
      return nullptr;
  }
  return C;
}

Constant *
llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                        ArrayRef<Constant *> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (Constant *Index : Indices) {
    C = C->getAggregateElement(Index);
    if (!C)
      return nullptr;
  }
  return C;
}

//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  switch (F->getIntrinsicID()) {
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::copysign:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::round:
  case Intrinsic::masked_load:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::bitreverse:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
  case Intrinsic::is_constant:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic: break;
  }

  if (!F->hasName())
    return false;

  StringRef Name = F->getName();

  // In these cases, the check of the length is required. We don't want to
  // return true for a name like "cos\0blah" which strcmp would return equal to
  // "cos", but has length 8.
  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "asin" || Name == "atan" ||
           Name == "atan2" || Name == "acosf" || Name == "asinf" ||
           Name == "atanf" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "cos" || Name == "cosh" ||
           Name == "ceilf" || Name == "cosf" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "exp2" || Name == "expf" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "floor" || Name == "fmod" ||
           Name == "fabsf" || Name == "floorf" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "log10" || Name == "logf" ||
           Name == "log10f";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
           Name == "sinf" || Name == "sinhf" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanh" || Name == "tanf" || Name == "tanhf";
  case '_':

    // Check for various function names that get used for the math functions
    // when the header files are preprocessed with the macro
    // __FINITE_MATH_ONLY__ enabled.
    // The '12' here is the length of the shortest name that can match.
    // We need to check the size before looking at Name[1] and Name[2]
    // so we may as well check a limit that will eliminate mismatches.
    if (Name.size() < 12 || Name[1] != '_')
      return false;
    switch (Name[2]) {
    default:
      return false;
    case 'a':
      return Name == "__acos_finite" || Name == "__acosf_finite" ||
             Name == "__asin_finite" || Name == "__asinf_finite" ||
             Name == "__atan2_finite" || Name == "__atan2f_finite";
    case 'c':
      return Name == "__cosh_finite" || Name == "__coshf_finite";
    case 'e':
      return Name == "__exp_finite" || Name == "__expf_finite" ||
             Name == "__exp2_finite" || Name == "__exp2f_finite";
    case 'l':
      return Name == "__log_finite" || Name == "__logf_finite" ||
             Name == "__log10_finite" || Name == "__log10f_finite";
    case 'p':
      return Name == "__pow_finite" || Name == "__powf_finite";
    case 's':
      return Name == "__sinh_finite" || Name == "__sinhf_finite";
    }
  }
}

namespace {

Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}
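
// For example, the intrinsic folding code below invokes this helper as
// ConstantFoldFP(sin, V, Ty); a null return (the host libm raised an
// exception) leaves the call unfolded.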

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
                               double W, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

/// Attempt to fold an SSE floating point to integer conversion of a constant
/// floating point. If roundTowardZero is false, the default IEEE rounding is
/// used (toward nearest, ties to even). This matches the behavior of the
/// non-truncating SSE instructions in the default rounding mode. The desired
/// integer type Ty is used to select how many bits are available for the
/// result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
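/// For example, a truncating conversion (roundTowardZero = true) of the
/// constant 3.7 folds to the i32 constant 3; the opInexact status is
/// accepted because truncation was requested.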
1583 Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
1584 Type *Ty, bool IsSigned) {
1585 // All of these conversion intrinsics form an integer of at most 64bits.
1586 unsigned ResultWidth = Ty->getIntegerBitWidth();
1587 assert(ResultWidth <= 64 &&
1588 "Can only constant fold conversions to 64 and 32 bit ints");
1590 uint64_t UIntVal;
1591 bool isExact = false;
1592 APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
1593 : APFloat::rmNearestTiesToEven;
1594 APFloat::opStatus status =
1595 Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
1596 IsSigned, mode, &isExact);
1597 if (status != APFloat::opOK &&
1598 (!roundTowardZero || status != APFloat::opInexact))
1599 return nullptr;
1600 return ConstantInt::get(Ty, UIntVal, IsSigned);
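// Return the value of a half/float/double ConstantFP as a host double. All of
// these widening conversions are exact, so no rounding occurs here.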
1603 double getValueAsDouble(ConstantFP *Op) {
1604 Type *Ty = Op->getType();
1606 if (Ty->isFloatTy())
1607 return Op->getValueAPF().convertToFloat();
1609 if (Ty->isDoubleTy())
1610 return Op->getValueAPF().convertToDouble();
1612 bool unused;
1613 APFloat APF = Op->getValueAPF();
1614 APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
1615 return APF.convertToDouble();
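// A constant is "manifest" if its bits are fully known at compile time, i.e.
// it is built entirely from ConstantData leaves. The address of a
// GlobalValue or a BlockAddress is constant but not manifest.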
1618 static bool isManifestConstant(const Constant *c) {
1619 if (isa<ConstantData>(c)) {
1620 return true;
1621 } else if (isa<ConstantAggregate>(c) || isa<ConstantExpr>(c)) {
1622 for (const Value *subc : c->operand_values()) {
1623 if (!isManifestConstant(cast<Constant>(subc)))
1624 return false;
1626 return true;
1628 return false;
1631 static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
1632 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1633 C = &CI->getValue();
1634 return true;
1636 if (isa<UndefValue>(Op)) {
1637 C = nullptr;
1638 return true;
1640 return false;
1643 Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
1644 ArrayRef<Constant *> Operands,
1645 const TargetLibraryInfo *TLI,
1646 const CallBase *Call) {
1647 if (Operands.size() == 1) {
1648 if (IntrinsicID == Intrinsic::is_constant) {
1649 // We know we have a "Constant" argument. But we want to only
1650 // return true for manifest constants, not those that depend on
1651 // constants with unknowable values, e.g. GlobalValue or BlockAddress.
1652 if (isManifestConstant(Operands[0]))
1653 return ConstantInt::getTrue(Ty->getContext());
1654 return nullptr;
1656 if (isa<UndefValue>(Operands[0])) {
1657 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
1658 // ctpop() is between 0 and bitwidth, pick 0 for undef.
1659 if (IntrinsicID == Intrinsic::cos ||
1660 IntrinsicID == Intrinsic::ctpop)
1661 return Constant::getNullValue(Ty);
1662 if (IntrinsicID == Intrinsic::bswap ||
1663 IntrinsicID == Intrinsic::bitreverse ||
1664 IntrinsicID == Intrinsic::launder_invariant_group ||
1665 IntrinsicID == Intrinsic::strip_invariant_group)
1666 return Operands[0];
1669 if (isa<ConstantPointerNull>(Operands[0])) {
1670 // launder(null) == null == strip(null) iff in addrspace 0
1671 if (IntrinsicID == Intrinsic::launder_invariant_group ||
1672 IntrinsicID == Intrinsic::strip_invariant_group) {
1673 // If the instruction is not yet inserted into a basic block (e.g. when
1674 // cloning a function during inlining), Call's caller may not be available,
1675 // so check Call's parent block before querying Call->getCaller().
1676 const Function *Caller =
1677 Call->getParent() ? Call->getCaller() : nullptr;
1678 if (Caller &&
1679 !NullPointerIsDefined(
1680 Caller, Operands[0]->getType()->getPointerAddressSpace())) {
1681 return Operands[0];
1683 return nullptr;
1687 if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
1688 if (IntrinsicID == Intrinsic::convert_to_fp16) {
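// Round the value to half and return its i16 storage bits; e.g. float 1.0
// becomes i16 0x3C00.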
1689 APFloat Val(Op->getValueAPF());
1691 bool lost = false;
1692 Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
1694 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
1697 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1698 return nullptr;
1700 if (IntrinsicID == Intrinsic::round) {
1701 APFloat V = Op->getValueAPF();
1702 V.roundToIntegral(APFloat::rmNearestTiesToAway);
1703 return ConstantFP::get(Ty->getContext(), V);
1706 if (IntrinsicID == Intrinsic::floor) {
1707 APFloat V = Op->getValueAPF();
1708 V.roundToIntegral(APFloat::rmTowardNegative);
1709 return ConstantFP::get(Ty->getContext(), V);
1712 if (IntrinsicID == Intrinsic::ceil) {
1713 APFloat V = Op->getValueAPF();
1714 V.roundToIntegral(APFloat::rmTowardPositive);
1715 return ConstantFP::get(Ty->getContext(), V);
1718 if (IntrinsicID == Intrinsic::trunc) {
1719 APFloat V = Op->getValueAPF();
1720 V.roundToIntegral(APFloat::rmTowardZero);
1721 return ConstantFP::get(Ty->getContext(), V);
1724 if (IntrinsicID == Intrinsic::rint) {
1725 APFloat V = Op->getValueAPF();
1726 V.roundToIntegral(APFloat::rmNearestTiesToEven);
1727 return ConstantFP::get(Ty->getContext(), V);
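// nearbyint differs from rint only in whether FE_INEXACT is raised, which
// constant folding does not model, so both fold the same way.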
1730 if (IntrinsicID == Intrinsic::nearbyint) {
1731 APFloat V = Op->getValueAPF();
1732 V.roundToIntegral(APFloat::rmNearestTiesToEven);
1733 return ConstantFP::get(Ty->getContext(), V);
1736 // We only fold calls with finite arguments. Folding NaN and inf is
1737 // likely to be aborted with an exception anyway, and some host libms
1738 // have known errors raising exceptions.
1739 if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
1740 return nullptr;
1742 // Currently APFloat versions of these functions do not exist, so we use
1743 // the host's native double versions. Float versions are not called
1744 // directly, but (float)(f((double)arg)) == f(arg) holds for all of the
1745 // functions below. Long double is not supported yet.
1746 double V = getValueAsDouble(Op);
1748 switch (IntrinsicID) {
1749 default: break;
1750 case Intrinsic::fabs:
1751 return ConstantFoldFP(fabs, V, Ty);
1752 case Intrinsic::log2:
1753 return ConstantFoldFP(Log2, V, Ty);
1754 case Intrinsic::log:
1755 return ConstantFoldFP(log, V, Ty);
1756 case Intrinsic::log10:
1757 return ConstantFoldFP(log10, V, Ty);
1758 case Intrinsic::exp:
1759 return ConstantFoldFP(exp, V, Ty);
1760 case Intrinsic::exp2:
1761 return ConstantFoldFP(exp2, V, Ty);
1762 case Intrinsic::sin:
1763 return ConstantFoldFP(sin, V, Ty);
1764 case Intrinsic::cos:
1765 return ConstantFoldFP(cos, V, Ty);
1766 case Intrinsic::sqrt:
1767 return ConstantFoldFP(sqrt, V, Ty);
1770 if (!TLI)
1771 return nullptr;
1773 char NameKeyChar = Name[0];
1774 if (Name[0] == '_' && Name.size() > 2 && Name[1] == '_')
1775 NameKeyChar = Name[2];
1777 switch (NameKeyChar) {
1778 case 'a':
1779 if ((Name == "acos" && TLI->has(LibFunc_acos)) ||
1780 (Name == "acosf" && TLI->has(LibFunc_acosf)) ||
1781 (Name == "__acos_finite" && TLI->has(LibFunc_acos_finite)) ||
1782 (Name == "__acosf_finite" && TLI->has(LibFunc_acosf_finite)))
1783 return ConstantFoldFP(acos, V, Ty);
1784 else if ((Name == "asin" && TLI->has(LibFunc_asin)) ||
1785 (Name == "asinf" && TLI->has(LibFunc_asinf)) ||
1786 (Name == "__asin_finite" && TLI->has(LibFunc_asin_finite)) ||
1787 (Name == "__asinf_finite" && TLI->has(LibFunc_asinf_finite)))
1788 return ConstantFoldFP(asin, V, Ty);
1789 else if ((Name == "atan" && TLI->has(LibFunc_atan)) ||
1790 (Name == "atanf" && TLI->has(LibFunc_atanf)))
1791 return ConstantFoldFP(atan, V, Ty);
1792 break;
1793 case 'c':
1794 if ((Name == "ceil" && TLI->has(LibFunc_ceil)) ||
1795 (Name == "ceilf" && TLI->has(LibFunc_ceilf)))
1796 return ConstantFoldFP(ceil, V, Ty);
1797 else if ((Name == "cos" && TLI->has(LibFunc_cos)) ||
1798 (Name == "cosf" && TLI->has(LibFunc_cosf)))
1799 return ConstantFoldFP(cos, V, Ty);
1800 else if ((Name == "cosh" && TLI->has(LibFunc_cosh)) ||
1801 (Name == "coshf" && TLI->has(LibFunc_coshf)) ||
1802 (Name == "__cosh_finite" && TLI->has(LibFunc_cosh_finite)) ||
1803 (Name == "__coshf_finite" && TLI->has(LibFunc_coshf_finite)))
1804 return ConstantFoldFP(cosh, V, Ty);
1805 break;
1806 case 'e':
1807 if ((Name == "exp" && TLI->has(LibFunc_exp)) ||
1808 (Name == "expf" && TLI->has(LibFunc_expf)) ||
1809 (Name == "__exp_finite" && TLI->has(LibFunc_exp_finite)) ||
1810 (Name == "__expf_finite" && TLI->has(LibFunc_expf_finite)))
1811 return ConstantFoldFP(exp, V, Ty);
1812 if ((Name == "exp2" && TLI->has(LibFunc_exp2)) ||
1813 (Name == "exp2f" && TLI->has(LibFunc_exp2f)) ||
1814 (Name == "__exp2_finite" && TLI->has(LibFunc_exp2_finite)) ||
1815 (Name == "__exp2f_finite" && TLI->has(LibFunc_exp2f_finite)))
1816 // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
1817 // C99 library.
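// e.g. exp2(10.0) folds to 1024.0 via pow(2.0, 10.0), which is exact here.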
1818 return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
1819 break;
1820 case 'f':
1821 if ((Name == "fabs" && TLI->has(LibFunc_fabs)) ||
1822 (Name == "fabsf" && TLI->has(LibFunc_fabsf)))
1823 return ConstantFoldFP(fabs, V, Ty);
1824 else if ((Name == "floor" && TLI->has(LibFunc_floor)) ||
1825 (Name == "floorf" && TLI->has(LibFunc_floorf)))
1826 return ConstantFoldFP(floor, V, Ty);
1827 break;
1828 case 'l':
1829 if ((Name == "log" && V > 0 && TLI->has(LibFunc_log)) ||
1830 (Name == "logf" && V > 0 && TLI->has(LibFunc_logf)) ||
1831 (Name == "__log_finite" && V > 0 &&
1832 TLI->has(LibFunc_log_finite)) ||
1833 (Name == "__logf_finite" && V > 0 &&
1834 TLI->has(LibFunc_logf_finite)))
1835 return ConstantFoldFP(log, V, Ty);
1836 else if ((Name == "log10" && V > 0 && TLI->has(LibFunc_log10)) ||
1837 (Name == "log10f" && V > 0 && TLI->has(LibFunc_log10f)) ||
1838 (Name == "__log10_finite" && V > 0 &&
1839 TLI->has(LibFunc_log10_finite)) ||
1840 (Name == "__log10f_finite" && V > 0 &&
1841 TLI->has(LibFunc_log10f_finite)))
1842 return ConstantFoldFP(log10, V, Ty);
1843 break;
1844 case 'r':
1845 if ((Name == "round" && TLI->has(LibFunc_round)) ||
1846 (Name == "roundf" && TLI->has(LibFunc_roundf)))
1847 return ConstantFoldFP(round, V, Ty);
1848 break;
1849 case 's':
1850 if ((Name == "sin" && TLI->has(LibFunc_sin)) ||
1851 (Name == "sinf" && TLI->has(LibFunc_sinf)))
1852 return ConstantFoldFP(sin, V, Ty);
1853 else if ((Name == "sinh" && TLI->has(LibFunc_sinh)) ||
1854 (Name == "sinhf" && TLI->has(LibFunc_sinhf)) ||
1855 (Name == "__sinh_finite" && TLI->has(LibFunc_sinh_finite)) ||
1856 (Name == "__sinhf_finite" && TLI->has(LibFunc_sinhf_finite)))
1857 return ConstantFoldFP(sinh, V, Ty);
1858 else if ((Name == "sqrt" && V >= 0 && TLI->has(LibFunc_sqrt)) ||
1859 (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc_sqrtf)))
1860 return ConstantFoldFP(sqrt, V, Ty);
1861 break;
1862 case 't':
1863 if ((Name == "tan" && TLI->has(LibFunc_tan)) ||
1864 (Name == "tanf" && TLI->has(LibFunc_tanf)))
1865 return ConstantFoldFP(tan, V, Ty);
1866 else if ((Name == "tanh" && TLI->has(LibFunc_tanh)) ||
1867 (Name == "tanhf" && TLI->has(LibFunc_tanhf)))
1868 return ConstantFoldFP(tanh, V, Ty);
1869 break;
1870 default:
1871 break;
1873 return nullptr;
1876 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
1877 switch (IntrinsicID) {
1878 case Intrinsic::bswap:
1879 return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
1880 case Intrinsic::ctpop:
1881 return ConstantInt::get(Ty, Op->getValue().countPopulation());
1882 case Intrinsic::bitreverse:
1883 return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
1884 case Intrinsic::convert_from_fp16: {
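// Reinterpret the i16 operand as half and widen it; e.g. i16 0x3C00 is half
// 1.0, and widening half to float or double never loses precision.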
1885 APFloat Val(APFloat::IEEEhalf(), Op->getValue());
1887 bool lost = false;
1888 APFloat::opStatus status = Val.convert(
1889 Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
1891 // Conversion is always precise.
1892 (void)status;
1893 assert(status == APFloat::opOK && !lost &&
1894 "Precision lost during fp16 constfolding");
1896 return ConstantFP::get(Ty->getContext(), Val);
1898 default:
1899 return nullptr;
1903 // Handle ConstantVector too; it is used in place of ConstantDataVector when an element is undef.
1904 if (isa<ConstantVector>(Operands[0]) ||
1905 isa<ConstantDataVector>(Operands[0])) {
1906 auto *Op = cast<Constant>(Operands[0]);
1907 switch (IntrinsicID) {
1908 default: break;
1909 case Intrinsic::x86_sse_cvtss2si:
1910 case Intrinsic::x86_sse_cvtss2si64:
1911 case Intrinsic::x86_sse2_cvtsd2si:
1912 case Intrinsic::x86_sse2_cvtsd2si64:
1913 if (ConstantFP *FPOp =
1914 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
1915 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
1916 /*roundTowardZero=*/false, Ty,
1917 /*IsSigned*/true);
1918 break;
1919 case Intrinsic::x86_sse_cvttss2si:
1920 case Intrinsic::x86_sse_cvttss2si64:
1921 case Intrinsic::x86_sse2_cvttsd2si:
1922 case Intrinsic::x86_sse2_cvttsd2si64:
1923 if (ConstantFP *FPOp =
1924 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
1925 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
1926 /*roundTowardZero=*/true, Ty,
1927 /*IsSigned*/true);
1928 break;
1932 return nullptr;
1935 if (Operands.size() == 2) {
1936 if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
1937 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1938 return nullptr;
1939 double Op1V = getValueAsDouble(Op1);
1941 if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
1942 if (Op2->getType() != Op1->getType())
1943 return nullptr;
1945 double Op2V = getValueAsDouble(Op2);
1946 if (IntrinsicID == Intrinsic::pow) {
1947 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
1949 if (IntrinsicID == Intrinsic::copysign) {
1950 APFloat V1 = Op1->getValueAPF();
1951 const APFloat &V2 = Op2->getValueAPF();
1952 V1.copySign(V2);
1953 return ConstantFP::get(Ty->getContext(), V1);
1956 if (IntrinsicID == Intrinsic::minnum) {
1957 const APFloat &C1 = Op1->getValueAPF();
1958 const APFloat &C2 = Op2->getValueAPF();
1959 return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
1962 if (IntrinsicID == Intrinsic::maxnum) {
1963 const APFloat &C1 = Op1->getValueAPF();
1964 const APFloat &C2 = Op2->getValueAPF();
1965 return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
1968 if (IntrinsicID == Intrinsic::minimum) {
1969 const APFloat &C1 = Op1->getValueAPF();
1970 const APFloat &C2 = Op2->getValueAPF();
1971 return ConstantFP::get(Ty->getContext(), minimum(C1, C2));
1974 if (IntrinsicID == Intrinsic::maximum) {
1975 const APFloat &C1 = Op1->getValueAPF();
1976 const APFloat &C2 = Op2->getValueAPF();
1977 return ConstantFP::get(Ty->getContext(), maximum(C1, C2));
1980 if (!TLI)
1981 return nullptr;
1982 if ((Name == "pow" && TLI->has(LibFunc_pow)) ||
1983 (Name == "powf" && TLI->has(LibFunc_powf)) ||
1984 (Name == "__pow_finite" && TLI->has(LibFunc_pow_finite)) ||
1985 (Name == "__powf_finite" && TLI->has(LibFunc_powf_finite)))
1986 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
1987 if ((Name == "fmod" && TLI->has(LibFunc_fmod)) ||
1988 (Name == "fmodf" && TLI->has(LibFunc_fmodf)))
1989 return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
1990 if ((Name == "atan2" && TLI->has(LibFunc_atan2)) ||
1991 (Name == "atan2f" && TLI->has(LibFunc_atan2f)) ||
1992 (Name == "__atan2_finite" && TLI->has(LibFunc_atan2_finite)) ||
1993 (Name == "__atan2f_finite" && TLI->has(LibFunc_atan2f_finite)))
1994 return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
1995 } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
1996 if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
1997 // Compute in float precision, then round to half so that the type of
1998 // the returned constant matches Ty.
1999 return GetConstantFoldFPValue((float)std::pow((float)Op1V, (int)Op2C->getZExtValue()), Ty);
2000 if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
2001 return ConstantFP::get(Ty->getContext(),
2002 APFloat((float)std::pow((float)Op1V,
2003 (int)Op2C->getZExtValue())));
2004 if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
2005 return ConstantFP::get(Ty->getContext(),
2006 APFloat((double)std::pow((double)Op1V,
2007 (int)Op2C->getZExtValue())));
2009 return nullptr;
2012 if (Operands[0]->getType()->isIntegerTy() &&
2013 Operands[1]->getType()->isIntegerTy()) {
2014 const APInt *C0, *C1;
2015 if (!getConstIntOrUndef(Operands[0], C0) ||
2016 !getConstIntOrUndef(Operands[1], C1))
2017 return nullptr;
2019 switch (IntrinsicID) {
2020 default: break;
2021 case Intrinsic::smul_with_overflow:
2022 case Intrinsic::umul_with_overflow:
2023 // Even if both operands are undef, we cannot fold muls to undef
2024 // in the general case. For example, on i2 there are no inputs
2025 // that would produce { i2 -1, i1 true } as the result.
2026 if (!C0 || !C1)
2027 return Constant::getNullValue(Ty);
2028 LLVM_FALLTHROUGH;
2029 case Intrinsic::sadd_with_overflow:
2030 case Intrinsic::uadd_with_overflow:
2031 case Intrinsic::ssub_with_overflow:
2032 case Intrinsic::usub_with_overflow: {
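// These fold to a { result, overflow } struct; e.g. uadd.with.overflow on
// i8 255 and i8 1 yields { i8 0, i1 true }.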
2033 if (!C0 || !C1)
2034 return UndefValue::get(Ty);
2036 APInt Res;
2037 bool Overflow;
2038 switch (IntrinsicID) {
2039 default: llvm_unreachable("Invalid case");
2040 case Intrinsic::sadd_with_overflow:
2041 Res = C0->sadd_ov(*C1, Overflow);
2042 break;
2043 case Intrinsic::uadd_with_overflow:
2044 Res = C0->uadd_ov(*C1, Overflow);
2045 break;
2046 case Intrinsic::ssub_with_overflow:
2047 Res = C0->ssub_ov(*C1, Overflow);
2048 break;
2049 case Intrinsic::usub_with_overflow:
2050 Res = C0->usub_ov(*C1, Overflow);
2051 break;
2052 case Intrinsic::smul_with_overflow:
2053 Res = C0->smul_ov(*C1, Overflow);
2054 break;
2055 case Intrinsic::umul_with_overflow:
2056 Res = C0->umul_ov(*C1, Overflow);
2057 break;
2059 Constant *Ops[] = {
2060 ConstantInt::get(Ty->getContext(), Res),
2061 ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
2063 return ConstantStruct::get(cast<StructType>(Ty), Ops);
2065 case Intrinsic::uadd_sat:
2066 case Intrinsic::sadd_sat:
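// With exactly one undef operand we may choose its value freely, and
// all-ones is always reachable: for uadd.sat pick an addend large enough
// to saturate, e.g. uadd.sat(i8 42, undef) folds to i8 -1.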
2067 if (!C0 && !C1)
2068 return UndefValue::get(Ty);
2069 if (!C0 || !C1)
2070 return Constant::getAllOnesValue(Ty);
2071 if (IntrinsicID == Intrinsic::uadd_sat)
2072 return ConstantInt::get(Ty, C0->uadd_sat(*C1));
2073 else
2074 return ConstantInt::get(Ty, C0->sadd_sat(*C1));
2075 case Intrinsic::usub_sat:
2076 case Intrinsic::ssub_sat:
2077 if (!C0 && !C1)
2078 return UndefValue::get(Ty);
2079 if (!C0 || !C1)
2080 return Constant::getNullValue(Ty);
2081 if (IntrinsicID == Intrinsic::usub_sat)
2082 return ConstantInt::get(Ty, C0->usub_sat(*C1));
2083 else
2084 return ConstantInt::get(Ty, C0->ssub_sat(*C1));
2085 case Intrinsic::cttz:
2086 case Intrinsic::ctlz:
2087 assert(C1 && "Must be constant int");
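// e.g. cttz(i32 8, i1 0) folds to i32 3 and ctlz(i32 8, i1 0) to i32 28.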
2089 // cttz(0, 1) and ctlz(0, 1) are undef.
2090 if (C1->isOneValue() && (!C0 || C0->isNullValue()))
2091 return UndefValue::get(Ty);
2092 if (!C0)
2093 return Constant::getNullValue(Ty);
2094 if (IntrinsicID == Intrinsic::cttz)
2095 return ConstantInt::get(Ty, C0->countTrailingZeros());
2096 else
2097 return ConstantInt::get(Ty, C0->countLeadingZeros());
2100 return nullptr;
2103 // Handle ConstantVector too; it is used in place of ConstantDataVector when an element is undef.
2104 if ((isa<ConstantVector>(Operands[0]) ||
2105 isa<ConstantDataVector>(Operands[0])) &&
2106 // Check for default rounding mode.
2107 // FIXME: Support other rounding modes?
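// (An immediate of 4 is _MM_FROUND_CUR_DIRECTION; we assume the MXCSR
// default of round to nearest, ties to even.)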
2108 isa<ConstantInt>(Operands[1]) &&
2109 cast<ConstantInt>(Operands[1])->getValue() == 4) {
2110 auto *Op = cast<Constant>(Operands[0]);
2111 switch (IntrinsicID) {
2112 default: break;
2113 case Intrinsic::x86_avx512_vcvtss2si32:
2114 case Intrinsic::x86_avx512_vcvtss2si64:
2115 case Intrinsic::x86_avx512_vcvtsd2si32:
2116 case Intrinsic::x86_avx512_vcvtsd2si64:
2117 if (ConstantFP *FPOp =
2118 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2119 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2120 /*roundTowardZero=*/false, Ty,
2121 /*IsSigned*/true);
2122 break;
2123 case Intrinsic::x86_avx512_vcvtss2usi32:
2124 case Intrinsic::x86_avx512_vcvtss2usi64:
2125 case Intrinsic::x86_avx512_vcvtsd2usi32:
2126 case Intrinsic::x86_avx512_vcvtsd2usi64:
2127 if (ConstantFP *FPOp =
2128 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2129 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2130 /*roundTowardZero=*/false, Ty,
2131 /*IsSigned*/false);
2132 break;
2133 case Intrinsic::x86_avx512_cvttss2si:
2134 case Intrinsic::x86_avx512_cvttss2si64:
2135 case Intrinsic::x86_avx512_cvttsd2si:
2136 case Intrinsic::x86_avx512_cvttsd2si64:
2137 if (ConstantFP *FPOp =
2138 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2139 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2140 /*roundTowardZero=*/true, Ty,
2141 /*IsSigned*/true);
2142 break;
2143 case Intrinsic::x86_avx512_cvttss2usi:
2144 case Intrinsic::x86_avx512_cvttss2usi64:
2145 case Intrinsic::x86_avx512_cvttsd2usi:
2146 case Intrinsic::x86_avx512_cvttsd2usi64:
2147 if (ConstantFP *FPOp =
2148 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2149 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2150 /*roundTowardZero=*/true, Ty,
2151 /*IsSigned*/false);
2152 break;
2155 return nullptr;
2158 if (Operands.size() != 3)
2159 return nullptr;
2161 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2162 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2163 if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
2164 switch (IntrinsicID) {
2165 default: break;
2166 case Intrinsic::fma:
2167 case Intrinsic::fmuladd: {
2168 APFloat V = Op1->getValueAPF();
2169 APFloat::opStatus s = V.fusedMultiplyAdd(Op2->getValueAPF(),
2170 Op3->getValueAPF(),
2171 APFloat::rmNearestTiesToEven);
2172 if (s != APFloat::opInvalidOp)
2173 return ConstantFP::get(Ty->getContext(), V);
2175 return nullptr;
2182 if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
2183 const APInt *C0, *C1, *C2;
2184 if (!getConstIntOrUndef(Operands[0], C0) ||
2185 !getConstIntOrUndef(Operands[1], C1) ||
2186 !getConstIntOrUndef(Operands[2], C2))
2187 return nullptr;
2189 bool IsRight = IntrinsicID == Intrinsic::fshr;
2190 if (!C2)
2191 return Operands[IsRight ? 1 : 0];
2192 if (!C0 && !C1)
2193 return UndefValue::get(Ty);
2195 // The shift amount is interpreted modulo the bitwidth. If it is
2196 // effectively 0, avoid UB from the oversized inverse shift below.
2197 unsigned BitWidth = C2->getBitWidth();
2198 unsigned ShAmt = C2->urem(BitWidth);
2199 if (!ShAmt)
2200 return Operands[IsRight ? 1 : 0];
2202 // (C0 << ShlAmt) | (C1 >> LshrAmt)
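// e.g. fshl(i8 0x12, i8 0x34, 4) folds to 0x23 = (0x12 << 4) | (0x34 >> 4).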
2203 unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
2204 unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
2205 if (!C0)
2206 return ConstantInt::get(Ty, C1->lshr(LshrAmt));
2207 if (!C1)
2208 return ConstantInt::get(Ty, C0->shl(ShlAmt));
2209 return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
2212 return nullptr;
2215 Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
2216 VectorType *VTy, ArrayRef<Constant *> Operands,
2217 const DataLayout &DL,
2218 const TargetLibraryInfo *TLI,
2219 const CallBase *Call) {
2220 SmallVector<Constant *, 4> Result(VTy->getNumElements());
2221 SmallVector<Constant *, 4> Lane(Operands.size());
2222 Type *Ty = VTy->getElementType();
2224 if (IntrinsicID == Intrinsic::masked_load) {
2225 auto *SrcPtr = Operands[0];
2226 auto *Mask = Operands[2];
2227 auto *Passthru = Operands[3];
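// Fold the load lane by lane: a set mask bit selects the loaded constant
// element, a clear bit selects the passthru element, and an undef mask bit
// may take whichever of the two is available.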
2229 Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, VTy, DL);
2231 SmallVector<Constant *, 32> NewElements;
2232 for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
2233 auto *MaskElt = Mask->getAggregateElement(I);
2234 if (!MaskElt)
2235 break;
2236 auto *PassthruElt = Passthru->getAggregateElement(I);
2237 auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
2238 if (isa<UndefValue>(MaskElt)) {
2239 if (PassthruElt)
2240 NewElements.push_back(PassthruElt);
2241 else if (VecElt)
2242 NewElements.push_back(VecElt);
2243 else
2244 return nullptr;
2246 if (MaskElt->isNullValue()) {
2247 if (!PassthruElt)
2248 return nullptr;
2249 NewElements.push_back(PassthruElt);
2250 } else if (MaskElt->isOneValue()) {
2251 if (!VecElt)
2252 return nullptr;
2253 NewElements.push_back(VecElt);
2254 } else {
2255 return nullptr;
2258 if (NewElements.size() != VTy->getNumElements())
2259 return nullptr;
2260 return ConstantVector::get(NewElements);
2263 for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
2264 // Gather a column of constants.
2265 for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
2266 // These intrinsics use a scalar type for their second argument.
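// e.g. the i1 is-zero-undef flag of cttz/ctlz and the i32 exponent of powi.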
2267 if (J == 1 &&
2268 (IntrinsicID == Intrinsic::cttz || IntrinsicID == Intrinsic::ctlz ||
2269 IntrinsicID == Intrinsic::powi)) {
2270 Lane[J] = Operands[J];
2271 continue;
2274 Constant *Agg = Operands[J]->getAggregateElement(I);
2275 if (!Agg)
2276 return nullptr;
2278 Lane[J] = Agg;
2281 // Use the regular scalar folding to simplify this column.
2282 Constant *Folded =
2283 ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
2284 if (!Folded)
2285 return nullptr;
2286 Result[I] = Folded;
2289 return ConstantVector::get(Result);
2292 } // end anonymous namespace
2294 Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
2295 ArrayRef<Constant *> Operands,
2296 const TargetLibraryInfo *TLI) {
2297 if (Call->isNoBuiltin() || Call->isStrictFP())
2298 return nullptr;
2299 if (!F->hasName())
2300 return nullptr;
2301 StringRef Name = F->getName();
2303 Type *Ty = F->getReturnType();
2305 if (auto *VTy = dyn_cast<VectorType>(Ty))
2306 return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
2307 F->getParent()->getDataLayout(), TLI, Call);
2309 return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
2310 Call);
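// Return true if a libm call with these constant arguments is known to set
// neither errno nor any floating-point exception, so an otherwise dead call
// can safely be removed.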
2313 bool llvm::isMathLibCallNoop(const CallBase *Call,
2314 const TargetLibraryInfo *TLI) {
2315 // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
2316 // (and to some extent ConstantFoldScalarCall).
2317 if (Call->isNoBuiltin() || Call->isStrictFP())
2318 return false;
2319 Function *F = Call->getCalledFunction();
2320 if (!F)
2321 return false;
2323 LibFunc Func;
2324 if (!TLI || !TLI->getLibFunc(*F, Func))
2325 return false;
2327 if (Call->getNumArgOperands() == 1) {
2328 if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
2329 const APFloat &Op = OpC->getValueAPF();
2330 switch (Func) {
2331 case LibFunc_logl:
2332 case LibFunc_log:
2333 case LibFunc_logf:
2334 case LibFunc_log2l:
2335 case LibFunc_log2:
2336 case LibFunc_log2f:
2337 case LibFunc_log10l:
2338 case LibFunc_log10:
2339 case LibFunc_log10f:
2340 return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
2342 case LibFunc_expl:
2343 case LibFunc_exp:
2344 case LibFunc_expf:
2345 // FIXME: These boundaries are slightly conservative.
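// exp overflows above ln(DBL_MAX) ~= 709.78 and underflows to zero a bit
// below -745, so these bounds stay inside the errno-free range; the float
// bounds are derived analogously from FLT_MAX.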
2346 if (OpC->getType()->isDoubleTy())
2347 return Op.compare(APFloat(-745.0)) != APFloat::cmpLessThan &&
2348 Op.compare(APFloat(709.0)) != APFloat::cmpGreaterThan;
2349 if (OpC->getType()->isFloatTy())
2350 return Op.compare(APFloat(-103.0f)) != APFloat::cmpLessThan &&
2351 Op.compare(APFloat(88.0f)) != APFloat::cmpGreaterThan;
2352 break;
2354 case LibFunc_exp2l:
2355 case LibFunc_exp2:
2356 case LibFunc_exp2f:
2357 // FIXME: These boundaries are slightly conservative.
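// exp2 stays finite up to 2^1023 (x <= 1023) and keeps a nonzero denormal
// result down to about x = -1074 for double; the float bounds mirror this
// with 2^127 and 2^-149.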
2358 if (OpC->getType()->isDoubleTy())
2359 return Op.compare(APFloat(-1074.0)) != APFloat::cmpLessThan &&
2360 Op.compare(APFloat(1023.0)) != APFloat::cmpGreaterThan;
2361 if (OpC->getType()->isFloatTy())
2362 return Op.compare(APFloat(-149.0f)) != APFloat::cmpLessThan &&
2363 Op.compare(APFloat(127.0f)) != APFloat::cmpGreaterThan;
2364 break;
2366 case LibFunc_sinl:
2367 case LibFunc_sin:
2368 case LibFunc_sinf:
2369 case LibFunc_cosl:
2370 case LibFunc_cos:
2371 case LibFunc_cosf:
2372 return !Op.isInfinity();
2374 case LibFunc_tanl:
2375 case LibFunc_tan:
2376 case LibFunc_tanf: {
2377 // FIXME: Stop using the host math library.
2378 // FIXME: The computation isn't done in the right precision.
2379 Type *Ty = OpC->getType();
2380 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
2381 double OpV = getValueAsDouble(OpC);
2382 return ConstantFoldFP(tan, OpV, Ty) != nullptr;
2384 break;
2387 case LibFunc_asinl:
2388 case LibFunc_asin:
2389 case LibFunc_asinf:
2390 case LibFunc_acosl:
2391 case LibFunc_acos:
2392 case LibFunc_acosf:
2393 return Op.compare(APFloat(Op.getSemantics(), "-1")) !=
2394 APFloat::cmpLessThan &&
2395 Op.compare(APFloat(Op.getSemantics(), "1")) !=
2396 APFloat::cmpGreaterThan;
2398 case LibFunc_sinh:
2399 case LibFunc_cosh:
2400 case LibFunc_sinhf:
2401 case LibFunc_coshf:
2402 case LibFunc_sinhl:
2403 case LibFunc_coshl:
2404 // FIXME: These boundaries are slightly conservative.
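// cosh(x) and sinh(x) grow like e^|x| / 2, so they remain finite for |x| up
// to about ln(2 * DBL_MAX) ~= 710.48 for double and ~= 89.4 for float.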
2405 if (OpC->getType()->isDoubleTy())
2406 return Op.compare(APFloat(-710.0)) != APFloat::cmpLessThan &&
2407 Op.compare(APFloat(710.0)) != APFloat::cmpGreaterThan;
2408 if (OpC->getType()->isFloatTy())
2409 return Op.compare(APFloat(-89.0f)) != APFloat::cmpLessThan &&
2410 Op.compare(APFloat(89.0f)) != APFloat::cmpGreaterThan;
2411 break;
2413 case LibFunc_sqrtl:
2414 case LibFunc_sqrt:
2415 case LibFunc_sqrtf:
2416 return Op.isNaN() || Op.isZero() || !Op.isNegative();
2418 // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
2419 // maybe others?
2420 default:
2421 break;
2426 if (Call->getNumArgOperands() == 2) {
2427 ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
2428 ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
2429 if (Op0C && Op1C) {
2430 const APFloat &Op0 = Op0C->getValueAPF();
2431 const APFloat &Op1 = Op1C->getValueAPF();
2433 switch (Func) {
2434 case LibFunc_powl:
2435 case LibFunc_pow:
2436 case LibFunc_powf: {
2437 // FIXME: Stop using the host math library.
2438 // FIXME: The computation isn't done in the right precision.
2439 Type *Ty = Op0C->getType();
2440 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
2441 if (Ty == Op1C->getType()) {
2442 double Op0V = getValueAsDouble(Op0C);
2443 double Op1V = getValueAsDouble(Op1C);
2444 return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
2447 break;
2450 case LibFunc_fmodl:
2451 case LibFunc_fmod:
2452 case LibFunc_fmodf:
2453 return Op0.isNaN() || Op1.isNaN() ||
2454 (!Op0.isInfinity() && !Op1.isZero());
2456 default:
2457 break;
2462 return false;