//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//
static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
  }

  return nullptr;
}

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
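// As an illustrative example of the vector->scalar case handled below, on a
// little-endian target this folds
//   bitcast (<2 x i32> <i32 1, i32 2> to i64)
// to i64 0x0000000200000001, i.e. element 0 lands in the low bits.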
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = VTy->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating point, convert it to a vector
      // of int to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        Type *SrcIVTy =
            VectorType::get(IntegerType::get(C->getContext(), FPWidth),
                            NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>
  // First things first: we only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    Type *DestIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    Type *SrcIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }
  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(C->getType()->getVectorElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }
  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                          ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
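// For example, given the constant expression
//   getelementptr inbounds ([5 x i32], [5 x i32]* @a, i32 0, i32 3)
// this sets GV to @a and Offset to 12 (three 4-byte i32 elements).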
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill down
    // and find a bitcastable constant.
    if (!SrcTy->isAggregateType())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()) == 0);
      C = ElemC;
    } else {
      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}

namespace {

/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
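// For example, reading 4 bytes at ByteOffset 0 from an i32 constant holding
// 0x12345678 on a little-endian target fills CurPtr with
// {0x78, 0x56, 0x34, 0x12}.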
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()){
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()){
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }
  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail padding,
      // read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so, we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }
  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = C->getType()->getSequentialElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (auto *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = C->getType()->getVectorNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                          const DataLayout &DL) {
  auto *PTy = cast<PointerType>(C->getType());
  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64
    // load and then bitcast the result. This can be useful for union cases.
    // Note that address spaces don't matter here since we're not going to
    // result in an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16Ty(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32Ty(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64Ty(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNTy(C->getContext(),
                                     DL.getTypeSizeInBits(LoadTy));
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL))
      return FoldBitCast(Res, LoadTy, DL);
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;
  GlobalValue *GVal;
  APInt OffsetAI;
  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
    return nullptr;

  auto *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  int64_t Offset = OffsetAI.getSExtValue();
  int64_t InitializerSize = DL.getTypeAllocSize(GV->getInitializer()->getType());

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset + BytesLoaded <= 0)
    return UndefValue::get(IntType);

  // Likewise, if the access starts past the end of the constant, the result
  // is undefined.
  if (Offset >= InitializerSize)
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
                                             const DataLayout &DL) {
  auto *SrcPtr = CE->getOperand(0);
  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
  if (!SrcPtrTy)
    return nullptr;
  Type *SrcTy = SrcPtrTy->getPointerElementType();

  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
  if (!C)
    return nullptr;

  return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
}

} // end anonymous namespace

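// As an illustrative example of the fast path below: given
//   @g = constant i32 42
// a load of i32 through @g folds directly to i32 42 from the initializer.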
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  if (auto *GA = dyn_cast<GlobalAlias>(C))
    if (GA->getAliasee() && !GA->isInterposable())
      return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);

  // If the loaded value isn't a constant expr, we can't handle it.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
                ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
      return LoadedC;

  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
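  // For example, on a little-endian target, an i32 load of the constant
  // string c"abc\00" folds to i32 0x00636261, with 'a' in the lowest byte and
  // the NUL terminator in the highest.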
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    size_t StrLen = Str.size();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace load with immediate integer if the result is an integer or fp
    // value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (unsigned char C : reverse(Str.bytes())) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned char C : Str.bytes()) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append NUL at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(Ty);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(Ty);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
}

namespace {

Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
  if (LI->isVolatile()) return nullptr;

  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);

  return nullptr;
}

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together, using the DataLayout information provided as DL.
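// For example, with i32 elements,
//   sub (ptrtoint (getelementptr (i32, i32* @g, i64 2))), (ptrtoint @g)
// folds to 8 below even though neither ptrtoint operand folds on its own.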
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0.Zero |= Known1.Zero;
    Known0.One &= Known1.One;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly cast by the getelementptr.
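// For example, on a target with 64-bit pointers, the i32 indices in
//   getelementptr [5 x i32], [5 x i32]* @a, i32 0, i32 3
// are rewritten as i64 indices so later folding sees a canonical form.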
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntPtrTy = DL.getIntPtrType(ResultTy);
  Type *IntPtrScalarTy = IntPtrTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntPtrScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntPtrTy
                          : IntPtrTy->getScalarType();
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  if (Constant *Folded = ConstantFoldConstant(C, DL, TLI))
    C = Folded;

  return C;
}

/// Strip the pointer casts, but preserve the address space information.
Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCastsNoFollowAliases());
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  ElemTy = NewPtrTy->getPointerElementType();

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// If we can symbolically evaluate the GEP constant expression, do so.
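// For example, the over-indexed expression
//   getelementptr inbounds ([3 x i32], [3 x i32]* @g, i64 0, i64 4)
// (byte offset 16) is re-formed below as
//   getelementptr inbounds ([3 x i32], [3 x i32]* @g, i64 1, i64 1).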
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized())
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntPtrTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResTy);
          if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
            Res = FoldedRes;
          return Res;
        }
      }
      return nullptr;
    }
  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value cast to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = PTy;
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (!Ty->isStructTy()) {
      if (Ty->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        Ty = SrcElemTy;

        // Only handle pointers to sized types, not pointers to functions.
        if (!Ty->isSized())
          return nullptr;
      } else if (auto *ATy = dyn_cast<SequentialType>(Ty)) {
        Ty = ATy->getElementType();
      } else {
        // We've reached some non-indexable type.
        break;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
      if (ElemSize == 0) {
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
      } else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down), to compute the index at this level.
        bool Overflow;
        APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
        if (Overflow)
          break;
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
      }
    } else {
      auto *STy = cast<StructType>(Ty);
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    }
  } while (Ty != ResElemTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResElemTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned; if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode))
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantExpr::getExtractValue(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//
namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return nullptr;

  SmallVector<Constant *, 8> Ops;
  for (const Use &NewU : C->operands()) {
    auto *NewC = cast<Constant>(&NewU);
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(NewC) || isa<ConstantExpr>(NewC)) {
      auto It = FoldedOps.find(NewC);
      if (It == FoldedOps.end()) {
        if (auto *FoldedC =
                ConstantFoldConstantImpl(NewC, DL, TLI, FoldedOps)) {
          FoldedOps.insert({NewC, FoldedC});
          NewC = FoldedC;
        } else {
          FoldedOps.insert({NewC, NewC});
        }
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                             DL, TLI);

    return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies if
      // all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      if (auto *FoldedC = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps))
        C = FoldedC;
      // If the incoming value is a different constant from
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    if (auto *FoldedOp = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps))
      Op = FoldedOp;

    Ops.push_back(Op);
  }

  if (const auto *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const auto *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, DL);

  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
        cast<Constant>(IVI->getAggregateOperand()),
        cast<Constant>(IVI->getInsertedValueOperand()),
        IVI->getIndices());
  }

  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
        cast<Constant>(EVI->getAggregateOperand()),
        EVI->getIndices());
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}

Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y

  // FIXME: The following comment is out of date and the DataLayout is here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptrty in size, otherwise
        // there is a truncation or extension that we aren't modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantExpr::get(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  return ConstantExpr::get(Opcode, LHS, RHS);
}

Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
        if (PtrWidth < InWidth) {
          Constant *Mask =
              ConstantInt::get(CE->getContext(),
                               APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
}

Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (!C)
      return nullptr;
  }
  return C;
}

Constant *
llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                        ArrayRef<Constant *> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (Constant *Index : Indices) {
    C = C->getAggregateElement(Index);
    if (!C)
      return nullptr;
  }
  return C;
}

//===----------------------------------------------------------------------===//
// Constant Folding for Calls
//===----------------------------------------------------------------------===//
bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  switch (F->getIntrinsicID()) {
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::copysign:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::round:
  case Intrinsic::masked_load:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::bitreverse:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
  case Intrinsic::is_constant:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic: break;
  }

  if (!F->hasName())
    return false;
  StringRef Name = F->getName();

  // In these cases, the check of the length is required. We don't want to
  // return true for a name like "cos\0blah" which strcmp would return equal to
  // "cos", but has length 8.
  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "asin" || Name == "atan" ||
           Name == "atan2" || Name == "acosf" || Name == "asinf" ||
           Name == "atanf" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "cos" || Name == "cosh" ||
           Name == "ceilf" || Name == "cosf" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "exp2" || Name == "expf" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "floor" || Name == "fmod" ||
           Name == "fabsf" || Name == "floorf" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "log10" || Name == "logf" ||
           Name == "log10f";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
           Name == "sinf" || Name == "sinhf" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanh" || Name == "tanf" || Name == "tanhf";
  case '_':
    // Check for various function names that get used for the math functions
    // when the header files are preprocessed with the macro
    // __FINITE_MATH_ONLY__ enabled.
    // The '12' here is the length of the shortest name that can match.
    // We need to check the size before looking at Name[1] and Name[2]
    // so we may as well check a limit that will eliminate mismatches.
    if (Name.size() < 12 || Name[1] != '_')
      return false;
    switch (Name[2]) {
    default:
      return false;
    case 'a':
      return Name == "__acos_finite" || Name == "__acosf_finite" ||
             Name == "__asin_finite" || Name == "__asinf_finite" ||
             Name == "__atan2_finite" || Name == "__atan2f_finite";
    case 'c':
      return Name == "__cosh_finite" || Name == "__coshf_finite";
    case 'e':
      return Name == "__exp_finite" || Name == "__expf_finite" ||
             Name == "__exp2_finite" || Name == "__exp2f_finite";
    case 'l':
      return Name == "__log_finite" || Name == "__logf_finite" ||
             Name == "__log10_finite" || Name == "__log10f_finite";
    case 'p':
      return Name == "__pow_finite" || Name == "__powf_finite";
    case 's':
      return Name == "__sinh_finite" || Name == "__sinhf_finite";
    }
  }
}

namespace {

Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy() || Ty->isFloatTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
                               double W, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

/// Attempt to fold an SSE floating point to integer conversion of a constant
/// floating point. If roundTowardZero is false, the default IEEE rounding is
/// used (toward nearest, ties to even). This matches the behavior of the
/// non-truncating SSE instructions in the default rounding mode. The desired
/// integer type Ty is used to select how many bits are available for the
/// result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
1596 Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
1597 Type *Ty, bool IsSigned) {
1598 // All of these conversion intrinsics form an integer of at most 64 bits.
1599 unsigned ResultWidth = Ty->getIntegerBitWidth();
1600 assert(ResultWidth <= 64 &&
1601 "Can only constant fold conversions to 64 and 32 bit ints");
1603 uint64_t UIntVal;
1604 bool isExact = false;
1605 APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
1606 : APFloat::rmNearestTiesToEven;
1607 APFloat::opStatus status =
1608 Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
1609 IsSigned, mode, &isExact);
1610 if (status != APFloat::opOK &&
1611 (!roundTowardZero || status != APFloat::opInexact))
1612 return nullptr;
1613 return ConstantInt::get(Ty, UIntVal, IsSigned);
1614 }
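// For example, cvtss2si(1.5f) folds to 2 under rmNearestTiesToEven,
// while the truncating cvttss2si(1.5f) folds to 1 under rmTowardZero.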
1616 double getValueAsDouble(ConstantFP *Op) {
1617 Type *Ty = Op->getType();
1619 if (Ty->isFloatTy())
1620 return Op->getValueAPF().convertToFloat();
1622 if (Ty->isDoubleTy())
1623 return Op->getValueAPF().convertToDouble();
1625 bool unused;
1626 APFloat APF = Op->getValueAPF();
1627 APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
1628 return APF.convertToDouble();
1629 }
1631 static bool isManifestConstant(const Constant *c) {
1632 if (isa<ConstantData>(c)) {
1633 return true;
1634 } else if (isa<ConstantAggregate>(c) || isa<ConstantExpr>(c)) {
1635 for (const Value *subc : c->operand_values()) {
1636 if (!isManifestConstant(cast<Constant>(subc)))
1637 return false;
1638 }
1639 return true;
1640 }
1641 return false;
1642 }
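// For example, i32 42 and the aggregate { i32 1, i32 2 } are manifest
// constants, while a ConstantExpr that involves the address of a
// GlobalValue is not: it is still a Constant, but its value is not
// knowable at compile time.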
1644 static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
1645 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1646 C = &CI->getValue();
1647 return true;
1648 }
1649 if (isa<UndefValue>(Op)) {
1650 C = nullptr;
1651 return true;
1652 }
1653 return false;
1654 }
1656 static Constant *ConstantFoldScalarCall1(StringRef Name,
1657 Intrinsic::ID IntrinsicID,
1658 Type *Ty,
1659 ArrayRef<Constant *> Operands,
1660 const TargetLibraryInfo *TLI,
1661 const CallBase *Call) {
1662 assert(Operands.size() == 1 && "Wrong number of operands.");
1664 if (IntrinsicID == Intrinsic::is_constant) {
1665 // We know we have a "Constant" argument. But we want to only
1666 // return true for manifest constants, not those that depend on
1667 // constants with unknowable values, e.g. GlobalValue or BlockAddress.
1668 if (isManifestConstant(Operands[0]))
1669 return ConstantInt::getTrue(Ty->getContext());
1670 return nullptr;
1671 }
1672 if (isa<UndefValue>(Operands[0])) {
1673 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
1674 // ctpop() is between 0 and bitwidth, pick 0 for undef.
1675 if (IntrinsicID == Intrinsic::cos ||
1676 IntrinsicID == Intrinsic::ctpop)
1677 return Constant::getNullValue(Ty);
1678 if (IntrinsicID == Intrinsic::bswap ||
1679 IntrinsicID == Intrinsic::bitreverse ||
1680 IntrinsicID == Intrinsic::launder_invariant_group ||
1681 IntrinsicID == Intrinsic::strip_invariant_group)
1682 return Operands[0];
1683 }
1685 if (isa<ConstantPointerNull>(Operands[0])) {
1686 // launder(null) == null == strip(null) iff in addrspace 0
1687 if (IntrinsicID == Intrinsic::launder_invariant_group ||
1688 IntrinsicID == Intrinsic::strip_invariant_group) {
1689 // If instruction is not yet put in a basic block (e.g. when cloning
1690 // a function during inlining), Call's caller may not be available.
1691 // So check Call's BB first before querying Call->getCaller.
1692 const Function *Caller =
1693 Call->getParent() ? Call->getCaller() : nullptr;
1694 if (Caller &&
1695 !NullPointerIsDefined(
1696 Caller, Operands[0]->getType()->getPointerAddressSpace())) {
1697 return Operands[0];
1698 }
1699 return nullptr;
1700 }
1701 }
1703 if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
1704 if (IntrinsicID == Intrinsic::convert_to_fp16) {
1705 APFloat Val(Op->getValueAPF());
1707 bool lost = false;
1708 Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
1710 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
1711 }
1713 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1714 return nullptr;
1716 if (IntrinsicID == Intrinsic::round) {
1717 APFloat V = Op->getValueAPF();
1718 V.roundToIntegral(APFloat::rmNearestTiesToAway);
1719 return ConstantFP::get(Ty->getContext(), V);
1720 }
1722 if (IntrinsicID == Intrinsic::floor) {
1723 APFloat V = Op->getValueAPF();
1724 V.roundToIntegral(APFloat::rmTowardNegative);
1725 return ConstantFP::get(Ty->getContext(), V);
1726 }
1728 if (IntrinsicID == Intrinsic::ceil) {
1729 APFloat V = Op->getValueAPF();
1730 V.roundToIntegral(APFloat::rmTowardPositive);
1731 return ConstantFP::get(Ty->getContext(), V);
1732 }
1734 if (IntrinsicID == Intrinsic::trunc) {
1735 APFloat V = Op->getValueAPF();
1736 V.roundToIntegral(APFloat::rmTowardZero);
1737 return ConstantFP::get(Ty->getContext(), V);
1738 }
1740 if (IntrinsicID == Intrinsic::rint) {
1741 APFloat V = Op->getValueAPF();
1742 V.roundToIntegral(APFloat::rmNearestTiesToEven);
1743 return ConstantFP::get(Ty->getContext(), V);
1744 }
1746 if (IntrinsicID == Intrinsic::nearbyint) {
1747 APFloat V = Op->getValueAPF();
1748 V.roundToIntegral(APFloat::rmNearestTiesToEven);
1749 return ConstantFP::get(Ty->getContext(), V);
1750 }
1752 // We only fold functions with finite arguments. Folding a NaN or an
1753 // infinity would likely abort with a floating-point exception anyway, and
1754 // some host libms have known bugs when raising exceptions.
1755 if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
1756 return nullptr;
1758 // APFloat versions of these functions do not currently exist, so we use
1759 // the host's native double versions instead. Float versions are not called
1760 // directly, but for all of these functions (float)(f((double)arg)) ==
1761 // f(arg) holds, so evaluating in double is safe. Long double is not
1762 // supported yet.
1762 double V = getValueAsDouble(Op);
1764 switch (IntrinsicID) {
1765 default: break;
1766 case Intrinsic::fabs:
1767 return ConstantFoldFP(fabs, V, Ty);
1768 case Intrinsic::log2:
1769 return ConstantFoldFP(Log2, V, Ty);
1770 case Intrinsic::log:
1771 return ConstantFoldFP(log, V, Ty);
1772 case Intrinsic::log10:
1773 return ConstantFoldFP(log10, V, Ty);
1774 case Intrinsic::exp:
1775 return ConstantFoldFP(exp, V, Ty);
1776 case Intrinsic::exp2:
1777 return ConstantFoldFP(exp2, V, Ty);
1778 case Intrinsic::sin:
1779 return ConstantFoldFP(sin, V, Ty);
1780 case Intrinsic::cos:
1781 return ConstantFoldFP(cos, V, Ty);
1782 case Intrinsic::sqrt:
1783 return ConstantFoldFP(sqrt, V, Ty);
1784 }
1786 if (!TLI)
1787 return nullptr;
1789 char NameKeyChar = Name[0];
1790 if (Name[0] == '_' && Name.size() > 2 && Name[1] == '_')
1791 NameKeyChar = Name[2];
1793 switch (NameKeyChar) {
1794 case 'a':
1795 if ((Name == "acos" && TLI->has(LibFunc_acos)) ||
1796 (Name == "acosf" && TLI->has(LibFunc_acosf)) ||
1797 (Name == "__acos_finite" && TLI->has(LibFunc_acos_finite)) ||
1798 (Name == "__acosf_finite" && TLI->has(LibFunc_acosf_finite)))
1799 return ConstantFoldFP(acos, V, Ty);
1800 else if ((Name == "asin" && TLI->has(LibFunc_asin)) ||
1801 (Name == "asinf" && TLI->has(LibFunc_asinf)) ||
1802 (Name == "__asin_finite" && TLI->has(LibFunc_asin_finite)) ||
1803 (Name == "__asinf_finite" && TLI->has(LibFunc_asinf_finite)))
1804 return ConstantFoldFP(asin, V, Ty);
1805 else if ((Name == "atan" && TLI->has(LibFunc_atan)) ||
1806 (Name == "atanf" && TLI->has(LibFunc_atanf)))
1807 return ConstantFoldFP(atan, V, Ty);
1808 break;
1809 case 'c':
1810 if ((Name == "ceil" && TLI->has(LibFunc_ceil)) ||
1811 (Name == "ceilf" && TLI->has(LibFunc_ceilf)))
1812 return ConstantFoldFP(ceil, V, Ty);
1813 else if ((Name == "cos" && TLI->has(LibFunc_cos)) ||
1814 (Name == "cosf" && TLI->has(LibFunc_cosf)))
1815 return ConstantFoldFP(cos, V, Ty);
1816 else if ((Name == "cosh" && TLI->has(LibFunc_cosh)) ||
1817 (Name == "coshf" && TLI->has(LibFunc_coshf)) ||
1818 (Name == "__cosh_finite" && TLI->has(LibFunc_cosh_finite)) ||
1819 (Name == "__coshf_finite" && TLI->has(LibFunc_coshf_finite)))
1820 return ConstantFoldFP(cosh, V, Ty);
1821 break;
1822 case 'e':
1823 if ((Name == "exp" && TLI->has(LibFunc_exp)) ||
1824 (Name == "expf" && TLI->has(LibFunc_expf)) ||
1825 (Name == "__exp_finite" && TLI->has(LibFunc_exp_finite)) ||
1826 (Name == "__expf_finite" && TLI->has(LibFunc_expf_finite)))
1827 return ConstantFoldFP(exp, V, Ty);
1828 if ((Name == "exp2" && TLI->has(LibFunc_exp2)) ||
1829 (Name == "exp2f" && TLI->has(LibFunc_exp2f)) ||
1830 (Name == "__exp2_finite" && TLI->has(LibFunc_exp2_finite)) ||
1831 (Name == "__exp2f_finite" && TLI->has(LibFunc_exp2f_finite)))
1832 // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
1833 // C99 library.
1834 return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
1835 break;
1836 case 'f':
1837 if ((Name == "fabs" && TLI->has(LibFunc_fabs)) ||
1838 (Name == "fabsf" && TLI->has(LibFunc_fabsf)))
1839 return ConstantFoldFP(fabs, V, Ty);
1840 else if ((Name == "floor" && TLI->has(LibFunc_floor)) ||
1841 (Name == "floorf" && TLI->has(LibFunc_floorf)))
1842 return ConstantFoldFP(floor, V, Ty);
1843 break;
1844 case 'l':
1845 if ((Name == "log" && V > 0 && TLI->has(LibFunc_log)) ||
1846 (Name == "logf" && V > 0 && TLI->has(LibFunc_logf)) ||
1847 (Name == "__log_finite" && V > 0 &&
1848 TLI->has(LibFunc_log_finite)) ||
1849 (Name == "__logf_finite" && V > 0 &&
1850 TLI->has(LibFunc_logf_finite)))
1851 return ConstantFoldFP(log, V, Ty);
1852 else if ((Name == "log10" && V > 0 && TLI->has(LibFunc_log10)) ||
1853 (Name == "log10f" && V > 0 && TLI->has(LibFunc_log10f)) ||
1854 (Name == "__log10_finite" && V > 0 &&
1855 TLI->has(LibFunc_log10_finite)) ||
1856 (Name == "__log10f_finite" && V > 0 &&
1857 TLI->has(LibFunc_log10f_finite)))
1858 return ConstantFoldFP(log10, V, Ty);
1859 break;
1860 case 'r':
1861 if ((Name == "round" && TLI->has(LibFunc_round)) ||
1862 (Name == "roundf" && TLI->has(LibFunc_roundf)))
1863 return ConstantFoldFP(round, V, Ty);
1864 break;
1865 case 's':
1866 if ((Name == "sin" && TLI->has(LibFunc_sin)) ||
1867 (Name == "sinf" && TLI->has(LibFunc_sinf)))
1868 return ConstantFoldFP(sin, V, Ty);
1869 else if ((Name == "sinh" && TLI->has(LibFunc_sinh)) ||
1870 (Name == "sinhf" && TLI->has(LibFunc_sinhf)) ||
1871 (Name == "__sinh_finite" && TLI->has(LibFunc_sinh_finite)) ||
1872 (Name == "__sinhf_finite" && TLI->has(LibFunc_sinhf_finite)))
1873 return ConstantFoldFP(sinh, V, Ty);
1874 else if ((Name == "sqrt" && V >= 0 && TLI->has(LibFunc_sqrt)) ||
1875 (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc_sqrtf)))
1876 return ConstantFoldFP(sqrt, V, Ty);
1877 break;
1878 case 't':
1879 if ((Name == "tan" && TLI->has(LibFunc_tan)) ||
1880 (Name == "tanf" && TLI->has(LibFunc_tanf)))
1881 return ConstantFoldFP(tan, V, Ty);
1882 else if ((Name == "tanh" && TLI->has(LibFunc_tanh)) ||
1883 (Name == "tanhf" && TLI->has(LibFunc_tanhf)))
1884 return ConstantFoldFP(tanh, V, Ty);
1885 break;
1886 default:
1887 break;
1888 }
1889 return nullptr;
1890 }
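// For example, @llvm.bswap.i32(i32 0x11223344) folds to 0x44332211
// below, and @llvm.ctpop.i32(i32 7) folds to 3.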
1892 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
1893 switch (IntrinsicID) {
1894 case Intrinsic::bswap:
1895 return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
1896 case Intrinsic::ctpop:
1897 return ConstantInt::get(Ty, Op->getValue().countPopulation());
1898 case Intrinsic::bitreverse:
1899 return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
1900 case Intrinsic::convert_from_fp16: {
1901 APFloat Val(APFloat::IEEEhalf(), Op->getValue());
1903 bool lost = false;
1904 APFloat::opStatus status = Val.convert(
1905 Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
1907 // Conversion is always precise.
1908 (void)status;
1909 assert(status == APFloat::opOK && !lost &&
1910 "Precision lost during fp16 constfolding");
1912 return ConstantFP::get(Ty->getContext(), Val);
1913 }
1914 default:
1915 return nullptr;
1916 }
1917 }
1919 // Support ConstantVector in case we have an undef element at the top level.
1920 if (isa<ConstantVector>(Operands[0]) ||
1921 isa<ConstantDataVector>(Operands[0])) {
1922 auto *Op = cast<Constant>(Operands[0]);
1923 switch (IntrinsicID) {
1924 default: break;
1925 case Intrinsic::x86_sse_cvtss2si:
1926 case Intrinsic::x86_sse_cvtss2si64:
1927 case Intrinsic::x86_sse2_cvtsd2si:
1928 case Intrinsic::x86_sse2_cvtsd2si64:
1929 if (ConstantFP *FPOp =
1930 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
1931 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
1932 /*roundTowardZero=*/false, Ty,
1933 /*IsSigned*/true);
1934 break;
1935 case Intrinsic::x86_sse_cvttss2si:
1936 case Intrinsic::x86_sse_cvttss2si64:
1937 case Intrinsic::x86_sse2_cvttsd2si:
1938 case Intrinsic::x86_sse2_cvttsd2si64:
1939 if (ConstantFP *FPOp =
1940 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
1941 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
1942 /*roundTowardZero=*/true, Ty,
1943 /*IsSigned*/true);
1944 break;
1945 }
1946 }
1948 return nullptr;
1949 }
1951 static Constant *ConstantFoldScalarCall2(StringRef Name,
1952 Intrinsic::ID IntrinsicID,
1953 Type *Ty,
1954 ArrayRef<Constant *> Operands,
1955 const TargetLibraryInfo *TLI,
1956 const CallBase *Call) {
1957 assert(Operands.size() == 2 && "Wrong number of operands.");
1959 if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
1960 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1961 return nullptr;
1962 double Op1V = getValueAsDouble(Op1);
1964 if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
1965 if (Op2->getType() != Op1->getType())
1966 return nullptr;
1968 double Op2V = getValueAsDouble(Op2);
1969 if (IntrinsicID == Intrinsic::pow) {
1970 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
1971 }
1972 if (IntrinsicID == Intrinsic::copysign) {
1973 APFloat V1 = Op1->getValueAPF();
1974 const APFloat &V2 = Op2->getValueAPF();
1975 V1.copySign(V2);
1976 return ConstantFP::get(Ty->getContext(), V1);
1977 }
1979 if (IntrinsicID == Intrinsic::minnum) {
1980 const APFloat &C1 = Op1->getValueAPF();
1981 const APFloat &C2 = Op2->getValueAPF();
1982 return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
1983 }
1985 if (IntrinsicID == Intrinsic::maxnum) {
1986 const APFloat &C1 = Op1->getValueAPF();
1987 const APFloat &C2 = Op2->getValueAPF();
1988 return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
1989 }
1991 if (IntrinsicID == Intrinsic::minimum) {
1992 const APFloat &C1 = Op1->getValueAPF();
1993 const APFloat &C2 = Op2->getValueAPF();
1994 return ConstantFP::get(Ty->getContext(), minimum(C1, C2));
1995 }
1997 if (IntrinsicID == Intrinsic::maximum) {
1998 const APFloat &C1 = Op1->getValueAPF();
1999 const APFloat &C2 = Op2->getValueAPF();
2000 return ConstantFP::get(Ty->getContext(), maximum(C1, C2));
2001 }
2003 if (!TLI)
2004 return nullptr;
2005 if ((Name == "pow" && TLI->has(LibFunc_pow)) ||
2006 (Name == "powf" && TLI->has(LibFunc_powf)) ||
2007 (Name == "__pow_finite" && TLI->has(LibFunc_pow_finite)) ||
2008 (Name == "__powf_finite" && TLI->has(LibFunc_powf_finite)))
2009 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2010 if ((Name == "fmod" && TLI->has(LibFunc_fmod)) ||
2011 (Name == "fmodf" && TLI->has(LibFunc_fmodf)))
2012 return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
2013 if ((Name == "atan2" && TLI->has(LibFunc_atan2)) ||
2014 (Name == "atan2f" && TLI->has(LibFunc_atan2f)) ||
2015 (Name == "__atan2_finite" && TLI->has(LibFunc_atan2_finite)) ||
2016 (Name == "__atan2f_finite" && TLI->has(LibFunc_atan2f_finite)))
2017 return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
2018 } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
2019 if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
2020 return ConstantFP::get(Ty->getContext(),
2021 APFloat((float)std::pow((float)Op1V,
2022 (int)Op2C->getZExtValue())));
2023 if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
2024 return ConstantFP::get(Ty->getContext(),
2025 APFloat((float)std::pow((float)Op1V,
2026 (int)Op2C->getZExtValue())));
2027 if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
2028 return ConstantFP::get(Ty->getContext(),
2029 APFloat((double)std::pow((double)Op1V,
2030 (int)Op2C->getZExtValue())));
2031 }
2032 return nullptr;
2033 }
2035 if (Operands[0]->getType()->isIntegerTy() &&
2036 Operands[1]->getType()->isIntegerTy()) {
2037 const APInt *C0, *C1;
2038 if (!getConstIntOrUndef(Operands[0], C0) ||
2039 !getConstIntOrUndef(Operands[1], C1))
2040 return nullptr;
2042 switch (IntrinsicID) {
2043 default: break;
2044 case Intrinsic::smul_with_overflow:
2045 case Intrinsic::umul_with_overflow:
2046 // Even if both operands are undef, we cannot fold muls to undef
2047 // in the general case. For example, on i2 there are no inputs
2048 // that would produce { i2 -1, i1 true } as the result.
2049 if (!C0 || !C1)
2050 return Constant::getNullValue(Ty);
2051 LLVM_FALLTHROUGH;
2052 case Intrinsic::sadd_with_overflow:
2053 case Intrinsic::uadd_with_overflow:
2054 case Intrinsic::ssub_with_overflow:
2055 case Intrinsic::usub_with_overflow: {
2056 if (!C0 || !C1)
2057 return UndefValue::get(Ty);
2059 APInt Res;
2060 bool Overflow;
2061 switch (IntrinsicID) {
2062 default: llvm_unreachable("Invalid case");
2063 case Intrinsic::sadd_with_overflow:
2064 Res = C0->sadd_ov(*C1, Overflow);
2065 break;
2066 case Intrinsic::uadd_with_overflow:
2067 Res = C0->uadd_ov(*C1, Overflow);
2068 break;
2069 case Intrinsic::ssub_with_overflow:
2070 Res = C0->ssub_ov(*C1, Overflow);
2071 break;
2072 case Intrinsic::usub_with_overflow:
2073 Res = C0->usub_ov(*C1, Overflow);
2074 break;
2075 case Intrinsic::smul_with_overflow:
2076 Res = C0->smul_ov(*C1, Overflow);
2077 break;
2078 case Intrinsic::umul_with_overflow:
2079 Res = C0->umul_ov(*C1, Overflow);
2080 break;
2081 }
2082 Constant *Ops[] = {
2083 ConstantInt::get(Ty->getContext(), Res),
2084 ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
2085 };
2086 return ConstantStruct::get(cast<StructType>(Ty), Ops);
2087 }
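// The *_with_overflow cases above fold to a {result, overflow} struct,
// e.g. @llvm.sadd.with.overflow.i8(i8 100, i8 100) becomes
// { i8 -56, i1 true }, since 100 + 100 wraps to -56 in 8 bits.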
2088 case Intrinsic::uadd_sat:
2089 case Intrinsic::sadd_sat:
2090 if (!C0 && !C1)
2091 return UndefValue::get(Ty);
2092 if (!C0 || !C1)
2093 return Constant::getAllOnesValue(Ty);
2094 if (IntrinsicID == Intrinsic::uadd_sat)
2095 return ConstantInt::get(Ty, C0->uadd_sat(*C1));
2096 else
2097 return ConstantInt::get(Ty, C0->sadd_sat(*C1));
2098 case Intrinsic::usub_sat:
2099 case Intrinsic::ssub_sat:
2100 if (!C0 && !C1)
2101 return UndefValue::get(Ty);
2102 if (!C0 || !C1)
2103 return Constant::getNullValue(Ty);
2104 if (IntrinsicID == Intrinsic::usub_sat)
2105 return ConstantInt::get(Ty, C0->usub_sat(*C1));
2106 else
2107 return ConstantInt::get(Ty, C0->ssub_sat(*C1));
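// The saturating cases above clamp instead of wrapping, e.g.
// @llvm.uadd.sat.i8(i8 200, i8 100) folds to i8 255 rather than
// wrapping around to 44.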
2108 case Intrinsic::cttz:
2109 case Intrinsic::ctlz:
2110 assert(C1 && "Must be constant int");
2112 // cttz(0, 1) and ctlz(0, 1) are undef.
2113 if (C1->isOneValue() && (!C0 || C0->isNullValue()))
2114 return UndefValue::get(Ty);
2115 if (!C0)
2116 return Constant::getNullValue(Ty);
2117 if (IntrinsicID == Intrinsic::cttz)
2118 return ConstantInt::get(Ty, C0->countTrailingZeros());
2119 else
2120 return ConstantInt::get(Ty, C0->countLeadingZeros());
2121 }
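// For example, @llvm.cttz.i32(i32 8, i1 false) folds to 3 above, since
// 8 == 0b1000 has three trailing zero bits.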
2123 return nullptr;
2124 }
2126 // Support ConstantVector in case we have an undef element at the top level.
2127 if ((isa<ConstantVector>(Operands[0]) ||
2128 isa<ConstantDataVector>(Operands[0])) &&
2129 // Check for default rounding mode.
2130 // FIXME: Support other rounding modes?
2131 isa<ConstantInt>(Operands[1]) &&
2132 cast<ConstantInt>(Operands[1])->getValue() == 4) {
2133 auto *Op = cast<Constant>(Operands[0]);
2134 switch (IntrinsicID) {
2135 default: break;
2136 case Intrinsic::x86_avx512_vcvtss2si32:
2137 case Intrinsic::x86_avx512_vcvtss2si64:
2138 case Intrinsic::x86_avx512_vcvtsd2si32:
2139 case Intrinsic::x86_avx512_vcvtsd2si64:
2140 if (ConstantFP *FPOp =
2141 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2142 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2143 /*roundTowardZero=*/false, Ty,
2144 /*IsSigned*/true);
2145 break;
2146 case Intrinsic::x86_avx512_vcvtss2usi32:
2147 case Intrinsic::x86_avx512_vcvtss2usi64:
2148 case Intrinsic::x86_avx512_vcvtsd2usi32:
2149 case Intrinsic::x86_avx512_vcvtsd2usi64:
2150 if (ConstantFP *FPOp =
2151 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2152 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2153 /*roundTowardZero=*/false, Ty,
2154 /*IsSigned*/false);
2155 break;
2156 case Intrinsic::x86_avx512_cvttss2si:
2157 case Intrinsic::x86_avx512_cvttss2si64:
2158 case Intrinsic::x86_avx512_cvttsd2si:
2159 case Intrinsic::x86_avx512_cvttsd2si64:
2160 if (ConstantFP *FPOp =
2161 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2162 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2163 /*roundTowardZero=*/true, Ty,
2164 /*IsSigned*/true);
2165 break;
2166 case Intrinsic::x86_avx512_cvttss2usi:
2167 case Intrinsic::x86_avx512_cvttss2usi64:
2168 case Intrinsic::x86_avx512_cvttsd2usi:
2169 case Intrinsic::x86_avx512_cvttsd2usi64:
2170 if (ConstantFP *FPOp =
2171 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2172 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2173 /*roundTowardZero=*/true, Ty,
2174 /*IsSigned*/false);
2175 break;
2176 }
2178 return nullptr;
2179 }
2181 static Constant *ConstantFoldScalarCall3(StringRef Name,
2182 Intrinsic::ID IntrinsicID,
2183 Type *Ty,
2184 ArrayRef<Constant *> Operands,
2185 const TargetLibraryInfo *TLI,
2186 const CallBase *Call) {
2187 assert(Operands.size() == 3 && "Wrong number of operands.");
2189 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2190 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2191 if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
2192 switch (IntrinsicID) {
2193 default: break;
2194 case Intrinsic::fma:
2195 case Intrinsic::fmuladd: {
2196 APFloat V = Op1->getValueAPF();
2197 APFloat::opStatus s = V.fusedMultiplyAdd(Op2->getValueAPF(),
2198 Op3->getValueAPF(),
2199 APFloat::rmNearestTiesToEven);
2200 if (s != APFloat::opInvalidOp)
2201 return ConstantFP::get(Ty->getContext(), V);
2203 return nullptr;
2204 }
2205 }
2206 }
2207 }
2208 }
2210 if (const auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
2211 if (const auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
2212 if (const auto *Op3 = dyn_cast<ConstantInt>(Operands[2])) {
2213 switch (IntrinsicID) {
2214 default: break;
2215 case Intrinsic::smul_fix:
2216 case Intrinsic::smul_fix_sat: {
2217 // This code performs rounding towards negative infinity in case the
2218 // result cannot be represented exactly for the given scale. Targets
2219 // that do care about rounding should use a target hook for specifying
2220 // how rounding should be done, and provide their own folding to be
2221 // consistent with rounding. This is the same approach as used by
2222 // DAGTypeLegalizer::ExpandIntRes_MULFIX.
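// For example, with a scale of 2, @llvm.smul.fix.i32(i32 6, i32 4, i32 2)
// multiplies 1.5 by 1.0 (both encoded with two fractional bits) and
// folds to i32 6, i.e. (6 * 4) >> 2.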
2223 APInt Lhs = Op1->getValue();
2224 APInt Rhs = Op2->getValue();
2225 unsigned Scale = Op3->getValue().getZExtValue();
2226 unsigned Width = Lhs.getBitWidth();
2227 assert(Scale < Width && "Illegal scale.");
2228 unsigned ExtendedWidth = Width * 2;
2229 APInt Product = (Lhs.sextOrSelf(ExtendedWidth) *
2230 Rhs.sextOrSelf(ExtendedWidth)).ashr(Scale);
2231 if (IntrinsicID == Intrinsic::smul_fix_sat) {
2232 APInt MaxValue =
2233 APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
2234 APInt MinValue =
2235 APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
2236 Product = APIntOps::smin(Product, MaxValue);
2237 Product = APIntOps::smax(Product, MinValue);
2238 }
2239 return ConstantInt::get(Ty->getContext(),
2240 Product.sextOrTrunc(Width));
2241 }
2242 }
2243 }
2244 }
2245 }
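// Funnel shifts conceptually concatenate the two operands and shift
// across the element boundary, e.g. @llvm.fshl.i8(i8 0xAB, i8 0xCD, i8 4)
// takes the top byte of 0xABCD << 4 and folds to 0xBC.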
2247 if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
2248 const APInt *C0, *C1, *C2;
2249 if (!getConstIntOrUndef(Operands[0], C0) ||
2250 !getConstIntOrUndef(Operands[1], C1) ||
2251 !getConstIntOrUndef(Operands[2], C2))
2252 return nullptr;
2254 bool IsRight = IntrinsicID == Intrinsic::fshr;
2255 if (!C2)
2256 return Operands[IsRight ? 1 : 0];
2257 if (!C0 && !C1)
2258 return UndefValue::get(Ty);
2260 // The shift amount is interpreted as modulo the bitwidth. If the shift
2261 // amount is effectively 0, avoid UB due to oversized inverse shift below.
2262 unsigned BitWidth = C2->getBitWidth();
2263 unsigned ShAmt = C2->urem(BitWidth);
2264 if (!ShAmt)
2265 return Operands[IsRight ? 1 : 0];
2267 // (C0 << ShlAmt) | (C1 >> LshrAmt)
2268 unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
2269 unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
2270 if (!C0)
2271 return ConstantInt::get(Ty, C1->lshr(LshrAmt));
2272 if (!C1)
2273 return ConstantInt::get(Ty, C0->shl(ShlAmt));
2274 return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
2275 }
2277 return nullptr;
2278 }
2280 static Constant *ConstantFoldScalarCall(StringRef Name,
2281 Intrinsic::ID IntrinsicID,
2282 Type *Ty,
2283 ArrayRef<Constant *> Operands,
2284 const TargetLibraryInfo *TLI,
2285 const CallBase *Call) {
2286 if (Operands.size() == 1)
2287 return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
2289 if (Operands.size() == 2)
2290 return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);
2292 if (Operands.size() == 3)
2293 return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
2295 return nullptr;
2296 }
2298 static Constant *ConstantFoldVectorCall(StringRef Name,
2299 Intrinsic::ID IntrinsicID,
2300 VectorType *VTy,
2301 ArrayRef<Constant *> Operands,
2302 const DataLayout &DL,
2303 const TargetLibraryInfo *TLI,
2304 const CallBase *Call) {
2305 SmallVector<Constant *, 4> Result(VTy->getNumElements());
2306 SmallVector<Constant *, 4> Lane(Operands.size());
2307 Type *Ty = VTy->getElementType();
2309 if (IntrinsicID == Intrinsic::masked_load) {
2310 auto *SrcPtr = Operands[0];
2311 auto *Mask = Operands[2];
2312 auto *Passthru = Operands[3];
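// The masked.load operands are (pointer, alignment, mask, passthru); the
// alignment operand is irrelevant for folding. Each lane folds to the
// loaded element where the mask is 1 and to the passthru element where
// it is 0, and any lane that cannot be resolved blocks the whole fold.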
2314 Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, VTy, DL);
2316 SmallVector<Constant *, 32> NewElements;
2317 for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
2318 auto *MaskElt = Mask->getAggregateElement(I);
2319 if (!MaskElt)
2320 break;
2321 auto *PassthruElt = Passthru->getAggregateElement(I);
2322 auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
2323 if (isa<UndefValue>(MaskElt)) {
2324 if (PassthruElt)
2325 NewElements.push_back(PassthruElt);
2326 else if (VecElt)
2327 NewElements.push_back(VecElt);
2328 else
2329 return nullptr;
2330 }
2331 if (MaskElt->isNullValue()) {
2332 if (!PassthruElt)
2333 return nullptr;
2334 NewElements.push_back(PassthruElt);
2335 } else if (MaskElt->isOneValue()) {
2336 if (!VecElt)
2337 return nullptr;
2338 NewElements.push_back(VecElt);
2339 } else {
2340 return nullptr;
2341 }
2342 }
2343 if (NewElements.size() != VTy->getNumElements())
2344 return nullptr;
2345 return ConstantVector::get(NewElements);
2346 }
2348 for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
2349 // Gather a column of constants.
2350 for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
2351 // Some intrinsics use a scalar type for certain arguments.
2352 if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) {
2353 Lane[J] = Operands[J];
2354 continue;
2355 }
2357 Constant *Agg = Operands[J]->getAggregateElement(I);
2358 if (!Agg)
2359 return nullptr;
2361 Lane[J] = Agg;
2362 }
2364 // Use the regular scalar folding to simplify this column.
2365 Constant *Folded =
2366 ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
2367 if (!Folded)
2368 return nullptr;
2369 Result[I] = Folded;
2370 }
2372 return ConstantVector::get(Result);
2373 }
2375 } // end anonymous namespace
2377 Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
2378 ArrayRef<Constant *> Operands,
2379 const TargetLibraryInfo *TLI) {
2380 if (Call->isNoBuiltin() || Call->isStrictFP())
2381 return nullptr;
2382 if (!F->hasName())
2383 return nullptr;
2384 StringRef Name = F->getName();
2386 Type *Ty = F->getReturnType();
2388 if (auto *VTy = dyn_cast<VectorType>(Ty))
2389 return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
2390 F->getParent()->getDataLayout(), TLI, Call);
2392 return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
2393 Call);
2394 }
2396 bool llvm::isMathLibCallNoop(const CallBase *Call,
2397 const TargetLibraryInfo *TLI) {
2398 // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
2399 // (and to some extent ConstantFoldScalarCall).
2400 if (Call->isNoBuiltin() || Call->isStrictFP())
2401 return false;
2402 Function *F = Call->getCalledFunction();
2403 if (!F)
2404 return false;
2406 LibFunc Func;
2407 if (!TLI || !TLI->getLibFunc(*F, Func))
2408 return false;
2410 if (Call->getNumArgOperands() == 1) {
2411 if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
2412 const APFloat &Op = OpC->getValueAPF();
2413 switch (Func) {
2414 case LibFunc_logl:
2415 case LibFunc_log:
2416 case LibFunc_logf:
2417 case LibFunc_log2l:
2418 case LibFunc_log2:
2419 case LibFunc_log2f:
2420 case LibFunc_log10l:
2421 case LibFunc_log10:
2422 case LibFunc_log10f:
2423 return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
2425 case LibFunc_expl:
2426 case LibFunc_exp:
2427 case LibFunc_expf:
2428 // FIXME: These boundaries are slightly conservative.
2429 if (OpC->getType()->isDoubleTy())
2430 return Op.compare(APFloat(-745.0)) != APFloat::cmpLessThan &&
2431 Op.compare(APFloat(709.0)) != APFloat::cmpGreaterThan;
2432 if (OpC->getType()->isFloatTy())
2433 return Op.compare(APFloat(-103.0f)) != APFloat::cmpLessThan &&
2434 Op.compare(APFloat(88.0f)) != APFloat::cmpGreaterThan;
2435 break;
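// These bounds track where exp() leaves the representable range: exp(709)
// is roughly 8.2e307, still below DBL_MAX (about 1.8e308), while exp(710)
// overflows, and exp(-745) lands near the smallest double denormal. The
// exp2 bounds below follow the same pattern with powers of two.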
2437 case LibFunc_exp2l:
2438 case LibFunc_exp2:
2439 case LibFunc_exp2f:
2440 // FIXME: These boundaries are slightly conservative.
2441 if (OpC->getType()->isDoubleTy())
2442 return Op.compare(APFloat(-1074.0)) != APFloat::cmpLessThan &&
2443 Op.compare(APFloat(1023.0)) != APFloat::cmpGreaterThan;
2444 if (OpC->getType()->isFloatTy())
2445 return Op.compare(APFloat(-149.0f)) != APFloat::cmpLessThan &&
2446 Op.compare(APFloat(127.0f)) != APFloat::cmpGreaterThan;
2447 break;
2449 case LibFunc_sinl:
2450 case LibFunc_sin:
2451 case LibFunc_sinf:
2452 case LibFunc_cosl:
2453 case LibFunc_cos:
2454 case LibFunc_cosf:
2455 return !Op.isInfinity();
2457 case LibFunc_tanl:
2458 case LibFunc_tan:
2459 case LibFunc_tanf: {
2460 // FIXME: Stop using the host math library.
2461 // FIXME: The computation isn't done in the right precision.
2462 Type *Ty = OpC->getType();
2463 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
2464 double OpV = getValueAsDouble(OpC);
2465 return ConstantFoldFP(tan, OpV, Ty) != nullptr;
2466 }
2467 break;
2468 }
2470 case LibFunc_asinl:
2471 case LibFunc_asin:
2472 case LibFunc_asinf:
2473 case LibFunc_acosl:
2474 case LibFunc_acos:
2475 case LibFunc_acosf:
2476 return Op.compare(APFloat(Op.getSemantics(), "-1")) !=
2477 APFloat::cmpLessThan &&
2478 Op.compare(APFloat(Op.getSemantics(), "1")) !=
2479 APFloat::cmpGreaterThan;
2481 case LibFunc_sinh:
2482 case LibFunc_cosh:
2483 case LibFunc_sinhf:
2484 case LibFunc_coshf:
2485 case LibFunc_sinhl:
2486 case LibFunc_coshl:
2487 // FIXME: These boundaries are slightly conservative.
2488 if (OpC->getType()->isDoubleTy())
2489 return Op.compare(APFloat(-710.0)) != APFloat::cmpLessThan &&
2490 Op.compare(APFloat(710.0)) != APFloat::cmpGreaterThan;
2491 if (OpC->getType()->isFloatTy())
2492 return Op.compare(APFloat(-89.0f)) != APFloat::cmpLessThan &&
2493 Op.compare(APFloat(89.0f)) != APFloat::cmpGreaterThan;
2494 break;
2496 case LibFunc_sqrtl:
2497 case LibFunc_sqrt:
2498 case LibFunc_sqrtf:
2499 return Op.isNaN() || Op.isZero() || !Op.isNegative();
2501 // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
2502 // maybe others?
2503 default:
2504 break;
2505 }
2506 }
2507 }
2509 if (Call->getNumArgOperands() == 2) {
2510 ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
2511 ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
2512 if (Op0C && Op1C) {
2513 const APFloat &Op0 = Op0C->getValueAPF();
2514 const APFloat &Op1 = Op1C->getValueAPF();
2516 switch (Func) {
2517 case LibFunc_powl:
2518 case LibFunc_pow:
2519 case LibFunc_powf: {
2520 // FIXME: Stop using the host math library.
2521 // FIXME: The computation isn't done in the right precision.
2522 Type *Ty = Op0C->getType();
2523 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
2524 if (Ty == Op1C->getType()) {
2525 double Op0V = getValueAsDouble(Op0C);
2526 double Op1V = getValueAsDouble(Op1C);
2527 return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
2528 }
2529 }
2530 break;
2531 }
2533 case LibFunc_fmodl:
2534 case LibFunc_fmod:
2535 case LibFunc_fmodf:
2536 return Op0.isNaN() || Op1.isNaN() ||
2537 (!Op0.isInfinity() && !Op1.isZero());
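// fmod only raises a domain error when the first operand is an infinity
// or the second is zero; NaN operands simply propagate, which is why
// either NaN also makes the call a no-op.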
2539 default:
2540 break;
2541 }
2542 }
2543 }
2545 return false;
2546 }