//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFold.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/NVVMIntrinsicUtils.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstdint>
using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//
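
// For illustration: folding a constant vector into a single APInt below honors
// the target byte order, e.g. on a little-endian target
//   bitcast (<2 x i16> <i16 1, i16 2> to i32)
// packs element 0 into the low bits and yields i32 0x00020001, while a
// big-endian target yields i32 0x00010002.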
static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (isa_and_nonnull<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zext(Result.getBitWidth());
  }

  return nullptr;
}
/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");

  // Catch the obvious splat cases.
  if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy, DL))
    return Res;

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating point, convert it to a vector
      // of int to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        auto *SrcIVTy = FixedVectorType::get(
            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (!isa<VectorType>(C->getType()) &&
      (isa<ConstantFP>(C) || isa<ConstantInt>(C))) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // Some of what follows may extend to cover scalable vectors but the current
  // implementation is fixed-length specific.
  if (!isa<FixedVectorType>(C->getType()))
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C) &&
      !isa<ConstantInt>(C) && !isa<ConstantFP>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>
  //
  // First things first. We only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    auto *DestIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer, if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    auto *SrcIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    assert((isa<ConstantVector>(C) || // FIXME: Remove ConstantVector.
            isa<ConstantDataVector>(C) || isa<ConstantInt>(C)) &&
           "Constant folding cannot fail for plain fp->int bitcast!");
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (isa_and_nonnull<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src) // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantFoldCastOperand(Instruction::ZExt, Src, Elt->getType(),
                                      DL);
        assert(Src && "Constant folding cannot fail on plain integers");

        // Shift it to the right place, depending on endianness.
        Src = ConstantFoldBinaryOpOperands(
            Instruction::Shl, Src, ConstantInt::get(Src->getType(), ShiftAmt),
            DL);
        assert(Src && "Constant folding cannot fail on plain integers");

        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantFoldBinaryOpOperands(Instruction::Or, Elt, Src, DL);
        assert(Elt && "Constant folding cannot fail on plain integers");
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      APInt Elt = Src->getValue().lshr(ShiftAmt);
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate and remember this piece.
      Result.push_back(ConstantInt::get(DstEltTy, Elt.trunc(DstBitSize)));
    }
  }

  return ConstantVector::get(Result);
}
} // end anonymous namespace
/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL,
                                      DSOLocalEquivalent **DSOEquiv) {
  if (DSOEquiv)
    *DSOEquiv = nullptr;

  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    if (DSOEquiv)
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
                                      DSOEquiv);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL,
                                  DSOEquiv))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}
Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();
    if (SrcTy == DestTy)
      return C;

    TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
    TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
    if (!TypeSize::isKnownGE(SrcSize, DestSize))
      return nullptr;

    // Catch the obvious splat cases (since all-zeros can coerce non-integral
    // pointers legally).
    if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy, DL))
      return Res;

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    // But be careful not to coerce non-integral pointers illegally.
    if (SrcSize == DestSize &&
        DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
            DL.isNonIntegralPointerType(DestTy->getScalarType())) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantFoldCastOperand(Cast, C, DestTy, DL);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill down
    // and find a bitcastable constant.
    if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
      C = ElemC;
    } else {
      // For non-byte-sized vector elements, the first element is not
      // necessarily located at the vector base address.
      if (auto *VT = dyn_cast<VectorType>(SrcTy))
        if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
          return nullptr;

      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}
namespace {

/// Recursive helper to read bits out of a global. C is the constant being
/// copied out of. ByteOffset is an offset into C. CurPtr is the pointer to
/// copy results into, and BytesLeft is the number of bytes left in the CurPtr
/// buffer. DL is the DataLayout.
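/// For example, with a little-endian DL, reading 2 bytes at ByteOffset 1 out
/// of the constant i32 0x12345678 copies the bytes 0x56 and 0x34 into CurPtr.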
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if ((CI->getBitWidth() & 7) != 0)
      return false;
    const APInt &Val = CI->getValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = Val.extractBits(8, n * 8).getZExtValue();
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail padding,
      // read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so, we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element, we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    uint64_t NumElts, EltSize;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
      EltSize = DL.getTypeAllocSize(EltTy);
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
      // TODO: For non-byte-sized vectors, the current implementation assumes
      // there is padding to the next byte boundary between elements.
      if (!DL.typeSizeEqualsStoreSize(EltTy))
        return false;

      EltSize = DL.getTypeStoreSize(EltTy);
    }
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}
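
// Produces the bit pattern of a load of LoadTy from C at byte offset Offset.
// For example (little endian), loading i16 at offset 1 from an i32
// initializer of 0x12345678 folds to i16 0x3456; non-integer loads are
// retried as an integer load of the same width and the result is cast back.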
Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
                                       int64_t Offset, const DataLayout &DL) {
  // Bail out early. We do not expect to load from a scalable global variable.
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    // If this is a non-integer load, we can try folding it as an int load and
    // then bitcast the result. This can be useful for union cases. Note
    // that address spaces don't matter here since we're not going to result in
    // an actual new load.
    if (!LoadTy->isFloatingPointTy() && !LoadTy->isPointerTy() &&
        !LoadTy->isVectorTy())
      return nullptr;

    Type *MapTy = Type::getIntNTy(C->getContext(),
                                  DL.getTypeSizeInBits(LoadTy).getFixedValue());
    if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_AMXTy())
        // Materializing a zero can be done trivially without a bitcast.
        return Constant::getNullValue(LoadTy);
      Type *CastTy =
          LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, we need to first convert to a vector of
        // integers, then do the vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_AMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getIntToPtr(Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return PoisonValue::get(IntType);

  // TODO: We should be able to support scalable types.
  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
  if (InitializerSize.isScalable())
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset >= (int64_t)InitializerSize.getFixedValue())
    return PoisonValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

} // anonymous namespace
// If GV is a constant with an initializer, read its representation starting
// at Offset and return it as a constant array of unsigned char. Otherwise
// return null.
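// For example, a constant global i32 initialized to 0x00010203 on a
// little-endian target reads back as the byte array [ 03, 02, 01, 00 ].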
Constant *llvm::ReadByteArrayFromGlobal(const GlobalVariable *GV,
                                        uint64_t Offset) {
  if (!GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;

  const DataLayout &DL = GV->getDataLayout();
  Constant *Init = const_cast<Constant *>(GV->getInitializer());
  TypeSize InitSize = DL.getTypeAllocSize(Init->getType());
  if (InitSize < Offset)
    return nullptr;

  uint64_t NBytes = InitSize - Offset;
  if (NBytes > UINT16_MAX)
    // Bail for large initializers in excess of 64K to avoid allocating
    // too much memory.
    // Offset is assumed to be less than or equal to InitSize (this
    // is enforced in ReadDataFromGlobal).
    return nullptr;

  SmallVector<unsigned char, 256> RawBytes(static_cast<size_t>(NBytes));
  unsigned char *CurPtr = RawBytes.data();

  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
    return nullptr;

  return ConstantDataArray::get(GV->getContext(), RawBytes);
}
/// If this Offset points exactly to the start of an aggregate element, return
/// that element, otherwise return nullptr.
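/// For example, given the initializer { i32 1, [2 x i32] [i32 2, i32 3] } and
/// (on a typical layout) Offset 8, this returns i32 3, while Offset 6 points
/// into the middle of an element and yields nullptr.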
Constant *getConstantAtOffset(Constant *Base, APInt Offset,
                              const DataLayout &DL) {
  if (Offset.isZero())
    return Base;

  if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
    return nullptr;

  Type *ElemTy = Base->getType();
  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
  if (!Offset.isZero() || !Indices[0].isZero())
    return nullptr;

  Constant *C = Base;
  for (const APInt &Index : drop_begin(Indices)) {
    if (Index.isNegative() || Index.getActiveBits() >= 32)
      return nullptr;

    C = C->getAggregateElement(Index.getZExtValue());
    if (!C)
      return nullptr;
  }

  return C;
}
Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                          const APInt &Offset,
                                          const DataLayout &DL) {
  if (Constant *AtOffset = getConstantAtOffset(C, Offset, DL))
    if (Constant *Result = ConstantFoldLoadThroughBitcast(AtOffset, Ty, DL))
      return Result;

  // Explicitly check for out-of-bounds access, so we return poison even if the
  // constant is a uniform value.
  TypeSize Size = DL.getTypeAllocSize(C->getType());
  if (!Size.isScalable() && Offset.sge(Size.getFixedValue()))
    return PoisonValue::get(Ty);

  // Try an offset-independent fold of a uniform value.
  if (Constant *Result = ConstantFoldLoadFromUniformValue(C, Ty, DL))
    return Result;

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  if (Offset.getSignificantBits() <= 64)
    if (Constant *Result =
            FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
      return Result;

  return nullptr;
}

Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                          const DataLayout &DL) {
  return ConstantFoldLoadFromConst(C, Ty, APInt(64, 0), DL);
}

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             APInt Offset,
                                             const DataLayout &DL) {
  // We can only fold loads from constant globals with a definitive initializer.
  // Check this upfront, to skip expensive offset calculations.
  auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(C));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;

  C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));

  if (C == GV)
    if (Constant *Result = ConstantFoldLoadFromConst(GV->getInitializer(), Ty,
                                                     Offset, DL))
      return Result;

  // If this load comes from anywhere in a uniform constant global, the value
  // is always the same, regardless of the loaded offset.
  return ConstantFoldLoadFromUniformValue(GV->getInitializer(), Ty, DL);
}

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  APInt Offset(DL.getIndexTypeSizeInBits(C->getType()), 0);
  return ConstantFoldLoadFromConstPtr(C, Ty, std::move(Offset), DL);
}
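
// A "uniform" constant repeats the same byte at every offset, so a load of any
// type at any in-bounds offset folds to the same value; e.g. a load of i16
// from a zeroinitializer global folds to i16 0, and a load of i32 from an
// all-ones integer initializer folds to i32 -1.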
Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty,
                                                 const DataLayout &DL) {
  if (isa<PoisonValue>(C))
    return PoisonValue::get(Ty);
  if (isa<UndefValue>(C))
    return UndefValue::get(Ty);
  // If padding is needed when storing C to memory, then it isn't considered as
  // uniform.
  if (!DL.typeSizeEqualsStoreSize(C->getType()))
    return nullptr;
  if (C->isNullValue() && !Ty->isX86_AMXTy())
    return Constant::getNullValue(Ty);
  if (C->isAllOnesValue() &&
      (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
    return Constant::getAllOnesValue(Ty);
  return nullptr;
}
namespace {

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together. If target data info is available, it is provided as DL,
/// otherwise DL is null.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnes()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnes()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0 &= Known1;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}
/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, GEPNoWrapFlags NW,
                         std::optional<ConstantRange> InRange,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType =
          Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
      Constant *NewIdx = ConstantFoldCastOperand(
          CastInst::getCastOpcode(Ops[i], true, NewType, true), Ops[i], NewType,
          DL);
      if (!NewIdx)
        return nullptr;
      NewIdxs.push_back(NewIdx);
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C =
      ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], NewIdxs, NW, InRange);
  return ConstantFoldConstant(C, DL, TLI);
}
/// If we can symbolically evaluate the GEP constant expression, do so.
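/// For example (with 64-bit indices), a GEP of a GEP such as
///   getelementptr (i32, ptr getelementptr ([12 x i32], ptr @g, i64 0, i64 2),
///                  i64 1)
/// accumulates to a single 12-byte offset and canonicalizes to the ptradd form
///   getelementptr (i8, ptr @g, i64 12).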
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy, GEP->getNoWrapFlags(),
                                   GEP->getInRange(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i]) || !Ops[i]->getType()->isIntegerTy())
      return nullptr;

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset = APInt(
      BitWidth,
      DL.getIndexedOffsetInType(
          SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)),
      /*isSigned=*/true, /*implicitTrunc=*/true);

  std::optional<ConstantRange> InRange = GEP->getInRange();
  if (InRange)
    InRange = InRange->sextOrTrunc(BitWidth);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  GEPNoWrapFlags NW = GEP->getNoWrapFlags();
  bool Overflow = false;
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    NW &= GEP->getNoWrapFlags();

    SmallVector<Value *, 4> NestedOps(llvm::drop_begin(GEP->operands()));

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    // TODO: Try to intersect two inrange attributes?
    if (!InRange) {
      InRange = GEP->getInRange();
      if (InRange)
        // Adjust inrange by offset until now.
        InRange = InRange->sextOrTrunc(BitWidth).subtract(Offset);
    }

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset = Offset.sadd_ov(
        APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps),
              /*isSigned=*/true, /*implicitTrunc=*/true),
        Overflow);
  }

  // Preserving nusw (without inbounds) also requires that the offset
  // additions did not overflow.
  if (NW.hasNoUnsignedSignedWrap() && !NW.isInBounds() && Overflow)
    NW = NW.withoutNoUnsignedSignedWrap();

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Try to infer inbounds for GEPs of globals.
  if (!NW.isInBounds() && Offset.isNonNegative()) {
    bool CanBeNull, CanBeFreed;
    uint64_t DerefBytes =
        Ptr->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
    if (DerefBytes != 0 && !CanBeNull && Offset.sle(DerefBytes))
      NW |= GEPNoWrapFlags::inBounds();
  }

  // nusw + nneg -> nuw
  if (NW.hasNoUnsignedSignedWrap() && Offset.isNonNegative())
    NW |= GEPNoWrapFlags::noUnsignedWrap();

  // Otherwise canonicalize this to a single ptradd.
  LLVMContext &Ctx = Ptr->getContext();
  return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ctx), Ptr,
                                        ConstantInt::get(Ctx, Offset), NW,
                                        InRange);
}
/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned; if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI,
                                       bool AllowNonDeterministic) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode)) {
    switch (Opcode) {
    default:
      break;
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
      // Handle floating point instructions separately to account for denormals
      // TODO: If a constant expression is being folded rather than an
      // instruction, denormals will not be flushed/treated as zero
      if (const auto *I = dyn_cast<Instruction>(InstOrCE)) {
        return ConstantFoldFPInstOperands(Opcode, Ops[0], Ops[1], DL, I,
                                          AllowNonDeterministic);
      }
    }

    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
  }

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    Type *SrcElemTy = GEP->getSourceElementType();
    if (!ConstantExpr::isSupportedGetElementPtr(SrcElemTy))
      return nullptr;

    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], Ops.slice(1),
                                          GEP->getNoWrapFlags(),
                                          GEP->getInRange());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {
    auto *C = cast<CmpInst>(InstOrCE);
    return ConstantFoldCompareInstOperands(C->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI, C);
  }
  case Instruction::Freeze:
    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI,
                                AllowNonDeterministic);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantFoldSelectInstruction(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantFoldExtractValueInstruction(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::InsertValue:
    return ConstantFoldInsertValueInstruction(
        Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  case Instruction::Load: {
    const auto *LI = dyn_cast<LoadInst>(InstOrCE);
    if (LI->isVolatile())
      return nullptr;
    return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
  }
  }
}

} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//
namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Res = ConstantFoldInstOperandsImpl(
            CE, CE->getOpcode(), Ops, DL, TLI, /*AllowNonDeterministic=*/true))
      return Res;
    return const_cast<Constant *>(C);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace
Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies if
      // all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants, if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}
Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI,
                                         bool AllowNonDeterministic) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI,
                                      AllowNonDeterministic);
}
Constant *llvm::ConstantFoldCompareInstOperands(
    unsigned IntPredicate, Constant *Ops0, Constant *Ops1, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const Instruction *I) {
  CmpInst::Predicate Predicate = (CmpInst::Predicate)IntPredicate;
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        if (Constant *C = ConstantFoldIntegerCast(CE0->getOperand(0), IntPtrTy,
                                                  /*IsSigned*/ false, DL)) {
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantFoldIntegerCast(CE0->getOperand(0), IntPtrTy,
                                                 /*IsSigned*/ false, DL);
          Constant *C1 = ConstantFoldIntegerCast(CE1->getOperand(0), IntPtrTy,
                                                 /*IsSigned*/ false, DL);
          if (C0 && C1)
            return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptrty in size, otherwise
        // there is a truncation or extension that we aren't modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // Convert pointer comparison (base+offset1) pred (base+offset2) into
    // offset1 pred offset2, for the case where the offset is inbounds. This
    // only works for equality and unsigned comparison, as inbounds permits
    // crossing the sign boundary. However, the offset comparison itself is
    // signed.
    if (Ops0->getType()->isPointerTy() && !ICmpInst::isSigned(Predicate)) {
      unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
      APInt Offset0(IndexWidth, 0);
      bool IsEqPred = ICmpInst::isEquality(Predicate);
      Value *Stripped0 = Ops0->stripAndAccumulateConstantOffsets(
          DL, Offset0, /*AllowNonInbounds=*/IsEqPred,
          /*AllowInvariantGroup=*/false, /*ExternalAnalysis=*/nullptr,
          /*LookThroughIntToPtr=*/IsEqPred);
      APInt Offset1(IndexWidth, 0);
      Value *Stripped1 = Ops1->stripAndAccumulateConstantOffsets(
          DL, Offset1, /*AllowNonInbounds=*/IsEqPred,
          /*AllowInvariantGroup=*/false, /*ExternalAnalysis=*/nullptr,
          /*LookThroughIntToPtr=*/IsEqPred);
      if (Stripped0 == Stripped1)
        return ConstantInt::getBool(
            Ops0->getContext(),
            ICmpInst::compare(Offset0, Offset1,
                              ICmpInst::getSignedPredicate(Predicate)));
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate(Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  if (CmpInst::isFPPredicate(Predicate)) {
    // Flush any denormal constant float input according to denormal handling
    // mode.
    Ops0 = FlushFPConstant(Ops0, I, /*IsOutput=*/false);
    if (!Ops0)
      return nullptr;
    Ops1 = FlushFPConstant(Ops1, I, /*IsOutput=*/false);
    if (!Ops1)
      return nullptr;
  }

  return ConstantFoldCompareInstruction(Predicate, Ops0, Ops1);
}
Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantFoldUnaryInstruction(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  if (ConstantExpr::isDesirableBinOp(Opcode))
    return ConstantExpr::get(Opcode, LHS, RHS);
  return ConstantFoldBinaryInstruction(Opcode, LHS, RHS);
}
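
// Map a denormal value to the constant required by the given denormal mode.
// For instance, under PreserveSign the float denormal -0x1p-149 flushes to
// -0.0, under PositiveZero it flushes to +0.0, IEEE keeps the value as-is,
// and Dynamic gives up (nullptr) because the runtime mode is unknown.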
static ConstantFP *flushDenormalConstant(Type *Ty, const APFloat &APF,
                                         DenormalMode::DenormalModeKind Mode) {
  switch (Mode) {
  case DenormalMode::Dynamic:
    return nullptr;
  case DenormalMode::IEEE:
    return ConstantFP::get(Ty->getContext(), APF);
  case DenormalMode::PreserveSign:
    return ConstantFP::get(
        Ty->getContext(),
        APFloat::getZero(APF.getSemantics(), APF.isNegative()));
  case DenormalMode::PositiveZero:
    return ConstantFP::get(Ty->getContext(),
                           APFloat::getZero(APF.getSemantics(), false));
  default:
    break;
  }

  llvm_unreachable("unknown denormal mode");
}
/// Return the denormal mode that can be assumed when executing a floating point
/// operation at \p CtxI.
static DenormalMode getInstrDenormalMode(const Instruction *CtxI, Type *Ty) {
  if (!CtxI || !CtxI->getParent() || !CtxI->getFunction())
    return DenormalMode::getDynamic();
  return CtxI->getFunction()->getDenormalMode(Ty->getFltSemantics());
}

static ConstantFP *flushDenormalConstantFP(ConstantFP *CFP,
                                           const Instruction *Inst,
                                           bool IsOutput) {
  const APFloat &APF = CFP->getValueAPF();
  if (!APF.isDenormal())
    return CFP;

  DenormalMode Mode = getInstrDenormalMode(Inst, CFP->getType());
  return flushDenormalConstant(CFP->getType(), APF,
                               IsOutput ? Mode.Output : Mode.Input);
}
Constant *llvm::FlushFPConstant(Constant *Operand, const Instruction *Inst,
                                bool IsOutput) {
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(Operand))
    return flushDenormalConstantFP(CFP, Inst, IsOutput);

  if (isa<ConstantAggregateZero, UndefValue, ConstantExpr>(Operand))
    return Operand;

  Type *Ty = Operand->getType();
  VectorType *VecTy = dyn_cast<VectorType>(Ty);
  if (VecTy) {
    if (auto *Splat = dyn_cast_or_null<ConstantFP>(Operand->getSplatValue())) {
      ConstantFP *Folded = flushDenormalConstantFP(Splat, Inst, IsOutput);
      if (!Folded)
        return nullptr;
      return ConstantVector::getSplat(VecTy->getElementCount(), Folded);
    }

    Ty = VecTy->getElementType();
  }

  if (const auto *CV = dyn_cast<ConstantVector>(Operand)) {
    SmallVector<Constant *, 16> NewElts;
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Element = CV->getAggregateElement(i);
      if (isa<UndefValue>(Element)) {
        NewElts.push_back(Element);
        continue;
      }

      ConstantFP *CFP = dyn_cast<ConstantFP>(Element);
      if (!CFP)
        return nullptr;

      ConstantFP *Folded = flushDenormalConstantFP(CFP, Inst, IsOutput);
      if (!Folded)
        return nullptr;
      NewElts.push_back(Folded);
    }

    return ConstantVector::get(NewElts);
  }

  if (const auto *CDV = dyn_cast<ConstantDataVector>(Operand)) {
    SmallVector<Constant *, 16> NewElts;
    for (unsigned I = 0, E = CDV->getNumElements(); I < E; ++I) {
      const APFloat &Elt = CDV->getElementAsAPFloat(I);
      if (!Elt.isDenormal()) {
        NewElts.push_back(ConstantFP::get(Ty, Elt));
      } else {
        DenormalMode Mode = getInstrDenormalMode(Inst, Ty);
        ConstantFP *Folded =
            flushDenormalConstant(Ty, Elt, IsOutput ? Mode.Output : Mode.Input);
        if (!Folded)
          return nullptr;
        NewElts.push_back(Folded);
      }
    }

    return ConstantVector::get(NewElts);
  }

  return nullptr;
}
Constant *llvm::ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
                                           Constant *RHS, const DataLayout &DL,
                                           const Instruction *I,
                                           bool AllowNonDeterministic) {
  if (Instruction::isBinaryOp(Opcode)) {
    // Flush denormal inputs if needed.
    Constant *Op0 = FlushFPConstant(LHS, I, /* IsOutput */ false);
    if (!Op0)
      return nullptr;
    Constant *Op1 = FlushFPConstant(RHS, I, /* IsOutput */ false);
    if (!Op1)
      return nullptr;

    // If nsz or an algebraic FMF flag is set, the result of the FP operation
    // may change due to future optimization. Don't constant fold them if
    // non-deterministic results are not allowed.
    if (!AllowNonDeterministic)
      if (auto *FP = dyn_cast_or_null<FPMathOperator>(I))
        if (FP->hasNoSignedZeros() || FP->hasAllowReassoc() ||
            FP->hasAllowContract() || FP->hasAllowReciprocal())
          return nullptr;

    // Calculate constant result.
    Constant *C = ConstantFoldBinaryOpOperands(Opcode, Op0, Op1, DL);
    if (!C)
      return nullptr;

    // Flush denormal output if needed.
    C = FlushFPConstant(C, I, /* IsOutput */ true);
    if (!C)
      return nullptr;

    // The precise NaN value is non-deterministic.
    if (!AllowNonDeterministic && C->isNaN())
      return nullptr;

    return C;
  }

  // If the instruction lacks a parent/function and the denormal mode cannot be
  // determined, use the default (IEEE).
  return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
}
Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      Constant *FoldedValue = nullptr;
      // If the input is an inttoptr, eliminate the pair. This requires knowing
      // the width of a pointer, so it can't be done in ConstantExpr::getCast.
      if (CE->getOpcode() == Instruction::IntToPtr) {
        // zext/trunc the inttoptr to pointer size.
        FoldedValue = ConstantFoldIntegerCast(CE->getOperand(0),
                                              DL.getIntPtrType(CE->getType()),
                                              /*IsSigned=*/false, DL);
      } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
        // If we have a GEP, we can perform the following folds:
        // (ptrtoint (gep null, x)) -> x
        // (ptrtoint (gep (gep null, x), y) -> x + y, etc.
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt BaseOffset(BitWidth, 0);
        auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
            DL, BaseOffset, /*AllowNonInbounds=*/true));
        if (Base->isNullValue()) {
          FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
        } else {
          // ptrtoint (gep i8, Ptr, (sub 0, V)) -> sub (ptrtoint Ptr), V
          if (GEP->getNumIndices() == 1 &&
              GEP->getSourceElementType()->isIntegerTy(8)) {
            auto *Ptr = cast<Constant>(GEP->getPointerOperand());
            auto *Sub = dyn_cast<ConstantExpr>(GEP->getOperand(1));
            Type *IntIdxTy = DL.getIndexType(Ptr->getType());
            if (Sub && Sub->getType() == IntIdxTy &&
                Sub->getOpcode() == Instruction::Sub &&
                Sub->getOperand(0)->isNullValue())
              FoldedValue = ConstantExpr::getSub(
                  ConstantExpr::getPtrToInt(Ptr, IntIdxTy), Sub->getOperand(1));
          }
        }
      }
      if (FoldedValue) {
        // Do a zext or trunc to get to the ptrtoint dest size.
        return ConstantFoldIntegerCast(FoldedValue, DestTy, /*IsSigned=*/false,
                                       DL);
      }
    }
    break;
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    break;
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }

  if (ConstantExpr::isDesirableCastOp(Opcode))
    return ConstantExpr::getCast(Opcode, C, DestTy);
  return ConstantFoldCastInstruction(Opcode, C, DestTy);
}
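
// Convenience wrapper that picks trunc/sext/zext based on the relative widths.
// For example, casting i8 -1 to i32 yields i32 -1 when IsSigned and i32 255
// when unsigned, while casting i32 to i8 truncates regardless of IsSigned.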
Constant *llvm::ConstantFoldIntegerCast(Constant *C, Type *DestTy,
                                        bool IsSigned, const DataLayout &DL) {
  Type *SrcTy = C->getType();
  if (SrcTy == DestTy)
    return C;
  if (SrcTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
    return ConstantFoldCastOperand(Instruction::Trunc, C, DestTy, DL);
  if (IsSigned)
    return ConstantFoldCastOperand(Instruction::SExt, C, DestTy, DL);
  return ConstantFoldCastOperand(Instruction::ZExt, C, DestTy, DL);
}
//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//
1570 bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
1571 if (Call->isNoBuiltin())
1572 return false;
1573 if (Call->getFunctionType() != F->getFunctionType())
1574 return false;
1575 switch (F->getIntrinsicID()) {
1576 // Operations that do not operate on floating-point numbers and do not
1577 // depend on the FP environment can be folded even in strictfp functions.
1578 case Intrinsic::bswap:
1579 case Intrinsic::ctpop:
1580 case Intrinsic::ctlz:
1581 case Intrinsic::cttz:
1582 case Intrinsic::fshl:
1583 case Intrinsic::fshr:
1584 case Intrinsic::launder_invariant_group:
1585 case Intrinsic::strip_invariant_group:
1586 case Intrinsic::masked_load:
1587 case Intrinsic::get_active_lane_mask:
1588 case Intrinsic::abs:
1589 case Intrinsic::smax:
1590 case Intrinsic::smin:
1591 case Intrinsic::umax:
1592 case Intrinsic::umin:
1593 case Intrinsic::scmp:
1594 case Intrinsic::ucmp:
1595 case Intrinsic::sadd_with_overflow:
1596 case Intrinsic::uadd_with_overflow:
1597 case Intrinsic::ssub_with_overflow:
1598 case Intrinsic::usub_with_overflow:
1599 case Intrinsic::smul_with_overflow:
1600 case Intrinsic::umul_with_overflow:
1601 case Intrinsic::sadd_sat:
1602 case Intrinsic::uadd_sat:
1603 case Intrinsic::ssub_sat:
1604 case Intrinsic::usub_sat:
1605 case Intrinsic::smul_fix:
1606 case Intrinsic::smul_fix_sat:
1607 case Intrinsic::bitreverse:
1608 case Intrinsic::is_constant:
1609 case Intrinsic::vector_reduce_add:
1610 case Intrinsic::vector_reduce_mul:
1611 case Intrinsic::vector_reduce_and:
1612 case Intrinsic::vector_reduce_or:
1613 case Intrinsic::vector_reduce_xor:
1614 case Intrinsic::vector_reduce_smin:
1615 case Intrinsic::vector_reduce_smax:
1616 case Intrinsic::vector_reduce_umin:
1617 case Intrinsic::vector_reduce_umax:
1618 // Target intrinsics
1619 case Intrinsic::amdgcn_perm:
1620 case Intrinsic::amdgcn_wave_reduce_umin:
1621 case Intrinsic::amdgcn_wave_reduce_umax:
1622 case Intrinsic::amdgcn_s_wqm:
1623 case Intrinsic::amdgcn_s_quadmask:
1624 case Intrinsic::amdgcn_s_bitreplicate:
1625 case Intrinsic::arm_mve_vctp8:
1626 case Intrinsic::arm_mve_vctp16:
1627 case Intrinsic::arm_mve_vctp32:
1628 case Intrinsic::arm_mve_vctp64:
1629 case Intrinsic::aarch64_sve_convert_from_svbool:
1630 // WebAssembly float semantics are always known
1631 case Intrinsic::wasm_trunc_signed:
1632 case Intrinsic::wasm_trunc_unsigned:
1633 return true;
1635 // Floating-point operations cannot be folded in strictfp functions in the
1636 // general case. They can be folded if the FP environment is known to the compiler.
1637 case Intrinsic::minnum:
1638 case Intrinsic::maxnum:
1639 case Intrinsic::minimum:
1640 case Intrinsic::maximum:
1641 case Intrinsic::log:
1642 case Intrinsic::log2:
1643 case Intrinsic::log10:
1644 case Intrinsic::exp:
1645 case Intrinsic::exp2:
1646 case Intrinsic::exp10:
1647 case Intrinsic::sqrt:
1648 case Intrinsic::sin:
1649 case Intrinsic::cos:
1650 case Intrinsic::sincos:
1651 case Intrinsic::pow:
1652 case Intrinsic::powi:
1653 case Intrinsic::ldexp:
1654 case Intrinsic::fma:
1655 case Intrinsic::fmuladd:
1656 case Intrinsic::frexp:
1657 case Intrinsic::fptoui_sat:
1658 case Intrinsic::fptosi_sat:
1659 case Intrinsic::convert_from_fp16:
1660 case Intrinsic::convert_to_fp16:
1661 case Intrinsic::amdgcn_cos:
1662 case Intrinsic::amdgcn_cubeid:
1663 case Intrinsic::amdgcn_cubema:
1664 case Intrinsic::amdgcn_cubesc:
1665 case Intrinsic::amdgcn_cubetc:
1666 case Intrinsic::amdgcn_fmul_legacy:
1667 case Intrinsic::amdgcn_fma_legacy:
1668 case Intrinsic::amdgcn_fract:
1669 case Intrinsic::amdgcn_sin:
1670 // The intrinsics below depend on the rounding mode in MXCSR.
1671 case Intrinsic::x86_sse_cvtss2si:
1672 case Intrinsic::x86_sse_cvtss2si64:
1673 case Intrinsic::x86_sse_cvttss2si:
1674 case Intrinsic::x86_sse_cvttss2si64:
1675 case Intrinsic::x86_sse2_cvtsd2si:
1676 case Intrinsic::x86_sse2_cvtsd2si64:
1677 case Intrinsic::x86_sse2_cvttsd2si:
1678 case Intrinsic::x86_sse2_cvttsd2si64:
1679 case Intrinsic::x86_avx512_vcvtss2si32:
1680 case Intrinsic::x86_avx512_vcvtss2si64:
1681 case Intrinsic::x86_avx512_cvttss2si:
1682 case Intrinsic::x86_avx512_cvttss2si64:
1683 case Intrinsic::x86_avx512_vcvtsd2si32:
1684 case Intrinsic::x86_avx512_vcvtsd2si64:
1685 case Intrinsic::x86_avx512_cvttsd2si:
1686 case Intrinsic::x86_avx512_cvttsd2si64:
1687 case Intrinsic::x86_avx512_vcvtss2usi32:
1688 case Intrinsic::x86_avx512_vcvtss2usi64:
1689 case Intrinsic::x86_avx512_cvttss2usi:
1690 case Intrinsic::x86_avx512_cvttss2usi64:
1691 case Intrinsic::x86_avx512_vcvtsd2usi32:
1692 case Intrinsic::x86_avx512_vcvtsd2usi64:
1693 case Intrinsic::x86_avx512_cvttsd2usi:
1694 case Intrinsic::x86_avx512_cvttsd2usi64:
1695 return !Call->isStrictFP();
1697 // NVVM FMax intrinsics
1698 case Intrinsic::nvvm_fmax_d:
1699 case Intrinsic::nvvm_fmax_f:
1700 case Intrinsic::nvvm_fmax_ftz_f:
1701 case Intrinsic::nvvm_fmax_ftz_nan_f:
1702 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
1703 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
1704 case Intrinsic::nvvm_fmax_nan_f:
1705 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
1706 case Intrinsic::nvvm_fmax_xorsign_abs_f:
1708 // NVVM FMin intrinsics
1709 case Intrinsic::nvvm_fmin_d:
1710 case Intrinsic::nvvm_fmin_f:
1711 case Intrinsic::nvvm_fmin_ftz_f:
1712 case Intrinsic::nvvm_fmin_ftz_nan_f:
1713 case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
1714 case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
1715 case Intrinsic::nvvm_fmin_nan_f:
1716 case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
1717 case Intrinsic::nvvm_fmin_xorsign_abs_f:
1719 // NVVM float/double to int32/uint32 conversion intrinsics
1720 case Intrinsic::nvvm_f2i_rm:
1721 case Intrinsic::nvvm_f2i_rn:
1722 case Intrinsic::nvvm_f2i_rp:
1723 case Intrinsic::nvvm_f2i_rz:
1724 case Intrinsic::nvvm_f2i_rm_ftz:
1725 case Intrinsic::nvvm_f2i_rn_ftz:
1726 case Intrinsic::nvvm_f2i_rp_ftz:
1727 case Intrinsic::nvvm_f2i_rz_ftz:
1728 case Intrinsic::nvvm_f2ui_rm:
1729 case Intrinsic::nvvm_f2ui_rn:
1730 case Intrinsic::nvvm_f2ui_rp:
1731 case Intrinsic::nvvm_f2ui_rz:
1732 case Intrinsic::nvvm_f2ui_rm_ftz:
1733 case Intrinsic::nvvm_f2ui_rn_ftz:
1734 case Intrinsic::nvvm_f2ui_rp_ftz:
1735 case Intrinsic::nvvm_f2ui_rz_ftz:
1736 case Intrinsic::nvvm_d2i_rm:
1737 case Intrinsic::nvvm_d2i_rn:
1738 case Intrinsic::nvvm_d2i_rp:
1739 case Intrinsic::nvvm_d2i_rz:
1740 case Intrinsic::nvvm_d2ui_rm:
1741 case Intrinsic::nvvm_d2ui_rn:
1742 case Intrinsic::nvvm_d2ui_rp:
1743 case Intrinsic::nvvm_d2ui_rz:
1745 // NVVM float/double to int64/uint64 conversion intrinsics
1746 case Intrinsic::nvvm_f2ll_rm:
1747 case Intrinsic::nvvm_f2ll_rn:
1748 case Intrinsic::nvvm_f2ll_rp:
1749 case Intrinsic::nvvm_f2ll_rz:
1750 case Intrinsic::nvvm_f2ll_rm_ftz:
1751 case Intrinsic::nvvm_f2ll_rn_ftz:
1752 case Intrinsic::nvvm_f2ll_rp_ftz:
1753 case Intrinsic::nvvm_f2ll_rz_ftz:
1754 case Intrinsic::nvvm_f2ull_rm:
1755 case Intrinsic::nvvm_f2ull_rn:
1756 case Intrinsic::nvvm_f2ull_rp:
1757 case Intrinsic::nvvm_f2ull_rz:
1758 case Intrinsic::nvvm_f2ull_rm_ftz:
1759 case Intrinsic::nvvm_f2ull_rn_ftz:
1760 case Intrinsic::nvvm_f2ull_rp_ftz:
1761 case Intrinsic::nvvm_f2ull_rz_ftz:
1762 case Intrinsic::nvvm_d2ll_rm:
1763 case Intrinsic::nvvm_d2ll_rn:
1764 case Intrinsic::nvvm_d2ll_rp:
1765 case Intrinsic::nvvm_d2ll_rz:
1766 case Intrinsic::nvvm_d2ull_rm:
1767 case Intrinsic::nvvm_d2ull_rn:
1768 case Intrinsic::nvvm_d2ull_rp:
1769 case Intrinsic::nvvm_d2ull_rz:
1771 // Sign operations are actually bitwise operations; they do not raise
1772 // exceptions even for SNaNs.
1773 case Intrinsic::fabs:
1774 case Intrinsic::copysign:
1775 case Intrinsic::is_fpclass:
1776 // Non-constrained variants of rounding operations imply the default FP
1777 // environment, so they can be folded in any case.
1778 case Intrinsic::ceil:
1779 case Intrinsic::floor:
1780 case Intrinsic::round:
1781 case Intrinsic::roundeven:
1782 case Intrinsic::trunc:
1783 case Intrinsic::nearbyint:
1784 case Intrinsic::rint:
1785 case Intrinsic::canonicalize:
1786 // Constrained intrinsics can be folded if the FP environment is known
1787 // to the compiler.
1788 case Intrinsic::experimental_constrained_fma:
1789 case Intrinsic::experimental_constrained_fmuladd:
1790 case Intrinsic::experimental_constrained_fadd:
1791 case Intrinsic::experimental_constrained_fsub:
1792 case Intrinsic::experimental_constrained_fmul:
1793 case Intrinsic::experimental_constrained_fdiv:
1794 case Intrinsic::experimental_constrained_frem:
1795 case Intrinsic::experimental_constrained_ceil:
1796 case Intrinsic::experimental_constrained_floor:
1797 case Intrinsic::experimental_constrained_round:
1798 case Intrinsic::experimental_constrained_roundeven:
1799 case Intrinsic::experimental_constrained_trunc:
1800 case Intrinsic::experimental_constrained_nearbyint:
1801 case Intrinsic::experimental_constrained_rint:
1802 case Intrinsic::experimental_constrained_fcmp:
1803 case Intrinsic::experimental_constrained_fcmps:
1804 return true;
1805 default:
1806 return false;
1807 case Intrinsic::not_intrinsic: break;
1810 if (!F->hasName() || Call->isStrictFP())
1811 return false;
1813 // In these cases, a length check is required. We don't want to return true
1814 // for a name like "cos\0blah", which strcmp would report as equal to
1815 // "cos" even though it has length 8.
1816 StringRef Name = F->getName();
1817 switch (Name[0]) {
1818 default:
1819 return false;
1820 case 'a':
1821 return Name == "acos" || Name == "acosf" ||
1822 Name == "asin" || Name == "asinf" ||
1823 Name == "atan" || Name == "atanf" ||
1824 Name == "atan2" || Name == "atan2f";
1825 case 'c':
1826 return Name == "ceil" || Name == "ceilf" ||
1827 Name == "cos" || Name == "cosf" ||
1828 Name == "cosh" || Name == "coshf";
1829 case 'e':
1830 return Name == "exp" || Name == "expf" || Name == "exp2" ||
1831 Name == "exp2f" || Name == "erf" || Name == "erff";
1832 case 'f':
1833 return Name == "fabs" || Name == "fabsf" ||
1834 Name == "floor" || Name == "floorf" ||
1835 Name == "fmod" || Name == "fmodf";
1836 case 'i':
1837 return Name == "ilogb" || Name == "ilogbf";
1838 case 'l':
1839 return Name == "log" || Name == "logf" || Name == "logl" ||
1840 Name == "log2" || Name == "log2f" || Name == "log10" ||
1841 Name == "log10f" || Name == "logb" || Name == "logbf" ||
1842 Name == "log1p" || Name == "log1pf";
1843 case 'n':
1844 return Name == "nearbyint" || Name == "nearbyintf";
1845 case 'p':
1846 return Name == "pow" || Name == "powf";
1847 case 'r':
1848 return Name == "remainder" || Name == "remainderf" ||
1849 Name == "rint" || Name == "rintf" ||
1850 Name == "round" || Name == "roundf";
1851 case 's':
1852 return Name == "sin" || Name == "sinf" ||
1853 Name == "sinh" || Name == "sinhf" ||
1854 Name == "sqrt" || Name == "sqrtf";
1855 case 't':
1856 return Name == "tan" || Name == "tanf" ||
1857 Name == "tanh" || Name == "tanhf" ||
1858 Name == "trunc" || Name == "truncf";
1859 case '_':
1860 // Check for various function names that get used for the math functions
1861 // when the header files are preprocessed with the macro
1862 // __FINITE_MATH_ONLY__ enabled.
1863 // The '12' here is the length of the shortest name that can match.
1864 // We need to check the size before looking at Name[1] and Name[2]
1865 // so we may as well check a limit that will eliminate mismatches.
1866 if (Name.size() < 12 || Name[1] != '_')
1867 return false;
1868 switch (Name[2]) {
1869 default:
1870 return false;
1871 case 'a':
1872 return Name == "__acos_finite" || Name == "__acosf_finite" ||
1873 Name == "__asin_finite" || Name == "__asinf_finite" ||
1874 Name == "__atan2_finite" || Name == "__atan2f_finite";
1875 case 'c':
1876 return Name == "__cosh_finite" || Name == "__coshf_finite";
1877 case 'e':
1878 return Name == "__exp_finite" || Name == "__expf_finite" ||
1879 Name == "__exp2_finite" || Name == "__exp2f_finite";
1880 case 'l':
1881 return Name == "__log_finite" || Name == "__logf_finite" ||
1882 Name == "__log10_finite" || Name == "__log10f_finite";
1883 case 'p':
1884 return Name == "__pow_finite" || Name == "__powf_finite";
1885 case 's':
1886 return Name == "__sinh_finite" || Name == "__sinhf_finite";
1891 namespace {
1893 Constant *GetConstantFoldFPValue(double V, Type *Ty) {
1894 if (Ty->isHalfTy() || Ty->isFloatTy()) {
1895 APFloat APF(V);
1896 bool unused;
1897 APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
1898 return ConstantFP::get(Ty->getContext(), APF);
1900 if (Ty->isDoubleTy())
1901 return ConstantFP::get(Ty->getContext(), APFloat(V));
1902 llvm_unreachable("Can only constant fold half/float/double");
1905 #if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
1906 Constant *GetConstantFoldFPValue128(float128 V, Type *Ty) {
1907 if (Ty->isFP128Ty())
1908 return ConstantFP::get(Ty, V);
1909 llvm_unreachable("Can only constant fold fp128");
1911 #endif
1913 /// Clear the floating-point exception state.
1914 inline void llvm_fenv_clearexcept() {
1915 #if HAVE_DECL_FE_ALL_EXCEPT
1916 feclearexcept(FE_ALL_EXCEPT);
1917 #endif
1918 errno = 0;
1921 /// Test if a floating-point exception was raised.
1922 inline bool llvm_fenv_testexcept() {
1923 int errno_val = errno;
1924 if (errno_val == ERANGE || errno_val == EDOM)
1925 return true;
1926 #if HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
1927 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
1928 return true;
1929 #endif
1930 return false;
1933 static const APFloat FTZPreserveSign(const APFloat &V) {
1934 if (V.isDenormal())
1935 return APFloat::getZero(V.getSemantics(), V.isNegative());
1936 return V;
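// Example (illustrative): flushing keeps only the sign of a subnormal value.
//
//   APFloat Tiny(APFloat::IEEEsingle(), "-0x1p-140"); // subnormal for float
//   FTZPreserveSign(Tiny);                            // yields -0.0
//   FTZPreserveSign(APFloat(1.0f));                   // normal, returned as-is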
1939 Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
1940 Type *Ty) {
1941 llvm_fenv_clearexcept();
1942 double Result = NativeFP(V.convertToDouble());
1943 if (llvm_fenv_testexcept()) {
1944 llvm_fenv_clearexcept();
1945 return nullptr;
1948 return GetConstantFoldFPValue(Result, Ty);
1951 #if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
1952 Constant *ConstantFoldFP128(float128 (*NativeFP)(float128), const APFloat &V,
1953 Type *Ty) {
1954 llvm_fenv_clearexcept();
1955 float128 Result = NativeFP(V.convertToQuad());
1956 if (llvm_fenv_testexcept()) {
1957 llvm_fenv_clearexcept();
1958 return nullptr;
1961 return GetConstantFoldFPValue128(Result, Ty);
1963 #endif
1965 Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
1966 const APFloat &V, const APFloat &W, Type *Ty) {
1967 llvm_fenv_clearexcept();
1968 double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
1969 if (llvm_fenv_testexcept()) {
1970 llvm_fenv_clearexcept();
1971 return nullptr;
1974 return GetConstantFoldFPValue(Result, Ty);
1977 Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
1978 FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
1979 if (!VT)
1980 return nullptr;
1982 // This isn't strictly necessary, but handle the special/common case of zero:
1983 // all integer reductions of a zero input produce zero.
1984 if (isa<ConstantAggregateZero>(Op))
1985 return ConstantInt::get(VT->getElementType(), 0);
1987 // This is the same as the underlying binops - poison propagates.
1988 if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
1989 return PoisonValue::get(VT->getElementType());
1991 // TODO: Handle undef.
1992 if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
1993 return nullptr;
1995 auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
1996 if (!EltC)
1997 return nullptr;
1999 APInt Acc = EltC->getValue();
2000 for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
2001 if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
2002 return nullptr;
2003 const APInt &X = EltC->getValue();
2004 switch (IID) {
2005 case Intrinsic::vector_reduce_add:
2006 Acc = Acc + X;
2007 break;
2008 case Intrinsic::vector_reduce_mul:
2009 Acc = Acc * X;
2010 break;
2011 case Intrinsic::vector_reduce_and:
2012 Acc = Acc & X;
2013 break;
2014 case Intrinsic::vector_reduce_or:
2015 Acc = Acc | X;
2016 break;
2017 case Intrinsic::vector_reduce_xor:
2018 Acc = Acc ^ X;
2019 break;
2020 case Intrinsic::vector_reduce_smin:
2021 Acc = APIntOps::smin(Acc, X);
2022 break;
2023 case Intrinsic::vector_reduce_smax:
2024 Acc = APIntOps::smax(Acc, X);
2025 break;
2026 case Intrinsic::vector_reduce_umin:
2027 Acc = APIntOps::umin(Acc, X);
2028 break;
2029 case Intrinsic::vector_reduce_umax:
2030 Acc = APIntOps::umax(Acc, X);
2031 break;
2035 return ConstantInt::get(Op->getContext(), Acc);
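// Example (illustrative): for Op = <4 x i32> <i32 1, i32 2, i32 3, i32 4>,
//   vector_reduce_add  --> i32 10
//   vector_reduce_mul  --> i32 24
//   vector_reduce_smin --> i32 1
// A single poison element makes the whole reduction poison, matching the
// underlying scalar binops.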
2038 /// Attempt to fold an SSE floating point to integer conversion of a constant
2039 /// floating point. If roundTowardZero is false, the default IEEE rounding is
2040 /// used (toward nearest, ties to even). This matches the behavior of the
2041 /// non-truncating SSE instructions in the default rounding mode. The desired
2042 /// integer type Ty is used to select how many bits are available for the
2043 /// result. Returns null if the conversion cannot be performed, otherwise
2044 /// returns the Constant value resulting from the conversion.
2045 Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
2046 Type *Ty, bool IsSigned) {
2047 // All of these conversion intrinsics form an integer of at most 64 bits.
2048 unsigned ResultWidth = Ty->getIntegerBitWidth();
2049 assert(ResultWidth <= 64 &&
2050 "Can only constant fold conversions to integers of at most 64 bits");
2052 uint64_t UIntVal;
2053 bool isExact = false;
2054 APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
2055 : APFloat::rmNearestTiesToEven;
2056 APFloat::opStatus status =
2057 Val.convertToInteger(MutableArrayRef(UIntVal), ResultWidth,
2058 IsSigned, mode, &isExact);
2059 if (status != APFloat::opOK &&
2060 (!roundTowardZero || status != APFloat::opInexact))
2061 return nullptr;
2062 return ConstantInt::get(Ty, UIntVal, IsSigned);
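// Example (illustrative): for Val = 3.7f and a 32-bit result type,
//   cvtss2si  (roundTowardZero=false) --> 4   (nearest, ties to even)
//   cvttss2si (roundTowardZero=true)  --> 3   (truncating; opInexact accepted)
// An out-of-range input such as 3e10f reports opInvalidOp and is not folded.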
2065 double getValueAsDouble(ConstantFP *Op) {
2066 Type *Ty = Op->getType();
2068 if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
2069 return Op->getValueAPF().convertToDouble();
2071 bool unused;
2072 APFloat APF = Op->getValueAPF();
2073 APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
2074 return APF.convertToDouble();
2077 static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
2078 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
2079 C = &CI->getValue();
2080 return true;
2082 if (isa<UndefValue>(Op)) {
2083 C = nullptr;
2084 return true;
2086 return false;
2089 /// Checks if the given intrinsic call, which evaluates to constant, is allowed
2090 /// to be folded.
2092 /// \param CI Constrained intrinsic call.
2093 /// \param St Exception flags raised during constant evaluation.
2094 static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI,
2095 APFloat::opStatus St) {
2096 std::optional<RoundingMode> ORM = CI->getRoundingMode();
2097 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2099 // If the operation does not change exception status flags, it is safe
2100 // to fold.
2101 if (St == APFloat::opStatus::opOK)
2102 return true;
2104 // If evaluation raised an FP exception, the result can depend on the
2105 // rounding mode. If the latter is unknown, folding is not possible.
2106 if (ORM && *ORM == RoundingMode::Dynamic)
2107 return false;
2109 // If FP exceptions are ignored, fold the call, even if such an exception
2110 // is raised.
2111 if (EB && *EB != fp::ExceptionBehavior::ebStrict)
2112 return true;
2114 // Leave the calculation for runtime so that exception flags are set
2115 // correctly in hardware.
2116 return false;
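// Example (illustrative; assumes the rounding mode is known, i.e. not
// dynamic): if evaluation reported St = opInexact, then
//   fpexcept.ignore / fpexcept.maytrap --> fold (raised flags may be lost)
//   fpexcept.strict                    --> do not fold; the runtime call must
//                                          set the hardware exception flags.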
2119 /// Returns the rounding mode that should be used for constant evaluation.
2120 static RoundingMode
2121 getEvaluationRoundingMode(const ConstrainedFPIntrinsic *CI) {
2122 std::optional<RoundingMode> ORM = CI->getRoundingMode();
2123 if (!ORM || *ORM == RoundingMode::Dynamic)
2124 // Even if the rounding mode is unknown, try evaluating the operation.
2125 // If it does not raise an inexact exception, no rounding was applied, so
2126 // the result is exact and does not depend on the rounding mode. Whether
2127 // other FP exceptions are raised also does not depend on the rounding mode.
2128 return RoundingMode::NearestTiesToEven;
2129 return *ORM;
2132 /// Try to constant fold llvm.canonicalize for the given caller and value.
2133 static Constant *constantFoldCanonicalize(const Type *Ty, const CallBase *CI,
2134 const APFloat &Src) {
2135 // Zero, whether positive or negative, is always OK to fold.
2136 if (Src.isZero()) {
2137 // Get a fresh 0, since ppc_fp128 does have non-canonical zeros.
2138 return ConstantFP::get(
2139 CI->getContext(),
2140 APFloat::getZero(Src.getSemantics(), Src.isNegative()));
2143 if (!Ty->isIEEELikeFPTy())
2144 return nullptr;
2146 // Zero is always canonical and the sign must be preserved.
2148 // Denorms and nans may have special encodings, but it should be OK to fold a
2149 // totally average number.
2150 if (Src.isNormal() || Src.isInfinity())
2151 return ConstantFP::get(CI->getContext(), Src);
2153 if (Src.isDenormal() && CI->getParent() && CI->getFunction()) {
2154 DenormalMode DenormMode =
2155 CI->getFunction()->getDenormalMode(Src.getSemantics());
2157 if (DenormMode == DenormalMode::getIEEE())
2158 return ConstantFP::get(CI->getContext(), Src);
2160 if (DenormMode.Input == DenormalMode::Dynamic)
2161 return nullptr;
2163 // If either the input or output mode is dynamic, we cannot tell whether the value is flushed, so we cannot fold.
2164 if ((DenormMode.Input == DenormalMode::Dynamic &&
2165 DenormMode.Output == DenormalMode::IEEE) ||
2166 (DenormMode.Input == DenormalMode::IEEE &&
2167 DenormMode.Output == DenormalMode::Dynamic))
2168 return nullptr;
2170 bool IsPositive =
2171 (!Src.isNegative() || DenormMode.Input == DenormalMode::PositiveZero ||
2172 (DenormMode.Output == DenormalMode::PositiveZero &&
2173 DenormMode.Input == DenormalMode::IEEE));
2175 return ConstantFP::get(CI->getContext(),
2176 APFloat::getZero(Src.getSemantics(), !IsPositive));
2179 return nullptr;
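// Example (illustrative): with "denormal-fp-math"="preserve-sign" on the
// caller, canonicalize(float -0x1p-140) folds to -0.0 because subnormal
// inputs are known to be flushed; with "ieee" it folds to the input itself,
// and with "dynamic" no folding happens.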
2182 static Constant *ConstantFoldScalarCall1(StringRef Name,
2183 Intrinsic::ID IntrinsicID,
2184 Type *Ty,
2185 ArrayRef<Constant *> Operands,
2186 const TargetLibraryInfo *TLI,
2187 const CallBase *Call) {
2188 assert(Operands.size() == 1 && "Wrong number of operands.");
2190 if (IntrinsicID == Intrinsic::is_constant) {
2191 // We know we have a "Constant" argument. But we only want to return
2192 // true for manifest constants, not those that depend on constants with
2193 // unknowable values, e.g. GlobalValue or BlockAddress.
2194 if (Operands[0]->isManifestConstant())
2195 return ConstantInt::getTrue(Ty->getContext());
2196 return nullptr;
2199 if (isa<PoisonValue>(Operands[0])) {
2200 // TODO: All of these operations should probably propagate poison.
2201 if (IntrinsicID == Intrinsic::canonicalize)
2202 return PoisonValue::get(Ty);
2205 if (isa<UndefValue>(Operands[0])) {
2206 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
2207 // ctpop() is between 0 and bitwidth, pick 0 for undef.
2208 // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
2209 if (IntrinsicID == Intrinsic::cos ||
2210 IntrinsicID == Intrinsic::ctpop ||
2211 IntrinsicID == Intrinsic::fptoui_sat ||
2212 IntrinsicID == Intrinsic::fptosi_sat ||
2213 IntrinsicID == Intrinsic::canonicalize)
2214 return Constant::getNullValue(Ty);
2215 if (IntrinsicID == Intrinsic::bswap ||
2216 IntrinsicID == Intrinsic::bitreverse ||
2217 IntrinsicID == Intrinsic::launder_invariant_group ||
2218 IntrinsicID == Intrinsic::strip_invariant_group)
2219 return Operands[0];
2222 if (isa<ConstantPointerNull>(Operands[0])) {
2223 // launder(null) == null == strip(null) iff in addrspace 0
2224 if (IntrinsicID == Intrinsic::launder_invariant_group ||
2225 IntrinsicID == Intrinsic::strip_invariant_group) {
2226 // If the instruction has not yet been put in a basic block (e.g. when
2227 // cloning a function during inlining), Call's caller may not be available,
2228 // so check Call's parent BB before querying Call->getCaller.
2229 const Function *Caller =
2230 Call->getParent() ? Call->getCaller() : nullptr;
2231 if (Caller &&
2232 !NullPointerIsDefined(
2233 Caller, Operands[0]->getType()->getPointerAddressSpace())) {
2234 return Operands[0];
2236 return nullptr;
2240 if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
2241 if (IntrinsicID == Intrinsic::convert_to_fp16) {
2242 APFloat Val(Op->getValueAPF());
2244 bool lost = false;
2245 Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
2247 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
2250 APFloat U = Op->getValueAPF();
2252 if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
2253 IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
2254 bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
2256 if (U.isNaN())
2257 return nullptr;
2259 unsigned Width = Ty->getIntegerBitWidth();
2260 APSInt Int(Width, !Signed);
2261 bool IsExact = false;
2262 APFloat::opStatus Status =
2263 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
2265 if (Status == APFloat::opOK || Status == APFloat::opInexact)
2266 return ConstantInt::get(Ty, Int);
2268 return nullptr;
2271 if (IntrinsicID == Intrinsic::fptoui_sat ||
2272 IntrinsicID == Intrinsic::fptosi_sat) {
2273 // convertToInteger() already has the desired saturation semantics.
2274 APSInt Int(Ty->getIntegerBitWidth(),
2275 IntrinsicID == Intrinsic::fptoui_sat);
2276 bool IsExact;
2277 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
2278 return ConstantInt::get(Ty, Int);
2281 if (IntrinsicID == Intrinsic::canonicalize)
2282 return constantFoldCanonicalize(Ty, Call, U);
2284 #if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
2285 if (Ty->isFP128Ty()) {
2286 if (IntrinsicID == Intrinsic::log) {
2287 float128 Result = logf128(Op->getValueAPF().convertToQuad());
2288 return GetConstantFoldFPValue128(Result, Ty);
2291 LibFunc Fp128Func = NotLibFunc;
2292 if (TLI && TLI->getLibFunc(Name, Fp128Func) && TLI->has(Fp128Func) &&
2293 Fp128Func == LibFunc_logl)
2294 return ConstantFoldFP128(logf128, Op->getValueAPF(), Ty);
2296 #endif
2298 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy() &&
2299 !Ty->isIntegerTy())
2300 return nullptr;
2302 // Use APFloat's internal implementations of these intrinsics rather than the host libm.
2304 if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
2305 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2306 return ConstantFP::get(Ty->getContext(), U);
2309 if (IntrinsicID == Intrinsic::round) {
2310 U.roundToIntegral(APFloat::rmNearestTiesToAway);
2311 return ConstantFP::get(Ty->getContext(), U);
2314 if (IntrinsicID == Intrinsic::roundeven) {
2315 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2316 return ConstantFP::get(Ty->getContext(), U);
2319 if (IntrinsicID == Intrinsic::ceil) {
2320 U.roundToIntegral(APFloat::rmTowardPositive);
2321 return ConstantFP::get(Ty->getContext(), U);
2324 if (IntrinsicID == Intrinsic::floor) {
2325 U.roundToIntegral(APFloat::rmTowardNegative);
2326 return ConstantFP::get(Ty->getContext(), U);
2329 if (IntrinsicID == Intrinsic::trunc) {
2330 U.roundToIntegral(APFloat::rmTowardZero);
2331 return ConstantFP::get(Ty->getContext(), U);
2334 if (IntrinsicID == Intrinsic::fabs) {
2335 U.clearSign();
2336 return ConstantFP::get(Ty->getContext(), U);
2339 if (IntrinsicID == Intrinsic::amdgcn_fract) {
2340 // The v_fract instruction behaves like the OpenCL spec, which defines
2341 // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
2342 // there to prevent fract(-small) from returning 1.0. It returns the
2343 // largest positive floating-point number less than 1.0."
2344 APFloat FloorU(U);
2345 FloorU.roundToIntegral(APFloat::rmTowardNegative);
2346 APFloat FractU(U - FloorU);
2347 APFloat AlmostOne(U.getSemantics(), 1);
2348 AlmostOne.next(/*nextDown*/ true);
2349 return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
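// Example (illustrative): fract(2.75) = 0.75, while fract(-1.0e-8) clamps to
// 0x1.fffffep-1f (the largest float below 1.0) instead of rounding up to 1.0.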
2352 // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
2353 // raise FP exceptions, unless the argument is a signaling NaN.
2355 std::optional<APFloat::roundingMode> RM;
2356 switch (IntrinsicID) {
2357 default:
2358 break;
2359 case Intrinsic::experimental_constrained_nearbyint:
2360 case Intrinsic::experimental_constrained_rint: {
2361 auto CI = cast<ConstrainedFPIntrinsic>(Call);
2362 RM = CI->getRoundingMode();
2363 if (!RM || *RM == RoundingMode::Dynamic)
2364 return nullptr;
2365 break;
2367 case Intrinsic::experimental_constrained_round:
2368 RM = APFloat::rmNearestTiesToAway;
2369 break;
2370 case Intrinsic::experimental_constrained_ceil:
2371 RM = APFloat::rmTowardPositive;
2372 break;
2373 case Intrinsic::experimental_constrained_floor:
2374 RM = APFloat::rmTowardNegative;
2375 break;
2376 case Intrinsic::experimental_constrained_trunc:
2377 RM = APFloat::rmTowardZero;
2378 break;
2380 if (RM) {
2381 auto CI = cast<ConstrainedFPIntrinsic>(Call);
2382 if (U.isFinite()) {
2383 APFloat::opStatus St = U.roundToIntegral(*RM);
2384 if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
2385 St == APFloat::opInexact) {
2386 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2387 if (EB && *EB == fp::ebStrict)
2388 return nullptr;
2390 } else if (U.isSignaling()) {
2391 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2392 if (EB && *EB != fp::ebIgnore)
2393 return nullptr;
2394 U = APFloat::getQNaN(U.getSemantics());
2396 return ConstantFP::get(Ty->getContext(), U);
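// Example (illustrative):
//   constrained.trunc(2.5)  --> 2.0 in any mode; trunc never raises inexact.
//   constrained.rint(2.5) with fpexcept.strict is left unfolded, since the
//     runtime call must raise the inexact exception.
//   A signaling-NaN argument folds to a quiet NaN only under fpexcept.ignore.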
2399 // NVVM float/double to signed/unsigned int32/int64 conversions:
2400 switch (IntrinsicID) {
2401 // f2i
2402 case Intrinsic::nvvm_f2i_rm:
2403 case Intrinsic::nvvm_f2i_rn:
2404 case Intrinsic::nvvm_f2i_rp:
2405 case Intrinsic::nvvm_f2i_rz:
2406 case Intrinsic::nvvm_f2i_rm_ftz:
2407 case Intrinsic::nvvm_f2i_rn_ftz:
2408 case Intrinsic::nvvm_f2i_rp_ftz:
2409 case Intrinsic::nvvm_f2i_rz_ftz:
2410 // f2ui
2411 case Intrinsic::nvvm_f2ui_rm:
2412 case Intrinsic::nvvm_f2ui_rn:
2413 case Intrinsic::nvvm_f2ui_rp:
2414 case Intrinsic::nvvm_f2ui_rz:
2415 case Intrinsic::nvvm_f2ui_rm_ftz:
2416 case Intrinsic::nvvm_f2ui_rn_ftz:
2417 case Intrinsic::nvvm_f2ui_rp_ftz:
2418 case Intrinsic::nvvm_f2ui_rz_ftz:
2419 // d2i
2420 case Intrinsic::nvvm_d2i_rm:
2421 case Intrinsic::nvvm_d2i_rn:
2422 case Intrinsic::nvvm_d2i_rp:
2423 case Intrinsic::nvvm_d2i_rz:
2424 // d2ui
2425 case Intrinsic::nvvm_d2ui_rm:
2426 case Intrinsic::nvvm_d2ui_rn:
2427 case Intrinsic::nvvm_d2ui_rp:
2428 case Intrinsic::nvvm_d2ui_rz:
2429 // f2ll
2430 case Intrinsic::nvvm_f2ll_rm:
2431 case Intrinsic::nvvm_f2ll_rn:
2432 case Intrinsic::nvvm_f2ll_rp:
2433 case Intrinsic::nvvm_f2ll_rz:
2434 case Intrinsic::nvvm_f2ll_rm_ftz:
2435 case Intrinsic::nvvm_f2ll_rn_ftz:
2436 case Intrinsic::nvvm_f2ll_rp_ftz:
2437 case Intrinsic::nvvm_f2ll_rz_ftz:
2438 // f2ull
2439 case Intrinsic::nvvm_f2ull_rm:
2440 case Intrinsic::nvvm_f2ull_rn:
2441 case Intrinsic::nvvm_f2ull_rp:
2442 case Intrinsic::nvvm_f2ull_rz:
2443 case Intrinsic::nvvm_f2ull_rm_ftz:
2444 case Intrinsic::nvvm_f2ull_rn_ftz:
2445 case Intrinsic::nvvm_f2ull_rp_ftz:
2446 case Intrinsic::nvvm_f2ull_rz_ftz:
2447 // d2ll
2448 case Intrinsic::nvvm_d2ll_rm:
2449 case Intrinsic::nvvm_d2ll_rn:
2450 case Intrinsic::nvvm_d2ll_rp:
2451 case Intrinsic::nvvm_d2ll_rz:
2452 // d2ull
2453 case Intrinsic::nvvm_d2ull_rm:
2454 case Intrinsic::nvvm_d2ull_rn:
2455 case Intrinsic::nvvm_d2ull_rp:
2456 case Intrinsic::nvvm_d2ull_rz: {
2457 // In float-to-integer conversion, NaN inputs are converted to 0.
2458 if (U.isNaN())
2459 return ConstantInt::get(Ty, 0);
2461 APFloat::roundingMode RMode =
2462 nvvm::GetFPToIntegerRoundingMode(IntrinsicID);
2463 bool IsFTZ = nvvm::FPToIntegerIntrinsicShouldFTZ(IntrinsicID);
2464 bool IsSigned = nvvm::FPToIntegerIntrinsicResultIsSigned(IntrinsicID);
2466 APSInt ResInt(Ty->getIntegerBitWidth(), !IsSigned);
2467 auto FloatToRound = IsFTZ ? FTZPreserveSign(U) : U;
2469 bool IsExact = false;
2470 APFloat::opStatus Status =
2471 FloatToRound.convertToInteger(ResInt, RMode, &IsExact);
2473 if (Status != APFloat::opInvalidOp)
2474 return ConstantInt::get(Ty, ResInt);
2475 return nullptr;
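// Example (illustrative): for the input -1.7f,
//   nvvm_f2i_rm --> -2 (toward negative)   nvvm_f2i_rz --> -1 (toward zero)
//   nvvm_f2i_rn --> -2 (nearest, ties to even)
//   nvvm_f2i_rp --> -1 (toward positive)
// NaN folds to 0, and the _ftz variants flush a subnormal input to a
// sign-preserving zero before converting.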
2479 /// We only fold functions with finite arguments. Folding NaN and inf is
2480 /// likely to be aborted with an exception anyway, and some host libms
2481 /// have known errors raising exceptions.
2482 if (!U.isFinite())
2483 return nullptr;
2485 /// APFloat versions of these functions do not exist yet, so we use the
2486 /// host's native double versions. The float versions are never called
2487 /// directly; for all of these functions, (float)(f((double)arg)) ==
2488 /// f(arg) holds. Long double is not supported yet.
2489 const APFloat &APF = Op->getValueAPF();
2491 switch (IntrinsicID) {
2492 default: break;
2493 case Intrinsic::log:
2494 return ConstantFoldFP(log, APF, Ty);
2495 case Intrinsic::log2:
2496 // TODO: What about hosts that lack a C99 library?
2497 return ConstantFoldFP(log2, APF, Ty);
2498 case Intrinsic::log10:
2499 // TODO: What about hosts that lack a C99 library?
2500 return ConstantFoldFP(log10, APF, Ty);
2501 case Intrinsic::exp:
2502 return ConstantFoldFP(exp, APF, Ty);
2503 case Intrinsic::exp2:
2504 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2505 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2506 case Intrinsic::exp10:
2507 // Fold exp10(x) as pow(10, x), in case the host lacks a C99 library.
2508 return ConstantFoldBinaryFP(pow, APFloat(10.0), APF, Ty);
2509 case Intrinsic::sin:
2510 return ConstantFoldFP(sin, APF, Ty);
2511 case Intrinsic::cos:
2512 return ConstantFoldFP(cos, APF, Ty);
2513 case Intrinsic::sqrt:
2514 return ConstantFoldFP(sqrt, APF, Ty);
2515 case Intrinsic::amdgcn_cos:
2516 case Intrinsic::amdgcn_sin: {
2517 double V = getValueAsDouble(Op);
2518 if (V < -256.0 || V > 256.0)
2519 // The gfx8 and gfx9 architectures handle arguments outside the range
2520 // [-256, 256] differently. This should be a rare case so bail out
2521 // rather than trying to handle the difference.
2522 return nullptr;
2523 bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
2524 double V4 = V * 4.0;
2525 if (V4 == floor(V4)) {
2526 // Force exact results for quarter-integer inputs.
2527 const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
2528 V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
2529 } else {
2530 if (IsCos)
2531 V = cos(V * 2.0 * numbers::pi);
2532 else
2533 V = sin(V * 2.0 * numbers::pi);
2535 return GetConstantFoldFPValue(V, Ty);
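// Example (illustrative): these intrinsics take revolutions rather than
// radians, i.e. amdgcn_sin(x) == sin(2*pi*x). Quarter-integer inputs are
// forced exact: amdgcn_sin(0.25) --> 1.0 and amdgcn_cos(0.25) --> 0.0,
// avoiding host-libm rounding error in sin(pi/2).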
2539 if (!TLI)
2540 return nullptr;
2542 LibFunc Func = NotLibFunc;
2543 if (!TLI->getLibFunc(Name, Func))
2544 return nullptr;
2546 switch (Func) {
2547 default:
2548 break;
2549 case LibFunc_acos:
2550 case LibFunc_acosf:
2551 case LibFunc_acos_finite:
2552 case LibFunc_acosf_finite:
2553 if (TLI->has(Func))
2554 return ConstantFoldFP(acos, APF, Ty);
2555 break;
2556 case LibFunc_asin:
2557 case LibFunc_asinf:
2558 case LibFunc_asin_finite:
2559 case LibFunc_asinf_finite:
2560 if (TLI->has(Func))
2561 return ConstantFoldFP(asin, APF, Ty);
2562 break;
2563 case LibFunc_atan:
2564 case LibFunc_atanf:
2565 if (TLI->has(Func))
2566 return ConstantFoldFP(atan, APF, Ty);
2567 break;
2568 case LibFunc_ceil:
2569 case LibFunc_ceilf:
2570 if (TLI->has(Func)) {
2571 U.roundToIntegral(APFloat::rmTowardPositive);
2572 return ConstantFP::get(Ty->getContext(), U);
2574 break;
2575 case LibFunc_cos:
2576 case LibFunc_cosf:
2577 if (TLI->has(Func))
2578 return ConstantFoldFP(cos, APF, Ty);
2579 break;
2580 case LibFunc_cosh:
2581 case LibFunc_coshf:
2582 case LibFunc_cosh_finite:
2583 case LibFunc_coshf_finite:
2584 if (TLI->has(Func))
2585 return ConstantFoldFP(cosh, APF, Ty);
2586 break;
2587 case LibFunc_exp:
2588 case LibFunc_expf:
2589 case LibFunc_exp_finite:
2590 case LibFunc_expf_finite:
2591 if (TLI->has(Func))
2592 return ConstantFoldFP(exp, APF, Ty);
2593 break;
2594 case LibFunc_exp2:
2595 case LibFunc_exp2f:
2596 case LibFunc_exp2_finite:
2597 case LibFunc_exp2f_finite:
2598 if (TLI->has(Func))
2599 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2600 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2601 break;
2602 case LibFunc_fabs:
2603 case LibFunc_fabsf:
2604 if (TLI->has(Func)) {
2605 U.clearSign();
2606 return ConstantFP::get(Ty->getContext(), U);
2608 break;
2609 case LibFunc_floor:
2610 case LibFunc_floorf:
2611 if (TLI->has(Func)) {
2612 U.roundToIntegral(APFloat::rmTowardNegative);
2613 return ConstantFP::get(Ty->getContext(), U);
2615 break;
2616 case LibFunc_log:
2617 case LibFunc_logf:
2618 case LibFunc_log_finite:
2619 case LibFunc_logf_finite:
2620 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2621 return ConstantFoldFP(log, APF, Ty);
2622 break;
2623 case LibFunc_log2:
2624 case LibFunc_log2f:
2625 case LibFunc_log2_finite:
2626 case LibFunc_log2f_finite:
2627 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2628 // TODO: What about hosts that lack a C99 library?
2629 return ConstantFoldFP(log2, APF, Ty);
2630 break;
2631 case LibFunc_log10:
2632 case LibFunc_log10f:
2633 case LibFunc_log10_finite:
2634 case LibFunc_log10f_finite:
2635 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2636 // TODO: What about hosts that lack a C99 library?
2637 return ConstantFoldFP(log10, APF, Ty);
2638 break;
2639 case LibFunc_ilogb:
2640 case LibFunc_ilogbf:
2641 if (!APF.isZero() && TLI->has(Func))
2642 return ConstantInt::get(Ty, ilogb(APF), true);
2643 break;
2644 case LibFunc_logb:
2645 case LibFunc_logbf:
2646 if (!APF.isZero() && TLI->has(Func))
2647 return ConstantFoldFP(logb, APF, Ty);
2648 break;
2649 case LibFunc_log1p:
2650 case LibFunc_log1pf:
2651 // Implement optional behavior from C's Annex F for +/-0.0.
2652 if (U.isZero())
2653 return ConstantFP::get(Ty->getContext(), U);
2654 if (APF > APFloat::getOne(APF.getSemantics(), true) && TLI->has(Func))
2655 return ConstantFoldFP(log1p, APF, Ty);
2656 break;
2657 case LibFunc_logl:
2658 return nullptr;
2659 case LibFunc_erf:
2660 case LibFunc_erff:
2661 if (TLI->has(Func))
2662 return ConstantFoldFP(erf, APF, Ty);
2663 break;
2664 case LibFunc_nearbyint:
2665 case LibFunc_nearbyintf:
2666 case LibFunc_rint:
2667 case LibFunc_rintf:
2668 if (TLI->has(Func)) {
2669 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2670 return ConstantFP::get(Ty->getContext(), U);
2672 break;
2673 case LibFunc_round:
2674 case LibFunc_roundf:
2675 if (TLI->has(Func)) {
2676 U.roundToIntegral(APFloat::rmNearestTiesToAway);
2677 return ConstantFP::get(Ty->getContext(), U);
2679 break;
2680 case LibFunc_sin:
2681 case LibFunc_sinf:
2682 if (TLI->has(Func))
2683 return ConstantFoldFP(sin, APF, Ty);
2684 break;
2685 case LibFunc_sinh:
2686 case LibFunc_sinhf:
2687 case LibFunc_sinh_finite:
2688 case LibFunc_sinhf_finite:
2689 if (TLI->has(Func))
2690 return ConstantFoldFP(sinh, APF, Ty);
2691 break;
2692 case LibFunc_sqrt:
2693 case LibFunc_sqrtf:
2694 if (!APF.isNegative() && TLI->has(Func))
2695 return ConstantFoldFP(sqrt, APF, Ty);
2696 break;
2697 case LibFunc_tan:
2698 case LibFunc_tanf:
2699 if (TLI->has(Func))
2700 return ConstantFoldFP(tan, APF, Ty);
2701 break;
2702 case LibFunc_tanh:
2703 case LibFunc_tanhf:
2704 if (TLI->has(Func))
2705 return ConstantFoldFP(tanh, APF, Ty);
2706 break;
2707 case LibFunc_trunc:
2708 case LibFunc_truncf:
2709 if (TLI->has(Func)) {
2710 U.roundToIntegral(APFloat::rmTowardZero);
2711 return ConstantFP::get(Ty->getContext(), U);
2713 break;
2715 return nullptr;
2718 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
2719 switch (IntrinsicID) {
2720 case Intrinsic::bswap:
2721 return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
2722 case Intrinsic::ctpop:
2723 return ConstantInt::get(Ty, Op->getValue().popcount());
2724 case Intrinsic::bitreverse:
2725 return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
2726 case Intrinsic::convert_from_fp16: {
2727 APFloat Val(APFloat::IEEEhalf(), Op->getValue());
2729 bool lost = false;
2730 APFloat::opStatus status = Val.convert(
2731 Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
2733 // Conversion is always precise.
2734 (void)status;
2735 assert(status != APFloat::opInexact && !lost &&
2736 "Precision lost during fp16 constfolding");
2738 return ConstantFP::get(Ty->getContext(), Val);
2741 case Intrinsic::amdgcn_s_wqm: {
2742 uint64_t Val = Op->getZExtValue();
2743 Val |= (Val & 0x5555555555555555ULL) << 1 |
2744 ((Val >> 1) & 0x5555555555555555ULL);
2745 Val |= (Val & 0x3333333333333333ULL) << 2 |
2746 ((Val >> 2) & 0x3333333333333333ULL);
2747 return ConstantInt::get(Ty, Val);
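// Example (illustrative): s_wqm fills each 4-bit quad that has any bit set:
//   0x0000000000000001 --> 0x000000000000000F
//   0x0000000000000012 --> 0x00000000000000FF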
2750 case Intrinsic::amdgcn_s_quadmask: {
2751 uint64_t Val = Op->getZExtValue();
2752 uint64_t QuadMask = 0;
2753 for (unsigned I = 0; I < Op->getBitWidth() / 4; ++I, Val >>= 4) {
2754 if (!(Val & 0xF))
2755 continue;
2757 QuadMask |= (1ULL << I);
2759 return ConstantInt::get(Ty, QuadMask);
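// Example (illustrative): s_quadmask compresses each 4-bit quad to one bit:
//   0x00000000000000F0 --> 0x2   (only quad 1 is occupied)
//   0x0000000000010001 --> 0x11  (quads 0 and 4 are occupied)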
2762 case Intrinsic::amdgcn_s_bitreplicate: {
2763 uint64_t Val = Op->getZExtValue();
2764 Val = (Val & 0x000000000000FFFFULL) | (Val & 0x00000000FFFF0000ULL) << 16;
2765 Val = (Val & 0x000000FF000000FFULL) | (Val & 0x0000FF000000FF00ULL) << 8;
2766 Val = (Val & 0x000F000F000F000FULL) | (Val & 0x00F000F000F000F0ULL) << 4;
2767 Val = (Val & 0x0303030303030303ULL) | (Val & 0x0C0C0C0C0C0C0C0CULL) << 2;
2768 Val = (Val & 0x1111111111111111ULL) | (Val & 0x2222222222222222ULL) << 1;
2769 Val = Val | Val << 1;
2770 return ConstantInt::get(Ty, Val);
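// Example (illustrative): s_bitreplicate doubles each of the low 32 bits, so
// bit i of the input becomes bits 2*i and 2*i+1 of the result:
//   0x3 (0b11)  --> 0xF  (0b1111)
//   0x5 (0b101) --> 0x33 (0b110011)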
2773 default:
2774 return nullptr;
2778 switch (IntrinsicID) {
2779 default: break;
2780 case Intrinsic::vector_reduce_add:
2781 case Intrinsic::vector_reduce_mul:
2782 case Intrinsic::vector_reduce_and:
2783 case Intrinsic::vector_reduce_or:
2784 case Intrinsic::vector_reduce_xor:
2785 case Intrinsic::vector_reduce_smin:
2786 case Intrinsic::vector_reduce_smax:
2787 case Intrinsic::vector_reduce_umin:
2788 case Intrinsic::vector_reduce_umax:
2789 if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
2790 return C;
2791 break;
2794 // Support ConstantVector in case the vector contains an Undef element.
2795 if (isa<ConstantVector>(Operands[0]) ||
2796 isa<ConstantDataVector>(Operands[0])) {
2797 auto *Op = cast<Constant>(Operands[0]);
2798 switch (IntrinsicID) {
2799 default: break;
2800 case Intrinsic::x86_sse_cvtss2si:
2801 case Intrinsic::x86_sse_cvtss2si64:
2802 case Intrinsic::x86_sse2_cvtsd2si:
2803 case Intrinsic::x86_sse2_cvtsd2si64:
2804 if (ConstantFP *FPOp =
2805 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2806 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2807 /*roundTowardZero=*/false, Ty,
2808 /*IsSigned*/true);
2809 break;
2810 case Intrinsic::x86_sse_cvttss2si:
2811 case Intrinsic::x86_sse_cvttss2si64:
2812 case Intrinsic::x86_sse2_cvttsd2si:
2813 case Intrinsic::x86_sse2_cvttsd2si64:
2814 if (ConstantFP *FPOp =
2815 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2816 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2817 /*roundTowardZero=*/true, Ty,
2818 /*IsSigned*/true);
2819 break;
2823 return nullptr;
2826 static Constant *evaluateCompare(const APFloat &Op1, const APFloat &Op2,
2827 const ConstrainedFPIntrinsic *Call) {
2828 APFloat::opStatus St = APFloat::opOK;
2829 auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
2830 FCmpInst::Predicate Cond = FCmp->getPredicate();
2831 if (FCmp->isSignaling()) {
2832 if (Op1.isNaN() || Op2.isNaN())
2833 St = APFloat::opInvalidOp;
2834 } else {
2835 if (Op1.isSignaling() || Op2.isSignaling())
2836 St = APFloat::opInvalidOp;
2838 bool Result = FCmpInst::compare(Op1, Op2, Cond);
2839 if (mayFoldConstrained(const_cast<ConstrainedFPCmpIntrinsic *>(FCmp), St))
2840 return ConstantInt::get(Call->getType()->getScalarType(), Result);
2841 return nullptr;
2844 static Constant *ConstantFoldLibCall2(StringRef Name, Type *Ty,
2845 ArrayRef<Constant *> Operands,
2846 const TargetLibraryInfo *TLI) {
2847 if (!TLI)
2848 return nullptr;
2850 LibFunc Func = NotLibFunc;
2851 if (!TLI->getLibFunc(Name, Func))
2852 return nullptr;
2854 const auto *Op1 = dyn_cast<ConstantFP>(Operands[0]);
2855 if (!Op1)
2856 return nullptr;
2858 const auto *Op2 = dyn_cast<ConstantFP>(Operands[1]);
2859 if (!Op2)
2860 return nullptr;
2862 const APFloat &Op1V = Op1->getValueAPF();
2863 const APFloat &Op2V = Op2->getValueAPF();
2865 switch (Func) {
2866 default:
2867 break;
2868 case LibFunc_pow:
2869 case LibFunc_powf:
2870 case LibFunc_pow_finite:
2871 case LibFunc_powf_finite:
2872 if (TLI->has(Func))
2873 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2874 break;
2875 case LibFunc_fmod:
2876 case LibFunc_fmodf:
2877 if (TLI->has(Func)) {
2878 APFloat V = Op1->getValueAPF();
2879 if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
2880 return ConstantFP::get(Ty->getContext(), V);
2882 break;
2883 case LibFunc_remainder:
2884 case LibFunc_remainderf:
2885 if (TLI->has(Func)) {
2886 APFloat V = Op1->getValueAPF();
2887 if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
2888 return ConstantFP::get(Ty->getContext(), V);
2890 break;
2891 case LibFunc_atan2:
2892 case LibFunc_atan2f:
2893 // atan2(+/-0.0, +/-0.0) is known to raise an exception on some libm
2894 // implementations (Solaris), so we do not assume a known result for that.
2895 if (Op1V.isZero() && Op2V.isZero())
2896 return nullptr;
2897 [[fallthrough]];
2898 case LibFunc_atan2_finite:
2899 case LibFunc_atan2f_finite:
2900 if (TLI->has(Func))
2901 return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
2902 break;
2905 return nullptr;
2908 static Constant *ConstantFoldIntrinsicCall2(Intrinsic::ID IntrinsicID, Type *Ty,
2909 ArrayRef<Constant *> Operands,
2910 const CallBase *Call) {
2911 assert(Operands.size() == 2 && "Wrong number of operands.");
2913 if (Ty->isFloatingPointTy()) {
2914 // TODO: We should have undef handling for all of the FP intrinsics that
2915 // are attempted to be folded in this function.
2916 bool IsOp0Undef = isa<UndefValue>(Operands[0]);
2917 bool IsOp1Undef = isa<UndefValue>(Operands[1]);
2918 switch (IntrinsicID) {
2919 case Intrinsic::maxnum:
2920 case Intrinsic::minnum:
2921 case Intrinsic::maximum:
2922 case Intrinsic::minimum:
2923 case Intrinsic::nvvm_fmax_d:
2924 case Intrinsic::nvvm_fmin_d:
2925 // If one argument is undef, return the other argument.
2926 if (IsOp0Undef)
2927 return Operands[1];
2928 if (IsOp1Undef)
2929 return Operands[0];
2930 break;
2932 case Intrinsic::nvvm_fmax_f:
2933 case Intrinsic::nvvm_fmax_ftz_f:
2934 case Intrinsic::nvvm_fmax_ftz_nan_f:
2935 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
2936 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
2937 case Intrinsic::nvvm_fmax_nan_f:
2938 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
2939 case Intrinsic::nvvm_fmax_xorsign_abs_f:
2941 case Intrinsic::nvvm_fmin_f:
2942 case Intrinsic::nvvm_fmin_ftz_f:
2943 case Intrinsic::nvvm_fmin_ftz_nan_f:
2944 case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
2945 case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
2946 case Intrinsic::nvvm_fmin_nan_f:
2947 case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
2948 case Intrinsic::nvvm_fmin_xorsign_abs_f:
2949 // If one arg is undef, the other arg can be returned only if it is
2950 // constant, as we may need to flush it to sign-preserving zero or
2951 // canonicalize the NaN.
2952 if (!IsOp0Undef && !IsOp1Undef)
2953 break;
2954 if (auto *Op = dyn_cast<ConstantFP>(Operands[IsOp0Undef ? 1 : 0])) {
2955 if (Op->isNaN()) {
2956 APInt NVCanonicalNaN(32, 0x7fffffff);
2957 return ConstantFP::get(
2958 Ty, APFloat(Ty->getFltSemantics(), NVCanonicalNaN));
2960 if (nvvm::FMinFMaxShouldFTZ(IntrinsicID))
2961 return ConstantFP::get(Ty, FTZPreserveSign(Op->getValueAPF()));
2962 else
2963 return Op;
2965 break;
2969 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2970 const APFloat &Op1V = Op1->getValueAPF();
2972 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2973 if (Op2->getType() != Op1->getType())
2974 return nullptr;
2975 const APFloat &Op2V = Op2->getValueAPF();
2977 if (const auto *ConstrIntr =
2978 dyn_cast_if_present<ConstrainedFPIntrinsic>(Call)) {
2979 RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
2980 APFloat Res = Op1V;
2981 APFloat::opStatus St;
2982 switch (IntrinsicID) {
2983 default:
2984 return nullptr;
2985 case Intrinsic::experimental_constrained_fadd:
2986 St = Res.add(Op2V, RM);
2987 break;
2988 case Intrinsic::experimental_constrained_fsub:
2989 St = Res.subtract(Op2V, RM);
2990 break;
2991 case Intrinsic::experimental_constrained_fmul:
2992 St = Res.multiply(Op2V, RM);
2993 break;
2994 case Intrinsic::experimental_constrained_fdiv:
2995 St = Res.divide(Op2V, RM);
2996 break;
2997 case Intrinsic::experimental_constrained_frem:
2998 St = Res.mod(Op2V);
2999 break;
3000 case Intrinsic::experimental_constrained_fcmp:
3001 case Intrinsic::experimental_constrained_fcmps:
3002 return evaluateCompare(Op1V, Op2V, ConstrIntr);
3004 if (mayFoldConstrained(const_cast<ConstrainedFPIntrinsic *>(ConstrIntr),
3005 St))
3006 return ConstantFP::get(Ty->getContext(), Res);
3007 return nullptr;
3010 switch (IntrinsicID) {
3011 default:
3012 break;
3013 case Intrinsic::copysign:
3014 return ConstantFP::get(Ty->getContext(), APFloat::copySign(Op1V, Op2V));
3015 case Intrinsic::minnum:
3016 return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
3017 case Intrinsic::maxnum:
3018 return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
3019 case Intrinsic::minimum:
3020 return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
3021 case Intrinsic::maximum:
3022 return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
3024 case Intrinsic::nvvm_fmax_d:
3025 case Intrinsic::nvvm_fmax_f:
3026 case Intrinsic::nvvm_fmax_ftz_f:
3027 case Intrinsic::nvvm_fmax_ftz_nan_f:
3028 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
3029 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
3030 case Intrinsic::nvvm_fmax_nan_f:
3031 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
3032 case Intrinsic::nvvm_fmax_xorsign_abs_f:
3034 case Intrinsic::nvvm_fmin_d:
3035 case Intrinsic::nvvm_fmin_f:
3036 case Intrinsic::nvvm_fmin_ftz_f:
3037 case Intrinsic::nvvm_fmin_ftz_nan_f:
3038 case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
3039 case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
3040 case Intrinsic::nvvm_fmin_nan_f:
3041 case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
3042 case Intrinsic::nvvm_fmin_xorsign_abs_f: {
3044 bool ShouldCanonicalizeNaNs = !(IntrinsicID == Intrinsic::nvvm_fmax_d ||
3045 IntrinsicID == Intrinsic::nvvm_fmin_d);
3046 bool IsFTZ = nvvm::FMinFMaxShouldFTZ(IntrinsicID);
3047 bool IsNaNPropagating = nvvm::FMinFMaxPropagatesNaNs(IntrinsicID);
3048 bool IsXorSignAbs = nvvm::FMinFMaxIsXorSignAbs(IntrinsicID);
3050 APFloat A = IsFTZ ? FTZPreserveSign(Op1V) : Op1V;
3051 APFloat B = IsFTZ ? FTZPreserveSign(Op2V) : Op2V;
3053 bool XorSign = false;
3054 if (IsXorSignAbs) {
3055 XorSign = A.isNegative() ^ B.isNegative();
3056 A = abs(A);
3057 B = abs(B);
3060 bool IsFMax = false;
3061 switch (IntrinsicID) {
3062 case Intrinsic::nvvm_fmax_d:
3063 case Intrinsic::nvvm_fmax_f:
3064 case Intrinsic::nvvm_fmax_ftz_f:
3065 case Intrinsic::nvvm_fmax_ftz_nan_f:
3066 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
3067 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
3068 case Intrinsic::nvvm_fmax_nan_f:
3069 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
3070 case Intrinsic::nvvm_fmax_xorsign_abs_f:
3071 IsFMax = true;
3072 break;
3074 APFloat Res = IsFMax ? maximum(A, B) : minimum(A, B);
3076 if (ShouldCanonicalizeNaNs) {
3077 APFloat NVCanonicalNaN(Res.getSemantics(), APInt(32, 0x7fffffff));
3078 if (A.isNaN() && B.isNaN())
3079 return ConstantFP::get(Ty, NVCanonicalNaN);
3080 else if (IsNaNPropagating && (A.isNaN() || B.isNaN()))
3081 return ConstantFP::get(Ty, NVCanonicalNaN);
3084 if (A.isNaN() && B.isNaN())
3085 return Operands[1];
3086 else if (A.isNaN())
3087 Res = B;
3088 else if (B.isNaN())
3089 Res = A;
3091 if (IsXorSignAbs && XorSign != Res.isNegative())
3092 Res.changeSign();
3094 return ConstantFP::get(Ty->getContext(), Res);
3098 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
3099 return nullptr;
3101 switch (IntrinsicID) {
3102 default:
3103 break;
3104 case Intrinsic::pow:
3105 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
3106 case Intrinsic::amdgcn_fmul_legacy:
3107 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
3108 // NaN or infinity, gives +0.0.
3109 if (Op1V.isZero() || Op2V.isZero())
3110 return ConstantFP::getZero(Ty);
3111 return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
3114 } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
3115 switch (IntrinsicID) {
3116 case Intrinsic::ldexp: {
3117 return ConstantFP::get(
3118 Ty->getContext(),
3119 scalbn(Op1V, Op2C->getSExtValue(), APFloat::rmNearestTiesToEven));
3121 case Intrinsic::is_fpclass: {
3122 FPClassTest Mask = static_cast<FPClassTest>(Op2C->getZExtValue());
3123 bool Result =
3124 ((Mask & fcSNan) && Op1V.isNaN() && Op1V.isSignaling()) ||
3125 ((Mask & fcQNan) && Op1V.isNaN() && !Op1V.isSignaling()) ||
3126 ((Mask & fcNegInf) && Op1V.isNegInfinity()) ||
3127 ((Mask & fcNegNormal) && Op1V.isNormal() && Op1V.isNegative()) ||
3128 ((Mask & fcNegSubnormal) && Op1V.isDenormal() && Op1V.isNegative()) ||
3129 ((Mask & fcNegZero) && Op1V.isZero() && Op1V.isNegative()) ||
3130 ((Mask & fcPosZero) && Op1V.isZero() && !Op1V.isNegative()) ||
3131 ((Mask & fcPosSubnormal) && Op1V.isDenormal() && !Op1V.isNegative()) ||
3132 ((Mask & fcPosNormal) && Op1V.isNormal() && !Op1V.isNegative()) ||
3133 ((Mask & fcPosInf) && Op1V.isPosInfinity());
3134 return ConstantInt::get(Ty, Result);
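// Example (illustrative): the second operand is a bitwise OR of FPClassTest
// bits, so
//   is_fpclass(-0.0f, fcNegZero)     --> true
//   is_fpclass(1.0f,  fcNan | fcInf) --> false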
3136 case Intrinsic::powi: {
3137 int Exp = static_cast<int>(Op2C->getSExtValue());
3138 switch (Ty->getTypeID()) {
3139 case Type::HalfTyID:
3140 case Type::FloatTyID: {
3141 APFloat Res(static_cast<float>(std::pow(Op1V.convertToFloat(), Exp)));
3142 if (Ty->isHalfTy()) {
3143 bool Unused;
3144 Res.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven,
3145 &Unused);
3147 return ConstantFP::get(Ty->getContext(), Res);
3149 case Type::DoubleTyID:
3150 return ConstantFP::get(Ty, std::pow(Op1V.convertToDouble(), Exp));
3151 default:
3152 return nullptr;
3155 default:
3156 break;
3159 return nullptr;
3162 if (Operands[0]->getType()->isIntegerTy() &&
3163 Operands[1]->getType()->isIntegerTy()) {
3164 const APInt *C0, *C1;
3165 if (!getConstIntOrUndef(Operands[0], C0) ||
3166 !getConstIntOrUndef(Operands[1], C1))
3167 return nullptr;
3169 switch (IntrinsicID) {
3170 default: break;
3171 case Intrinsic::smax:
3172 case Intrinsic::smin:
3173 case Intrinsic::umax:
3174 case Intrinsic::umin:
3175 // This is the same as for binary ops - poison propagates.
3176 // TODO: Poison handling should be consolidated.
3177 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
3178 return PoisonValue::get(Ty);
3180 if (!C0 && !C1)
3181 return UndefValue::get(Ty);
3182 if (!C0 || !C1)
3183 return MinMaxIntrinsic::getSaturationPoint(IntrinsicID, Ty);
3184 return ConstantInt::get(
3185 Ty, ICmpInst::compare(*C0, *C1,
3186 MinMaxIntrinsic::getPredicate(IntrinsicID))
3187 ? *C0
3188 : *C1);
3190 case Intrinsic::scmp:
3191 case Intrinsic::ucmp:
3192 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
3193 return PoisonValue::get(Ty);
3195 if (!C0 || !C1)
3196 return ConstantInt::get(Ty, 0);
3198 int Res;
3199 if (IntrinsicID == Intrinsic::scmp)
3200 Res = C0->sgt(*C1) ? 1 : C0->slt(*C1) ? -1 : 0;
3201 else
3202 Res = C0->ugt(*C1) ? 1 : C0->ult(*C1) ? -1 : 0;
3203 return ConstantInt::get(Ty, Res, /*IsSigned=*/true);
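// Example (illustrative): the signedness of the comparison, not of the
// result, distinguishes the two intrinsics:
//   scmp(i32 -1, i32 1) --> -1  (signed:   -1 <  1)
//   ucmp(i32 -1, i32 1) -->  1  (unsigned: 0xFFFFFFFF > 1)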
3205 case Intrinsic::usub_with_overflow:
3206 case Intrinsic::ssub_with_overflow:
3207 // X - undef -> { 0, false }
3208 // undef - X -> { 0, false }
3209 if (!C0 || !C1)
3210 return Constant::getNullValue(Ty);
3211 [[fallthrough]];
3212 case Intrinsic::uadd_with_overflow:
3213 case Intrinsic::sadd_with_overflow:
3214 // X + undef -> { -1, false }
3215 // undef + X -> { -1, false }
3216 if (!C0 || !C1) {
3217 return ConstantStruct::get(
3218 cast<StructType>(Ty),
3219 {Constant::getAllOnesValue(Ty->getStructElementType(0)),
3220 Constant::getNullValue(Ty->getStructElementType(1))});
3222 [[fallthrough]];
3223 case Intrinsic::smul_with_overflow:
3224 case Intrinsic::umul_with_overflow: {
3225 // undef * X -> { 0, false }
3226 // X * undef -> { 0, false }
3227 if (!C0 || !C1)
3228 return Constant::getNullValue(Ty);
3230 APInt Res;
3231 bool Overflow;
3232 switch (IntrinsicID) {
3233 default: llvm_unreachable("Invalid case");
3234 case Intrinsic::sadd_with_overflow:
3235 Res = C0->sadd_ov(*C1, Overflow);
3236 break;
3237 case Intrinsic::uadd_with_overflow:
3238 Res = C0->uadd_ov(*C1, Overflow);
3239 break;
3240 case Intrinsic::ssub_with_overflow:
3241 Res = C0->ssub_ov(*C1, Overflow);
3242 break;
3243 case Intrinsic::usub_with_overflow:
3244 Res = C0->usub_ov(*C1, Overflow);
3245 break;
3246 case Intrinsic::smul_with_overflow:
3247 Res = C0->smul_ov(*C1, Overflow);
3248 break;
3249 case Intrinsic::umul_with_overflow:
3250 Res = C0->umul_ov(*C1, Overflow);
3251 break;
3253 Constant *Ops[] = {
3254 ConstantInt::get(Ty->getContext(), Res),
3255 ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
3256 };
3257 return ConstantStruct::get(cast<StructType>(Ty), Ops);
3258 }
3259 case Intrinsic::uadd_sat:
3260 case Intrinsic::sadd_sat:
3261 // This is the same as for binary ops - poison propagates.
3262 // TODO: Poison handling should be consolidated.
3263 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
3264 return PoisonValue::get(Ty);
3266 if (!C0 && !C1)
3267 return UndefValue::get(Ty);
3268 if (!C0 || !C1)
3269 return Constant::getAllOnesValue(Ty);
3270 if (IntrinsicID == Intrinsic::uadd_sat)
3271 return ConstantInt::get(Ty, C0->uadd_sat(*C1));
3272 else
3273 return ConstantInt::get(Ty, C0->sadd_sat(*C1));
3274 case Intrinsic::usub_sat:
3275 case Intrinsic::ssub_sat:
3276 // This is the same as for binary ops - poison propagates.
3277 // TODO: Poison handling should be consolidated.
3278 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
3279 return PoisonValue::get(Ty);
3281 if (!C0 && !C1)
3282 return UndefValue::get(Ty);
3283 if (!C0 || !C1)
3284 return Constant::getNullValue(Ty);
3285 if (IntrinsicID == Intrinsic::usub_sat)
3286 return ConstantInt::get(Ty, C0->usub_sat(*C1));
3287 else
3288 return ConstantInt::get(Ty, C0->ssub_sat(*C1));
3289 case Intrinsic::cttz:
3290 case Intrinsic::ctlz:
3291 assert(C1 && "Must be constant int");
3293 // cttz(0, 1) and ctlz(0, 1) are poison.
3294 if (C1->isOne() && (!C0 || C0->isZero()))
3295 return PoisonValue::get(Ty);
3296 if (!C0)
3297 return Constant::getNullValue(Ty);
3298 if (IntrinsicID == Intrinsic::cttz)
3299 return ConstantInt::get(Ty, C0->countr_zero());
3300 else
3301 return ConstantInt::get(Ty, C0->countl_zero());
3303 case Intrinsic::abs:
3304 assert(C1 && "Must be constant int");
3305 assert((C1->isOne() || C1->isZero()) && "Must be 0 or 1");
3307 // Undef or minimum val operand with poison min --> poison
3308 if (C1->isOne() && (!C0 || C0->isMinSignedValue()))
3309 return PoisonValue::get(Ty);
3311 // Undef operand with no poison min --> 0 (sign bit must be clear)
3312 if (!C0)
3313 return Constant::getNullValue(Ty);
3315 return ConstantInt::get(Ty, C0->abs());
3316 case Intrinsic::amdgcn_wave_reduce_umin:
3317 case Intrinsic::amdgcn_wave_reduce_umax:
3318 return dyn_cast<Constant>(Operands[0]);
3319 }
3321 return nullptr;
3322 }
3324 // Support ConstantVector in case we have an Undef in the top.
3325 if ((isa<ConstantVector>(Operands[0]) ||
3326 isa<ConstantDataVector>(Operands[0])) &&
3327 // Check for default rounding mode.
3328 // FIXME: Support other rounding modes?
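// (An immediate of 4 is _MM_FROUND_CUR_DIRECTION, i.e. round using the
// current MXCSR mode.)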
3329 isa<ConstantInt>(Operands[1]) &&
3330 cast<ConstantInt>(Operands[1])->getValue() == 4) {
3331 auto *Op = cast<Constant>(Operands[0]);
3332 switch (IntrinsicID) {
3333 default: break;
3334 case Intrinsic::x86_avx512_vcvtss2si32:
3335 case Intrinsic::x86_avx512_vcvtss2si64:
3336 case Intrinsic::x86_avx512_vcvtsd2si32:
3337 case Intrinsic::x86_avx512_vcvtsd2si64:
3338 if (ConstantFP *FPOp =
3339 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
3340 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
3341 /*roundTowardZero=*/false, Ty,
3342 /*IsSigned=*/true);
3343 break;
3344 case Intrinsic::x86_avx512_vcvtss2usi32:
3345 case Intrinsic::x86_avx512_vcvtss2usi64:
3346 case Intrinsic::x86_avx512_vcvtsd2usi32:
3347 case Intrinsic::x86_avx512_vcvtsd2usi64:
3348 if (ConstantFP *FPOp =
3349 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
3350 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
3351 /*roundTowardZero=*/false, Ty,
3352 /*IsSigned=*/false);
3353 break;
3354 case Intrinsic::x86_avx512_cvttss2si:
3355 case Intrinsic::x86_avx512_cvttss2si64:
3356 case Intrinsic::x86_avx512_cvttsd2si:
3357 case Intrinsic::x86_avx512_cvttsd2si64:
3358 if (ConstantFP *FPOp =
3359 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
3360 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
3361 /*roundTowardZero=*/true, Ty,
3362 /*IsSigned=*/true);
3363 break;
3364 case Intrinsic::x86_avx512_cvttss2usi:
3365 case Intrinsic::x86_avx512_cvttss2usi64:
3366 case Intrinsic::x86_avx512_cvttsd2usi:
3367 case Intrinsic::x86_avx512_cvttsd2usi64:
3368 if (ConstantFP *FPOp =
3369 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
3370 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
3371 /*roundTowardZero=*/true, Ty,
3372 /*IsSigned=*/false);
3373 break;
3374 }
3375 }
3376 return nullptr;
3377 }
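// Folds the AMDGPU cube intrinsics: pick the major axis among (S0, S1, S2)
// and derive the cube face id (0..5), the major-axis magnitude (cubema
// returns twice that axis), and the S/T face coordinates.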
3379 static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
3380 const APFloat &S0,
3381 const APFloat &S1,
3382 const APFloat &S2) {
3383 unsigned ID;
3384 const fltSemantics &Sem = S0.getSemantics();
3385 APFloat MA(Sem), SC(Sem), TC(Sem);
3386 if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) {
3387 if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
3388 // S2 < 0
3389 ID = 5;
3390 SC = -S0;
3391 } else {
3392 ID = 4;
3393 SC = S0;
3394 }
3395 MA = S2;
3396 TC = -S1;
3397 } else if (abs(S1) >= abs(S0)) {
3398 if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
3399 // S1 < 0
3400 ID = 3;
3401 TC = -S2;
3402 } else {
3403 ID = 2;
3404 TC = S2;
3405 }
3406 MA = S1;
3407 SC = S0;
3408 } else {
3409 if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
3410 // S0 < 0
3411 ID = 1;
3412 SC = S2;
3413 } else {
3414 ID = 0;
3415 SC = -S2;
3416 }
3417 MA = S0;
3418 TC = -S1;
3419 }
3420 switch (IntrinsicID) {
3421 default:
3422 llvm_unreachable("unhandled amdgcn cube intrinsic");
3423 case Intrinsic::amdgcn_cubeid:
3424 return APFloat(Sem, ID);
3425 case Intrinsic::amdgcn_cubema:
3426 return MA + MA;
3427 case Intrinsic::amdgcn_cubesc:
3428 return SC;
3429 case Intrinsic::amdgcn_cubetc:
3430 return TC;
3431 }
3432 }
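// Folds amdgcn.perm: each byte of the selector C2 picks a byte of the
// (C1, C0) pair; selector 12 yields 0x00, values >= 13 yield 0xff, and
// selectors 8..11 broadcast a source sign bit across the byte.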
3434 static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands,
3435 Type *Ty) {
3436 const APInt *C0, *C1, *C2;
3437 if (!getConstIntOrUndef(Operands[0], C0) ||
3438 !getConstIntOrUndef(Operands[1], C1) ||
3439 !getConstIntOrUndef(Operands[2], C2))
3440 return nullptr;
3442 if (!C2)
3443 return UndefValue::get(Ty);
3445 APInt Val(32, 0);
3446 unsigned NumUndefBytes = 0;
3447 for (unsigned I = 0; I < 32; I += 8) {
3448 unsigned Sel = C2->extractBitsAsZExtValue(8, I);
3449 unsigned B = 0;
3451 if (Sel >= 13)
3452 B = 0xff;
3453 else if (Sel == 12)
3454 B = 0x00;
3455 else {
3456 const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
3457 if (!Src)
3458 ++NumUndefBytes;
3459 else if (Sel < 8)
3460 B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
3461 else
3462 B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
3463 }
3465 Val.insertBits(B, I, 8);
3466 }
3468 if (NumUndefBytes == 4)
3469 return UndefValue::get(Ty);
3471 return ConstantInt::get(Ty, Val);
3472 }
3474 static Constant *ConstantFoldScalarCall3(StringRef Name,
3475 Intrinsic::ID IntrinsicID,
3476 Type *Ty,
3477 ArrayRef<Constant *> Operands,
3478 const TargetLibraryInfo *TLI,
3479 const CallBase *Call) {
3480 assert(Operands.size() == 3 && "Wrong number of operands.");
3482 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
3483 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
3484 if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
3485 const APFloat &C1 = Op1->getValueAPF();
3486 const APFloat &C2 = Op2->getValueAPF();
3487 const APFloat &C3 = Op3->getValueAPF();
3489 if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
3490 RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
3491 APFloat Res = C1;
3492 APFloat::opStatus St;
3493 switch (IntrinsicID) {
3494 default:
3495 return nullptr;
3496 case Intrinsic::experimental_constrained_fma:
3497 case Intrinsic::experimental_constrained_fmuladd:
3498 St = Res.fusedMultiplyAdd(C2, C3, RM);
3499 break;
3500 }
3501 if (mayFoldConstrained(
3502 const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
3503 return ConstantFP::get(Ty->getContext(), Res);
3504 return nullptr;
3505 }
3507 switch (IntrinsicID) {
3508 default: break;
3509 case Intrinsic::amdgcn_fma_legacy: {
3510 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
3511 // NaN or infinity, gives +0.0.
3512 if (C1.isZero() || C2.isZero()) {
3513 // It's tempting to just return C3 here, but that would give the
3514 // wrong result if C3 was -0.0.
3515 return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
3516 }
3517 [[fallthrough]];
3518 }
3519 case Intrinsic::fma:
3520 case Intrinsic::fmuladd: {
3521 APFloat V = C1;
3522 V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
3523 return ConstantFP::get(Ty->getContext(), V);
3524 }
3525 case Intrinsic::amdgcn_cubeid:
3526 case Intrinsic::amdgcn_cubema:
3527 case Intrinsic::amdgcn_cubesc:
3528 case Intrinsic::amdgcn_cubetc: {
3529 APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
3530 return ConstantFP::get(Ty->getContext(), V);
3531 }
3532 }
3533 }
3534 }
3535 }
3537 if (IntrinsicID == Intrinsic::smul_fix ||
3538 IntrinsicID == Intrinsic::smul_fix_sat) {
3539 // poison * C -> poison
3540 // C * poison -> poison
3541 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
3542 return PoisonValue::get(Ty);
3544 const APInt *C0, *C1;
3545 if (!getConstIntOrUndef(Operands[0], C0) ||
3546 !getConstIntOrUndef(Operands[1], C1))
3547 return nullptr;
3549 // undef * C -> 0
3550 // C * undef -> 0
3551 if (!C0 || !C1)
3552 return Constant::getNullValue(Ty);
3554 // This code performs rounding towards negative infinity in case the result
3555 // cannot be represented exactly for the given scale. Targets that do care
3556 // about rounding should use a target hook for specifying how rounding
3557 // should be done, and provide their own folding to be consistent with
3558 // rounding. This is the same approach as used by
3559 // DAGTypeLegalizer::ExpandIntRes_MULFIX.
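// For example, with Scale == 2 (two fractional bits), smul_fix(3, 2, 2)
// computes (3 * 2) >> 2 == 1, i.e. 0.75 * 0.5 == 0.375 rounded down to 0.25.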
3560 unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
3561 unsigned Width = C0->getBitWidth();
3562 assert(Scale < Width && "Illegal scale.");
3563 unsigned ExtendedWidth = Width * 2;
3564 APInt Product =
3565 (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
3566 if (IntrinsicID == Intrinsic::smul_fix_sat) {
3567 APInt Max = APInt::getSignedMaxValue(Width).sext(ExtendedWidth);
3568 APInt Min = APInt::getSignedMinValue(Width).sext(ExtendedWidth);
3569 Product = APIntOps::smin(Product, Max);
3570 Product = APIntOps::smax(Product, Min);
3571 }
3572 return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
3573 }
3575 if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
3576 const APInt *C0, *C1, *C2;
3577 if (!getConstIntOrUndef(Operands[0], C0) ||
3578 !getConstIntOrUndef(Operands[1], C1) ||
3579 !getConstIntOrUndef(Operands[2], C2))
3580 return nullptr;
3582 bool IsRight = IntrinsicID == Intrinsic::fshr;
3583 if (!C2)
3584 return Operands[IsRight ? 1 : 0];
3585 if (!C0 && !C1)
3586 return UndefValue::get(Ty);
3588 // The shift amount is interpreted as modulo the bitwidth. If the shift
3589 // amount is effectively 0, avoid UB due to oversized inverse shift below.
3590 unsigned BitWidth = C2->getBitWidth();
3591 unsigned ShAmt = C2->urem(BitWidth);
3592 if (!ShAmt)
3593 return Operands[IsRight ? 1 : 0];
3595 // (C0 << ShlAmt) | (C1 >> LshrAmt)
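// e.g. fshl(i8 C0, i8 C1, 3) folds to (C0 << 3) | (C1 >> 5)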
3596 unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
3597 unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
3598 if (!C0)
3599 return ConstantInt::get(Ty, C1->lshr(LshrAmt));
3600 if (!C1)
3601 return ConstantInt::get(Ty, C0->shl(ShlAmt));
3602 return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
3603 }
3605 if (IntrinsicID == Intrinsic::amdgcn_perm)
3606 return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);
3608 return nullptr;
3609 }
3611 static Constant *ConstantFoldScalarCall(StringRef Name,
3612 Intrinsic::ID IntrinsicID,
3613 Type *Ty,
3614 ArrayRef<Constant *> Operands,
3615 const TargetLibraryInfo *TLI,
3616 const CallBase *Call) {
3617 if (Operands.size() == 1)
3618 return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
3620 if (Operands.size() == 2) {
3621 if (Constant *FoldedLibCall =
3622 ConstantFoldLibCall2(Name, Ty, Operands, TLI)) {
3623 return FoldedLibCall;
3624 }
3625 return ConstantFoldIntrinsicCall2(IntrinsicID, Ty, Operands, Call);
3626 }
3628 if (Operands.size() == 3)
3629 return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
3631 return nullptr;
3632 }
3634 static Constant *ConstantFoldFixedVectorCall(
3635 StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
3636 ArrayRef<Constant *> Operands, const DataLayout &DL,
3637 const TargetLibraryInfo *TLI, const CallBase *Call) {
3638 SmallVector<Constant *, 4> Result(FVTy->getNumElements());
3639 SmallVector<Constant *, 4> Lane(Operands.size());
3640 Type *Ty = FVTy->getElementType();
3642 switch (IntrinsicID) {
3643 case Intrinsic::masked_load: {
3644 auto *SrcPtr = Operands[0];
3645 auto *Mask = Operands[2];
3646 auto *Passthru = Operands[3];
3648 Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);
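// Build the result lane-by-lane: a true mask lane takes the loaded value,
// a false lane takes the passthru; bail out if any lane is undecidable.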
3650 SmallVector<Constant *, 32> NewElements;
3651 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
3652 auto *MaskElt = Mask->getAggregateElement(I);
3653 if (!MaskElt)
3654 break;
3655 auto *PassthruElt = Passthru->getAggregateElement(I);
3656 auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
3657 if (isa<UndefValue>(MaskElt)) {
3658 if (PassthruElt)
3659 NewElements.push_back(PassthruElt);
3660 else if (VecElt)
3661 NewElements.push_back(VecElt);
3662 else
3663 return nullptr;
3664 }
3665 if (MaskElt->isNullValue()) {
3666 if (!PassthruElt)
3667 return nullptr;
3668 NewElements.push_back(PassthruElt);
3669 } else if (MaskElt->isOneValue()) {
3670 if (!VecElt)
3671 return nullptr;
3672 NewElements.push_back(VecElt);
3673 } else {
3674 return nullptr;
3675 }
3676 }
3677 if (NewElements.size() != FVTy->getNumElements())
3678 return nullptr;
3679 return ConstantVector::get(NewElements);
3680 }
3681 case Intrinsic::arm_mve_vctp8:
3682 case Intrinsic::arm_mve_vctp16:
3683 case Intrinsic::arm_mve_vctp32:
3684 case Intrinsic::arm_mve_vctp64: {
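// MVE VCTP produces a predicate with the first min(Limit, NumLanes) lanes
// set to true and the rest false.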
3685 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
3686 unsigned Lanes = FVTy->getNumElements();
3687 uint64_t Limit = Op->getZExtValue();
3689 SmallVector<Constant *, 16> NCs;
3690 for (unsigned i = 0; i < Lanes; i++) {
3691 if (i < Limit)
3692 NCs.push_back(ConstantInt::getTrue(Ty));
3693 else
3694 NCs.push_back(ConstantInt::getFalse(Ty));
3695 }
3696 return ConstantVector::get(NCs);
3697 }
3698 return nullptr;
3699 }
3700 case Intrinsic::get_active_lane_mask: {
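// Lane I of the result is true iff Base + I < Limit.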
3701 auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
3702 auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
3703 if (Op0 && Op1) {
3704 unsigned Lanes = FVTy->getNumElements();
3705 uint64_t Base = Op0->getZExtValue();
3706 uint64_t Limit = Op1->getZExtValue();
3708 SmallVector<Constant *, 16> NCs;
3709 for (unsigned i = 0; i < Lanes; i++) {
3710 if (Base + i < Limit)
3711 NCs.push_back(ConstantInt::getTrue(Ty));
3712 else
3713 NCs.push_back(ConstantInt::getFalse(Ty));
3714 }
3715 return ConstantVector::get(NCs);
3716 }
3717 return nullptr;
3718 }
3719 default:
3720 break;
3721 }
3723 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
3724 // Gather a column of constants.
3725 for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
3726 // Some intrinsics use a scalar type for certain arguments.
3727 if (isVectorIntrinsicWithScalarOpAtArg(IntrinsicID, J, /*TTI=*/nullptr)) {
3728 Lane[J] = Operands[J];
3729 continue;
3730 }
3732 Constant *Agg = Operands[J]->getAggregateElement(I);
3733 if (!Agg)
3734 return nullptr;
3736 Lane[J] = Agg;
3737 }
3739 // Use the regular scalar folding to simplify this column.
3740 Constant *Folded =
3741 ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
3742 if (!Folded)
3743 return nullptr;
3744 Result[I] = Folded;
3745 }
3747 return ConstantVector::get(Result);
3748 }
3750 static Constant *ConstantFoldScalableVectorCall(
3751 StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
3752 ArrayRef<Constant *> Operands, const DataLayout &DL,
3753 const TargetLibraryInfo *TLI, const CallBase *Call) {
3754 switch (IntrinsicID) {
3755 case Intrinsic::aarch64_sve_convert_from_svbool: {
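// An all-false svbool converts to an all-false predicate of any element
// count, so the zero source folds directly.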
3756 auto *Src = dyn_cast<Constant>(Operands[0]);
3757 if (!Src || !Src->isNullValue())
3758 break;
3760 return ConstantInt::getFalse(SVTy);
3761 }
3762 default:
3763 break;
3764 }
3765 return nullptr;
3766 }
3768 static std::pair<Constant *, Constant *>
3769 ConstantFoldScalarFrexpCall(Constant *Op, Type *IntTy) {
3770 if (isa<PoisonValue>(Op))
3771 return {Op, PoisonValue::get(IntTy)};
3773 auto *ConstFP = dyn_cast<ConstantFP>(Op);
3774 if (!ConstFP)
3775 return {};
3777 const APFloat &U = ConstFP->getValueAPF();
3778 int FrexpExp;
3779 APFloat FrexpMant = frexp(U, FrexpExp, APFloat::rmNearestTiesToEven);
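// frexp splits U so that U == Mant * 2^Exp with |Mant| in [0.5, 1.0),
// e.g. frexp(8.0) yields mantissa 0.5 and exponent 4.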
3780 Constant *Result0 = ConstantFP::get(ConstFP->getType(), FrexpMant);
3782 // The exponent is an "unspecified value" for inf/nan. We use zero to avoid
3783 // using undef.
3784 Constant *Result1 = FrexpMant.isFinite()
3785 ? ConstantInt::getSigned(IntTy, FrexpExp)
3786 : ConstantInt::getNullValue(IntTy);
3787 return {Result0, Result1};
3788 }
3790 /// Handle intrinsics that return tuples, which may be tuples of vectors.
3791 static Constant *
3792 ConstantFoldStructCall(StringRef Name, Intrinsic::ID IntrinsicID,
3793 StructType *StTy, ArrayRef<Constant *> Operands,
3794 const DataLayout &DL, const TargetLibraryInfo *TLI,
3795 const CallBase *Call) {
3797 switch (IntrinsicID) {
3798 case Intrinsic::frexp: {
3799 Type *Ty0 = StTy->getContainedType(0);
3800 Type *Ty1 = StTy->getContainedType(1)->getScalarType();
3802 if (auto *FVTy0 = dyn_cast<FixedVectorType>(Ty0)) {
3803 SmallVector<Constant *, 4> Results0(FVTy0->getNumElements());
3804 SmallVector<Constant *, 4> Results1(FVTy0->getNumElements());
3806 for (unsigned I = 0, E = FVTy0->getNumElements(); I != E; ++I) {
3807 Constant *Lane = Operands[0]->getAggregateElement(I);
3808 std::tie(Results0[I], Results1[I]) =
3809 ConstantFoldScalarFrexpCall(Lane, Ty1);
3810 if (!Results0[I])
3811 return nullptr;
3812 }
3814 return ConstantStruct::get(StTy, ConstantVector::get(Results0),
3815 ConstantVector::get(Results1));
3816 }
3818 auto [Result0, Result1] = ConstantFoldScalarFrexpCall(Operands[0], Ty1);
3819 if (!Result0)
3820 return nullptr;
3821 return ConstantStruct::get(StTy, Result0, Result1);
3822 }
3823 case Intrinsic::sincos: {
3824 Type *Ty = StTy->getContainedType(0);
3825 Type *TyScalar = Ty->getScalarType();
3827 auto ConstantFoldScalarSincosCall =
3828 [&](Constant *Op) -> std::pair<Constant *, Constant *> {
3829 Constant *SinResult =
3830 ConstantFoldScalarCall(Name, Intrinsic::sin, TyScalar, Op, TLI, Call);
3831 Constant *CosResult =
3832 ConstantFoldScalarCall(Name, Intrinsic::cos, TyScalar, Op, TLI, Call);
3833 return std::make_pair(SinResult, CosResult);
3834 };
3836 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
3837 SmallVector<Constant *> SinResults(FVTy->getNumElements());
3838 SmallVector<Constant *> CosResults(FVTy->getNumElements());
3840 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
3841 Constant *Lane = Operands[0]->getAggregateElement(I);
3842 std::tie(SinResults[I], CosResults[I]) =
3843 ConstantFoldScalarSincosCall(Lane);
3844 if (!SinResults[I] || !CosResults[I])
3845 return nullptr;
3846 }
3848 return ConstantStruct::get(StTy, ConstantVector::get(SinResults),
3849 ConstantVector::get(CosResults));
3850 }
3852 auto [SinResult, CosResult] = ConstantFoldScalarSincosCall(Operands[0]);
3853 if (!SinResult || !CosResult)
3854 return nullptr;
3855 return ConstantStruct::get(StTy, SinResult, CosResult);
3856 }
3857 default:
3858 // TODO: Constant folding of vector intrinsics that fall through here does
3859 // not work (e.g. overflow intrinsics)
3860 return ConstantFoldScalarCall(Name, IntrinsicID, StTy, Operands, TLI, Call);
3861 }
3863 return nullptr;
3864 }
3866 } // end anonymous namespace
3868 Constant *llvm::ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS,
3869 Constant *RHS, Type *Ty,
3870 Instruction *FMFSource) {
3871 return ConstantFoldIntrinsicCall2(ID, Ty, {LHS, RHS},
3872 dyn_cast_if_present<CallBase>(FMFSource));
3873 }
3875 Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
3876 ArrayRef<Constant *> Operands,
3877 const TargetLibraryInfo *TLI,
3878 bool AllowNonDeterministic) {
3879 if (Call->isNoBuiltin())
3880 return nullptr;
3881 if (!F->hasName())
3882 return nullptr;
3884 // If this is not an intrinsic and not recognized as a library call, bail out.
3885 Intrinsic::ID IID = F->getIntrinsicID();
3886 if (IID == Intrinsic::not_intrinsic) {
3887 if (!TLI)
3888 return nullptr;
3889 LibFunc LibF;
3890 if (!TLI->getLibFunc(*F, LibF))
3891 return nullptr;
3892 }
3894 // Conservatively assume that floating-point libcalls may be
3895 // non-deterministic.
3896 Type *Ty = F->getReturnType();
3897 if (!AllowNonDeterministic && Ty->isFPOrFPVectorTy())
3898 return nullptr;
3900 StringRef Name = F->getName();
3901 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
3902 return ConstantFoldFixedVectorCall(
3903 Name, IID, FVTy, Operands, F->getDataLayout(), TLI, Call);
3905 if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
3906 return ConstantFoldScalableVectorCall(
3907 Name, IID, SVTy, Operands, F->getDataLayout(), TLI, Call);
3909 if (auto *StTy = dyn_cast<StructType>(Ty))
3910 return ConstantFoldStructCall(Name, IID, StTy, Operands,
3911 F->getDataLayout(), TLI, Call);
3913 // TODO: If this is a library function, we already discovered that above,
3914 // so we should pass the LibFunc, not the name (and it might be better
3915 // still to separate intrinsic handling from libcalls).
3916 return ConstantFoldScalarCall(Name, IID, Ty, Operands, TLI, Call);
3917 }
3919 bool llvm::isMathLibCallNoop(const CallBase *Call,
3920 const TargetLibraryInfo *TLI) {
3921 // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
3922 // (and to some extent ConstantFoldScalarCall).
3923 if (Call->isNoBuiltin() || Call->isStrictFP())
3924 return false;
3925 Function *F = Call->getCalledFunction();
3926 if (!F)
3927 return false;
3929 LibFunc Func;
3930 if (!TLI || !TLI->getLibFunc(*F, Func))
3931 return false;
3933 if (Call->arg_size() == 1) {
3934 if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
3935 const APFloat &Op = OpC->getValueAPF();
3936 switch (Func) {
3937 case LibFunc_logl:
3938 case LibFunc_log:
3939 case LibFunc_logf:
3940 case LibFunc_log2l:
3941 case LibFunc_log2:
3942 case LibFunc_log2f:
3943 case LibFunc_log10l:
3944 case LibFunc_log10:
3945 case LibFunc_log10f:
3946 return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
3948 case LibFunc_ilogb:
3949 return !Op.isNaN() && !Op.isZero() && !Op.isInfinity();
3951 case LibFunc_expl:
3952 case LibFunc_exp:
3953 case LibFunc_expf:
3954 // FIXME: These boundaries are slightly conservative.
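// double exp(x) is finite down to roughly x = -745.13 and overflows above
// roughly x = 709.78 (ln(DBL_MAX)); the checks below stay inside that range.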
3955 if (OpC->getType()->isDoubleTy())
3956 return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
3957 if (OpC->getType()->isFloatTy())
3958 return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
3959 break;
3961 case LibFunc_exp2l:
3962 case LibFunc_exp2:
3963 case LibFunc_exp2f:
3964 // FIXME: These boundaries are slightly conservative.
3965 if (OpC->getType()->isDoubleTy())
3966 return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
3967 if (OpC->getType()->isFloatTy())
3968 return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
3969 break;
3971 case LibFunc_sinl:
3972 case LibFunc_sin:
3973 case LibFunc_sinf:
3974 case LibFunc_cosl:
3975 case LibFunc_cos:
3976 case LibFunc_cosf:
3977 return !Op.isInfinity();
3979 case LibFunc_tanl:
3980 case LibFunc_tan:
3981 case LibFunc_tanf: {
3982 // FIXME: Stop using the host math library.
3983 // FIXME: The computation isn't done in the right precision.
3984 Type *Ty = OpC->getType();
3985 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
3986 return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
3987 break;
3988 }
3990 case LibFunc_atan:
3991 case LibFunc_atanf:
3992 case LibFunc_atanl:
3993 // Per POSIX, this MAY fail if Op is denormal. We choose not to fail.
3994 return true;
3996 case LibFunc_asinl:
3997 case LibFunc_asin:
3998 case LibFunc_asinf:
3999 case LibFunc_acosl:
4000 case LibFunc_acos:
4001 case LibFunc_acosf:
4002 return !(Op < APFloat::getOne(Op.getSemantics(), true) ||
4003 Op > APFloat::getOne(Op.getSemantics()));
4005 case LibFunc_sinh:
4006 case LibFunc_cosh:
4007 case LibFunc_sinhf:
4008 case LibFunc_coshf:
4009 case LibFunc_sinhl:
4010 case LibFunc_coshl:
4011 // FIXME: These boundaries are slightly conservative.
4012 if (OpC->getType()->isDoubleTy())
4013 return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
4014 if (OpC->getType()->isFloatTy())
4015 return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
4016 break;
4018 case LibFunc_sqrtl:
4019 case LibFunc_sqrt:
4020 case LibFunc_sqrtf:
4021 return Op.isNaN() || Op.isZero() || !Op.isNegative();
4023 // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
4024 // maybe others?
4025 default:
4026 break;
4027 }
4028 }
4029 }
4031 if (Call->arg_size() == 2) {
4032 ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
4033 ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
4034 if (Op0C && Op1C) {
4035 const APFloat &Op0 = Op0C->getValueAPF();
4036 const APFloat &Op1 = Op1C->getValueAPF();
4038 switch (Func) {
4039 case LibFunc_powl:
4040 case LibFunc_pow:
4041 case LibFunc_powf: {
4042 // FIXME: Stop using the host math library.
4043 // FIXME: The computation isn't done in the right precision.
4044 Type *Ty = Op0C->getType();
4045 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
4046 if (Ty == Op1C->getType())
4047 return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
4048 }
4049 break;
4050 }
4052 case LibFunc_fmodl:
4053 case LibFunc_fmod:
4054 case LibFunc_fmodf:
4055 case LibFunc_remainderl:
4056 case LibFunc_remainder:
4057 case LibFunc_remainderf:
4058 return Op0.isNaN() || Op1.isNaN() ||
4059 (!Op0.isInfinity() && !Op1.isZero());
4061 case LibFunc_atan2:
4062 case LibFunc_atan2f:
4063 case LibFunc_atan2l:
4064 // Although IEEE-754 says atan2(+/-0.0, +/-0.0) are well-defined, and
4065 // GLIBC and MSVC do not appear to raise an error on those, we
4066 // cannot rely on that behavior. POSIX and C11 say that a domain error
4067 // may occur, so allow for that possibility.
4068 return !Op0.isZero() || !Op1.isZero();
4070 default:
4071 break;
4072 }
4073 }
4074 }
4076 return false;
4077 }
4079 void TargetFolder::anchor() {}