//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFold.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
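  //
  // Illustrative example (not from the original source): on a little-endian
  // target, bitcasting <2 x i16> <i16 0x1122, i16 0x3344> to i32 visits the
  // high element first and yields Result == 0x33441122, while a big-endian
  // target visits element 0 first and yields 0x11223344.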
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (isa_and_nonnull<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zext(Result.getBitWidth());
  }

  return nullptr;
}

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");

  // Catch the obvious splat cases.
  if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy, DL))
    return Res;

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating point, convert it to a vector
      // of int to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        auto *SrcIVTy = FixedVectorType::get(
            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (!isa<VectorType>(C->getType()) &&
      (isa<ConstantFP>(C) || isa<ConstantInt>(C))) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // Some of what follows may extend to cover scalable vectors but the current
  // implementation is fixed length specific.
  if (!isa<FixedVectorType>(C->getType()))
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C) &&
      !isa<ConstantInt>(C) && !isa<ConstantFP>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    auto *DestIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    auto *SrcIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    assert((isa<ConstantVector>(C) || // FIXME: Remove ConstantVector.
            isa<ConstantDataVector>(C) || isa<ConstantInt>(C)) &&
           "Constant folding cannot fail for plain fp->int bitcast!");
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
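  //
  // Worked example (illustration, not from the original source): narrowing
  // <4 x i32> to <2 x i64> on little endian builds result element 0 as
  //   (zext src0 to i64) | ((zext src1 to i64) << 32).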
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (isa_and_nonnull<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src) // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantFoldCastOperand(Instruction::ZExt, Src, Elt->getType(),
                                      DL);
        assert(Src && "Constant folding cannot fail on plain integers");

        // Shift it to the right place, depending on endianness.
        Src = ConstantFoldBinaryOpOperands(
            Instruction::Shl, Src, ConstantInt::get(Src->getType(), ShiftAmt),
            DL);
        assert(Src && "Constant folding cannot fail on plain integers");

        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantFoldBinaryOpOperands(Instruction::Or, Elt, Src, DL);
        assert(Elt && "Constant folding cannot fail on plain integers");
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      APInt Elt = Src->getValue().lshr(ShiftAmt);
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate and remember this piece.
      Result.push_back(ConstantInt::get(DstEltTy, Elt.trunc(DstBitSize)));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL,
                                      DSOLocalEquivalent **DSOEquiv) {
  if (DSOEquiv)
    *DSOEquiv = nullptr;

  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    if (DSOEquiv)
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
                                      DSOEquiv);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL,
                                  DSOEquiv))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();
    if (SrcTy == DestTy)
      return C;

    TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
    TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
    if (!TypeSize::isKnownGE(SrcSize, DestSize))
      return nullptr;

    // Catch the obvious splat cases (since all-zeros can coerce non-integral
    // pointers legally).
    if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy, DL))
      return Res;

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    // But be careful not to coerce non-integral pointers illegally.
    if (SrcSize == DestSize &&
        DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
            DL.isNonIntegralPointerType(DestTy->getScalarType())) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantFoldCastOperand(Cast, C, DestTy, DL);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill
    // down and find a bitcastable constant.
    if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
      C = ElemC;
    } else {
      // For non-byte-sized vector elements, the first element is not
      // necessarily located at the vector base address.
      if (auto *VT = dyn_cast<VectorType>(SrcTy))
        if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
          return nullptr;

      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}

namespace {

/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if ((CI->getBitWidth() & 7) != 0)
      return false;
    const APInt &Val = CI->getValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);
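
    // Byte-order example (illustration, not from the original source): for an
    // i32 with value 0x01020304, this loop emits bytes {04, 03, 02, 01} on a
    // little-endian target and {01, 02, 03, 04} on a big-endian target.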
    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = Val.extractBits(8, n * 8).getZExtValue();
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so, we're
      // done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element, we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    uint64_t NumElts, EltSize;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
      EltSize = DL.getTypeAllocSize(EltTy);
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
      // TODO: For non-byte-sized vectors, the current implementation assumes
      // there is padding to the next byte boundary between elements.
      if (!DL.typeSizeEqualsStoreSize(EltTy))
        return false;

      EltSize = DL.getTypeStoreSize(EltTy);
    }
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
                                       int64_t Offset, const DataLayout &DL) {
  // Bail out early. We do not expect to load from a scalable global variable.
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    // If this is a non-integer load, we can try folding it as an int load and
    // then bitcast the result. This can be useful for union cases. Note
    // that address spaces don't matter here since we're not going to result in
    // an actual new load.
    if (!LoadTy->isFloatingPointTy() && !LoadTy->isPointerTy() &&
        !LoadTy->isVectorTy())
      return nullptr;

    Type *MapTy = Type::getIntNTy(C->getContext(),
                                  DL.getTypeSizeInBits(LoadTy).getFixedValue());
    if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_AMXTy())
        // Materializing a zero can be done trivially without a bitcast.
        return Constant::getNullValue(LoadTy);
      Type *CastTy =
          LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, we need to first convert to a vector of
        // integers, then do a vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_AMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getIntToPtr(Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return PoisonValue::get(IntType);

  // TODO: We should be able to support scalable types.
  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
  if (InitializerSize.isScalable())
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset >= (int64_t)InitializerSize.getFixedValue())
    return PoisonValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
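  // The bytes that fall before the start of the global have no initializer
  // data; they simply stay zero in RawBytes while CurPtr is advanced past
  // them. For example (illustration, not from the original source), a 4-byte
  // load at Offset == -2 reads two leading zero bytes followed by the first
  // two bytes of the initializer.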
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

} // anonymous namespace

// If GV is a constant with an initializer read its representation starting
// at Offset and return it as a constant array of unsigned char. Otherwise
// return null.
Constant *llvm::ReadByteArrayFromGlobal(const GlobalVariable *GV,
                                        uint64_t Offset) {
  if (!GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;

  const DataLayout &DL = GV->getDataLayout();
  Constant *Init = const_cast<Constant *>(GV->getInitializer());
  TypeSize InitSize = DL.getTypeAllocSize(Init->getType());
  if (InitSize < Offset)
    return nullptr;

  uint64_t NBytes = InitSize - Offset;
  if (NBytes > UINT16_MAX)
    // Bail for large initializers in excess of 64K to avoid allocating
    // too much memory.
    // Offset is assumed to be less than or equal to InitSize (this
    // is enforced in ReadDataFromGlobal).
    return nullptr;

  SmallVector<unsigned char, 256> RawBytes(static_cast<size_t>(NBytes));
  unsigned char *CurPtr = RawBytes.data();

  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
    return nullptr;

  return ConstantDataArray::get(GV->getContext(), RawBytes);
}

/// If this Offset points exactly to the start of an aggregate element, return
/// that element, otherwise return nullptr.
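/// For example (illustration, not from the original source): with
/// Base = { i32, [2 x i16] } and Offset = 4, getGEPIndicesForOffset produces
/// indices {0, 1, 0}, so this returns the first i16 of the array member;
/// Offset = 5 lands mid-element and returns nullptr.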
Constant *getConstantAtOffset(Constant *Base, APInt Offset,
                              const DataLayout &DL) {
  if (Offset.isZero())
    return Base;

  if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
    return nullptr;

  Type *ElemTy = Base->getType();
  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
  if (!Offset.isZero() || !Indices[0].isZero())
    return nullptr;

  Constant *C = Base;
  for (const APInt &Index : drop_begin(Indices)) {
    if (Index.isNegative() || Index.getActiveBits() >= 32)
      return nullptr;

    C = C->getAggregateElement(Index.getZExtValue());
    if (!C)
      return nullptr;
  }

  return C;
}

Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                          const APInt &Offset,
                                          const DataLayout &DL) {
  if (Constant *AtOffset = getConstantAtOffset(C, Offset, DL))
    if (Constant *Result = ConstantFoldLoadThroughBitcast(AtOffset, Ty, DL))
      return Result;

  // Explicitly check for out-of-bounds access, so we return poison even if the
  // constant is a uniform value.
  TypeSize Size = DL.getTypeAllocSize(C->getType());
  if (!Size.isScalable() && Offset.sge(Size.getFixedValue()))
    return PoisonValue::get(Ty);

  // Try an offset-independent fold of a uniform value.
  if (Constant *Result = ConstantFoldLoadFromUniformValue(C, Ty, DL))
    return Result;

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  if (Offset.getSignificantBits() <= 64)
    if (Constant *Result =
            FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
      return Result;

  return nullptr;
}

Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                          const DataLayout &DL) {
  return ConstantFoldLoadFromConst(C, Ty, APInt(64, 0), DL);
}

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             APInt Offset,
                                             const DataLayout &DL) {
  // We can only fold loads from constant globals with a definitive
  // initializer. Check this upfront, to skip expensive offset calculations.
  auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(C));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;

  C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));

  if (C == GV)
    if (Constant *Result = ConstantFoldLoadFromConst(GV->getInitializer(), Ty,
                                                     Offset, DL))
      return Result;

  // If this load comes from anywhere in a uniform constant global, the value
  // is always the same, regardless of the loaded offset.
  return ConstantFoldLoadFromUniformValue(GV->getInitializer(), Ty, DL);
}

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  APInt Offset(DL.getIndexTypeSizeInBits(C->getType()), 0);
  return ConstantFoldLoadFromConstPtr(C, Ty, std::move(Offset), DL);
}

Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty,
                                                 const DataLayout &DL) {
  if (isa<PoisonValue>(C))
    return PoisonValue::get(Ty);
  if (isa<UndefValue>(C))
    return UndefValue::get(Ty);
  // If padding is needed when storing C to memory, then it isn't considered
  // uniform.
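  // For example (illustration, not from the original source): an i1 constant
  // occupies a single bit but stores as a full byte; the padding bits mean its
  // memory image is not a simple repetition of the value, so it is rejected
  // here.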
  if (!DL.typeSizeEqualsStoreSize(C->getType()))
    return nullptr;
  if (C->isNullValue() && !Ty->isX86_AMXTy())
    return Constant::getNullValue(Ty);
  if (C->isAllOnesValue() &&
      (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
    return Constant::getAllOnesValue(Ty);
  return nullptr;
}

namespace {

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together. If target data info is available, it is provided as DL,
/// otherwise DL is null.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnes()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnes()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }
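
    // If the combined known bits pin every bit of the result, fold the 'and'
    // to that constant outright. For example (illustration, not from the
    // original source), with Op0 known to be 0b??00 and Op1 == 0b0011, every
    // result bit is known zero and the fold produces 0.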
    Known0 &= Known1;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly cast by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, GEPNoWrapFlags NW,
                         std::optional<ConstantRange> InRange,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType =
          Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
      Constant *NewIdx = ConstantFoldCastOperand(
          CastInst::getCastOpcode(Ops[i], true, NewType, true), Ops[i], NewType,
          DL);
      if (!NewIdx)
        return nullptr;
      NewIdxs.push_back(NewIdx);
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C =
      ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], NewIdxs, NW, InRange);
  return ConstantFoldConstant(C, DL, TLI);
}

/// If we can symbolically evaluate the GEP constant expression, do so.
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy, GEP->getNoWrapFlags(),
                                   GEP->getInRange(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i]) || !Ops[i]->getType()->isIntegerTy())
      return nullptr;

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset = APInt(
      BitWidth,
      DL.getIndexedOffsetInType(
          SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)),
      /*isSigned=*/true, /*implicitTrunc=*/true);
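
  // Offset accumulation example (illustration, not from the original source):
  // getelementptr [5 x i32], ptr @g, i64 0, i64 3 contributes a byte offset
  // of 0 * 20 + 3 * 4 == 12.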
  std::optional<ConstantRange> InRange = GEP->getInRange();
  if (InRange)
    InRange = InRange->sextOrTrunc(BitWidth);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  GEPNoWrapFlags NW = GEP->getNoWrapFlags();
  bool Overflow = false;
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    NW &= GEP->getNoWrapFlags();

    SmallVector<Value *, 4> NestedOps(llvm::drop_begin(GEP->operands()));

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    // TODO: Try to intersect two inrange attributes?
    if (!InRange) {
      InRange = GEP->getInRange();
      if (InRange)
        // Adjust inrange by offset until now.
        InRange = InRange->sextOrTrunc(BitWidth).subtract(Offset);
    }

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset = Offset.sadd_ov(
        APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps),
              /*isSigned=*/true, /*implicitTrunc=*/true),
        Overflow);
  }

  // Preserving nusw (without inbounds) also requires that the offset
  // additions did not overflow.
  if (NW.hasNoUnsignedSignedWrap() && !NW.isInBounds() && Overflow)
    NW = NW.withoutNoUnsignedSignedWrap();

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Try to infer inbounds for GEPs of globals.
  if (!NW.isInBounds() && Offset.isNonNegative()) {
    bool CanBeNull, CanBeFreed;
    uint64_t DerefBytes =
        Ptr->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
    if (DerefBytes != 0 && !CanBeNull && Offset.sle(DerefBytes))
      NW |= GEPNoWrapFlags::inBounds();
  }

  // nusw + nneg -> nuw
  if (NW.hasNoUnsignedSignedWrap() && Offset.isNonNegative())
    NW |= GEPNoWrapFlags::noUnsignedWrap();

  // Otherwise canonicalize this to a single ptradd.
  LLVMContext &Ctx = Ptr->getContext();
  return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ctx), Ptr,
                                        ConstantInt::get(Ctx, Offset), NW,
                                        InRange);
}

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned; if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI,
                                       bool AllowNonDeterministic) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode)) {
    switch (Opcode) {
    default:
      break;
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
      // Handle floating point instructions separately to account for denormals
      // TODO: If a constant expression is being folded rather than an
      // instruction, denormals will not be flushed/treated as zero
      if (const auto *I = dyn_cast<Instruction>(InstOrCE)) {
        return ConstantFoldFPInstOperands(Opcode, Ops[0], Ops[1], DL, I,
                                          AllowNonDeterministic);
      }
    }
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
  }

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    Type *SrcElemTy = GEP->getSourceElementType();
    if (!ConstantExpr::isSupportedGetElementPtr(SrcElemTy))
      return nullptr;

    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], Ops.slice(1),
                                          GEP->getNoWrapFlags(),
                                          GEP->getInRange());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {
    auto *C = cast<CmpInst>(InstOrCE);
    return ConstantFoldCompareInstOperands(C->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI, C);
  }
  case Instruction::Freeze:
    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI,
                                AllowNonDeterministic);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantFoldSelectInstruction(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantFoldExtractValueInstruction(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::InsertValue:
    return ConstantFoldInsertValueInstruction(
        Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  case Instruction::Load: {
    const auto *LI = dyn_cast<LoadInst>(InstOrCE);
    if (LI->isVolatile())
      return nullptr;
    return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
  }
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Res = ConstantFoldInstOperandsImpl(
            CE, CE->getOpcode(), Ops, DL, TLI, /*AllowNonDeterministic=*/true))
      return Res;
    return const_cast<Constant *>(C);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI,
                                         bool AllowNonDeterministic) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI,
                                      AllowNonDeterministic);
}

Constant *llvm::ConstantFoldCompareInstOperands(
    unsigned IntPredicate, Constant *Ops0, Constant *Ops1, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const Instruction *I) {
  CmpInst::Predicate Predicate = (CmpInst::Predicate)IntPredicate;
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        if (Constant *C = ConstantFoldIntegerCast(CE0->getOperand(0), IntPtrTy,
                                                  /*IsSigned*/ false, DL)) {
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantFoldIntegerCast(CE0->getOperand(0), IntPtrTy,
                                                 /*IsSigned*/ false, DL);
          Constant *C1 = ConstantFoldIntegerCast(CE1->getOperand(0), IntPtrTy,
                                                 /*IsSigned*/ false, DL);
          if (C0 && C1)
            return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptrty in size,
        // otherwise there is a truncation or extension that we aren't
        // modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType())
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
        }
      }
    }

    // Convert pointer comparison (base+offset1) pred (base+offset2) into
    // offset1 pred offset2, for the case where the offset is inbounds. This
    // only works for equality and unsigned comparison, as inbounds permits
    // crossing the sign boundary. However, the offset comparison itself is
    // signed.
    if (Ops0->getType()->isPointerTy() && !ICmpInst::isSigned(Predicate)) {
      unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
      APInt Offset0(IndexWidth, 0);
      Value *Stripped0 =
          Ops0->stripAndAccumulateInBoundsConstantOffsets(DL, Offset0);
      APInt Offset1(IndexWidth, 0);
      Value *Stripped1 =
          Ops1->stripAndAccumulateInBoundsConstantOffsets(DL, Offset1);
      if (Stripped0 == Stripped1)
        return ConstantInt::getBool(
            Ops0->getContext(),
            ICmpInst::compare(Offset0, Offset1,
                              ICmpInst::getSignedPredicate(Predicate)));
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate(Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  if (CmpInst::isFPPredicate(Predicate)) {
    // Flush any denormal constant float input according to denormal handling
    // mode.
    Ops0 = FlushFPConstant(Ops0, I, /*IsOutput=*/false);
    if (!Ops0)
      return nullptr;
    Ops1 = FlushFPConstant(Ops1, I, /*IsOutput=*/false);
    if (!Ops1)
      return nullptr;
  }

  return ConstantFoldCompareInstruction(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantFoldUnaryInstruction(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  if (ConstantExpr::isDesirableBinOp(Opcode))
    return ConstantExpr::get(Opcode, LHS, RHS);
  return ConstantFoldBinaryInstruction(Opcode, LHS, RHS);
}
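
// Denormal-flushing example (illustration, not from the original source):
// under "denormal-fp-math"="preserve-sign", the smallest single-precision
// subnormal 0x1p-149f is flushed to +0.0 (or to -0.0 when negative) before
// and after a folded FP operation.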
static ConstantFP *flushDenormalConstant(Type *Ty, const APFloat &APF,
                                         DenormalMode::DenormalModeKind Mode) {
  switch (Mode) {
  case DenormalMode::Dynamic:
    return nullptr;
  case DenormalMode::IEEE:
    return ConstantFP::get(Ty->getContext(), APF);
  case DenormalMode::PreserveSign:
    return ConstantFP::get(
        Ty->getContext(),
        APFloat::getZero(APF.getSemantics(), APF.isNegative()));
  case DenormalMode::PositiveZero:
    return ConstantFP::get(Ty->getContext(),
                           APFloat::getZero(APF.getSemantics(), false));
  default:
    break;
  }

  llvm_unreachable("unknown denormal mode");
}

/// Return the denormal mode that can be assumed when executing a floating
/// point operation at \p CtxI.
static DenormalMode getInstrDenormalMode(const Instruction *CtxI, Type *Ty) {
  if (!CtxI || !CtxI->getParent() || !CtxI->getFunction())
    return DenormalMode::getDynamic();
  return CtxI->getFunction()->getDenormalMode(Ty->getFltSemantics());
}

static ConstantFP *flushDenormalConstantFP(ConstantFP *CFP,
                                           const Instruction *Inst,
                                           bool IsOutput) {
  const APFloat &APF = CFP->getValueAPF();
  if (!APF.isDenormal())
    return CFP;

  DenormalMode Mode = getInstrDenormalMode(Inst, CFP->getType());
  return flushDenormalConstant(CFP->getType(), APF,
                               IsOutput ? Mode.Output : Mode.Input);
}

Constant *llvm::FlushFPConstant(Constant *Operand, const Instruction *Inst,
                                bool IsOutput) {
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(Operand))
    return flushDenormalConstantFP(CFP, Inst, IsOutput);

  if (isa<ConstantAggregateZero, UndefValue, ConstantExpr>(Operand))
    return Operand;

  Type *Ty = Operand->getType();
  VectorType *VecTy = dyn_cast<VectorType>(Ty);
  if (VecTy) {
    if (auto *Splat = dyn_cast_or_null<ConstantFP>(Operand->getSplatValue())) {
      ConstantFP *Folded = flushDenormalConstantFP(Splat, Inst, IsOutput);
      if (!Folded)
        return nullptr;
      return ConstantVector::getSplat(VecTy->getElementCount(), Folded);
    }

    Ty = VecTy->getElementType();
  }

  if (const auto *CV = dyn_cast<ConstantVector>(Operand)) {
    SmallVector<Constant *, 16> NewElts;
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Element = CV->getAggregateElement(i);
      if (isa<UndefValue>(Element)) {
        NewElts.push_back(Element);
        continue;
      }

      ConstantFP *CFP = dyn_cast<ConstantFP>(Element);
      if (!CFP)
        return nullptr;

      ConstantFP *Folded = flushDenormalConstantFP(CFP, Inst, IsOutput);
      if (!Folded)
        return nullptr;
      NewElts.push_back(Folded);
    }

    return ConstantVector::get(NewElts);
  }

  if (const auto *CDV = dyn_cast<ConstantDataVector>(Operand)) {
    SmallVector<Constant *, 16> NewElts;
    for (unsigned I = 0, E = CDV->getNumElements(); I < E; ++I) {
      const APFloat &Elt = CDV->getElementAsAPFloat(I);
      if (!Elt.isDenormal()) {
        NewElts.push_back(ConstantFP::get(Ty, Elt));
      } else {
        DenormalMode Mode = getInstrDenormalMode(Inst, Ty);
        ConstantFP *Folded =
            flushDenormalConstant(Ty, Elt, IsOutput ? Mode.Output : Mode.Input);
        if (!Folded)
          return nullptr;
        NewElts.push_back(Folded);
      }
    }

    return ConstantVector::get(NewElts);
  }

  return nullptr;
}

Constant *llvm::ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
                                           Constant *RHS, const DataLayout &DL,
                                           const Instruction *I,
                                           bool AllowNonDeterministic) {
  if (Instruction::isBinaryOp(Opcode)) {
    // Flush denormal inputs if needed.
    Constant *Op0 = FlushFPConstant(LHS, I, /* IsOutput */ false);
    if (!Op0)
      return nullptr;
    Constant *Op1 = FlushFPConstant(RHS, I, /* IsOutput */ false);
    if (!Op1)
      return nullptr;

    // If nsz or an algebraic FMF flag is set, the result of the FP operation
    // may change due to future optimization. Don't constant fold them if
    // non-deterministic results are not allowed.
    if (!AllowNonDeterministic)
      if (auto *FP = dyn_cast_or_null<FPMathOperator>(I))
        if (FP->hasNoSignedZeros() || FP->hasAllowReassoc() ||
            FP->hasAllowContract() || FP->hasAllowReciprocal())
          return nullptr;

    // Calculate the constant result.
    Constant *C = ConstantFoldBinaryOpOperands(Opcode, Op0, Op1, DL);
    if (!C)
      return nullptr;

    // Flush denormal output if needed.
    C = FlushFPConstant(C, I, /* IsOutput */ true);
    if (!C)
      return nullptr;

    // The precise NaN value is non-deterministic.
    if (!AllowNonDeterministic && C->isNaN())
      return nullptr;

    return C;
  }

  // If the instruction lacks a parent/function and the denormal mode cannot be
  // determined, use the default (IEEE).
  return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
}

Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      Constant *FoldedValue = nullptr;
      // If the input is an inttoptr, eliminate the pair. This requires knowing
      // the width of a pointer, so it can't be done in ConstantExpr::getCast.
      if (CE->getOpcode() == Instruction::IntToPtr) {
        // zext/trunc the inttoptr to pointer size.
        FoldedValue = ConstantFoldIntegerCast(CE->getOperand(0),
                                              DL.getIntPtrType(CE->getType()),
                                              /*IsSigned=*/false, DL);
      } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
        // If we have a GEP, we can perform the following folds:
        // (ptrtoint (gep null, x)) -> x
        // (ptrtoint (gep (gep null, x), y) -> x + y, etc.
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt BaseOffset(BitWidth, 0);
        auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
            DL, BaseOffset, /*AllowNonInbounds=*/true));
        if (Base->isNullValue()) {
          FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
        } else {
          // ptrtoint (gep i8, Ptr, (sub 0, V)) -> sub (ptrtoint Ptr), V
          if (GEP->getNumIndices() == 1 &&
              GEP->getSourceElementType()->isIntegerTy(8)) {
            auto *Ptr = cast<Constant>(GEP->getPointerOperand());
            auto *Sub = dyn_cast<ConstantExpr>(GEP->getOperand(1));
            Type *IntIdxTy = DL.getIndexType(Ptr->getType());
            if (Sub && Sub->getType() == IntIdxTy &&
                Sub->getOpcode() == Instruction::Sub &&
                Sub->getOperand(0)->isNullValue())
              FoldedValue = ConstantExpr::getSub(
                  ConstantExpr::getPtrToInt(Ptr, IntIdxTy), Sub->getOperand(1));
          }
        }
      }
      if (FoldedValue) {
        // Do a zext or trunc to get to the ptrtoint dest size.
        return ConstantFoldIntegerCast(FoldedValue, DestTy, /*IsSigned=*/false,
                                       DL);
      }
    }
    break;
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    break;
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }

  if (ConstantExpr::isDesirableCastOp(Opcode))
    return ConstantExpr::getCast(Opcode, C, DestTy);
  return ConstantFoldCastInstruction(Opcode, C, DestTy);
}

Constant *llvm::ConstantFoldIntegerCast(Constant *C, Type *DestTy,
                                        bool IsSigned, const DataLayout &DL) {
  Type *SrcTy = C->getType();
  if (SrcTy == DestTy)
    return C;
  if (SrcTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
    return ConstantFoldCastOperand(Instruction::Trunc, C, DestTy, DL);
  if (IsSigned)
    return ConstantFoldCastOperand(Instruction::SExt, C, DestTy, DL);
  return ConstantFoldCastOperand(Instruction::ZExt, C, DestTy, DL);
}
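
// Usage example (illustration, not from the original source): casting
// i16 0x8000 to i32 yields 0xFFFF8000 with IsSigned == true (sext) and
// 0x00008000 with IsSigned == false (zext).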

//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
  if (Call->isNoBuiltin())
    return false;
  if (Call->getFunctionType() != F->getFunctionType())
    return false;
  switch (F->getIntrinsicID()) {
  // Operations that do not operate on floating-point numbers and do not
  // depend on the FP environment can be folded even in strictfp functions.
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::masked_load:
  case Intrinsic::get_active_lane_mask:
  case Intrinsic::abs:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::scmp:
  case Intrinsic::ucmp:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::bitreverse:
  case Intrinsic::is_constant:
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
  // Target intrinsics
  case Intrinsic::amdgcn_perm:
  case Intrinsic::amdgcn_wave_reduce_umin:
  case Intrinsic::amdgcn_wave_reduce_umax:
  case Intrinsic::amdgcn_s_wqm:
  case Intrinsic::amdgcn_s_quadmask:
  case Intrinsic::amdgcn_s_bitreplicate:
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64:
  case Intrinsic::aarch64_sve_convert_from_svbool:
  // WebAssembly float semantics are always known
  case Intrinsic::wasm_trunc_signed:
  case Intrinsic::wasm_trunc_unsigned:
    return true;

1628 // Floating point operations cannot be folded in strictfp functions in the
1629 // general case; they can be folded if the FP environment is known to the compiler.
1630 case Intrinsic::minnum:
1631 case Intrinsic::maxnum:
1632 case Intrinsic::minimum:
1633 case Intrinsic::maximum:
1634 case Intrinsic::log:
1635 case Intrinsic::log2:
1636 case Intrinsic::log10:
1637 case Intrinsic::exp:
1638 case Intrinsic::exp2:
1639 case Intrinsic::exp10:
1640 case Intrinsic::sqrt:
1641 case Intrinsic::sin:
1642 case Intrinsic::cos:
1643 case Intrinsic::sincos:
1644 case Intrinsic::pow:
1645 case Intrinsic::powi:
1646 case Intrinsic::ldexp:
1647 case Intrinsic::fma:
1648 case Intrinsic::fmuladd:
1649 case Intrinsic::frexp:
1650 case Intrinsic::fptoui_sat:
1651 case Intrinsic::fptosi_sat:
1652 case Intrinsic::convert_from_fp16:
1653 case Intrinsic::convert_to_fp16:
1654 case Intrinsic::amdgcn_cos:
1655 case Intrinsic::amdgcn_cubeid:
1656 case Intrinsic::amdgcn_cubema:
1657 case Intrinsic::amdgcn_cubesc:
1658 case Intrinsic::amdgcn_cubetc:
1659 case Intrinsic::amdgcn_fmul_legacy:
1660 case Intrinsic::amdgcn_fma_legacy:
1661 case Intrinsic::amdgcn_fract:
1662 case Intrinsic::amdgcn_sin:
1663 // The intrinsics below depend on the rounding mode in MXCSR.
1664 case Intrinsic::x86_sse_cvtss2si:
1665 case Intrinsic::x86_sse_cvtss2si64:
1666 case Intrinsic::x86_sse_cvttss2si:
1667 case Intrinsic::x86_sse_cvttss2si64:
1668 case Intrinsic::x86_sse2_cvtsd2si:
1669 case Intrinsic::x86_sse2_cvtsd2si64:
1670 case Intrinsic::x86_sse2_cvttsd2si:
1671 case Intrinsic::x86_sse2_cvttsd2si64:
1672 case Intrinsic::x86_avx512_vcvtss2si32:
1673 case Intrinsic::x86_avx512_vcvtss2si64:
1674 case Intrinsic::x86_avx512_cvttss2si:
1675 case Intrinsic::x86_avx512_cvttss2si64:
1676 case Intrinsic::x86_avx512_vcvtsd2si32:
1677 case Intrinsic::x86_avx512_vcvtsd2si64:
1678 case Intrinsic::x86_avx512_cvttsd2si:
1679 case Intrinsic::x86_avx512_cvttsd2si64:
1680 case Intrinsic::x86_avx512_vcvtss2usi32:
1681 case Intrinsic::x86_avx512_vcvtss2usi64:
1682 case Intrinsic::x86_avx512_cvttss2usi:
1683 case Intrinsic::x86_avx512_cvttss2usi64:
1684 case Intrinsic::x86_avx512_vcvtsd2usi32:
1685 case Intrinsic::x86_avx512_vcvtsd2usi64:
1686 case Intrinsic::x86_avx512_cvttsd2usi:
1687 case Intrinsic::x86_avx512_cvttsd2usi64:
1688 return !Call->isStrictFP();
1690 // Sign operations are actually bitwise operations; they do not raise
1691 // exceptions even for SNaNs.
1692 case Intrinsic::fabs:
1693 case Intrinsic::copysign:
1694 case Intrinsic::is_fpclass:
1695 // Non-constrained variants of rounding operations imply the default FP
1696 // environment, so they can be folded in any case.
1697 case Intrinsic::ceil:
1698 case Intrinsic::floor:
1699 case Intrinsic::round:
1700 case Intrinsic::roundeven:
1701 case Intrinsic::trunc:
1702 case Intrinsic::nearbyint:
1703 case Intrinsic::rint:
1704 case Intrinsic::canonicalize:
1705 // Constrained intrinsics can be folded if the FP environment is known
1706 // to the compiler.
1707 case Intrinsic::experimental_constrained_fma:
1708 case Intrinsic::experimental_constrained_fmuladd:
1709 case Intrinsic::experimental_constrained_fadd:
1710 case Intrinsic::experimental_constrained_fsub:
1711 case Intrinsic::experimental_constrained_fmul:
1712 case Intrinsic::experimental_constrained_fdiv:
1713 case Intrinsic::experimental_constrained_frem:
1714 case Intrinsic::experimental_constrained_ceil:
1715 case Intrinsic::experimental_constrained_floor:
1716 case Intrinsic::experimental_constrained_round:
1717 case Intrinsic::experimental_constrained_roundeven:
1718 case Intrinsic::experimental_constrained_trunc:
1719 case Intrinsic::experimental_constrained_nearbyint:
1720 case Intrinsic::experimental_constrained_rint:
1721 case Intrinsic::experimental_constrained_fcmp:
1722 case Intrinsic::experimental_constrained_fcmps:
1723 return true;
1724 default:
1725 return false;
1726 case Intrinsic::not_intrinsic: break;
1727 }
1728
1729 if (!F->hasName() || Call->isStrictFP())
1730 return false;
1732 // In these cases a length check is required: we don't want to return true
1733 // for a name like "cos\0blah", which strcmp would consider equal to "cos"
1734 // even though it has length 8.
1735 StringRef Name = F->getName();
1736 switch (Name[0]) {
1737 default:
1738 return false;
1739 case 'a':
1740 return Name == "acos" || Name == "acosf" ||
1741 Name == "asin" || Name == "asinf" ||
1742 Name == "atan" || Name == "atanf" ||
1743 Name == "atan2" || Name == "atan2f";
1744 case 'c':
1745 return Name == "ceil" || Name == "ceilf" ||
1746 Name == "cos" || Name == "cosf" ||
1747 Name == "cosh" || Name == "coshf";
1748 case 'e':
1749 return Name == "exp" || Name == "expf" || Name == "exp2" ||
1750 Name == "exp2f" || Name == "erf" || Name == "erff";
1751 case 'f':
1752 return Name == "fabs" || Name == "fabsf" ||
1753 Name == "floor" || Name == "floorf" ||
1754 Name == "fmod" || Name == "fmodf";
1755 case 'i':
1756 return Name == "ilogb" || Name == "ilogbf";
1757 case 'l':
1758 return Name == "log" || Name == "logf" || Name == "logl" ||
1759 Name == "log2" || Name == "log2f" || Name == "log10" ||
1760 Name == "log10f" || Name == "logb" || Name == "logbf" ||
1761 Name == "log1p" || Name == "log1pf";
1762 case 'n':
1763 return Name == "nearbyint" || Name == "nearbyintf";
1764 case 'p':
1765 return Name == "pow" || Name == "powf";
1766 case 'r':
1767 return Name == "remainder" || Name == "remainderf" ||
1768 Name == "rint" || Name == "rintf" ||
1769 Name == "round" || Name == "roundf";
1770 case 's':
1771 return Name == "sin" || Name == "sinf" ||
1772 Name == "sinh" || Name == "sinhf" ||
1773 Name == "sqrt" || Name == "sqrtf";
1774 case 't':
1775 return Name == "tan" || Name == "tanf" ||
1776 Name == "tanh" || Name == "tanhf" ||
1777 Name == "trunc" || Name == "truncf";
1778 case '_':
1779 // Check for various function names that get used for the math functions
1780 // when the header files are preprocessed with the macro
1781 // __FINITE_MATH_ONLY__ enabled.
1782 // The '12' here is the length of the shortest name that can match.
1783 // We need to check the size before looking at Name[1] and Name[2]
1784 // anyway, so we may as well check against a limit that eliminates mismatches.
1785 if (Name.size() < 12 || Name[1] != '_')
1786 return false;
1787 switch (Name[2]) {
1788 default:
1789 return false;
1790 case 'a':
1791 return Name == "__acos_finite" || Name == "__acosf_finite" ||
1792 Name == "__asin_finite" || Name == "__asinf_finite" ||
1793 Name == "__atan2_finite" || Name == "__atan2f_finite";
1794 case 'c':
1795 return Name == "__cosh_finite" || Name == "__coshf_finite";
1796 case 'e':
1797 return Name == "__exp_finite" || Name == "__expf_finite" ||
1798 Name == "__exp2_finite" || Name == "__exp2f_finite";
1799 case 'l':
1800 return Name == "__log_finite" || Name == "__logf_finite" ||
1801 Name == "__log10_finite" || Name == "__log10f_finite";
1802 case 'p':
1803 return Name == "__pow_finite" || Name == "__powf_finite";
1804 case 's':
1805 return Name == "__sinh_finite" || Name == "__sinhf_finite";
1806 }
1807 }
1808 }
1809
1810 namespace {
1812 Constant *GetConstantFoldFPValue(double V, Type *Ty) {
1813 if (Ty->isHalfTy() || Ty->isFloatTy()) {
1814 APFloat APF(V);
1815 bool unused;
1816 APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
1817 return ConstantFP::get(Ty->getContext(), APF);
1818 }
1819 if (Ty->isDoubleTy())
1820 return ConstantFP::get(Ty->getContext(), APFloat(V));
1821 llvm_unreachable("Can only constant fold half/float/double");
1822 }
1823
1824 #if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
1825 Constant *GetConstantFoldFPValue128(float128 V, Type *Ty) {
1826 if (Ty->isFP128Ty())
1827 return ConstantFP::get(Ty, V);
1828 llvm_unreachable("Can only constant fold fp128");
1829 }
1830 #endif
1832 /// Clear the floating-point exception state.
1833 inline void llvm_fenv_clearexcept() {
1834 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
1835 feclearexcept(FE_ALL_EXCEPT);
1836 #endif
1837 errno = 0;
1838 }
1839
1840 /// Test if a floating-point exception was raised.
1841 inline bool llvm_fenv_testexcept() {
1842 int errno_val = errno;
1843 if (errno_val == ERANGE || errno_val == EDOM)
1844 return true;
1845 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
1846 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
1847 return true;
1848 #endif
1849 return false;
1850 }
1851
1852 Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
1853 Type *Ty) {
1854 llvm_fenv_clearexcept();
1855 double Result = NativeFP(V.convertToDouble());
1856 if (llvm_fenv_testexcept()) {
1857 llvm_fenv_clearexcept();
1858 return nullptr;
1859 }
1860
1861 return GetConstantFoldFPValue(Result, Ty);
1862 }
1863
1864 #if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
1865 Constant *ConstantFoldFP128(float128 (*NativeFP)(float128), const APFloat &V,
1866 Type *Ty) {
1867 llvm_fenv_clearexcept();
1868 float128 Result = NativeFP(V.convertToQuad());
1869 if (llvm_fenv_testexcept()) {
1870 llvm_fenv_clearexcept();
1871 return nullptr;
1872 }
1873
1874 return GetConstantFoldFPValue128(Result, Ty);
1875 }
1876 #endif
1878 Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
1879 const APFloat &V, const APFloat &W, Type *Ty) {
1880 llvm_fenv_clearexcept();
1881 double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
1882 if (llvm_fenv_testexcept()) {
1883 llvm_fenv_clearexcept();
1884 return nullptr;
1885 }
1886
1887 return GetConstantFoldFPValue(Result, Ty);
1888 }
1889
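// Hand-worked examples for the reduction folder below (not from a test):
//   vector_reduce_add(<4 x i32> <i32 1, i32 2, i32 3, i32 4>) --> i32 10
//   vector_reduce_umax(<4 x i32> <i32 1, i32 2, i32 3, i32 4>) --> i32 4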
1890 Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
1891 FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
1892 if (!VT)
1893 return nullptr;
1895 // This isn't strictly necessary, but handle the special/common case of zero:
1896 // all integer reductions of a zero input produce zero.
1897 if (isa<ConstantAggregateZero>(Op))
1898 return ConstantInt::get(VT->getElementType(), 0);
1900 // This is the same as the underlying binops - poison propagates.
1901 if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
1902 return PoisonValue::get(VT->getElementType());
1904 // TODO: Handle undef.
1905 if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
1906 return nullptr;
1908 auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
1909 if (!EltC)
1910 return nullptr;
1912 APInt Acc = EltC->getValue();
1913 for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
1914 if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
1915 return nullptr;
1916 const APInt &X = EltC->getValue();
1917 switch (IID) {
1918 case Intrinsic::vector_reduce_add:
1919 Acc = Acc + X;
1920 break;
1921 case Intrinsic::vector_reduce_mul:
1922 Acc = Acc * X;
1923 break;
1924 case Intrinsic::vector_reduce_and:
1925 Acc = Acc & X;
1926 break;
1927 case Intrinsic::vector_reduce_or:
1928 Acc = Acc | X;
1929 break;
1930 case Intrinsic::vector_reduce_xor:
1931 Acc = Acc ^ X;
1932 break;
1933 case Intrinsic::vector_reduce_smin:
1934 Acc = APIntOps::smin(Acc, X);
1935 break;
1936 case Intrinsic::vector_reduce_smax:
1937 Acc = APIntOps::smax(Acc, X);
1938 break;
1939 case Intrinsic::vector_reduce_umin:
1940 Acc = APIntOps::umin(Acc, X);
1941 break;
1942 case Intrinsic::vector_reduce_umax:
1943 Acc = APIntOps::umax(Acc, X);
1944 break;
1945 }
1946 }
1947
1948 return ConstantInt::get(Op->getContext(), Acc);
1949 }
1950
1951 /// Attempt to fold an SSE floating point to integer conversion of a constant
1952 /// floating point. If roundTowardZero is false, the default IEEE rounding is
1953 /// used (toward nearest, ties to even). This matches the behavior of the
1954 /// non-truncating SSE instructions in the default rounding mode. The desired
1955 /// integer type Ty is used to select how many bits are available for the
1956 /// result. Returns null if the conversion cannot be performed, otherwise
1957 /// returns the Constant value resulting from the conversion.
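/// For example, cvtss2si(2.5) folds to 2 under round-to-nearest-ties-to-even,
/// while the truncating cvttss2si(2.5) also folds to 2 and cvttss2si(-2.7)
/// folds to -2 (hand-worked values, not taken from a test).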
1958 Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
1959 Type *Ty, bool IsSigned) {
1960 // All of these conversion intrinsics form an integer of at most 64 bits.
1961 unsigned ResultWidth = Ty->getIntegerBitWidth();
1962 assert(ResultWidth <= 64 &&
1963 "Can only constant fold conversions to 64 and 32 bit ints");
1965 uint64_t UIntVal;
1966 bool isExact = false;
1967 APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
1968 : APFloat::rmNearestTiesToEven;
1969 APFloat::opStatus status =
1970 Val.convertToInteger(MutableArrayRef(UIntVal), ResultWidth,
1971 IsSigned, mode, &isExact);
1972 if (status != APFloat::opOK &&
1973 (!roundTowardZero || status != APFloat::opInexact))
1974 return nullptr;
1975 return ConstantInt::get(Ty, UIntVal, IsSigned);
1976 }
1977
1978 double getValueAsDouble(ConstantFP *Op) {
1979 Type *Ty = Op->getType();
1981 if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
1982 return Op->getValueAPF().convertToDouble();
1984 bool unused;
1985 APFloat APF = Op->getValueAPF();
1986 APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
1987 return APF.convertToDouble();
1988 }
1989
1990 static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
1991 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1992 C = &CI->getValue();
1993 return true;
1994 }
1995 if (isa<UndefValue>(Op)) {
1996 C = nullptr;
1997 return true;
1998 }
1999 return false;
2000 }
2001
2002 /// Checks if the given intrinsic call, which evaluates to a constant, is
2003 /// allowed to be folded.
2004 ///
2005 /// \param CI Constrained intrinsic call.
2006 /// \param St Exception flags raised during constant evaluation.
2007 static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI,
2008 APFloat::opStatus St) {
2009 std::optional<RoundingMode> ORM = CI->getRoundingMode();
2010 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2012 // If the operation does not change exception status flags, it is safe
2013 // to fold.
2014 if (St == APFloat::opStatus::opOK)
2015 return true;
2017 // If evaluation raised an FP exception, the result can depend on the
2018 // rounding mode. If the latter is unknown, folding is not possible.
2019 if (ORM && *ORM == RoundingMode::Dynamic)
2020 return false;
2022 // If FP exceptions are ignored, fold the call, even if such an exception is
2023 // raised.
2024 if (EB && *EB != fp::ExceptionBehavior::ebStrict)
2025 return true;
2027 // Leave the calculation to runtime so that the exception flags are set
2028 // correctly in hardware.
2029 return false;
2030 }
2031
2032 /// Returns the rounding mode that should be used for constant evaluation.
2033 static RoundingMode
2034 getEvaluationRoundingMode(const ConstrainedFPIntrinsic *CI) {
2035 std::optional<RoundingMode> ORM = CI->getRoundingMode();
2036 if (!ORM || *ORM == RoundingMode::Dynamic)
2037 // Even if the rounding mode is unknown, try evaluating the operation.
2038 // If it does not raise the inexact exception, rounding was not applied,
2039 // so the result is exact and does not depend on the rounding mode. Whether
2040 // other FP exceptions are raised does not depend on the rounding mode either.
2041 return RoundingMode::NearestTiesToEven;
2042 return *ORM;
2043 }
2044
2045 /// Try to constant fold llvm.canonicalize for the given caller and value.
2046 static Constant *constantFoldCanonicalize(const Type *Ty, const CallBase *CI,
2047 const APFloat &Src) {
2048 // Zero, positive and negative, is always OK to fold.
2049 if (Src.isZero()) {
2050 // Get a fresh 0, since ppc_fp128 does have non-canonical zeros.
2051 return ConstantFP::get(
2052 CI->getContext(),
2053 APFloat::getZero(Src.getSemantics(), Src.isNegative()));
2054 }
2055
2056 if (!Ty->isIEEELikeFPTy())
2057 return nullptr;
2059 // Zero is always canonical and the sign must be preserved.
2060 //
2061 // Denorms and NaNs may have special encodings, but it should be OK to fold a
2062 // totally average number.
2063 if (Src.isNormal() || Src.isInfinity())
2064 return ConstantFP::get(CI->getContext(), Src);
2066 if (Src.isDenormal() && CI->getParent() && CI->getFunction()) {
2067 DenormalMode DenormMode =
2068 CI->getFunction()->getDenormalMode(Src.getSemantics());
2070 if (DenormMode == DenormalMode::getIEEE())
2071 return ConstantFP::get(CI->getContext(), Src);
2073 if (DenormMode.Input == DenormalMode::Dynamic)
2074 return nullptr;
2076 // We can fold only if we know whether values get flushed on both the input and the output side; bail out otherwise.
2077 if ((DenormMode.Input == DenormalMode::Dynamic &&
2078 DenormMode.Output == DenormalMode::IEEE) ||
2079 (DenormMode.Input == DenormalMode::IEEE &&
2080 DenormMode.Output == DenormalMode::Dynamic))
2081 return nullptr;
2083 bool IsPositive =
2084 (!Src.isNegative() || DenormMode.Input == DenormalMode::PositiveZero ||
2085 (DenormMode.Output == DenormalMode::PositiveZero &&
2086 DenormMode.Input == DenormalMode::IEEE));
2088 return ConstantFP::get(CI->getContext(),
2089 APFloat::getZero(Src.getSemantics(), !IsPositive));
2090 }
2091
2092 return nullptr;
2093 }
2094
2095 static Constant *ConstantFoldScalarCall1(StringRef Name,
2096 Intrinsic::ID IntrinsicID,
2097 Type *Ty,
2098 ArrayRef<Constant *> Operands,
2099 const TargetLibraryInfo *TLI,
2100 const CallBase *Call) {
2101 assert(Operands.size() == 1 && "Wrong number of operands.");
2103 if (IntrinsicID == Intrinsic::is_constant) {
2104 // We know we have a "Constant" argument. But we want to only
2105 // return true for manifest constants, not those that depend on
2106 // constants with unknowable values, e.g. GlobalValue or BlockAddress.
2107 if (Operands[0]->isManifestConstant())
2108 return ConstantInt::getTrue(Ty->getContext());
2109 return nullptr;
2110 }
2111
2112 if (isa<PoisonValue>(Operands[0])) {
2113 // TODO: All of these operations should probably propagate poison.
2114 if (IntrinsicID == Intrinsic::canonicalize)
2115 return PoisonValue::get(Ty);
2116 }
2117
2118 if (isa<UndefValue>(Operands[0])) {
2119 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
2120 // ctpop() is between 0 and bitwidth, pick 0 for undef.
2121 // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
2122 if (IntrinsicID == Intrinsic::cos ||
2123 IntrinsicID == Intrinsic::ctpop ||
2124 IntrinsicID == Intrinsic::fptoui_sat ||
2125 IntrinsicID == Intrinsic::fptosi_sat ||
2126 IntrinsicID == Intrinsic::canonicalize)
2127 return Constant::getNullValue(Ty);
2128 if (IntrinsicID == Intrinsic::bswap ||
2129 IntrinsicID == Intrinsic::bitreverse ||
2130 IntrinsicID == Intrinsic::launder_invariant_group ||
2131 IntrinsicID == Intrinsic::strip_invariant_group)
2132 return Operands[0];
2133 }
2134
2135 if (isa<ConstantPointerNull>(Operands[0])) {
2136 // launder(null) == null == strip(null) iff in addrspace 0
2137 if (IntrinsicID == Intrinsic::launder_invariant_group ||
2138 IntrinsicID == Intrinsic::strip_invariant_group) {
2139 // If the instruction is not yet placed in a basic block (e.g. when cloning
2140 // a function during inlining), Call's caller may not be available, so check
2141 // Call's parent block first before querying Call->getCaller().
2142 const Function *Caller =
2143 Call->getParent() ? Call->getCaller() : nullptr;
2144 if (Caller &&
2145 !NullPointerIsDefined(
2146 Caller, Operands[0]->getType()->getPointerAddressSpace())) {
2147 return Operands[0];
2148 }
2149 return nullptr;
2150 }
2151 }
2152
2153 if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
2154 if (IntrinsicID == Intrinsic::convert_to_fp16) {
2155 APFloat Val(Op->getValueAPF());
2157 bool lost = false;
2158 Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
2160 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
2161 }
2162
2163 APFloat U = Op->getValueAPF();
2165 if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
2166 IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
2167 bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
2169 if (U.isNaN())
2170 return nullptr;
2172 unsigned Width = Ty->getIntegerBitWidth();
2173 APSInt Int(Width, !Signed);
2174 bool IsExact = false;
2175 APFloat::opStatus Status =
2176 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
2178 if (Status == APFloat::opOK || Status == APFloat::opInexact)
2179 return ConstantInt::get(Ty, Int);
2181 return nullptr;
2182 }
2183
2184 if (IntrinsicID == Intrinsic::fptoui_sat ||
2185 IntrinsicID == Intrinsic::fptosi_sat) {
2186 // convertToInteger() already has the desired saturation semantics.
2187 APSInt Int(Ty->getIntegerBitWidth(),
2188 IntrinsicID == Intrinsic::fptoui_sat);
2189 bool IsExact;
2190 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
2191 return ConstantInt::get(Ty, Int);
2192 }
2193
2194 if (IntrinsicID == Intrinsic::canonicalize)
2195 return constantFoldCanonicalize(Ty, Call, U);
2197 #if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
2198 if (Ty->isFP128Ty()) {
2199 if (IntrinsicID == Intrinsic::log) {
2200 float128 Result = logf128(Op->getValueAPF().convertToQuad());
2201 return GetConstantFoldFPValue128(Result, Ty);
2202 }
2203
2204 LibFunc Fp128Func = NotLibFunc;
2205 if (TLI && TLI->getLibFunc(Name, Fp128Func) && TLI->has(Fp128Func) &&
2206 Fp128Func == LibFunc_logl)
2207 return ConstantFoldFP128(logf128, Op->getValueAPF(), Ty);
2208 }
2209 #endif
2211 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy() &&
2212 !Ty->isIntegerTy())
2213 return nullptr;
2215 // Use internal versions of these intrinsics.
2217 if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
2218 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2219 return ConstantFP::get(Ty->getContext(), U);
2220 }
2221
2222 if (IntrinsicID == Intrinsic::round) {
2223 U.roundToIntegral(APFloat::rmNearestTiesToAway);
2224 return ConstantFP::get(Ty->getContext(), U);
2225 }
2226
2227 if (IntrinsicID == Intrinsic::roundeven) {
2228 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2229 return ConstantFP::get(Ty->getContext(), U);
2230 }
2231
2232 if (IntrinsicID == Intrinsic::ceil) {
2233 U.roundToIntegral(APFloat::rmTowardPositive);
2234 return ConstantFP::get(Ty->getContext(), U);
2235 }
2236
2237 if (IntrinsicID == Intrinsic::floor) {
2238 U.roundToIntegral(APFloat::rmTowardNegative);
2239 return ConstantFP::get(Ty->getContext(), U);
2240 }
2241
2242 if (IntrinsicID == Intrinsic::trunc) {
2243 U.roundToIntegral(APFloat::rmTowardZero);
2244 return ConstantFP::get(Ty->getContext(), U);
2245 }
2246
2247 if (IntrinsicID == Intrinsic::fabs) {
2248 U.clearSign();
2249 return ConstantFP::get(Ty->getContext(), U);
2250 }
2251
2252 if (IntrinsicID == Intrinsic::amdgcn_fract) {
2253 // The v_fract instruction behaves like the OpenCL spec, which defines
2254 // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
2255 // there to prevent fract(-small) from returning 1.0. It returns the
2256 // largest positive floating-point number less than 1.0."
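// Hand-worked examples: fract(1.25) folds to 0.25, while for f32 inputs
// fract(-1e-9) folds to 0x1.fffffep-1 rather than 1.0 because of the min().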
2257 APFloat FloorU(U);
2258 FloorU.roundToIntegral(APFloat::rmTowardNegative);
2259 APFloat FractU(U - FloorU);
2260 APFloat AlmostOne(U.getSemantics(), 1);
2261 AlmostOne.next(/*nextDown*/ true);
2262 return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
2263 }
2264
2265 // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
2266 // raise FP exceptions, unless the argument is signaling NaN.
2268 std::optional<APFloat::roundingMode> RM;
2269 switch (IntrinsicID) {
2270 default:
2271 break;
2272 case Intrinsic::experimental_constrained_nearbyint:
2273 case Intrinsic::experimental_constrained_rint: {
2274 auto CI = cast<ConstrainedFPIntrinsic>(Call);
2275 RM = CI->getRoundingMode();
2276 if (!RM || *RM == RoundingMode::Dynamic)
2277 return nullptr;
2278 break;
2279 }
2280 case Intrinsic::experimental_constrained_round:
2281 RM = APFloat::rmNearestTiesToAway;
2282 break;
2283 case Intrinsic::experimental_constrained_ceil:
2284 RM = APFloat::rmTowardPositive;
2285 break;
2286 case Intrinsic::experimental_constrained_floor:
2287 RM = APFloat::rmTowardNegative;
2288 break;
2289 case Intrinsic::experimental_constrained_trunc:
2290 RM = APFloat::rmTowardZero;
2291 break;
2292 }
2293 if (RM) {
2294 auto CI = cast<ConstrainedFPIntrinsic>(Call);
2295 if (U.isFinite()) {
2296 APFloat::opStatus St = U.roundToIntegral(*RM);
2297 if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
2298 St == APFloat::opInexact) {
2299 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2300 if (EB && *EB == fp::ebStrict)
2301 return nullptr;
2302 }
2303 } else if (U.isSignaling()) {
2304 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2305 if (EB && *EB != fp::ebIgnore)
2306 return nullptr;
2307 U = APFloat::getQNaN(U.getSemantics());
2308 }
2309 return ConstantFP::get(Ty->getContext(), U);
2310 }
2311
2312 /// We only fold functions with finite arguments. Folding NaN and inf is
2313 /// likely to abort with an exception anyway, and some host libms have
2314 /// known errors raising exceptions.
2315 if (!U.isFinite())
2316 return nullptr;
2318 /// Currently APFloat versions of these functions do not exist, so we use
2319 /// the host's native double versions. Float versions are not called
2320 /// directly, but for all of these it holds that (float)(f((double)arg)) ==
2321 /// f(arg). Long double is not supported yet.
2322 const APFloat &APF = Op->getValueAPF();
2324 switch (IntrinsicID) {
2325 default: break;
2326 case Intrinsic::log:
2327 return ConstantFoldFP(log, APF, Ty);
2328 case Intrinsic::log2:
2329 // TODO: What about hosts that lack a C99 library?
2330 return ConstantFoldFP(log2, APF, Ty);
2331 case Intrinsic::log10:
2332 // TODO: What about hosts that lack a C99 library?
2333 return ConstantFoldFP(log10, APF, Ty);
2334 case Intrinsic::exp:
2335 return ConstantFoldFP(exp, APF, Ty);
2336 case Intrinsic::exp2:
2337 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2338 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2339 case Intrinsic::exp10:
2340 // Fold exp10(x) as pow(10, x), in case the host lacks a C99 library.
2341 return ConstantFoldBinaryFP(pow, APFloat(10.0), APF, Ty);
2342 case Intrinsic::sin:
2343 return ConstantFoldFP(sin, APF, Ty);
2344 case Intrinsic::cos:
2345 return ConstantFoldFP(cos, APF, Ty);
2346 case Intrinsic::sqrt:
2347 return ConstantFoldFP(sqrt, APF, Ty);
2348 case Intrinsic::amdgcn_cos:
2349 case Intrinsic::amdgcn_sin: {
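// These intrinsics take their argument in units of full turns (hence the
// 2*pi scaling below). Hand-worked examples: amdgcn.sin(0.25) folds to
// exactly 1.0 and amdgcn.cos(0.25) folds to exactly 0.0 via the
// quarter-integer table.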
2350 double V = getValueAsDouble(Op);
2351 if (V < -256.0 || V > 256.0)
2352 // The gfx8 and gfx9 architectures handle arguments outside the range
2353 // [-256, 256] differently. This should be a rare case so bail out
2354 // rather than trying to handle the difference.
2355 return nullptr;
2356 bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
2357 double V4 = V * 4.0;
2358 if (V4 == floor(V4)) {
2359 // Force exact results for quarter-integer inputs.
2360 const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
2361 V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
2362 } else {
2363 if (IsCos)
2364 V = cos(V * 2.0 * numbers::pi);
2365 else
2366 V = sin(V * 2.0 * numbers::pi);
2367 }
2368 return GetConstantFoldFPValue(V, Ty);
2369 }
2370 }
2371
2372 if (!TLI)
2373 return nullptr;
2375 LibFunc Func = NotLibFunc;
2376 if (!TLI->getLibFunc(Name, Func))
2377 return nullptr;
2379 switch (Func) {
2380 default:
2381 break;
2382 case LibFunc_acos:
2383 case LibFunc_acosf:
2384 case LibFunc_acos_finite:
2385 case LibFunc_acosf_finite:
2386 if (TLI->has(Func))
2387 return ConstantFoldFP(acos, APF, Ty);
2388 break;
2389 case LibFunc_asin:
2390 case LibFunc_asinf:
2391 case LibFunc_asin_finite:
2392 case LibFunc_asinf_finite:
2393 if (TLI->has(Func))
2394 return ConstantFoldFP(asin, APF, Ty);
2395 break;
2396 case LibFunc_atan:
2397 case LibFunc_atanf:
2398 if (TLI->has(Func))
2399 return ConstantFoldFP(atan, APF, Ty);
2400 break;
2401 case LibFunc_ceil:
2402 case LibFunc_ceilf:
2403 if (TLI->has(Func)) {
2404 U.roundToIntegral(APFloat::rmTowardPositive);
2405 return ConstantFP::get(Ty->getContext(), U);
2406 }
2407 break;
2408 case LibFunc_cos:
2409 case LibFunc_cosf:
2410 if (TLI->has(Func))
2411 return ConstantFoldFP(cos, APF, Ty);
2412 break;
2413 case LibFunc_cosh:
2414 case LibFunc_coshf:
2415 case LibFunc_cosh_finite:
2416 case LibFunc_coshf_finite:
2417 if (TLI->has(Func))
2418 return ConstantFoldFP(cosh, APF, Ty);
2419 break;
2420 case LibFunc_exp:
2421 case LibFunc_expf:
2422 case LibFunc_exp_finite:
2423 case LibFunc_expf_finite:
2424 if (TLI->has(Func))
2425 return ConstantFoldFP(exp, APF, Ty);
2426 break;
2427 case LibFunc_exp2:
2428 case LibFunc_exp2f:
2429 case LibFunc_exp2_finite:
2430 case LibFunc_exp2f_finite:
2431 if (TLI->has(Func))
2432 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2433 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2434 break;
2435 case LibFunc_fabs:
2436 case LibFunc_fabsf:
2437 if (TLI->has(Func)) {
2438 U.clearSign();
2439 return ConstantFP::get(Ty->getContext(), U);
2440 }
2441 break;
2442 case LibFunc_floor:
2443 case LibFunc_floorf:
2444 if (TLI->has(Func)) {
2445 U.roundToIntegral(APFloat::rmTowardNegative);
2446 return ConstantFP::get(Ty->getContext(), U);
2447 }
2448 break;
2449 case LibFunc_log:
2450 case LibFunc_logf:
2451 case LibFunc_log_finite:
2452 case LibFunc_logf_finite:
2453 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2454 return ConstantFoldFP(log, APF, Ty);
2455 break;
2456 case LibFunc_log2:
2457 case LibFunc_log2f:
2458 case LibFunc_log2_finite:
2459 case LibFunc_log2f_finite:
2460 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2461 // TODO: What about hosts that lack a C99 library?
2462 return ConstantFoldFP(log2, APF, Ty);
2463 break;
2464 case LibFunc_log10:
2465 case LibFunc_log10f:
2466 case LibFunc_log10_finite:
2467 case LibFunc_log10f_finite:
2468 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2469 // TODO: What about hosts that lack a C99 library?
2470 return ConstantFoldFP(log10, APF, Ty);
2471 break;
2472 case LibFunc_ilogb:
2473 case LibFunc_ilogbf:
2474 if (!APF.isZero() && TLI->has(Func))
2475 return ConstantInt::get(Ty, ilogb(APF), true);
2476 break;
2477 case LibFunc_logb:
2478 case LibFunc_logbf:
2479 if (!APF.isZero() && TLI->has(Func))
2480 return ConstantFoldFP(logb, APF, Ty);
2481 break;
2482 case LibFunc_log1p:
2483 case LibFunc_log1pf:
2484 // Implement optional behavior from C's Annex F for +/-0.0.
2485 if (U.isZero())
2486 return ConstantFP::get(Ty->getContext(), U);
2487 if (APF > APFloat::getOne(APF.getSemantics(), true) && TLI->has(Func))
2488 return ConstantFoldFP(log1p, APF, Ty);
2489 break;
2490 case LibFunc_logl:
2491 return nullptr;
2492 case LibFunc_erf:
2493 case LibFunc_erff:
2494 if (TLI->has(Func))
2495 return ConstantFoldFP(erf, APF, Ty);
2496 break;
2497 case LibFunc_nearbyint:
2498 case LibFunc_nearbyintf:
2499 case LibFunc_rint:
2500 case LibFunc_rintf:
2501 if (TLI->has(Func)) {
2502 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2503 return ConstantFP::get(Ty->getContext(), U);
2504 }
2505 break;
2506 case LibFunc_round:
2507 case LibFunc_roundf:
2508 if (TLI->has(Func)) {
2509 U.roundToIntegral(APFloat::rmNearestTiesToAway);
2510 return ConstantFP::get(Ty->getContext(), U);
2511 }
2512 break;
2513 case LibFunc_sin:
2514 case LibFunc_sinf:
2515 if (TLI->has(Func))
2516 return ConstantFoldFP(sin, APF, Ty);
2517 break;
2518 case LibFunc_sinh:
2519 case LibFunc_sinhf:
2520 case LibFunc_sinh_finite:
2521 case LibFunc_sinhf_finite:
2522 if (TLI->has(Func))
2523 return ConstantFoldFP(sinh, APF, Ty);
2524 break;
2525 case LibFunc_sqrt:
2526 case LibFunc_sqrtf:
2527 if (!APF.isNegative() && TLI->has(Func))
2528 return ConstantFoldFP(sqrt, APF, Ty);
2529 break;
2530 case LibFunc_tan:
2531 case LibFunc_tanf:
2532 if (TLI->has(Func))
2533 return ConstantFoldFP(tan, APF, Ty);
2534 break;
2535 case LibFunc_tanh:
2536 case LibFunc_tanhf:
2537 if (TLI->has(Func))
2538 return ConstantFoldFP(tanh, APF, Ty);
2539 break;
2540 case LibFunc_trunc:
2541 case LibFunc_truncf:
2542 if (TLI->has(Func)) {
2543 U.roundToIntegral(APFloat::rmTowardZero);
2544 return ConstantFP::get(Ty->getContext(), U);
2545 }
2546 break;
2547 }
2548 return nullptr;
2549 }
2550
2551 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
2552 switch (IntrinsicID) {
2553 case Intrinsic::bswap:
2554 return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
2555 case Intrinsic::ctpop:
2556 return ConstantInt::get(Ty, Op->getValue().popcount());
2557 case Intrinsic::bitreverse:
2558 return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
2559 case Intrinsic::convert_from_fp16: {
2560 APFloat Val(APFloat::IEEEhalf(), Op->getValue());
2562 bool lost = false;
2563 APFloat::opStatus status = Val.convert(
2564 Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
2566 // Conversion is always precise.
2567 (void)status;
2568 assert(status != APFloat::opInexact && !lost &&
2569 "Precision lost during fp16 constfolding");
2571 return ConstantFP::get(Ty->getContext(), Val);
2572 }
2573
2574 case Intrinsic::amdgcn_s_wqm: {
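// Whole-quad mode: each group of 4 bits becomes all-ones if any bit in the
// group is set. Hand-worked example: s_wqm(0x00010001) --> 0x000F000F.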
2575 uint64_t Val = Op->getZExtValue();
2576 Val |= (Val & 0x5555555555555555ULL) << 1 |
2577 ((Val >> 1) & 0x5555555555555555ULL);
2578 Val |= (Val & 0x3333333333333333ULL) << 2 |
2579 ((Val >> 2) & 0x3333333333333333ULL);
2580 return ConstantInt::get(Ty, Val);
2581 }
2582
2583 case Intrinsic::amdgcn_s_quadmask: {
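// Each 4-bit group collapses to a single bit that is set iff the group is
// nonzero. Hand-worked example: s_quadmask(0x000000F0) --> 0b10.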
2584 uint64_t Val = Op->getZExtValue();
2585 uint64_t QuadMask = 0;
2586 for (unsigned I = 0; I < Op->getBitWidth() / 4; ++I, Val >>= 4) {
2587 if (!(Val & 0xF))
2588 continue;
2590 QuadMask |= (1ULL << I);
2591 }
2592 return ConstantInt::get(Ty, QuadMask);
2593 }
2594
2595 case Intrinsic::amdgcn_s_bitreplicate: {
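// Each of the low 32 bits is duplicated into a pair of adjacent bits.
// Hand-worked example: s_bitreplicate(0b0101) --> 0b00110011.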
2596 uint64_t Val = Op->getZExtValue();
2597 Val = (Val & 0x000000000000FFFFULL) | (Val & 0x00000000FFFF0000ULL) << 16;
2598 Val = (Val & 0x000000FF000000FFULL) | (Val & 0x0000FF000000FF00ULL) << 8;
2599 Val = (Val & 0x000F000F000F000FULL) | (Val & 0x00F000F000F000F0ULL) << 4;
2600 Val = (Val & 0x0303030303030303ULL) | (Val & 0x0C0C0C0C0C0C0C0CULL) << 2;
2601 Val = (Val & 0x1111111111111111ULL) | (Val & 0x2222222222222222ULL) << 1;
2602 Val = Val | Val << 1;
2603 return ConstantInt::get(Ty, Val);
2604 }
2605
2606 default:
2607 return nullptr;
2608 }
2609 }
2610
2611 switch (IntrinsicID) {
2612 default: break;
2613 case Intrinsic::vector_reduce_add:
2614 case Intrinsic::vector_reduce_mul:
2615 case Intrinsic::vector_reduce_and:
2616 case Intrinsic::vector_reduce_or:
2617 case Intrinsic::vector_reduce_xor:
2618 case Intrinsic::vector_reduce_smin:
2619 case Intrinsic::vector_reduce_smax:
2620 case Intrinsic::vector_reduce_umin:
2621 case Intrinsic::vector_reduce_umax:
2622 if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
2623 return C;
2624 break;
2625 }
2626
2627 // Support ConstantVector in case the vector contains an Undef element.
2628 if (isa<ConstantVector>(Operands[0]) ||
2629 isa<ConstantDataVector>(Operands[0])) {
2630 auto *Op = cast<Constant>(Operands[0]);
2631 switch (IntrinsicID) {
2632 default: break;
2633 case Intrinsic::x86_sse_cvtss2si:
2634 case Intrinsic::x86_sse_cvtss2si64:
2635 case Intrinsic::x86_sse2_cvtsd2si:
2636 case Intrinsic::x86_sse2_cvtsd2si64:
2637 if (ConstantFP *FPOp =
2638 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2639 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2640 /*roundTowardZero=*/false, Ty,
2641 /*IsSigned*/true);
2642 break;
2643 case Intrinsic::x86_sse_cvttss2si:
2644 case Intrinsic::x86_sse_cvttss2si64:
2645 case Intrinsic::x86_sse2_cvttsd2si:
2646 case Intrinsic::x86_sse2_cvttsd2si64:
2647 if (ConstantFP *FPOp =
2648 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2649 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2650 /*roundTowardZero=*/true, Ty,
2651 /*IsSigned*/true);
2652 break;
2653 }
2654 }
2655
2656 return nullptr;
2657 }
2658
2659 static Constant *evaluateCompare(const APFloat &Op1, const APFloat &Op2,
2660 const ConstrainedFPIntrinsic *Call) {
2661 APFloat::opStatus St = APFloat::opOK;
2662 auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
2663 FCmpInst::Predicate Cond = FCmp->getPredicate();
2664 if (FCmp->isSignaling()) {
2665 if (Op1.isNaN() || Op2.isNaN())
2666 St = APFloat::opInvalidOp;
2667 } else {
2668 if (Op1.isSignaling() || Op2.isSignaling())
2669 St = APFloat::opInvalidOp;
2670 }
2671 bool Result = FCmpInst::compare(Op1, Op2, Cond);
2672 if (mayFoldConstrained(const_cast<ConstrainedFPCmpIntrinsic *>(FCmp), St))
2673 return ConstantInt::get(Call->getType()->getScalarType(), Result);
2674 return nullptr;
2675 }
2676
2677 static Constant *ConstantFoldLibCall2(StringRef Name, Type *Ty,
2678 ArrayRef<Constant *> Operands,
2679 const TargetLibraryInfo *TLI) {
2680 if (!TLI)
2681 return nullptr;
2683 LibFunc Func = NotLibFunc;
2684 if (!TLI->getLibFunc(Name, Func))
2685 return nullptr;
2687 const auto *Op1 = dyn_cast<ConstantFP>(Operands[0]);
2688 if (!Op1)
2689 return nullptr;
2691 const auto *Op2 = dyn_cast<ConstantFP>(Operands[1]);
2692 if (!Op2)
2693 return nullptr;
2695 const APFloat &Op1V = Op1->getValueAPF();
2696 const APFloat &Op2V = Op2->getValueAPF();
2698 switch (Func) {
2699 default:
2700 break;
2701 case LibFunc_pow:
2702 case LibFunc_powf:
2703 case LibFunc_pow_finite:
2704 case LibFunc_powf_finite:
2705 if (TLI->has(Func))
2706 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2707 break;
2708 case LibFunc_fmod:
2709 case LibFunc_fmodf:
2710 if (TLI->has(Func)) {
2711 APFloat V = Op1->getValueAPF();
2712 if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
2713 return ConstantFP::get(Ty->getContext(), V);
2714 }
2715 break;
2716 case LibFunc_remainder:
2717 case LibFunc_remainderf:
2718 if (TLI->has(Func)) {
2719 APFloat V = Op1->getValueAPF();
2720 if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
2721 return ConstantFP::get(Ty->getContext(), V);
2722 }
2723 break;
2724 case LibFunc_atan2:
2725 case LibFunc_atan2f:
2726 // atan2(+/-0.0, +/-0.0) is known to raise an exception on some libm
2727 // implementations (e.g. Solaris), so we do not assume a known result.
2728 if (Op1V.isZero() && Op2V.isZero())
2729 return nullptr;
2730 [[fallthrough]];
2731 case LibFunc_atan2_finite:
2732 case LibFunc_atan2f_finite:
2733 if (TLI->has(Func))
2734 return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
2735 break;
2736 }
2737
2738 return nullptr;
2739 }
2740
2741 static Constant *ConstantFoldIntrinsicCall2(Intrinsic::ID IntrinsicID, Type *Ty,
2742 ArrayRef<Constant *> Operands,
2743 const CallBase *Call) {
2744 assert(Operands.size() == 2 && "Wrong number of operands.");
2746 if (Ty->isFloatingPointTy()) {
2747 // TODO: We should have undef handling for all of the FP intrinsics that
2748 // are attempted to be folded in this function.
2749 bool IsOp0Undef = isa<UndefValue>(Operands[0]);
2750 bool IsOp1Undef = isa<UndefValue>(Operands[1]);
2751 switch (IntrinsicID) {
2752 case Intrinsic::maxnum:
2753 case Intrinsic::minnum:
2754 case Intrinsic::maximum:
2755 case Intrinsic::minimum:
2756 // If one argument is undef, return the other argument.
2757 if (IsOp0Undef)
2758 return Operands[1];
2759 if (IsOp1Undef)
2760 return Operands[0];
2761 break;
2762 }
2763 }
2764
2765 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2766 const APFloat &Op1V = Op1->getValueAPF();
2768 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2769 if (Op2->getType() != Op1->getType())
2770 return nullptr;
2771 const APFloat &Op2V = Op2->getValueAPF();
2773 if (const auto *ConstrIntr =
2774 dyn_cast_if_present<ConstrainedFPIntrinsic>(Call)) {
2775 RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
2776 APFloat Res = Op1V;
2777 APFloat::opStatus St;
2778 switch (IntrinsicID) {
2779 default:
2780 return nullptr;
2781 case Intrinsic::experimental_constrained_fadd:
2782 St = Res.add(Op2V, RM);
2783 break;
2784 case Intrinsic::experimental_constrained_fsub:
2785 St = Res.subtract(Op2V, RM);
2786 break;
2787 case Intrinsic::experimental_constrained_fmul:
2788 St = Res.multiply(Op2V, RM);
2789 break;
2790 case Intrinsic::experimental_constrained_fdiv:
2791 St = Res.divide(Op2V, RM);
2792 break;
2793 case Intrinsic::experimental_constrained_frem:
2794 St = Res.mod(Op2V);
2795 break;
2796 case Intrinsic::experimental_constrained_fcmp:
2797 case Intrinsic::experimental_constrained_fcmps:
2798 return evaluateCompare(Op1V, Op2V, ConstrIntr);
2799 }
2800 if (mayFoldConstrained(const_cast<ConstrainedFPIntrinsic *>(ConstrIntr),
2801 St))
2802 return ConstantFP::get(Ty->getContext(), Res);
2803 return nullptr;
2804 }
2805
2806 switch (IntrinsicID) {
2807 default:
2808 break;
2809 case Intrinsic::copysign:
2810 return ConstantFP::get(Ty->getContext(), APFloat::copySign(Op1V, Op2V));
2811 case Intrinsic::minnum:
2812 return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
2813 case Intrinsic::maxnum:
2814 return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
2815 case Intrinsic::minimum:
2816 return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
2817 case Intrinsic::maximum:
2818 return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
2819 }
2820
2821 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2822 return nullptr;
2824 switch (IntrinsicID) {
2825 default:
2826 break;
2827 case Intrinsic::pow:
2828 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2829 case Intrinsic::amdgcn_fmul_legacy:
2830 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
2831 // NaN or infinity, gives +0.0.
2832 if (Op1V.isZero() || Op2V.isZero())
2833 return ConstantFP::getZero(Ty);
2834 return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
2835 }
2836
2837 } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
2838 switch (IntrinsicID) {
2839 case Intrinsic::ldexp: {
2840 return ConstantFP::get(
2841 Ty->getContext(),
2842 scalbn(Op1V, Op2C->getSExtValue(), APFloat::rmNearestTiesToEven));
2843 }
2844 case Intrinsic::is_fpclass: {
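// Hand-worked examples: is_fpclass(+0.0, fcPosZero) folds to true, and
// is_fpclass on any NaN with a mask containing fcSNan|fcQNan folds to true.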
2845 FPClassTest Mask = static_cast<FPClassTest>(Op2C->getZExtValue());
2846 bool Result =
2847 ((Mask & fcSNan) && Op1V.isNaN() && Op1V.isSignaling()) ||
2848 ((Mask & fcQNan) && Op1V.isNaN() && !Op1V.isSignaling()) ||
2849 ((Mask & fcNegInf) && Op1V.isNegInfinity()) ||
2850 ((Mask & fcNegNormal) && Op1V.isNormal() && Op1V.isNegative()) ||
2851 ((Mask & fcNegSubnormal) && Op1V.isDenormal() && Op1V.isNegative()) ||
2852 ((Mask & fcNegZero) && Op1V.isZero() && Op1V.isNegative()) ||
2853 ((Mask & fcPosZero) && Op1V.isZero() && !Op1V.isNegative()) ||
2854 ((Mask & fcPosSubnormal) && Op1V.isDenormal() && !Op1V.isNegative()) ||
2855 ((Mask & fcPosNormal) && Op1V.isNormal() && !Op1V.isNegative()) ||
2856 ((Mask & fcPosInf) && Op1V.isPosInfinity());
2857 return ConstantInt::get(Ty, Result);
2858 }
2859 case Intrinsic::powi: {
2860 int Exp = static_cast<int>(Op2C->getSExtValue());
2861 switch (Ty->getTypeID()) {
2862 case Type::HalfTyID:
2863 case Type::FloatTyID: {
2864 APFloat Res(static_cast<float>(std::pow(Op1V.convertToFloat(), Exp)));
2865 if (Ty->isHalfTy()) {
2866 bool Unused;
2867 Res.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven,
2868 &Unused);
2870 return ConstantFP::get(Ty->getContext(), Res);
2871 }
2872 case Type::DoubleTyID:
2873 return ConstantFP::get(Ty, std::pow(Op1V.convertToDouble(), Exp));
2874 default:
2875 return nullptr;
2876 }
2877 }
2878 default:
2879 break;
2880 }
2881 }
2882 return nullptr;
2883 }
2884
2885 if (Operands[0]->getType()->isIntegerTy() &&
2886 Operands[1]->getType()->isIntegerTy()) {
2887 const APInt *C0, *C1;
2888 if (!getConstIntOrUndef(Operands[0], C0) ||
2889 !getConstIntOrUndef(Operands[1], C1))
2890 return nullptr;
2892 switch (IntrinsicID) {
2893 default: break;
2894 case Intrinsic::smax:
2895 case Intrinsic::smin:
2896 case Intrinsic::umax:
2897 case Intrinsic::umin:
2898 // This is the same as for binary ops - poison propagates.
2899 // TODO: Poison handling should be consolidated.
2900 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2901 return PoisonValue::get(Ty);
2903 if (!C0 && !C1)
2904 return UndefValue::get(Ty);
2905 if (!C0 || !C1)
2906 return MinMaxIntrinsic::getSaturationPoint(IntrinsicID, Ty);
2907 return ConstantInt::get(
2908 Ty, ICmpInst::compare(*C0, *C1,
2909 MinMaxIntrinsic::getPredicate(IntrinsicID))
2910 ? *C0
2911 : *C1);
2913 case Intrinsic::scmp:
2914 case Intrinsic::ucmp:
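// Hand-worked examples: scmp(i32 -1, i32 1) folds to -1, while
// ucmp(i32 -1, i32 1) folds to 1 because -1 is 0xFFFFFFFF as unsigned.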
2915 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2916 return PoisonValue::get(Ty);
2918 if (!C0 || !C1)
2919 return ConstantInt::get(Ty, 0);
2921 int Res;
2922 if (IntrinsicID == Intrinsic::scmp)
2923 Res = C0->sgt(*C1) ? 1 : C0->slt(*C1) ? -1 : 0;
2924 else
2925 Res = C0->ugt(*C1) ? 1 : C0->ult(*C1) ? -1 : 0;
2926 return ConstantInt::get(Ty, Res, /*IsSigned=*/true);
2928 case Intrinsic::usub_with_overflow:
2929 case Intrinsic::ssub_with_overflow:
2930 // X - undef -> { 0, false }
2931 // undef - X -> { 0, false }
2932 if (!C0 || !C1)
2933 return Constant::getNullValue(Ty);
2934 [[fallthrough]];
2935 case Intrinsic::uadd_with_overflow:
2936 case Intrinsic::sadd_with_overflow:
2937 // X + undef -> { -1, false }
2938 // undef + x -> { -1, false }
2939 if (!C0 || !C1) {
2940 return ConstantStruct::get(
2941 cast<StructType>(Ty),
2942 {Constant::getAllOnesValue(Ty->getStructElementType(0)),
2943 Constant::getNullValue(Ty->getStructElementType(1))});
2944 }
2945 [[fallthrough]];
2946 case Intrinsic::smul_with_overflow:
2947 case Intrinsic::umul_with_overflow: {
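// Hand-worked example for the common path below:
//   uadd_with_overflow(i8 200, i8 100) --> { i8 44, i1 true }
// since 300 wraps to 44 in 8 bits.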
2948 // undef * X -> { 0, false }
2949 // X * undef -> { 0, false }
2950 if (!C0 || !C1)
2951 return Constant::getNullValue(Ty);
2953 APInt Res;
2954 bool Overflow;
2955 switch (IntrinsicID) {
2956 default: llvm_unreachable("Invalid case");
2957 case Intrinsic::sadd_with_overflow:
2958 Res = C0->sadd_ov(*C1, Overflow);
2959 break;
2960 case Intrinsic::uadd_with_overflow:
2961 Res = C0->uadd_ov(*C1, Overflow);
2962 break;
2963 case Intrinsic::ssub_with_overflow:
2964 Res = C0->ssub_ov(*C1, Overflow);
2965 break;
2966 case Intrinsic::usub_with_overflow:
2967 Res = C0->usub_ov(*C1, Overflow);
2968 break;
2969 case Intrinsic::smul_with_overflow:
2970 Res = C0->smul_ov(*C1, Overflow);
2971 break;
2972 case Intrinsic::umul_with_overflow:
2973 Res = C0->umul_ov(*C1, Overflow);
2974 break;
2975 }
2976 Constant *Ops[] = {
2977 ConstantInt::get(Ty->getContext(), Res),
2978 ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
2979 };
2980 return ConstantStruct::get(cast<StructType>(Ty), Ops);
2981 }
2982 case Intrinsic::uadd_sat:
2983 case Intrinsic::sadd_sat:
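// Hand-worked examples: uadd_sat(i8 200, i8 100) clamps to 255, and
// sadd_sat(i8 100, i8 100) clamps to 127.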
2984 // This is the same as for binary ops - poison propagates.
2985 // TODO: Poison handling should be consolidated.
2986 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2987 return PoisonValue::get(Ty);
2989 if (!C0 && !C1)
2990 return UndefValue::get(Ty);
2991 if (!C0 || !C1)
2992 return Constant::getAllOnesValue(Ty);
2993 if (IntrinsicID == Intrinsic::uadd_sat)
2994 return ConstantInt::get(Ty, C0->uadd_sat(*C1));
2995 else
2996 return ConstantInt::get(Ty, C0->sadd_sat(*C1));
2997 case Intrinsic::usub_sat:
2998 case Intrinsic::ssub_sat:
2999 // This is the same as for binary ops - poison propagates.
3000 // TODO: Poison handling should be consolidated.
3001 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
3002 return PoisonValue::get(Ty);
3004 if (!C0 && !C1)
3005 return UndefValue::get(Ty);
3006 if (!C0 || !C1)
3007 return Constant::getNullValue(Ty);
3008 if (IntrinsicID == Intrinsic::usub_sat)
3009 return ConstantInt::get(Ty, C0->usub_sat(*C1));
3010 else
3011 return ConstantInt::get(Ty, C0->ssub_sat(*C1));
3012 case Intrinsic::cttz:
3013 case Intrinsic::ctlz:
3014 assert(C1 && "Must be constant int");
3016 // cttz(0, 1) and ctlz(0, 1) are poison.
3017 if (C1->isOne() && (!C0 || C0->isZero()))
3018 return PoisonValue::get(Ty);
3019 if (!C0)
3020 return Constant::getNullValue(Ty);
3021 if (IntrinsicID == Intrinsic::cttz)
3022 return ConstantInt::get(Ty, C0->countr_zero());
3023 else
3024 return ConstantInt::get(Ty, C0->countl_zero());
3026 case Intrinsic::abs:
3027 assert(C1 && "Must be constant int");
3028 assert((C1->isOne() || C1->isZero()) && "Must be 0 or 1");
3030 // Undef or minimum val operand with poison min --> poison
3031 if (C1->isOne() && (!C0 || C0->isMinSignedValue()))
3032 return PoisonValue::get(Ty);
3034 // Undef operand with no poison min --> 0 (sign bit must be clear)
3035 if (!C0)
3036 return Constant::getNullValue(Ty);
3038 return ConstantInt::get(Ty, C0->abs());
3039 case Intrinsic::amdgcn_wave_reduce_umin:
3040 case Intrinsic::amdgcn_wave_reduce_umax:
3041 return dyn_cast<Constant>(Operands[0]);
3042 }
3043
3044 return nullptr;
3045 }
3046
3047 // Support ConstantVector in case the vector contains an Undef element.
3048 if ((isa<ConstantVector>(Operands[0]) ||
3049 isa<ConstantDataVector>(Operands[0])) &&
3050 // Check for default rounding mode.
3051 // FIXME: Support other rounding modes?
3052 isa<ConstantInt>(Operands[1]) &&
3053 cast<ConstantInt>(Operands[1])->getValue() == 4) {
3054 auto *Op = cast<Constant>(Operands[0]);
3055 switch (IntrinsicID) {
3056 default: break;
3057 case Intrinsic::x86_avx512_vcvtss2si32:
3058 case Intrinsic::x86_avx512_vcvtss2si64:
3059 case Intrinsic::x86_avx512_vcvtsd2si32:
3060 case Intrinsic::x86_avx512_vcvtsd2si64:
3061 if (ConstantFP *FPOp =
3062 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
3063 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
3064 /*roundTowardZero=*/false, Ty,
3065 /*IsSigned*/true);
3066 break;
3067 case Intrinsic::x86_avx512_vcvtss2usi32:
3068 case Intrinsic::x86_avx512_vcvtss2usi64:
3069 case Intrinsic::x86_avx512_vcvtsd2usi32:
3070 case Intrinsic::x86_avx512_vcvtsd2usi64:
3071 if (ConstantFP *FPOp =
3072 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
3073 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
3074 /*roundTowardZero=*/false, Ty,
3075 /*IsSigned*/false);
3076 break;
3077 case Intrinsic::x86_avx512_cvttss2si:
3078 case Intrinsic::x86_avx512_cvttss2si64:
3079 case Intrinsic::x86_avx512_cvttsd2si:
3080 case Intrinsic::x86_avx512_cvttsd2si64:
3081 if (ConstantFP *FPOp =
3082 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
3083 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
3084 /*roundTowardZero=*/true, Ty,
3085 /*IsSigned*/true);
3086 break;
3087 case Intrinsic::x86_avx512_cvttss2usi:
3088 case Intrinsic::x86_avx512_cvttss2usi64:
3089 case Intrinsic::x86_avx512_cvttsd2usi:
3090 case Intrinsic::x86_avx512_cvttsd2usi64:
3091 if (ConstantFP *FPOp =
3092 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
3093 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
3094 /*roundTowardZero=*/true, Ty,
3095 /*IsSigned*/false);
3096 break;
3097 }
3098 }
3099 return nullptr;
3100 }
3101
3102 static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
3103 const APFloat &S0,
3104 const APFloat &S1,
3105 const APFloat &S2) {
3106 unsigned ID;
3107 const fltSemantics &Sem = S0.getSemantics();
3108 APFloat MA(Sem), SC(Sem), TC(Sem);
3109 if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) {
3110 if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
3111 // S2 < 0
3112 ID = 5;
3113 SC = -S0;
3114 } else {
3115 ID = 4;
3116 SC = S0;
3117 }
3118 MA = S2;
3119 TC = -S1;
3120 } else if (abs(S1) >= abs(S0)) {
3121 if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
3122 // S1 < 0
3123 ID = 3;
3124 TC = -S2;
3125 } else {
3126 ID = 2;
3127 TC = S2;
3128 }
3129 MA = S1;
3130 SC = S0;
3131 } else {
3132 if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
3133 // S0 < 0
3134 ID = 1;
3135 SC = S2;
3136 } else {
3137 ID = 0;
3138 SC = -S2;
3139 }
3140 MA = S0;
3141 TC = -S1;
3142 }
3143 switch (IntrinsicID) {
3144 default:
3145 llvm_unreachable("unhandled amdgcn cube intrinsic");
3146 case Intrinsic::amdgcn_cubeid:
3147 return APFloat(Sem, ID);
3148 case Intrinsic::amdgcn_cubema:
3149 return MA + MA;
3150 case Intrinsic::amdgcn_cubesc:
3151 return SC;
3152 case Intrinsic::amdgcn_cubetc:
3153 return TC;
3154 }
3155 }
3156
3157 static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands,
3158 Type *Ty) {
3159 const APInt *C0, *C1, *C2;
3160 if (!getConstIntOrUndef(Operands[0], C0) ||
3161 !getConstIntOrUndef(Operands[1], C1) ||
3162 !getConstIntOrUndef(Operands[2], C2))
3163 return nullptr;
3165 if (!C2)
3166 return UndefValue::get(Ty);
3168 APInt Val(32, 0);
3169 unsigned NumUndefBytes = 0;
3170 for (unsigned I = 0; I < 32; I += 8) {
3171 unsigned Sel = C2->extractBitsAsZExtValue(8, I);
3172 unsigned B = 0;
3174 if (Sel >= 13)
3175 B = 0xff;
3176 else if (Sel == 12)
3177 B = 0x00;
3178 else {
3179 const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
3180 if (!Src)
3181 ++NumUndefBytes;
3182 else if (Sel < 8)
3183 B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
3184 else
3185 B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
3186 }
3187
3188 Val.insertBits(B, I, 8);
3189 }
3190
3191 if (NumUndefBytes == 4)
3192 return UndefValue::get(Ty);
3194 return ConstantInt::get(Ty, Val);
3195 }
3196
3197 static Constant *ConstantFoldScalarCall3(StringRef Name,
3198 Intrinsic::ID IntrinsicID,
3199 Type *Ty,
3200 ArrayRef<Constant *> Operands,
3201 const TargetLibraryInfo *TLI,
3202 const CallBase *Call) {
3203 assert(Operands.size() == 3 && "Wrong number of operands.");
3205 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
3206 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
3207 if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
3208 const APFloat &C1 = Op1->getValueAPF();
3209 const APFloat &C2 = Op2->getValueAPF();
3210 const APFloat &C3 = Op3->getValueAPF();
3212 if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
3213 RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
3214 APFloat Res = C1;
3215 APFloat::opStatus St;
3216 switch (IntrinsicID) {
3217 default:
3218 return nullptr;
3219 case Intrinsic::experimental_constrained_fma:
3220 case Intrinsic::experimental_constrained_fmuladd:
3221 St = Res.fusedMultiplyAdd(C2, C3, RM);
3222 break;
3223 }
3224 if (mayFoldConstrained(
3225 const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
3226 return ConstantFP::get(Ty->getContext(), Res);
3227 return nullptr;
3228 }
3229
3230 switch (IntrinsicID) {
3231 default: break;
3232 case Intrinsic::amdgcn_fma_legacy: {
3233 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
3234 // NaN or infinity, gives +0.0.
3235 if (C1.isZero() || C2.isZero()) {
3236 // It's tempting to just return C3 here, but that would give the
3237 // wrong result if C3 was -0.0.
3238 return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
3239 }
3240 [[fallthrough]];
3241 }
3242 case Intrinsic::fma:
3243 case Intrinsic::fmuladd: {
3244 APFloat V = C1;
3245 V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
3246 return ConstantFP::get(Ty->getContext(), V);
3247 }
3248 case Intrinsic::amdgcn_cubeid:
3249 case Intrinsic::amdgcn_cubema:
3250 case Intrinsic::amdgcn_cubesc:
3251 case Intrinsic::amdgcn_cubetc: {
3252 APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
3253 return ConstantFP::get(Ty->getContext(), V);
3254 }
3255 }
3256 }
3257 }
3258 }
3259
3260 if (IntrinsicID == Intrinsic::smul_fix ||
3261 IntrinsicID == Intrinsic::smul_fix_sat) {
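// Hand-worked example: smul_fix(i32 3, i32 3, 1) treats both inputs as Q1
// values (1.5 each); 1.5 * 1.5 = 2.25 rounds down to 2.0, which encodes as 4.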
3262 // poison * C -> poison
3263 // C * poison -> poison
3264 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
3265 return PoisonValue::get(Ty);
3267 const APInt *C0, *C1;
3268 if (!getConstIntOrUndef(Operands[0], C0) ||
3269 !getConstIntOrUndef(Operands[1], C1))
3270 return nullptr;
3272 // undef * C -> 0
3273 // C * undef -> 0
3274 if (!C0 || !C1)
3275 return Constant::getNullValue(Ty);
3277 // This code performs rounding towards negative infinity in case the result
3278 // cannot be represented exactly for the given scale. Targets that do care
3279 // about rounding should use a target hook for specifying how rounding
3280 // should be done, and provide their own folding to be consistent with
3281 // rounding. This is the same approach as used by
3282 // DAGTypeLegalizer::ExpandIntRes_MULFIX.
    unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
    unsigned Width = C0->getBitWidth();
    assert(Scale < Width && "Illegal scale.");
    unsigned ExtendedWidth = Width * 2;
    APInt Product =
        (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
    if (IntrinsicID == Intrinsic::smul_fix_sat) {
      APInt Max = APInt::getSignedMaxValue(Width).sext(ExtendedWidth);
      APInt Min = APInt::getSignedMinValue(Width).sext(ExtendedWidth);
      Product = APIntOps::smin(Product, Max);
      Product = APIntOps::smax(Product, Min);
    }
    return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
  }

  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    if (!C2)
      return Operands[IsRight ? 1 : 0];
    if (!C0 && !C1)
      return UndefValue::get(Ty);

    // The shift amount is interpreted modulo the bitwidth. If the effective
    // shift amount is 0, avoid UB due to the oversized inverse shift below.
    unsigned BitWidth = C2->getBitWidth();
    unsigned ShAmt = C2->urem(BitWidth);
    if (!ShAmt)
      return Operands[IsRight ? 1 : 0];

    // (C0 << ShlAmt) | (C1 >> LshrAmt)
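    // e.g. fshl(i8 0xAB, i8 0xCD, 3) == (0xAB << 3) | (0xCD >> 5) == 0x5E,
    // while fshr(i8 0xAB, i8 0xCD, 3) == (0xAB << 5) | (0xCD >> 3) == 0x79.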
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }

  if (IntrinsicID == Intrinsic::amdgcn_perm)
    return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);

  return nullptr;
}

static Constant *ConstantFoldScalarCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID,
                                        Type *Ty,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  if (Operands.size() == 1)
    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 2) {
    if (Constant *FoldedLibCall =
            ConstantFoldLibCall2(Name, Ty, Operands, TLI)) {
      return FoldedLibCall;
    }
    return ConstantFoldIntrinsicCall2(IntrinsicID, Ty, Operands, Call);
  }

  if (Operands.size() == 3)
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);

  return nullptr;
}
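
// Fold a call that returns a fixed-width vector by folding each lane with
// ConstantFoldScalarCall, with dedicated handling for the few intrinsics
// below whose results are not simple per-lane functions of per-lane operands.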
static Constant *ConstantFoldFixedVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  SmallVector<Constant *, 4> Result(FVTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = FVTy->getElementType();

  switch (IntrinsicID) {
  case Intrinsic::masked_load: {
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];

    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);

    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        if (PassthruElt)
          NewElements.push_back(PassthruElt);
        else if (VecElt)
          NewElements.push_back(VecElt);
        else
          return nullptr;
        // Move on to the next lane; without this, an undef mask element would
        // fall through to the checks below and needlessly abort the fold.
        continue;
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != FVTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64: {
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Limit = Op->getZExtValue();
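
      // vctp sets the first Limit lanes of the predicate and clears the rest,
      // e.g. <4 x i1> @llvm.arm.mve.vctp32(i32 2) folds to
      // <i1 true, i1 true, i1 false, i1 false>.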
      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  case Intrinsic::get_active_lane_mask: {
    auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
    auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
    if (Op0 && Op1) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Base = Op0->getZExtValue();
      uint64_t Limit = Op1->getZExtValue();
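
      // Lane i of the mask is true iff Base + i < Limit, e.g.
      // <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 2, i64 5) folds to
      // <i1 true, i1 true, i1 true, i1 false>.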
      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (Base + i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  default:
    break;
  }

  for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // Some intrinsics use a scalar type for certain arguments.
      if (isVectorIntrinsicWithScalarOpAtArg(IntrinsicID, J)) {
        Lane[J] = Operands[J];
        continue;
      }

      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}

static Constant *ConstantFoldScalableVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  switch (IntrinsicID) {
  case Intrinsic::aarch64_sve_convert_from_svbool: {
    auto *Src = dyn_cast<Constant>(Operands[0]);
    if (!Src || !Src->isNullValue())
      break;

    return ConstantInt::getFalse(SVTy);
  }
  default:
    break;
  }
  return nullptr;
}
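
/// Fold a single scalar frexp call into its {fraction, exponent} result pair.
/// Returns a null pair when the operand is neither poison nor a ConstantFP.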
static std::pair<Constant *, Constant *>
ConstantFoldScalarFrexpCall(Constant *Op, Type *IntTy) {
  if (isa<PoisonValue>(Op))
    return {Op, PoisonValue::get(IntTy)};

  auto *ConstFP = dyn_cast<ConstantFP>(Op);
  if (!ConstFP)
    return {};

  const APFloat &U = ConstFP->getValueAPF();
  int FrexpExp;
  APFloat FrexpMant = frexp(U, FrexpExp, APFloat::rmNearestTiesToEven);
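  // Like libm frexp: |FrexpMant| lies in [0.5, 1.0) and
  // U == FrexpMant * 2^FrexpExp, e.g. frexp(8.0) yields mantissa 0.5 and
  // exponent 4 because 8.0 == 0.5 * 2^4.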
  Constant *Result0 = ConstantFP::get(ConstFP->getType(), FrexpMant);

  // The exponent is an "unspecified value" for inf/nan. We use zero to avoid
  // using undef.
  Constant *Result1 = FrexpMant.isFinite()
                          ? ConstantInt::getSigned(IntTy, FrexpExp)
                          : ConstantInt::getNullValue(IntTy);
  return {Result0, Result1};
}

/// Handle intrinsics that return tuples, which may be tuples of vectors.
static Constant *
ConstantFoldStructCall(StringRef Name, Intrinsic::ID IntrinsicID,
                       StructType *StTy, ArrayRef<Constant *> Operands,
                       const DataLayout &DL, const TargetLibraryInfo *TLI,
                       const CallBase *Call) {

  switch (IntrinsicID) {
  case Intrinsic::frexp: {
    Type *Ty0 = StTy->getContainedType(0);
    Type *Ty1 = StTy->getContainedType(1)->getScalarType();

    if (auto *FVTy0 = dyn_cast<FixedVectorType>(Ty0)) {
      SmallVector<Constant *, 4> Results0(FVTy0->getNumElements());
      SmallVector<Constant *, 4> Results1(FVTy0->getNumElements());

      for (unsigned I = 0, E = FVTy0->getNumElements(); I != E; ++I) {
        Constant *Lane = Operands[0]->getAggregateElement(I);
        std::tie(Results0[I], Results1[I]) =
            ConstantFoldScalarFrexpCall(Lane, Ty1);
        if (!Results0[I])
          return nullptr;
      }

      return ConstantStruct::get(StTy, ConstantVector::get(Results0),
                                 ConstantVector::get(Results1));
    }

    auto [Result0, Result1] = ConstantFoldScalarFrexpCall(Operands[0], Ty1);
    if (!Result0)
      return nullptr;
    return ConstantStruct::get(StTy, Result0, Result1);
  }
  case Intrinsic::sincos: {
    Type *Ty = StTy->getContainedType(0);
    Type *TyScalar = Ty->getScalarType();

    auto ConstantFoldScalarSincosCall =
        [&](Constant *Op) -> std::pair<Constant *, Constant *> {
      Constant *SinResult =
          ConstantFoldScalarCall(Name, Intrinsic::sin, TyScalar, Op, TLI, Call);
      Constant *CosResult =
          ConstantFoldScalarCall(Name, Intrinsic::cos, TyScalar, Op, TLI, Call);
      return std::make_pair(SinResult, CosResult);
    };
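
    // sincos returns the {sin(x), cos(x)} pair; fold vectors lane-wise,
    // mirroring the frexp handling above.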
    if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
      SmallVector<Constant *> SinResults(FVTy->getNumElements());
      SmallVector<Constant *> CosResults(FVTy->getNumElements());

      for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
        Constant *Lane = Operands[0]->getAggregateElement(I);
        std::tie(SinResults[I], CosResults[I]) =
            ConstantFoldScalarSincosCall(Lane);
        if (!SinResults[I] || !CosResults[I])
          return nullptr;
      }

      return ConstantStruct::get(StTy, ConstantVector::get(SinResults),
                                 ConstantVector::get(CosResults));
    }

    auto [SinResult, CosResult] = ConstantFoldScalarSincosCall(Operands[0]);
    if (!SinResult || !CosResult)
      return nullptr;
    return ConstantStruct::get(StTy, SinResult, CosResult);
  }
  default:
    // TODO: Constant folding of vector intrinsics that fall through here does
    // not work (e.g. overflow intrinsics)
    return ConstantFoldScalarCall(Name, IntrinsicID, StTy, Operands, TLI, Call);
  }

  return nullptr;
}

} // end anonymous namespace
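
/// Fold a two-operand intrinsic given constant operands, taking fast-math
/// flags (when relevant) from FMFSource rather than from a real call site.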
Constant *llvm::ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS,
                                            Constant *RHS, Type *Ty,
                                            Instruction *FMFSource) {
  return ConstantFoldIntrinsicCall2(ID, Ty, {LHS, RHS},
                                    dyn_cast_if_present<CallBase>(FMFSource));
}

Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
                                 ArrayRef<Constant *> Operands,
                                 const TargetLibraryInfo *TLI,
                                 bool AllowNonDeterministic) {
  if (Call->isNoBuiltin())
    return nullptr;
  if (!F->hasName())
    return nullptr;

  // If this is not an intrinsic and not recognized as a library call, bail out.
  Intrinsic::ID IID = F->getIntrinsicID();
  if (IID == Intrinsic::not_intrinsic) {
    if (!TLI)
      return nullptr;
    LibFunc LibF;
    if (!TLI->getLibFunc(*F, LibF))
      return nullptr;
  }

  // Conservatively assume that floating-point libcalls may be
  // non-deterministic.
  Type *Ty = F->getReturnType();
  if (!AllowNonDeterministic && Ty->isFPOrFPVectorTy())
    return nullptr;

  StringRef Name = F->getName();
  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
    return ConstantFoldFixedVectorCall(
        Name, IID, FVTy, Operands, F->getDataLayout(), TLI, Call);

  if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
    return ConstantFoldScalableVectorCall(
        Name, IID, SVTy, Operands, F->getDataLayout(), TLI, Call);

  if (auto *StTy = dyn_cast<StructType>(Ty))
    return ConstantFoldStructCall(Name, IID, StTy, Operands,
                                  F->getDataLayout(), TLI, Call);

  // TODO: If this is a library function, we already discovered that above,
  // so we should pass the LibFunc, not the name (and it might be better
  // still to separate intrinsic handling from libcalls).
  return ConstantFoldScalarCall(Name, IID, Ty, Operands, TLI, Call);
}

bool llvm::isMathLibCallNoop(const CallBase *Call,
                             const TargetLibraryInfo *TLI) {
  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
  // (and to some extent ConstantFoldScalarCall).
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  Function *F = Call->getCalledFunction();
  if (!F)
    return false;

  LibFunc Func;
  if (!TLI || !TLI->getLibFunc(*F, Func))
    return false;
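
  // A one-argument libcall is a no-op if evaluating it on the given constant
  // cannot raise a domain or range error (and so cannot set errno).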
  if (Call->arg_size() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
      const APFloat &Op = OpC->getValueAPF();
      switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
        // No error for NaN or any strictly positive argument; zero and
        // negative arguments may set errno.
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
        break;

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
        break;

      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
          return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
        break;
      }

      case LibFunc_atan:
      case LibFunc_atanf:
      case LibFunc_atanl:
        // Per POSIX, this MAY fail if Op is denormal. We choose not to fail.
        return true;

      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
        // asin and acos raise a domain error outside [-1.0, +1.0].
        return !(Op < APFloat::getOne(Op.getSemantics(), true) ||
                 Op > APFloat::getOne(Op.getSemantics()));

      case LibFunc_sinh:
      case LibFunc_cosh:
      case LibFunc_sinhf:
      case LibFunc_coshf:
      case LibFunc_sinhl:
      case LibFunc_coshl:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
        break;

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
      // maybe others?
      default:
        break;
      }
    }
  }
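
  // Two-argument libcalls: pow, fmod/remainder and atan2 are checked against
  // their own domain-error conditions below.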
  if (Call->arg_size() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          if (Ty == Op1C->getType())
            return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
        }
        break;
      }

      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
      case LibFunc_remainderl:
      case LibFunc_remainder:
      case LibFunc_remainderf:
        // A domain error occurs only for an infinite first operand or a zero
        // second operand; NaN operands simply propagate.
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());

      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2l:
        // Although IEEE-754 says atan2(+/-0.0, +/-0.0) is well-defined, and
        // GLIBC and MSVC do not appear to raise an error on those inputs, we
        // cannot rely on that behavior. POSIX and C11 say that a domain error
        // may occur, so allow for that possibility.
        return !Op0.isZero() || !Op1.isZero();

      default:
        break;
      }
    }
  }

  return false;
}

void TargetFolder::anchor() {}