//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFold.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {
//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//
static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zext(Result.getBitWidth());
  }

  return nullptr;
}
/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
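/// For example (illustrative, not from the original source): on a
/// little-endian target,
///   bitcast (<2 x i32> <i32 1, i32 2> to i64)
/// folds to i64 0x0000000200000001 by packing the lanes into one APInt.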
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");
  // Catch the obvious splat cases.
  if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy))
    return Res;

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating point, convert it to a vector
      // of integer to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        auto *SrcIVTy = FixedVectorType::get(
            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);
  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);
  // If the element types match, IR can fold it.
  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing.  For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    auto *DestIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }
  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    auto *SrcIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same.  Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant *, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantFoldCastOperand(Instruction::ZExt, Src, Elt->getType(),
                                      DL);
        assert(Src && "Constant folding cannot fail on plain integers");

        // Shift it to the right place, depending on endianness.
        Src = ConstantFoldBinaryOpOperands(
            Instruction::Shl, Src, ConstantInt::get(Src->getType(), ShiftAmt),
            DL);
        assert(Src && "Constant folding cannot fail on plain integers");

        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantFoldBinaryOpOperands(Instruction::Or, Elt, Src, DL);
        assert(Elt && "Constant folding cannot fail on plain integers");
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }
  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                          ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}
} // end anonymous namespace
/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
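/// For example (illustrative, not from the original source): given a
/// hypothetical global @g, the constant expression
///   getelementptr inbounds (i32, ptr @g, i64 4)
/// yields GV = @g with Offset = 16 (4 elements * 4 bytes).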
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL,
                                      DSOLocalEquivalent **DSOEquiv) {
  if (DSOEquiv)
    *DSOEquiv = nullptr;

  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    if (DSOEquiv)
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
                                      DSOEquiv);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL,
                                  DSOEquiv))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}
Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();
    if (SrcTy == DestTy)
      return C;

    TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
    TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
    if (!TypeSize::isKnownGE(SrcSize, DestSize))
      return nullptr;

    // Catch the obvious splat cases (since all-zeros can coerce non-integral
    // pointers legally).
    if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy))
      return Res;

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    // But be careful not to coerce non-integral pointers illegally.
    if (SrcSize == DestSize &&
        DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
            DL.isNonIntegralPointerType(DestTy->getScalarType())) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantFoldCastOperand(Cast, C, DestTy, DL);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill down
    // and find a bitcastable constant.
    if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
      C = ElemC;
    } else {
      // For non-byte-sized vector elements, the first element is not
      // necessarily located at the vector base address.
      if (auto *VT = dyn_cast<VectorType>(SrcTy))
        if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
          return nullptr;

      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}
namespace {

/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
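/// Illustrative example (not from the original source): reading 4 bytes at
/// ByteOffset 0 from an i32 258 (0x102) initializer on a little-endian target
/// fills CurPtr with the bytes { 0x02, 0x01, 0x00, 0x00 }.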
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if ((CI->getBitWidth() & 7) != 0)
      return false;
    const APInt &Val = CI->getValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = Val.extractBits(8, n * 8).getZExtValue();
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()){
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()){
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail padding,
      // read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element, if so we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    uint64_t NumElts, EltSize;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
      EltSize = DL.getTypeAllocSize(EltTy);
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
      // TODO: For non-byte-sized vectors, the current implementation assumes
      // there is padding to the next byte boundary between elements.
      if (!DL.typeSizeEqualsStoreSize(EltTy))
        return false;

      EltSize = DL.getTypeStoreSize(EltTy);
    }
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}
Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
                                       int64_t Offset, const DataLayout &DL) {
  // Bail out early. We do not expect to load from a scalable global variable.
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    // If this is a non-integer load, we can try folding it as an int load and
    // then bitcast the result.  This can be useful for union cases.  Note
    // that address spaces don't matter here since we're not going to result in
    // an actual new load.
    if (!LoadTy->isFloatingPointTy() && !LoadTy->isPointerTy() &&
        !LoadTy->isVectorTy())
      return nullptr;

    Type *MapTy = Type::getIntNTy(C->getContext(),
                                  DL.getTypeSizeInBits(LoadTy).getFixedValue());
    if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
          !LoadTy->isX86_AMXTy())
        // Materializing a zero can be done trivially without a bitcast
        return Constant::getNullValue(LoadTy);
      Type *CastTy = LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, we need to first convert to a vector of
        // integers, then do a vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
            !LoadTy->isX86_AMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getIntToPtr(Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return PoisonValue::get(IntType);

  // TODO: We should be able to support scalable types.
  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
  if (InitializerSize.isScalable())
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset >= (int64_t)InitializerSize.getFixedValue())
    return PoisonValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}
} // anonymous namespace
// If GV is a constant with an initializer read its representation starting
// at Offset and return it as a constant array of unsigned char.  Otherwise
// return null.
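//
// Illustrative example (not from the original source): for a constant global
// with initializer [4 x i8] c"\01\02\03\04" and Offset 1, this returns the
// byte array { 2, 3, 4 }.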
Constant *llvm::ReadByteArrayFromGlobal(const GlobalVariable *GV,
                                        uint64_t Offset) {
  if (!GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;

  const DataLayout &DL = GV->getParent()->getDataLayout();
  Constant *Init = const_cast<Constant *>(GV->getInitializer());
  TypeSize InitSize = DL.getTypeAllocSize(Init->getType());
  if (InitSize < Offset)
    return nullptr;

  uint64_t NBytes = InitSize - Offset;
  if (NBytes > UINT16_MAX)
    // Bail for large initializers in excess of 64K to avoid allocating
    // too much memory.
    // Offset is assumed to be less than or equal than InitSize (this
    // is enforced in ReadDataFromGlobal).
    return nullptr;

  SmallVector<unsigned char, 256> RawBytes(static_cast<size_t>(NBytes));
  unsigned char *CurPtr = RawBytes.data();

  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
    return nullptr;

  return ConstantDataArray::get(GV->getContext(), RawBytes);
}
/// If this Offset points exactly to the start of an aggregate element, return
/// that element, otherwise return nullptr.
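/// For example (illustrative): with Base of type { i32, [2 x i64] } and
/// Offset 16, this returns the second i64 element, assuming the usual layout
/// where the array member starts at byte offset 8.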
Constant *getConstantAtOffset(Constant *Base, APInt Offset,
                              const DataLayout &DL) {
  if (Offset.isZero())
    return Base;

  if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
    return nullptr;

  Type *ElemTy = Base->getType();
  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
  if (!Offset.isZero() || !Indices[0].isZero())
    return nullptr;

  Constant *C = Base;
  for (const APInt &Index : drop_begin(Indices)) {
    if (Index.isNegative() || Index.getActiveBits() >= 32)
      return nullptr;

    C = C->getAggregateElement(Index.getZExtValue());
    if (!C)
      return nullptr;
  }

  return C;
}

Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                          const APInt &Offset,
                                          const DataLayout &DL) {
  if (Constant *AtOffset = getConstantAtOffset(C, Offset, DL))
    if (Constant *Result = ConstantFoldLoadThroughBitcast(AtOffset, Ty, DL))
      return Result;

  // Explicitly check for out-of-bounds access, so we return poison even if the
  // constant is a uniform value.
  TypeSize Size = DL.getTypeAllocSize(C->getType());
  if (!Size.isScalable() && Offset.sge(Size.getFixedValue()))
    return PoisonValue::get(Ty);

  // Try an offset-independent fold of a uniform value.
  if (Constant *Result = ConstantFoldLoadFromUniformValue(C, Ty))
    return Result;

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  if (Offset.getSignificantBits() <= 64)
    if (Constant *Result =
            FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
      return Result;

  return nullptr;
}

Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                          const DataLayout &DL) {
  return ConstantFoldLoadFromConst(C, Ty, APInt(64, 0), DL);
}

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             APInt Offset,
                                             const DataLayout &DL) {
  // We can only fold loads from constant globals with a definitive initializer.
  // Check this upfront, to skip expensive offset calculations.
  auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(C));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;

  C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));

  if (C == GV)
    if (Constant *Result = ConstantFoldLoadFromConst(GV->getInitializer(), Ty,
                                                     Offset, DL))
      return Result;

  // If this load comes from anywhere in a uniform constant global, the value
  // is always the same, regardless of the loaded offset.
  return ConstantFoldLoadFromUniformValue(GV->getInitializer(), Ty);
}

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  APInt Offset(DL.getIndexTypeSizeInBits(C->getType()), 0);
  return ConstantFoldLoadFromConstPtr(C, Ty, Offset, DL);
}

Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty) {
  if (isa<PoisonValue>(C))
    return PoisonValue::get(Ty);
  if (isa<UndefValue>(C))
    return UndefValue::get(Ty);
  if (C->isNullValue() && !Ty->isX86_MMXTy() && !Ty->isX86_AMXTy())
    return Constant::getNullValue(Ty);
  if (C->isAllOnesValue() &&
      (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
    return Constant::getAllOnesValue(Ty);
  return nullptr;
}
namespace {

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together.  If target data info is available, it is provided as DL,
/// otherwise DL is null.
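/// For example (illustrative): (and (ptrtoint ptr @g to i64), i64 0) folds to
/// i64 0, because known-bits analysis proves every bit of the mask is zero
/// even though the address of @g is unknown.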
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnes()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnes()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0 &= Known1;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant.  This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}
/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
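/// For example (illustrative): on a target whose index type is i64, a GEP
/// whose array index is an i16 constant gets that index sign-extended to i64
/// here before the GEP is re-folded.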
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, bool InBounds,
                         std::optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant *, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType =
          Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
      Constant *NewIdx = ConstantFoldCastOperand(
          CastInst::getCastOpcode(Ops[i], true, NewType, true), Ops[i], NewType,
          DL);
      if (!NewIdx)
        return nullptr;
      NewIdxs.push_back(NewIdx);
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, InBounds, InRangeIndex);
  return ConstantFoldConstant(C, DL, TLI);
}
/// If we can symbolically evaluate the GEP constant expression, do so.
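/// For example (illustrative), a GEP of a GEP such as
///   getelementptr (i32, ptr getelementptr (i32, ptr @g, i64 2), i64 3)
/// is accumulated into the single byte offset @g + 20 before a new GEP is
/// rebuilt from that offset.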
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->isInBounds(), GEP->getInRangeIndex(),
                                   DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i]))
      return nullptr;

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset = APInt(
      BitWidth,
      DL.getIndexedOffsetInType(
          SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)));

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(llvm::drop_begin(GEP->operands()));

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".

  // For GEPs of GlobalValues, use the value type, otherwise use an i8 GEP.
  if (auto *GV = dyn_cast<GlobalValue>(Ptr))
    SrcElemTy = GV->getValueType();
  else
    SrcElemTy = Type::getInt8Ty(Ptr->getContext());

  if (!SrcElemTy->isSized())
    return nullptr;

  Type *ElemTy = SrcElemTy;
  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
  if (Offset != 0)
    return nullptr;

  // Try to add additional zero indices to reach the desired result element
  // type.
  // TODO: Should we avoid extra zero indices if ResElemTy can't be reached and
  // we'll have to insert a bitcast anyway?
  while (ElemTy != ResElemTy) {
    Type *NextTy = GetElementPtrInst::getTypeAtIndex(ElemTy, (uint64_t)0);
    if (!NextTy)
      break;

    Indices.push_back(APInt::getZero(isa<StructType>(ElemTy) ? 32 : BitWidth));
    ElemTy = NextTy;
  }

  SmallVector<Constant *, 32> NewIdxs;
  for (const APInt &Index : Indices)
    NewIdxs.push_back(ConstantInt::get(
        Type::getIntNTy(Ptr->getContext(), Index.getBitWidth()), Index));

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  std::optional<unsigned> InRangeIndex;
  if (std::optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  return ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs, InBounds,
                                        InRangeIndex);
}
/// Attempt to constant fold an instruction with the
/// specified opcode and operands.  If successful, the constant result is
/// returned, if not, null is returned.  Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
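/// For example (illustrative): an 'add' of i32 2 and i32 3 is dispatched to
/// ConstantFoldBinaryOpOperands and yields i32 5, while a volatile load
/// deliberately returns nullptr below.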
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode)) {
    switch (Opcode) {
    default:
      break;
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
      // Handle floating point instructions separately to account for denormals
      // TODO: If a constant expression is being folded rather than an
      // instruction, denormals will not be flushed/treated as zero
      if (const auto *I = dyn_cast<Instruction>(InstOrCE)) {
        return ConstantFoldFPInstOperands(Opcode, Ops[0], Ops[1], DL, I);
      }
    }
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
  }

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    Type *SrcElemTy = GEP->getSourceElementType();
    if (!ConstantExpr::isSupportedGetElementPtr(SrcElemTy))
      return nullptr;

    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], Ops.slice(1),
                                          GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                             DL, TLI);
    return CE->getWithOperands(Ops);
  }

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {
    auto *C = cast<CmpInst>(InstOrCE);
    return ConstantFoldCompareInstOperands(C->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI, C);
  }
  case Instruction::Freeze:
    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantFoldSelectInstruction(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantFoldExtractValueInstruction(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::InsertValue:
    return ConstantFoldInsertValueInstruction(
        Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  case Instruction::Load: {
    const auto *LI = dyn_cast<LoadInst>(InstOrCE);
    if (LI->isVolatile())
      return nullptr;
    return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
  }
  }
}
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//
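//
// Illustrative usage sketch (not part of the original file): a transform pass
// would typically drive these APIs roughly as follows, with DL taken from the
// Module and TLI from TargetLibraryAnalysis:
//
//   if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) {
//     I->replaceAllUsesWith(C);
//     if (isInstructionTriviallyDead(I, &TLI))
//       I->eraseFromParent();
//   }
//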
namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Res =
            ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI))
      return Res;
    return const_cast<Constant *>(C);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}
} // end anonymous namespace
Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it.  Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies if
      // all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants, if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}
Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}
*llvm::ConstantFoldInstOperands(Instruction
*I
,
1194 ArrayRef
<Constant
*> Ops
,
1195 const DataLayout
&DL
,
1196 const TargetLibraryInfo
*TLI
) {
1197 return ConstantFoldInstOperandsImpl(I
, I
->getOpcode(), Ops
, DL
, TLI
);
Constant *llvm::ConstantFoldCompareInstOperands(
    unsigned IntPredicate, Constant *Ops0, Constant *Ops1, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const Instruction *I) {
  CmpInst::Predicate Predicate = (CmpInst::Predicate)IntPredicate;
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        if (Constant *C = ConstantFoldIntegerCast(CE0->getOperand(0), IntPtrTy,
                                                  /*IsSigned*/ false, DL)) {
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantFoldIntegerCast(CE0->getOperand(0), IntPtrTy,
                                                 /*IsSigned*/ false, DL);
          Constant *C1 = ConstantFoldIntegerCast(CE1->getOperand(0), IntPtrTy,
                                                 /*IsSigned*/ false, DL);
          if (C0 && C1)
            return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptrty in size, otherwise
        // there is a truncation or extension that we aren't modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // Convert pointer comparison (base+offset1) pred (base+offset2) into
    // offset1 pred offset2, for the case where the offset is inbounds. This
    // only works for equality and unsigned comparison, as inbounds permits
    // crossing the sign boundary. However, the offset comparison itself is
    // signed.
    if (Ops0->getType()->isPointerTy() && !ICmpInst::isSigned(Predicate)) {
      unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
      APInt Offset0(IndexWidth, 0);
      Value *Stripped0 =
          Ops0->stripAndAccumulateInBoundsConstantOffsets(DL, Offset0);
      APInt Offset1(IndexWidth, 0);
      Value *Stripped1 =
          Ops1->stripAndAccumulateInBoundsConstantOffsets(DL, Offset1);
      if (Stripped0 == Stripped1)
        return ConstantExpr::getCompare(
            ICmpInst::getSignedPredicate(Predicate),
            ConstantInt::get(CE0->getContext(), Offset0),
            ConstantInt::get(CE0->getContext(), Offset1));
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate(Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  // Flush any denormal constant float input according to denormal handling
  // mode.
  Ops0 = FlushFPConstant(Ops0, I, /* IsOutput */ false);
  if (!Ops0)
    return nullptr;
  Ops1 = FlushFPConstant(Ops1, I, /* IsOutput */ false);
  if (!Ops1)
    return nullptr;

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}
Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantFoldUnaryInstruction(Opcode, Op);
}
Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  if (ConstantExpr::isDesirableBinOp(Opcode))
    return ConstantExpr::get(Opcode, LHS, RHS);
  return ConstantFoldBinaryInstruction(Opcode, LHS, RHS);
}
Constant *llvm::FlushFPConstant(Constant *Operand, const Instruction *I,
                                bool IsOutput) {
  if (!I || !I->getParent() || !I->getFunction())
    return Operand;

  ConstantFP *CFP = dyn_cast<ConstantFP>(Operand);
  if (!CFP)
    return Operand;

  const APFloat &APF = CFP->getValueAPF();
  // TODO: Should this canonicalize nans?
  if (!APF.isDenormal())
    return Operand;

  Type *Ty = CFP->getType();
  DenormalMode DenormMode =
      I->getFunction()->getDenormalMode(Ty->getFltSemantics());
  DenormalMode::DenormalModeKind Mode =
      IsOutput ? DenormMode.Output : DenormMode.Input;
  switch (Mode) {
  default:
    llvm_unreachable("unknown denormal mode");
  case DenormalMode::Dynamic:
    return nullptr;
  case DenormalMode::IEEE:
    return Operand;
  case DenormalMode::PreserveSign:
    if (APF.isDenormal()) {
      return ConstantFP::get(
          Ty->getContext(),
          APFloat::getZero(Ty->getFltSemantics(), APF.isNegative()));
    }
    return Operand;
  case DenormalMode::PositiveZero:
    if (APF.isDenormal()) {
      return ConstantFP::get(Ty->getContext(),
                             APFloat::getZero(Ty->getFltSemantics(), false));
    }
    return Operand;
  }

  return Operand;
}
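// Illustrative example for FlushFPConstant (not from the original source):
// under "denormal-fp-math"="preserve-sign", a negative denormal double input
// such as -4.9e-324 is flushed to -0.0; under "positive-zero" it becomes +0.0.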
Constant *llvm::ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
                                           Constant *RHS, const DataLayout &DL,
                                           const Instruction *I) {
  if (Instruction::isBinaryOp(Opcode)) {
    // Flush denormal inputs if needed.
    Constant *Op0 = FlushFPConstant(LHS, I, /* IsOutput */ false);
    if (!Op0)
      return nullptr;
    Constant *Op1 = FlushFPConstant(RHS, I, /* IsOutput */ false);
    if (!Op1)
      return nullptr;

    // Calculate constant result.
    Constant *C = ConstantFoldBinaryOpOperands(Opcode, Op0, Op1, DL);
    if (!C)
      return nullptr;

    // Flush denormal output if needed.
    return FlushFPConstant(C, I, /* IsOutput */ true);
  }
  // If instruction lacks a parent/function and the denormal mode cannot be
  // determined, use the default (IEEE).
  return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
}
Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      Constant *FoldedValue = nullptr;
      // If the input is an inttoptr, eliminate the pair.  This requires knowing
      // the width of a pointer, so it can't be done in ConstantExpr::getCast.
      if (CE->getOpcode() == Instruction::IntToPtr) {
        // zext/trunc the inttoptr to pointer size.
        FoldedValue = ConstantFoldIntegerCast(CE->getOperand(0),
                                              DL.getIntPtrType(CE->getType()),
                                              /*IsSigned=*/false, DL);
      } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
        // If we have GEP, we can perform the following folds:
        // (ptrtoint (gep null, x)) -> x
        // (ptrtoint (gep (gep null, x), y) -> x + y, etc.
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt BaseOffset(BitWidth, 0);
        auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
            DL, BaseOffset, /*AllowNonInbounds=*/true));
        if (Base->isNullValue()) {
          FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
        } else {
          // ptrtoint (gep i8, Ptr, (sub 0, V)) -> sub (ptrtoint Ptr), V
          if (GEP->getNumIndices() == 1 &&
              GEP->getSourceElementType()->isIntegerTy(8)) {
            auto *Ptr = cast<Constant>(GEP->getPointerOperand());
            auto *Sub = dyn_cast<ConstantExpr>(GEP->getOperand(1));
            Type *IntIdxTy = DL.getIndexType(Ptr->getType());
            if (Sub && Sub->getType() == IntIdxTy &&
                Sub->getOpcode() == Instruction::Sub &&
                Sub->getOperand(0)->isNullValue())
              FoldedValue = ConstantExpr::getSub(
                  ConstantExpr::getPtrToInt(Ptr, IntIdxTy), Sub->getOperand(1));
          }
        }
      }
      if (FoldedValue) {
        // Do a zext or trunc to get to the ptrtoint dest size.
        return ConstantFoldIntegerCast(FoldedValue, DestTy, /*IsSigned=*/false,
                                       DL);
      }
    }
    break;
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    break;
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }

  if (ConstantExpr::isDesirableCastOp(Opcode))
    return ConstantExpr::getCast(Opcode, C, DestTy);
  return ConstantFoldCastInstruction(Opcode, C, DestTy);
}
Constant *llvm::ConstantFoldIntegerCast(Constant *C, Type *DestTy,
                                        bool IsSigned, const DataLayout &DL) {
  Type *SrcTy = C->getType();
  if (SrcTy == DestTy)
    return C;
  if (SrcTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
    return ConstantFoldCastOperand(Instruction::Trunc, C, DestTy, DL);
  if (IsSigned)
    return ConstantFoldCastOperand(Instruction::SExt, C, DestTy, DL);
  return ConstantFoldCastOperand(Instruction::ZExt, C, DestTy, DL);
}
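// Illustrative example (not from the original source):
// ConstantFoldIntegerCast(i8 -1, i32, /*IsSigned=*/true, DL) yields i32 -1
// via sext; with IsSigned=false it yields i32 255 via zext; and a wider
// source is truncated regardless of IsSigned.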
//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//
1496 bool llvm::canConstantFoldCallTo(const CallBase
*Call
, const Function
*F
) {
1497 if (Call
->isNoBuiltin())
1499 if (Call
->getFunctionType() != F
->getFunctionType())
1501 switch (F
->getIntrinsicID()) {
1502 // Operations that do not operate floating-point numbers and do not depend on
1503 // FP environment can be folded even in strictfp functions.
1504 case Intrinsic::bswap
:
1505 case Intrinsic::ctpop
:
1506 case Intrinsic::ctlz
:
1507 case Intrinsic::cttz
:
1508 case Intrinsic::fshl
:
1509 case Intrinsic::fshr
:
1510 case Intrinsic::launder_invariant_group
:
1511 case Intrinsic::strip_invariant_group
:
1512 case Intrinsic::masked_load
:
1513 case Intrinsic::get_active_lane_mask
:
1514 case Intrinsic::abs
:
1515 case Intrinsic::smax
:
1516 case Intrinsic::smin
:
1517 case Intrinsic::umax
:
1518 case Intrinsic::umin
:
1519 case Intrinsic::sadd_with_overflow
:
1520 case Intrinsic::uadd_with_overflow
:
1521 case Intrinsic::ssub_with_overflow
:
1522 case Intrinsic::usub_with_overflow
:
1523 case Intrinsic::smul_with_overflow
:
1524 case Intrinsic::umul_with_overflow
:
1525 case Intrinsic::sadd_sat
:
1526 case Intrinsic::uadd_sat
:
1527 case Intrinsic::ssub_sat
:
1528 case Intrinsic::usub_sat
:
1529 case Intrinsic::smul_fix
:
1530 case Intrinsic::smul_fix_sat
:
1531 case Intrinsic::bitreverse
:
1532 case Intrinsic::is_constant
:
1533 case Intrinsic::vector_reduce_add
:
1534 case Intrinsic::vector_reduce_mul
:
1535 case Intrinsic::vector_reduce_and
:
1536 case Intrinsic::vector_reduce_or
:
1537 case Intrinsic::vector_reduce_xor
:
1538 case Intrinsic::vector_reduce_smin
:
1539 case Intrinsic::vector_reduce_smax
:
1540 case Intrinsic::vector_reduce_umin
:
1541 case Intrinsic::vector_reduce_umax
:
1542 // Target intrinsics
1543 case Intrinsic::amdgcn_perm
:
1544 case Intrinsic::amdgcn_wave_reduce_umin
:
1545 case Intrinsic::amdgcn_wave_reduce_umax
:
1546 case Intrinsic::arm_mve_vctp8
:
1547 case Intrinsic::arm_mve_vctp16
:
1548 case Intrinsic::arm_mve_vctp32
:
1549 case Intrinsic::arm_mve_vctp64
:
1550 case Intrinsic::aarch64_sve_convert_from_svbool
:
1551 // WebAssembly float semantics are always known
1552 case Intrinsic::wasm_trunc_signed
:
1553 case Intrinsic::wasm_trunc_unsigned
:
1556 // Floating point operations cannot be folded in strictfp functions in
1557 // general case. They can be folded if FP environment is known to compiler.
1558 case Intrinsic::minnum
:
1559 case Intrinsic::maxnum
:
1560 case Intrinsic::minimum
:
1561 case Intrinsic::maximum
:
1562 case Intrinsic::log
:
1563 case Intrinsic::log2
:
1564 case Intrinsic::log10
:
1565 case Intrinsic::exp
:
1566 case Intrinsic::exp2
:
1567 case Intrinsic::exp10
:
1568 case Intrinsic::sqrt
:
1569 case Intrinsic::sin
:
1570 case Intrinsic::cos
:
1571 case Intrinsic::pow
:
1572 case Intrinsic::powi
:
1573 case Intrinsic::ldexp
:
1574 case Intrinsic::fma
:
1575 case Intrinsic::fmuladd
:
1576 case Intrinsic::frexp
:
1577 case Intrinsic::fptoui_sat
:
1578 case Intrinsic::fptosi_sat
:
1579 case Intrinsic::convert_from_fp16
:
1580 case Intrinsic::convert_to_fp16
:
1581 case Intrinsic::amdgcn_cos
:
1582 case Intrinsic::amdgcn_cubeid
:
1583 case Intrinsic::amdgcn_cubema
:
1584 case Intrinsic::amdgcn_cubesc
:
1585 case Intrinsic::amdgcn_cubetc
:
1586 case Intrinsic::amdgcn_fmul_legacy
:
1587 case Intrinsic::amdgcn_fma_legacy
:
1588 case Intrinsic::amdgcn_fract
:
1589 case Intrinsic::amdgcn_sin
:
1590 // The intrinsics below depend on rounding mode in MXCSR.
1591 case Intrinsic::x86_sse_cvtss2si
:
1592 case Intrinsic::x86_sse_cvtss2si64
:
1593 case Intrinsic::x86_sse_cvttss2si
:
1594 case Intrinsic::x86_sse_cvttss2si64
:
1595 case Intrinsic::x86_sse2_cvtsd2si
:
1596 case Intrinsic::x86_sse2_cvtsd2si64
:
1597 case Intrinsic::x86_sse2_cvttsd2si
:
1598 case Intrinsic::x86_sse2_cvttsd2si64
:
1599 case Intrinsic::x86_avx512_vcvtss2si32
:
1600 case Intrinsic::x86_avx512_vcvtss2si64
:
1601 case Intrinsic::x86_avx512_cvttss2si
:
1602 case Intrinsic::x86_avx512_cvttss2si64
:
1603 case Intrinsic::x86_avx512_vcvtsd2si32
:
1604 case Intrinsic::x86_avx512_vcvtsd2si64
:
1605 case Intrinsic::x86_avx512_cvttsd2si
:
1606 case Intrinsic::x86_avx512_cvttsd2si64
:
1607 case Intrinsic::x86_avx512_vcvtss2usi32
:
1608 case Intrinsic::x86_avx512_vcvtss2usi64
:
1609 case Intrinsic::x86_avx512_cvttss2usi
:
1610 case Intrinsic::x86_avx512_cvttss2usi64
:
1611 case Intrinsic::x86_avx512_vcvtsd2usi32
:
1612 case Intrinsic::x86_avx512_vcvtsd2usi64
:
1613 case Intrinsic::x86_avx512_cvttsd2usi
:
1614 case Intrinsic::x86_avx512_cvttsd2usi64
:
    return !Call->isStrictFP();

  // Sign operations are actually bitwise operations; they do not raise
  // exceptions even for SNANs.
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::is_fpclass:
  // Non-constrained variants of rounding operations mean the default FP
  // environment; they can be folded in any case.
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::trunc:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::canonicalize:
  // Constrained intrinsics can be folded if the FP environment is known
  // to the compiler.
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fmuladd:
  case Intrinsic::experimental_constrained_fadd:
  case Intrinsic::experimental_constrained_fsub:
  case Intrinsic::experimental_constrained_fmul:
  case Intrinsic::experimental_constrained_fdiv:
  case Intrinsic::experimental_constrained_frem:
  case Intrinsic::experimental_constrained_ceil:
  case Intrinsic::experimental_constrained_floor:
  case Intrinsic::experimental_constrained_round:
  case Intrinsic::experimental_constrained_roundeven:
  case Intrinsic::experimental_constrained_trunc:
  case Intrinsic::experimental_constrained_nearbyint:
  case Intrinsic::experimental_constrained_rint:
  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic:
    break;
  }
  if (!F->hasName() || Call->isStrictFP())
    return false;

  // In these cases, the check of the length is required.  We don't want to
  // return true for a name like "cos\0blah" which strcmp would return equal
  // to "cos", but which has length 8.
  StringRef Name = F->getName();

  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "acosf" ||
           Name == "asin" || Name == "asinf" ||
           Name == "atan" || Name == "atanf" ||
           Name == "atan2" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "ceilf" ||
           Name == "cos" || Name == "cosf" ||
           Name == "cosh" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "expf" ||
           Name == "exp2" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "fabsf" ||
           Name == "floor" || Name == "floorf" ||
           Name == "fmod" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "logf" ||
           Name == "log2" || Name == "log2f" ||
           Name == "log10" || Name == "log10f";
  case 'n':
    return Name == "nearbyint" || Name == "nearbyintf";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "remainder" || Name == "remainderf" ||
           Name == "rint" || Name == "rintf" ||
           Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinf" ||
           Name == "sinh" || Name == "sinhf" ||
           Name == "sqrt" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanf" ||
           Name == "tanh" || Name == "tanhf" ||
           Name == "trunc" || Name == "truncf";
  case '_':
    // Check for various function names that get used for the math functions
    // when the header files are preprocessed with the macro
    // __FINITE_MATH_ONLY__ enabled.
    // The '12' here is the length of the shortest name that can match.
    // We need to check the size before looking at Name[1] and Name[2],
    // so we may as well check a limit that will eliminate mismatches.
    if (Name.size() < 12 || Name[1] != '_')
      return false;
    switch (Name[2]) {
    default:
      return false;
    case 'a':
      return Name == "__acos_finite" || Name == "__acosf_finite" ||
             Name == "__asin_finite" || Name == "__asinf_finite" ||
             Name == "__atan2_finite" || Name == "__atan2f_finite";
    case 'c':
      return Name == "__cosh_finite" || Name == "__coshf_finite";
    case 'e':
      return Name == "__exp_finite" || Name == "__expf_finite" ||
             Name == "__exp2_finite" || Name == "__exp2f_finite";
    case 'l':
      return Name == "__log_finite" || Name == "__logf_finite" ||
             Name == "__log10_finite" || Name == "__log10f_finite";
    case 'p':
      return Name == "__pow_finite" || Name == "__powf_finite";
    case 's':
      return Name == "__sinh_finite" || Name == "__sinhf_finite";
    }
  }
}

namespace {

Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy() || Ty->isFloatTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
                         Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(Result, Ty);
}

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                               const APFloat &V, const APFloat &W, Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(Result, Ty);
}

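// E.g. ConstantFoldFP(sin, APFloat(0.5), Ty) evaluates the host's sin(0.5)
// and wraps the result as a ConstantFP of type Ty; if the host libm reported
// an error via errno or the FP exception flags, nullptr is returned instead.
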
Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
  FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
  if (!VT)
    return nullptr;

  // This isn't strictly necessary, but handle the special/common case of zero:
  // all integer reductions of a zero input produce zero.
  if (isa<ConstantAggregateZero>(Op))
    return ConstantInt::get(VT->getElementType(), 0);

  // This is the same as the underlying binops - poison propagates.
  if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
    return PoisonValue::get(VT->getElementType());

  // TODO: Handle undef.
  if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
    return nullptr;

  auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
  if (!EltC)
    return nullptr;

  APInt Acc = EltC->getValue();
  for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
    if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
      return nullptr;
    const APInt &X = EltC->getValue();
    switch (IID) {
    case Intrinsic::vector_reduce_add:
      Acc = Acc + X;
      break;
    case Intrinsic::vector_reduce_mul:
      Acc = Acc * X;
      break;
    case Intrinsic::vector_reduce_and:
      Acc = Acc & X;
      break;
    case Intrinsic::vector_reduce_or:
      Acc = Acc | X;
      break;
    case Intrinsic::vector_reduce_xor:
      Acc = Acc ^ X;
      break;
    case Intrinsic::vector_reduce_smin:
      Acc = APIntOps::smin(Acc, X);
      break;
    case Intrinsic::vector_reduce_smax:
      Acc = APIntOps::smax(Acc, X);
      break;
    case Intrinsic::vector_reduce_umin:
      Acc = APIntOps::umin(Acc, X);
      break;
    case Intrinsic::vector_reduce_umax:
      Acc = APIntOps::umax(Acc, X);
      break;
    }
  }

  return ConstantInt::get(Op->getContext(), Acc);
}

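// E.g. llvm.vector.reduce.add on <4 x i32> <1, 2, 3, 4> folds to i32 10, and
// llvm.vector.reduce.umax on the same input folds to i32 4.
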
/// Attempt to fold an SSE floating point to integer conversion of a constant
/// floating point. If roundTowardZero is false, the default IEEE rounding is
/// used (toward nearest, ties to even). This matches the behavior of the
/// non-truncating SSE instructions in the default rounding mode. The desired
/// integer type Ty is used to select how many bits are available for the
/// result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(MutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr; // Cannot constant fold.
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}

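// E.g. converting 4.7 with the default rounding mode yields 5 (nearest, ties
// to even), while the truncating variants (roundTowardZero) yield 4.
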
double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() ||
      Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}

static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
    return true;
  }
  if (isa<UndefValue>(Op)) {
    C = nullptr;
    return true;
  }
  return false;
}

/// Checks if the given intrinsic call, which evaluates to constant, is allowed
/// to be folded.
///
/// \param CI Constrained intrinsic call.
/// \param St Exception flags raised during constant evaluation.
static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI,
                               APFloat::opStatus St) {
  std::optional<RoundingMode> ORM = CI->getRoundingMode();
  std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();

  // If the operation does not change exception status flags, it is safe
  // to fold.
  if (St == APFloat::opStatus::opOK)
    return true;

  // If evaluation raised an FP exception, the result can depend on the
  // rounding mode. If the latter is unknown, folding is not possible.
  if (ORM && *ORM == RoundingMode::Dynamic)
    return false;

  // If FP exceptions are ignored, fold the call, even if such an exception
  // is raised.
  if (EB && *EB != fp::ExceptionBehavior::ebStrict)
    return true;

  // Leave the calculation for runtime so that the exception flags are
  // correctly set in hardware.
  return false;
}

/// Returns the rounding mode that should be used for constant evaluation.
static RoundingMode
getEvaluationRoundingMode(const ConstrainedFPIntrinsic *CI) {
  std::optional<RoundingMode> ORM = CI->getRoundingMode();
  if (!ORM || *ORM == RoundingMode::Dynamic)
    // Even if the rounding mode is unknown, try evaluating the operation.
    // If it does not raise an inexact exception, rounding was not applied,
    // so the result is exact and does not depend on the rounding mode.
    // Whether other FP exceptions are raised does not depend on the
    // rounding mode either.
    return RoundingMode::NearestTiesToEven;
  return *ORM;
}

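// Note that an exact evaluation (opOK) can be folded even under a dynamic
// rounding mode: an exact result is the same in every rounding mode.
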
/// Try to constant fold llvm.canonicalize for the given caller and value.
static Constant *constantFoldCanonicalize(const Type *Ty, const CallBase *CI,
                                          const APFloat &Src) {
  // Zero, positive and negative, is always OK to fold.
  if (Src.isZero()) {
    // Get a fresh 0, since ppc_fp128 does have non-canonical zeros.
    return ConstantFP::get(
        CI->getContext(),
        APFloat::getZero(Src.getSemantics(), Src.isNegative()));
  }

  if (!Ty->isIEEELikeFPTy())
    return nullptr;

  // Zero is always canonical and the sign must be preserved.
  //
  // Denorms and nans may have special encodings, but it should be OK to fold a
  // totally average number.
  if (Src.isNormal() || Src.isInfinity())
    return ConstantFP::get(CI->getContext(), Src);

  if (Src.isDenormal() && CI->getParent() && CI->getFunction()) {
    DenormalMode DenormMode =
        CI->getFunction()->getDenormalMode(Src.getSemantics());

    if (DenormMode == DenormalMode::getIEEE())
      return ConstantFP::get(CI->getContext(), Src);

    if (DenormMode.Input == DenormalMode::Dynamic)
      return nullptr;

    // If we know if either input or output is flushed, we can fold.
    if ((DenormMode.Input == DenormalMode::Dynamic &&
         DenormMode.Output == DenormalMode::IEEE) ||
        (DenormMode.Input == DenormalMode::IEEE &&
         DenormMode.Output == DenormalMode::Dynamic))
      return nullptr;

    bool IsPositive =
        (!Src.isNegative() || DenormMode.Input == DenormalMode::PositiveZero ||
         (DenormMode.Output == DenormalMode::PositiveZero &&
          DenormMode.Input == DenormalMode::IEEE));

    return ConstantFP::get(CI->getContext(),
                           APFloat::getZero(Src.getSemantics(), !IsPositive));
  }

  return nullptr;
}

static Constant *ConstantFoldScalarCall1(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 1 && "Wrong number of operands.");

  if (IntrinsicID == Intrinsic::is_constant) {
    // We know we have a "Constant" argument. But we want to only
    // return true for manifest constants, not those that depend on
    // constants with unknowable values, e.g. GlobalValue or BlockAddress.
    if (Operands[0]->isManifestConstant())
      return ConstantInt::getTrue(Ty->getContext());
    return nullptr;
  }

  if (isa<PoisonValue>(Operands[0])) {
    // TODO: All of these operations should probably propagate poison.
    if (IntrinsicID == Intrinsic::canonicalize)
      return PoisonValue::get(Ty);
  }

  if (isa<UndefValue>(Operands[0])) {
    // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
    // ctpop() is between 0 and bitwidth, pick 0 for undef.
    // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
    if (IntrinsicID == Intrinsic::cos ||
        IntrinsicID == Intrinsic::ctpop ||
        IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat ||
        IntrinsicID == Intrinsic::canonicalize)
      return Constant::getNullValue(Ty);
    if (IntrinsicID == Intrinsic::bswap ||
        IntrinsicID == Intrinsic::bitreverse ||
        IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group)
      return Operands[0];
  }

  if (isa<ConstantPointerNull>(Operands[0])) {
    // launder(null) == null == strip(null) iff in addrspace 0
    if (IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group) {
      // If the instruction is not yet put in a basic block (e.g. when cloning
      // a function during inlining), Call's caller may not be available.
      // So check Call's BB first before querying Call->getCaller.
      const Function *Caller =
          Call->getParent() ? Call->getCaller() : nullptr;
      if (Caller &&
          !NullPointerIsDefined(
              Caller, Operands[0]->getType()->getPointerAddressSpace())) {
        return Operands[0];
      }
      return nullptr;
    }
  }

  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
    if (IntrinsicID == Intrinsic::convert_to_fp16) {
      APFloat Val(Op->getValueAPF());

      bool lost = false;
      Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

      return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
    }

    APFloat U = Op->getValueAPF();

    if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
        IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
      bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;

      if (U.isNaN())
        return nullptr;

      unsigned Width = Ty->getIntegerBitWidth();
      APSInt Int(Width, !Signed);
      bool IsExact = false;
      APFloat::opStatus Status =
          U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);

      if (Status == APFloat::opOK || Status == APFloat::opInexact)
        return ConstantInt::get(Ty, Int);

      return nullptr;
    }

    if (IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat) {
      // convertToInteger() already has the desired saturation semantics.
      APSInt Int(Ty->getIntegerBitWidth(),
                 IntrinsicID == Intrinsic::fptoui_sat);
      bool IsExact;
      U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
      return ConstantInt::get(Ty, Int);
    }

    if (IntrinsicID == Intrinsic::canonicalize)
      return constantFoldCanonicalize(Ty, Call, U);

    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
      return nullptr;

    // Use internal versions of these intrinsics.

    if (IntrinsicID == Intrinsic::nearbyint ||
        IntrinsicID == Intrinsic::rint) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::round) {
      U.roundToIntegral(APFloat::rmNearestTiesToAway);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::roundeven) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::ceil) {
      U.roundToIntegral(APFloat::rmTowardPositive);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::floor) {
      U.roundToIntegral(APFloat::rmTowardNegative);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::trunc) {
      U.roundToIntegral(APFloat::rmTowardZero);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::fabs) {
      U.clearSign();
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::amdgcn_fract) {
      // The v_fract instruction behaves like the OpenCL spec, which defines
      // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
      // there to prevent fract(-small) from returning 1.0. It returns the
      // largest positive floating-point number less than 1.0."
      APFloat FloorU(U);
      FloorU.roundToIntegral(APFloat::rmTowardNegative);
      APFloat FractU(U - FloorU);
      APFloat AlmostOne(U.getSemantics(), 1);
      AlmostOne.next(/*nextDown*/ true);
      return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
    }
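
    // E.g. amdgcn.fract(2.25) folds to 0.25; for a tiny negative input,
    // x - floor(x) rounds up to 1.0, and the clamp above instead yields the
    // largest float below 1.0.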

    // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
    // raise FP exceptions, unless the argument is a signaling NaN.

    std::optional<APFloat::roundingMode> RM;
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::experimental_constrained_nearbyint:
    case Intrinsic::experimental_constrained_rint: {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      RM = CI->getRoundingMode();
      if (!RM || *RM == RoundingMode::Dynamic)
        return nullptr;
      break;
    }
    case Intrinsic::experimental_constrained_round:
      RM = APFloat::rmNearestTiesToAway;
      break;
    case Intrinsic::experimental_constrained_ceil:
      RM = APFloat::rmTowardPositive;
      break;
    case Intrinsic::experimental_constrained_floor:
      RM = APFloat::rmTowardNegative;
      break;
    case Intrinsic::experimental_constrained_trunc:
      RM = APFloat::rmTowardZero;
      break;
    }
    if (RM) {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      if (U.isFinite()) {
        APFloat::opStatus St = U.roundToIntegral(*RM);
        if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
            St == APFloat::opInexact) {
          std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
          if (EB && *EB == fp::ebStrict)
            return nullptr;
        }
      } else if (U.isSignaling()) {
        std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
        if (EB && *EB != fp::ebIgnore)
          return nullptr;
        U = APFloat::getQNaN(U.getSemantics());
      }
      return ConstantFP::get(Ty->getContext(), U);
    }

    /// We only fold functions with finite arguments. Folding NaN and inf is
    /// likely to be aborted with an exception anyway, and some host libms
    /// have known errors raising exceptions.
    if (!U.isFinite())
      return nullptr;

    /// Currently APFloat versions of these functions do not exist, so we use
    /// the host native double versions. Float versions are not called
    /// directly, but for all of these it is true that
    /// (float)(f((double)arg)) == f(arg). Long double is not supported yet.
    const APFloat &APF = Op->getValueAPF();

    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::log:
      return ConstantFoldFP(log, APF, Ty);
    case Intrinsic::log2:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log2, APF, Ty);
    case Intrinsic::log10:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log10, APF, Ty);
    case Intrinsic::exp:
      return ConstantFoldFP(exp, APF, Ty);
    case Intrinsic::exp2:
      // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
    case Intrinsic::exp10:
      // Fold exp10(x) as pow(10, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, APFloat(10.0), APF, Ty);
    case Intrinsic::sin:
      return ConstantFoldFP(sin, APF, Ty);
    case Intrinsic::cos:
      return ConstantFoldFP(cos, APF, Ty);
    case Intrinsic::sqrt:
      return ConstantFoldFP(sqrt, APF, Ty);
    case Intrinsic::amdgcn_cos:
    case Intrinsic::amdgcn_sin: {
      double V = getValueAsDouble(Op);
      if (V < -256.0 || V > 256.0)
        // The gfx8 and gfx9 architectures handle arguments outside the range
        // [-256, 256] differently. This should be a rare case so bail out
        // rather than trying to handle the difference.
        return nullptr;
      bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
      double V4 = V * 4.0;
      if (V4 == floor(V4)) {
        // Force exact results for quarter-integer inputs.
        const double SinVals[4] = {0.0, 1.0, 0.0, -1.0};
        V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
      } else {
        if (IsCos)
          V = cos(V * 2.0 * numbers::pi);
        else
          V = sin(V * 2.0 * numbers::pi);
      }
      return GetConstantFoldFPValue(V, Ty);
    }
    }

    if (!TLI)
      return nullptr;

    LibFunc Func = NotLibFunc;
    if (!TLI->getLibFunc(Name, Func))
      return nullptr;

    switch (Func) {
    default:
      break;
    case LibFunc_acos:
    case LibFunc_acosf:
    case LibFunc_acos_finite:
    case LibFunc_acosf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(acos, APF, Ty);
      break;
    case LibFunc_asin:
    case LibFunc_asinf:
    case LibFunc_asin_finite:
    case LibFunc_asinf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(asin, APF, Ty);
      break;
    case LibFunc_atan:
    case LibFunc_atanf:
      if (TLI->has(Func))
        return ConstantFoldFP(atan, APF, Ty);
      break;
    case LibFunc_ceil:
    case LibFunc_ceilf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_cos:
    case LibFunc_cosf:
      if (TLI->has(Func))
        return ConstantFoldFP(cos, APF, Ty);
      break;
    case LibFunc_cosh:
    case LibFunc_coshf:
    case LibFunc_cosh_finite:
    case LibFunc_coshf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(cosh, APF, Ty);
      break;
    case LibFunc_exp:
    case LibFunc_expf:
    case LibFunc_exp_finite:
    case LibFunc_expf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(exp, APF, Ty);
      break;
    case LibFunc_exp2:
    case LibFunc_exp2f:
    case LibFunc_exp2_finite:
    case LibFunc_exp2f_finite:
      if (TLI->has(Func))
        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
        return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
      break;
    case LibFunc_fabs:
    case LibFunc_fabsf:
      if (TLI->has(Func)) {
        U.clearSign();
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_floor:
    case LibFunc_floorf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_log:
    case LibFunc_logf:
    case LibFunc_log_finite:
    case LibFunc_logf_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        return ConstantFoldFP(log, APF, Ty);
      break;
    case LibFunc_log2:
    case LibFunc_log2f:
    case LibFunc_log2_finite:
    case LibFunc_log2f_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(log2, APF, Ty);
      break;
    case LibFunc_log10:
    case LibFunc_log10f:
    case LibFunc_log10_finite:
    case LibFunc_log10f_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(log10, APF, Ty);
      break;
    case LibFunc_nearbyint:
    case LibFunc_nearbyintf:
    case LibFunc_rint:
    case LibFunc_rintf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_round:
    case LibFunc_roundf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_sin:
    case LibFunc_sinf:
      if (TLI->has(Func))
        return ConstantFoldFP(sin, APF, Ty);
      break;
    case LibFunc_sinh:
    case LibFunc_sinhf:
    case LibFunc_sinh_finite:
    case LibFunc_sinhf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(sinh, APF, Ty);
      break;
    case LibFunc_sqrt:
    case LibFunc_sqrtf:
      if (!APF.isNegative() && TLI->has(Func))
        return ConstantFoldFP(sqrt, APF, Ty);
      break;
    case LibFunc_tan:
    case LibFunc_tanf:
      if (TLI->has(Func))
        return ConstantFoldFP(tan, APF, Ty);
      break;
    case LibFunc_tanh:
    case LibFunc_tanhf:
      if (TLI->has(Func))
        return ConstantFoldFP(tanh, APF, Ty);
      break;
    case LibFunc_trunc:
    case LibFunc_truncf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    }
    return nullptr;
  }

= dyn_cast
<ConstantInt
>(Operands
[0])) {
2414 switch (IntrinsicID
) {
2415 case Intrinsic::bswap
:
2416 return ConstantInt::get(Ty
->getContext(), Op
->getValue().byteSwap());
2417 case Intrinsic::ctpop
:
2418 return ConstantInt::get(Ty
, Op
->getValue().popcount());
2419 case Intrinsic::bitreverse
:
2420 return ConstantInt::get(Ty
->getContext(), Op
->getValue().reverseBits());
2421 case Intrinsic::convert_from_fp16
: {
2422 APFloat
Val(APFloat::IEEEhalf(), Op
->getValue());
2425 APFloat::opStatus status
= Val
.convert(
2426 Ty
->getFltSemantics(), APFloat::rmNearestTiesToEven
, &lost
);
2428 // Conversion is always precise.
2430 assert(status
!= APFloat::opInexact
&& !lost
&&
2431 "Precision lost during fp16 constfolding");
2433 return ConstantFP::get(Ty
->getContext(), Val
);
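  // E.g. llvm.bswap.i32(0x12345678) folds to 0x78563412 above, and
  // llvm.convert.from.fp16(0x3C00) folds to float 1.0.
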
  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
    if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
      return C;
    break;
  }

  // Support ConstantVector in case we have an Undef in the top.
  if (isa<ConstantVector>(Operands[0]) ||
      isa<ConstantDataVector>(Operands[0])) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ true);
      break;
    }
  }

  return nullptr;
}

static Constant *evaluateCompare(const APFloat &Op1, const APFloat &Op2,
                                 const ConstrainedFPIntrinsic *Call) {
  APFloat::opStatus St = APFloat::opOK;
  auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
  FCmpInst::Predicate Cond = FCmp->getPredicate();
  if (FCmp->isSignaling()) {
    if (Op1.isNaN() || Op2.isNaN())
      St = APFloat::opInvalidOp;
  } else {
    if (Op1.isSignaling() || Op2.isSignaling())
      St = APFloat::opInvalidOp;
  }
  bool Result = FCmpInst::compare(Op1, Op2, Cond);
  if (mayFoldConstrained(const_cast<ConstrainedFPCmpIntrinsic *>(FCmp), St))
    return ConstantInt::get(Call->getType()->getScalarType(), Result);
  return nullptr;
}

static Constant *ConstantFoldScalarCall2(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 2 && "Wrong number of operands.");

  if (Ty->isFloatingPointTy()) {
    // TODO: We should have undef handling for all of the FP intrinsics that
    //       are attempted to be folded in this function.
    bool IsOp0Undef = isa<UndefValue>(Operands[0]);
    bool IsOp1Undef = isa<UndefValue>(Operands[1]);
    switch (IntrinsicID) {
    case Intrinsic::maxnum:
    case Intrinsic::minnum:
    case Intrinsic::maximum:
    case Intrinsic::minimum:
      // If one argument is undef, return the other argument.
      if (IsOp0Undef)
        return Operands[1];
      if (IsOp1Undef)
        return Operands[0];
      break;
    }
  }

  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    const APFloat &Op1V = Op1->getValueAPF();

    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (Op2->getType() != Op1->getType())
        return nullptr;
      const APFloat &Op2V = Op2->getValueAPF();

      if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
        RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
        APFloat Res = Op1V;
        APFloat::opStatus St;
        switch (IntrinsicID) {
        default:
          return nullptr;
        case Intrinsic::experimental_constrained_fadd:
          St = Res.add(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fsub:
          St = Res.subtract(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fmul:
          St = Res.multiply(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fdiv:
          St = Res.divide(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_frem:
          St = Res.mod(Op2V);
          break;
        case Intrinsic::experimental_constrained_fcmp:
        case Intrinsic::experimental_constrained_fcmps:
          return evaluateCompare(Op1V, Op2V, ConstrIntr);
        }
        if (mayFoldConstrained(const_cast<ConstrainedFPIntrinsic *>(ConstrIntr),
                               St))
          return ConstantFP::get(Ty->getContext(), Res);
        return nullptr;
      }

      switch (IntrinsicID) {
      default:
        break;
      case Intrinsic::copysign:
        return ConstantFP::get(Ty->getContext(),
                               APFloat::copySign(Op1V, Op2V));
      case Intrinsic::minnum:
        return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
      case Intrinsic::maxnum:
        return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
      case Intrinsic::minimum:
        return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
      case Intrinsic::maximum:
        return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
      }

      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return nullptr;

      switch (IntrinsicID) {
      default:
        break;
      case Intrinsic::pow:
        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      case Intrinsic::amdgcn_fmul_legacy:
        // The legacy behaviour is that multiplying +/- 0.0 by anything, even
        // NaN or infinity, gives +0.0.
        if (Op1V.isZero() || Op2V.isZero())
          return ConstantFP::getZero(Ty);
        return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
      }

      if (!TLI)
        return nullptr;

      LibFunc Func = NotLibFunc;
      if (!TLI->getLibFunc(Name, Func))
        return nullptr;

      switch (Func) {
      default:
        break;
      case LibFunc_pow:
      case LibFunc_powf:
      case LibFunc_pow_finite:
      case LibFunc_powf_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        break;
      case LibFunc_fmod:
      case LibFunc_fmodf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_remainder:
      case LibFunc_remainderf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_atan2:
      case LibFunc_atan2f:
        // atan2(+/-0.0, +/-0.0) is known to raise an exception on some libm
        // implementations (Solaris), so we do not assume a known result for
        // that.
        if (Op1V.isZero() && Op2V.isZero())
          return nullptr;
        [[fallthrough]];
      case LibFunc_atan2_finite:
      case LibFunc_atan2f_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
        break;
      }
    } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
      switch (IntrinsicID) {
      case Intrinsic::ldexp: {
        return ConstantFP::get(
            Ty->getContext(),
            scalbn(Op1V, Op2C->getSExtValue(), APFloat::rmNearestTiesToEven));
      }
      case Intrinsic::is_fpclass: {
        FPClassTest Mask = static_cast<FPClassTest>(Op2C->getZExtValue());
        bool Result =
            ((Mask & fcSNan) && Op1V.isNaN() && Op1V.isSignaling()) ||
            ((Mask & fcQNan) && Op1V.isNaN() && !Op1V.isSignaling()) ||
            ((Mask & fcNegInf) && Op1V.isNegInfinity()) ||
            ((Mask & fcNegNormal) && Op1V.isNormal() && Op1V.isNegative()) ||
            ((Mask & fcNegSubnormal) && Op1V.isDenormal() &&
             Op1V.isNegative()) ||
            ((Mask & fcNegZero) && Op1V.isZero() && Op1V.isNegative()) ||
            ((Mask & fcPosZero) && Op1V.isZero() && !Op1V.isNegative()) ||
            ((Mask & fcPosSubnormal) && Op1V.isDenormal() &&
             !Op1V.isNegative()) ||
            ((Mask & fcPosNormal) && Op1V.isNormal() && !Op1V.isNegative()) ||
            ((Mask & fcPosInf) && Op1V.isPosInfinity());
        return ConstantInt::get(Ty, Result);
      }
      default:
        break;
      }

      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return nullptr;
      if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V.convertToDouble(),
                                    (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V.convertToDouble(),
                                    (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((double)std::pow(Op1V.convertToDouble(),
                                     (int)Op2C->getZExtValue())));
    }
    return nullptr;
  }

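  // E.g. llvm.ldexp(1.5, 3) folds to 12.0 (1.5 * 2^3), and llvm.is.fpclass
  // with Mask == 3 (fcSNan | fcQNan) folds to true exactly for NaN inputs.
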
  if (Operands[0]->getType()->isIntegerTy() &&
      Operands[1]->getType()->isIntegerTy()) {
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::smax:
    case Intrinsic::smin:
    case Intrinsic::umax:
    case Intrinsic::umin:
      // This is the same as for binary ops - poison propagates.
      // TODO: Poison handling should be consolidated.
      if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
        return PoisonValue::get(Ty);

      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return MinMaxIntrinsic::getSaturationPoint(IntrinsicID, Ty);
      return ConstantInt::get(
          Ty, ICmpInst::compare(*C0, *C1,
                                MinMaxIntrinsic::getPredicate(IntrinsicID))
                  ? *C0
                  : *C1);

    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
      // X - undef -> { 0, false }
      // undef - X -> { 0, false }
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);
      [[fallthrough]];
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
      // X + undef -> { -1, false }
      // undef + x -> { -1, false }
      if (!C0 || !C1) {
        return ConstantStruct::get(
            cast<StructType>(Ty),
            {Constant::getAllOnesValue(Ty->getStructElementType(0)),
             Constant::getNullValue(Ty->getStructElementType(1))});
      }
      [[fallthrough]];
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      // undef * X -> { 0, false }
      // X * undef -> { 0, false }
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);

      APInt Res;
      bool Overflow;
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Invalid case");
      case Intrinsic::sadd_with_overflow:
        Res = C0->sadd_ov(*C1, Overflow);
        break;
      case Intrinsic::uadd_with_overflow:
        Res = C0->uadd_ov(*C1, Overflow);
        break;
      case Intrinsic::ssub_with_overflow:
        Res = C0->ssub_ov(*C1, Overflow);
        break;
      case Intrinsic::usub_with_overflow:
        Res = C0->usub_ov(*C1, Overflow);
        break;
      case Intrinsic::smul_with_overflow:
        Res = C0->smul_ov(*C1, Overflow);
        break;
      case Intrinsic::umul_with_overflow:
        Res = C0->umul_ov(*C1, Overflow);
        break;
      }
      Constant *Ops[] = {
          ConstantInt::get(Ty->getContext(), Res),
          ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)};
      return ConstantStruct::get(cast<StructType>(Ty), Ops);
    }
    case Intrinsic::uadd_sat:
    case Intrinsic::sadd_sat:
      // This is the same as for binary ops - poison propagates.
      // TODO: Poison handling should be consolidated.
      if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
        return PoisonValue::get(Ty);

      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getAllOnesValue(Ty);
      if (IntrinsicID == Intrinsic::uadd_sat)
        return ConstantInt::get(Ty, C0->uadd_sat(*C1));
      else
        return ConstantInt::get(Ty, C0->sadd_sat(*C1));
    case Intrinsic::usub_sat:
    case Intrinsic::ssub_sat:
      // This is the same as for binary ops - poison propagates.
      // TODO: Poison handling should be consolidated.
      if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
        return PoisonValue::get(Ty);

      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::usub_sat)
        return ConstantInt::get(Ty, C0->usub_sat(*C1));
      else
        return ConstantInt::get(Ty, C0->ssub_sat(*C1));
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      assert(C1 && "Must be constant int");

      // cttz(0, 1) and ctlz(0, 1) are poison.
      if (C1->isOne() && (!C0 || C0->isZero()))
        return PoisonValue::get(Ty);
      if (!C0)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::cttz)
        return ConstantInt::get(Ty, C0->countr_zero());
      else
        return ConstantInt::get(Ty, C0->countl_zero());

    case Intrinsic::abs:
      assert(C1 && "Must be constant int");
      assert((C1->isOne() || C1->isZero()) && "Must be 0 or 1");

      // Undef or minimum val operand with poison min --> undef
      if (C1->isOne() && (!C0 || C0->isMinSignedValue()))
        return UndefValue::get(Ty);

      // Undef operand with no poison min --> 0 (sign bit must be clear)
      if (!C0)
        return Constant::getNullValue(Ty);

      return ConstantInt::get(Ty, C0->abs());
    case Intrinsic::amdgcn_wave_reduce_umin:
    case Intrinsic::amdgcn_wave_reduce_umax:
      return dyn_cast<Constant>(Operands[0]);
    }

    return nullptr;
  }

  // Support ConstantVector in case we have an Undef in the top.
  if ((isa<ConstantVector>(Operands[0]) ||
       isa<ConstantDataVector>(Operands[0])) &&
      // Check for default rounding mode.
      // FIXME: Support other rounding modes?
      isa<ConstantInt>(Operands[1]) &&
      cast<ConstantInt>(Operands[1])->getValue() == 4) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::x86_avx512_vcvtss2si32:
    case Intrinsic::x86_avx512_vcvtss2si64:
    case Intrinsic::x86_avx512_vcvtsd2si32:
    case Intrinsic::x86_avx512_vcvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ false);
      break;
    case Intrinsic::x86_avx512_cvttss2si:
    case Intrinsic::x86_avx512_cvttss2si64:
    case Intrinsic::x86_avx512_cvttsd2si:
    case Intrinsic::x86_avx512_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ false);
      break;
    }
  }
  return nullptr;
}

static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
                                               const APFloat &S0,
                                               const APFloat &S1,
                                               const APFloat &S2) {
  unsigned ID;
  const fltSemantics &Sem = S0.getSemantics();
  APFloat MA(Sem), SC(Sem), TC(Sem);
  if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) {
    if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
      // S2 < 0
      ID = 5;
      SC = -S0;
    } else {
      ID = 4;
      SC = S0;
    }
    MA = S2;
    TC = -S1;
  } else if (abs(S1) >= abs(S0)) {
    if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
      // S1 < 0
      ID = 3;
      TC = -S2;
    } else {
      ID = 2;
      TC = S2;
    }
    MA = S1;
    SC = S0;
  } else {
    if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
      // S0 < 0
      ID = 1;
      SC = S2;
    } else {
      ID = 0;
      SC = -S2;
    }
    MA = S0;
    TC = -S1;
  }
  switch (IntrinsicID) {
  default:
    llvm_unreachable("unhandled amdgcn cube intrinsic");
  case Intrinsic::amdgcn_cubeid:
    return APFloat(Sem, ID);
  case Intrinsic::amdgcn_cubema:
    return MA * APFloat(Sem, 2);
  case Intrinsic::amdgcn_cubesc:
    return SC;
  case Intrinsic::amdgcn_cubetc:
    return TC;
  }
}

static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands,
                                                 Type *Ty) {
  const APInt *C0, *C1, *C2;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1) ||
      !getConstIntOrUndef(Operands[2], C2))
    return nullptr;

  if (!C2)
    return UndefValue::get(Ty);

  APInt Val(32, 0);
  unsigned NumUndefBytes = 0;
  for (unsigned I = 0; I < 32; I += 8) {
    unsigned Sel = C2->extractBitsAsZExtValue(8, I);
    unsigned B = 0;

    if (Sel >= 13)
      B = 0xff;
    else if (Sel == 12)
      B = 0x00;
    else {
      const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
      if (!Src)
        ++NumUndefBytes;
      else if (Sel < 8)
        B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
      else
        B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
    }

    Val.insertBits(B, I, 8);
  }

  if (NumUndefBytes == 4)
    return UndefValue::get(Ty);

  return ConstantInt::get(Ty, Val);
}

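// E.g. for amdgcn.perm, selector byte values 0-3 pick the corresponding byte
// of C1, 4-7 pick bytes 0-3 of C0, 12 produces 0x00, and values >= 13
// produce 0xff, matching the byte-select logic above.
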
static Constant *ConstantFoldScalarCall3(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 3 && "Wrong number of operands.");

  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        const APFloat &C3 = Op3->getValueAPF();

        if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
          RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
          APFloat Res = C1;
          APFloat::opStatus St;
          switch (IntrinsicID) {
          default:
            return nullptr;
          case Intrinsic::experimental_constrained_fma:
          case Intrinsic::experimental_constrained_fmuladd:
            St = Res.fusedMultiplyAdd(C2, C3, RM);
            break;
          }
          if (mayFoldConstrained(
                  const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
            return ConstantFP::get(Ty->getContext(), Res);
          return nullptr;
        }

        switch (IntrinsicID) {
        default:
          break;
        case Intrinsic::amdgcn_fma_legacy: {
          // The legacy behaviour is that multiplying +/- 0.0 by anything, even
          // NaN or infinity, gives +0.0.
          if (C1.isZero() || C2.isZero()) {
            // It's tempting to just return C3 here, but that would give the
            // wrong result if C3 was -0.0.
            return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
          }
          [[fallthrough]];
        }
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = C1;
          V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
          return ConstantFP::get(Ty->getContext(), V);
        }
        case Intrinsic::amdgcn_cubeid:
        case Intrinsic::amdgcn_cubema:
        case Intrinsic::amdgcn_cubesc:
        case Intrinsic::amdgcn_cubetc: {
          APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
          return ConstantFP::get(Ty->getContext(), V);
        }
        }
      }
    }
  }

  if (IntrinsicID == Intrinsic::smul_fix ||
      IntrinsicID == Intrinsic::smul_fix_sat) {
    // poison * C -> poison
    // C * poison -> poison
    if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
      return PoisonValue::get(Ty);

    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    // undef * C -> 0
    // C * undef -> 0
    if (!C0 || !C1)
      return Constant::getNullValue(Ty);

    // This code performs rounding towards negative infinity in case the result
    // cannot be represented exactly for the given scale. Targets that do care
    // about rounding should use a target hook for specifying how rounding
    // should be done, and provide their own folding to be consistent with
    // rounding. This is the same approach as used by
    // DAGTypeLegalizer::ExpandIntRes_MULFIX.
    unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
    unsigned Width = C0->getBitWidth();
    assert(Scale < Width && "Illegal scale.");
    unsigned ExtendedWidth = Width * 2;
    APInt Product =
        (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
    if (IntrinsicID == Intrinsic::smul_fix_sat) {
      APInt Max = APInt::getSignedMaxValue(Width).sext(ExtendedWidth);
      APInt Min = APInt::getSignedMinValue(Width).sext(ExtendedWidth);
      Product = APIntOps::smin(Product, Max);
      Product = APIntOps::smax(Product, Min);
    }
    return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
  }

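  // E.g. llvm.smul.fix.i32(3 << 16, 2 << 16, 16) folds to 6 << 16: the
  // Q16.16 product of 3.0 and 2.0 is 6.0.
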
  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    if (!C2)
      return Operands[IsRight ? 1 : 0];
    if (!C0 && !C1)
      return UndefValue::get(Ty);

    // The shift amount is interpreted as modulo the bitwidth. If the shift
    // amount is effectively 0, avoid UB due to oversized inverse shift below.
    unsigned BitWidth = C2->getBitWidth();
    unsigned ShAmt = C2->urem(BitWidth);
    if (!ShAmt)
      return Operands[IsRight ? 1 : 0];

    // (C0 << ShlAmt) | (C1 >> LshrAmt)
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }

  if (IntrinsicID == Intrinsic::amdgcn_perm)
    return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);

  return nullptr;
}

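// E.g. llvm.fshl.i8(0x81, 0x23, 4) folds to 0x12 = (0x81 << 4) | (0x23 >> 4):
// a funnel shift concatenates the operands and extracts the top byte after
// shifting.
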
static Constant *ConstantFoldScalarCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID,
                                        Type *Ty,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  if (Operands.size() == 1)
    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 2)
    return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 3)
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);

  return nullptr;
}

static Constant *ConstantFoldFixedVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  SmallVector<Constant *, 4> Result(FVTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = FVTy->getElementType();

  switch (IntrinsicID) {
  case Intrinsic::masked_load: {
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];

    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);

    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        if (PassthruElt)
          NewElements.push_back(PassthruElt);
        else if (VecElt)
          NewElements.push_back(VecElt);
        else
          return nullptr;
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != FVTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64: {
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Limit = Op->getZExtValue();

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  case Intrinsic::get_active_lane_mask: {
    auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
    auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
    if (Op0 && Op1) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Base = Op0->getZExtValue();
      uint64_t Limit = Op1->getZExtValue();

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (Base + i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  default:
    break;
  }

  for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // Some intrinsics use a scalar type for certain arguments.
      if (isVectorIntrinsicWithScalarOpAtArg(IntrinsicID, J)) {
        Lane[J] = Operands[J];
        continue;
      }

      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}

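// E.g. llvm.get.active.lane.mask on <4 x i1> with base 6 and trip count 8
// folds to <i1 true, i1 true, i1 false, i1 false>, since only lanes 6 and 7
// are below the trip count.
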
static Constant *ConstantFoldScalableVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  switch (IntrinsicID) {
  case Intrinsic::aarch64_sve_convert_from_svbool: {
    auto *Src = dyn_cast<Constant>(Operands[0]);
    if (!Src || !Src->isNullValue())
      break;

    return ConstantInt::getFalse(SVTy);
  }
  default:
    break;
  }
  return nullptr;
}

static std::pair<Constant *, Constant *>
ConstantFoldScalarFrexpCall(Constant *Op, Type *IntTy) {
  if (isa<PoisonValue>(Op))
    return {Op, PoisonValue::get(IntTy)};

  auto *ConstFP = dyn_cast<ConstantFP>(Op);
  if (!ConstFP)
    return {};

  const APFloat &U = ConstFP->getValueAPF();
  int FrexpExp;
  APFloat FrexpMant = frexp(U, FrexpExp, APFloat::rmNearestTiesToEven);
  Constant *Result0 = ConstantFP::get(ConstFP->getType(), FrexpMant);

  // The exponent is an "unspecified value" for inf/nan. We use zero to avoid
  // using undef.
  Constant *Result1 = FrexpMant.isFinite() ? ConstantInt::get(IntTy, FrexpExp)
                                           : ConstantInt::getNullValue(IntTy);
  return {Result0, Result1};
}

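// E.g. frexp(8.0) folds to { 0.5, 4 }, since 8.0 == 0.5 * 2^4.
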
/// Handle intrinsics that return tuples, which may be tuples of vectors.
static Constant *
ConstantFoldStructCall(StringRef Name, Intrinsic::ID IntrinsicID,
                       StructType *StTy, ArrayRef<Constant *> Operands,
                       const DataLayout &DL, const TargetLibraryInfo *TLI,
                       const CallBase *Call) {

  switch (IntrinsicID) {
  case Intrinsic::frexp: {
    Type *Ty0 = StTy->getContainedType(0);
    Type *Ty1 = StTy->getContainedType(1)->getScalarType();

    if (auto *FVTy0 = dyn_cast<FixedVectorType>(Ty0)) {
      SmallVector<Constant *, 4> Results0(FVTy0->getNumElements());
      SmallVector<Constant *, 4> Results1(FVTy0->getNumElements());

      for (unsigned I = 0, E = FVTy0->getNumElements(); I != E; ++I) {
        Constant *Lane = Operands[0]->getAggregateElement(I);
        std::tie(Results0[I], Results1[I]) =
            ConstantFoldScalarFrexpCall(Lane, Ty1);
        if (!Results0[I])
          return nullptr;
      }

      return ConstantStruct::get(StTy, ConstantVector::get(Results0),
                                 ConstantVector::get(Results1));
    }

    auto [Result0, Result1] = ConstantFoldScalarFrexpCall(Operands[0], Ty1);
    if (!Result0)
      return nullptr;
    return ConstantStruct::get(StTy, Result0, Result1);
  }
  default:
    // TODO: Constant folding of vector intrinsics that fall through here does
    //       not work (e.g. overflow intrinsics)
    return ConstantFoldScalarCall(Name, IntrinsicID, StTy, Operands, TLI,
                                  Call);
  }
}

} // end anonymous namespace

Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
                                 ArrayRef<Constant *> Operands,
                                 const TargetLibraryInfo *TLI) {
  if (Call->isNoBuiltin())
    return nullptr;
  if (!F->hasName())
    return nullptr;

  // If this is not an intrinsic and not recognized as a library call, bail
  // out.
  Intrinsic::ID IID = F->getIntrinsicID();
  if (IID == Intrinsic::not_intrinsic) {
    if (!TLI)
      return nullptr;
    LibFunc LibF;
    if (!TLI->getLibFunc(*F, LibF))
      return nullptr;
  }

  StringRef Name = F->getName();
  Type *Ty = F->getReturnType();
  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
    return ConstantFoldFixedVectorCall(
        Name, IID, FVTy, Operands, F->getParent()->getDataLayout(), TLI, Call);

  if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
    return ConstantFoldScalableVectorCall(
        Name, IID, SVTy, Operands, F->getParent()->getDataLayout(), TLI, Call);

  if (auto *StTy = dyn_cast<StructType>(Ty))
    return ConstantFoldStructCall(Name, IID, StTy, Operands,
                                  F->getParent()->getDataLayout(), TLI, Call);

  // TODO: If this is a library function, we already discovered that above,
  //       so we should pass the LibFunc, not the name (and it might be better
  //       still to separate intrinsic handling from libcalls).
  return ConstantFoldScalarCall(Name, IID, Ty, Operands, TLI, Call);
}

bool llvm::isMathLibCallNoop(const CallBase *Call,
                             const TargetLibraryInfo *TLI) {
  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
  // (and to some extent ConstantFoldScalarCall).
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;

  Function *F = Call->getCalledFunction();
  if (!F)
    return false;

  LibFunc Func;
  if (!TLI || !TLI->getLibFunc(*F, Func))
    return false;

  if (Call->arg_size() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
      const APFloat &Op = OpC->getValueAPF();
      switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
        break;

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
        break;

      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
          return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
        break;
      }

      case LibFunc_atan:
      case LibFunc_atanf:
      case LibFunc_atanl:
        // Per POSIX, this MAY fail if Op is denormal. We choose not failing.
        return true;

      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
        return !(Op < APFloat(Op.getSemantics(), "-1") ||
                 Op > APFloat(Op.getSemantics(), "1"));

      case LibFunc_sinh:
      case LibFunc_cosh:
      case LibFunc_sinhf:
      case LibFunc_coshf:
      case LibFunc_sinhl:
      case LibFunc_coshl:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
        break;

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
      // maybe others?
      default:
        break;
      }
    }
  }

  if (Call->arg_size() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          if (Ty == Op1C->getType())
            return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
        }
        break;
      }

      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
      case LibFunc_remainderl:
      case LibFunc_remainder:
      case LibFunc_remainderf:
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());

      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2l:
        // Although IEEE-754 says atan2(+/-0.0, +/-0.0) are well-defined, and
        // GLIBC and MSVC do not appear to raise an error on those, we
        // cannot rely on that behavior. POSIX and C11 say that a domain error
        // may occur, so allow for that possibility.
        return !Op0.isZero() || !Op1.isZero();

      default:
        break;
      }
    }
  }

  return false;
}

void TargetFolder::anchor() {}