//===-- ConstantFolding.cpp - Fold instructions into constants -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//
static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
  }

  return nullptr;
}
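// Worked example for the accumulation above (illustrative; assumes a
// little-endian DataLayout): folding <2 x i16> <i16 1, i16 2> into a 32-bit
// Result visits element 1 first, so Result becomes (2 << 16) | 1 ==
// 0x00020001 -- the same bytes the vector occupies in memory.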
/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");

  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = VTy->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating point, convert it to vector of
      // int to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        auto *SrcIVTy =
            VectorType::get(IntegerType::get(C->getContext(), FPWidth),
                            NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing.  For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first.  We only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    auto *DestIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer, if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    auto *SrcIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);

    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same.  Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(C->getType()->getVectorElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace
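// A concrete instance of the vector->scalar path in FoldBitCast above
// (illustrative; assumes a little-endian DataLayout): bitcast
// <2 x i32> <i32 1, i32 2> to i64 folds to i64 0x0000000200000001, i.e.
// element 0 occupies the low 32 bits.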
/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}
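// For example (illustrative; assumes 4-byte i32s), given
// i32* getelementptr ([5 x i32]* @a, i32 0, i32 3) this returns GV = @a and
// Offset = 12: the GEP's constant indices accumulate into a byte offset.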
Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill
    // down and find a bitcastable constant.
    if (!SrcTy->isAggregateType())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()) == 0);
      C = ElemC;
    } else {
      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}
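// For example, a load of float through a float* that was bitcast from an
// i32* global holding i32 1065353216 (0x3F800000) can be folded to
// float 1.0 here: the two types have the same size and a valid bitcast.
// (Illustrative example; not from the original source.)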
namespace {

/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()){
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()){
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element, if so we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = C->getType()->getSequentialElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (auto *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = C->getType()->getVectorNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}
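// For example (illustrative; assumes a little-endian DataLayout): copying an
// i32 0x01020304 out of a global with this helper writes the bytes
// 04 03 02 01 into CurPtr in that order, matching the in-memory layout.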
Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                          const DataLayout &DL) {
  auto *PTy = cast<PointerType>(C->getType());
  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64
    // load and then bitcast the result.  This can be useful for union cases.
    // Note that address spaces don't matter here since we're not going to
    // result in an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16Ty(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32Ty(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64Ty(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNTy(C->getContext(),
                                     DL.getTypeSizeInBits(LoadTy));
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy())
        // Materializing a zero can be done trivially without a bitcast
        return Constant::getNullValue(LoadTy);
      Type *CastTy =
          LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For vector of pointer, we needed to first convert to a vector of
        // integer, then do vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_MMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  GlobalValue *GVal;
  APInt OffsetAI;
  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
    return nullptr;

  auto *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  int64_t Offset = OffsetAI.getSExtValue();
  int64_t InitializerSize =
      DL.getTypeAllocSize(GV->getInitializer()->getType());

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return UndefValue::get(IntType);

  // Likewise, if the load starts at or past the end of the initializer, the
  // result is undefined.
  if (Offset >= InitializerSize)
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}
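// For example (illustrative; assumes a little-endian DataLayout): an i16 load
// at offset 0 from a constant global holding i32 0xAABBCCDD folds to
// i16 0xCCDD -- the two low-addressed bytes DD CC are read and reassembled.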
Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
                                             const DataLayout &DL) {
  auto *SrcPtr = CE->getOperand(0);
  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
  if (!SrcPtrTy)
    return nullptr;
  Type *SrcTy = SrcPtrTy->getPointerElementType();

  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
  if (!C)
    return nullptr;

  return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
}

} // end anonymous namespace
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  if (auto *GA = dyn_cast<GlobalAlias>(C))
    if (GA->getAliasee() && !GA->isInterposable())
      return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);

  // If the loaded value isn't a constant expr, we can't handle it.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
             ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
      return LoadedC;

  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    size_t StrLen = Str.size();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace load with immediate integer if the result is an integer or fp
    // value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (unsigned char C : reverse(Str.bytes())) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned char C : Str.bytes()) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append NULL at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(Ty);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(Ty);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
}
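// As an example of the C-string case above (illustrative; assumes a
// little-endian DataLayout): an i32 load from a constant global holding
// "abc\00" folds to i32 0x00636261 -- 'a' in the lowest byte and the
// terminating NUL in the highest.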
namespace {

Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
  if (LI->isVolatile()) return nullptr;

  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);

  return nullptr;
}
/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together.  If target data info is available, it is provided as DL,
/// otherwise DL is null.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0.Zero |= Known1.Zero;
    Known0.One &= Known1.One;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant.  This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the
        // right size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}
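// For example (illustrative; assumes 4-byte i32s), subtracting
// ptrtoint(getelementptr @a, 0, 1) from ptrtoint(getelementptr @a, 0, 4)
// folds to 12 here: both operands are @a plus a constant offset, so only
// the offsets 16 and 4 remain after the subtraction.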
/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntPtrTy = DL.getIntPtrType(ResultTy);
  Type *IntPtrScalarTy = IntPtrTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntPtrScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntPtrTy
                          : IntPtrTy->getScalarType();
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  if (Constant *Folded = ConstantFoldConstant(C, DL, TLI))
    C = Folded;

  return C;
}
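// For example (illustrative; assumes 64-bit pointers), an i16 5 array index
// is rewritten to i64 5 here, so the getelementptr sees a pointer-sized
// index rather than relying on an implicit promotion.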
/// Strip the pointer casts, but preserve the address space information.
Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  ElemTy = NewPtrTy->getPointerElementType();

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}
/// If we can symbolically evaluate the GEP constant expression, do so.
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized())
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntPtrTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResTy);
          if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
            Res = FoldedRes;
          return Res;
        }
      }
      return nullptr;
    }

  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = PTy;
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (!Ty->isStructTy()) {
      if (Ty->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        Ty = SrcElemTy;

        // Only handle pointers to sized types, not pointers to functions.
        if (!Ty->isSized())
          return nullptr;
      } else if (auto *ATy = dyn_cast<SequentialType>(Ty)) {
        Ty = ATy->getElementType();
      } else {
        // We've reached some non-indexable type.
        break;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
      if (ElemSize == 0) {
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
      } else {
        // The element size is non-zero: divide the offset by the element
        // size (rounding down), to compute the index at this level.
        bool Overflow = false;
        APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
        if (Overflow)
          break;
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
      }
    } else {
      auto *STy = cast<StructType>(Ty);
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    }
  } while (Ty != ResElemTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResElemTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}
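// Worked example for the re-indexing loop above (illustrative; assumes
// 64-bit pointers): a byte offset of 20 into [10 x i32] (4-byte elements)
// divides evenly as 20 / 4 = 5 with no remainder, so the GEP is rebuilt
// with the index i64 5.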
/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned, if not, null is returned.  Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode))
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantExpr::getExtractValue(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  }
}

} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return nullptr;

  SmallVector<Constant *, 8> Ops;
  for (const Use &NewU : C->operands()) {
    auto *NewC = cast<Constant>(&NewU);
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(NewC) || isa<ConstantExpr>(NewC)) {
      auto It = FoldedOps.find(NewC);
      if (It == FoldedOps.end()) {
        if (auto *FoldedC =
                ConstantFoldConstantImpl(NewC, DL, TLI, FoldedOps)) {
          FoldedOps.insert({NewC, FoldedC});
          NewC = FoldedC;
        } else {
          FoldedOps.insert({NewC, NewC});
        }
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0],
                                             Ops[1], DL, TLI);

    return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace
Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it.  Note that while we
      // could skip the value if it is equal to the phi node itself we choose
      // not to because that would break the rule that constant folding only
      // applies if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      if (auto *FoldedC = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps))
        C = FoldedC;
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants, if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    if (auto *FoldedOp = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps))
      Op = FoldedOp;
    Ops.push_back(Op);
  }

  if (const auto *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const auto *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, DL);

  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
        cast<Constant>(IVI->getAggregateOperand()),
        cast<Constant>(IVI->getInsertedValueOperand()),
        IVI->getIndices());
  }

  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
        cast<Constant>(EVI->getAggregateOperand()),
        EVI->getIndices());
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}
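// For example, a PHI whose incoming values are i32 7, undef, and i32 7
// constant-folds to i32 7 here: undef incoming values are skipped, and the
// remaining constants agree. (Illustrative example; not from the original
// source.)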
Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here
  // now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptrty in size,
        // otherwise there is a truncation or extension that we aren't
        // modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}
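// For example, an icmp eq of (or X, Y) against zero, where X and Y are
// constant expressions, is split per the rule above into (icmp eq X, 0) and
// (icmp eq Y, 0) joined with 'and'; with ne instead of eq the pieces are
// joined with 'or'. (Illustrative example; not from the original source.)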
Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantExpr::get(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  return ConstantExpr::get(Opcode, LHS, RHS);
}

Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair.  This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
        if (PtrWidth < InWidth) {
          Constant *Mask =
              ConstantInt::get(CE->getContext(),
                               APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }

    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
}
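// For example (illustrative; assumes 32-bit pointers): ptrtoint
// (inttoptr (i64 C to i8*)) to i32 first masks C down to its 32 low bits,
// then truncates, so the cast pair never preserves bits beyond what the
// address width can hold.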
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (!C)
      return nullptr;
  }
  return C;
}

Constant *
llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                        ArrayRef<Constant *> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (Constant *Index : Indices) {
    C = C->getAggregateElement(Index);
    if (!C)
      return nullptr;
  }
  return C;
}
//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  switch (F->getIntrinsicID()) {
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::copysign:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::round:
  case Intrinsic::masked_load:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::bitreverse:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
  case Intrinsic::is_constant:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic: break;
  }

  if (!F->hasName())
    return false;

  // In these cases, the check of the length is required.  We don't want to
  // return true for a name like "cos\0blah" which strcmp would return equal
  // to "cos", but has length 8.
  StringRef Name = F->getName();

  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "acosf" ||
           Name == "asin" || Name == "asinf" ||
           Name == "atan" || Name == "atanf" ||
           Name == "atan2" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "ceilf" ||
           Name == "cos" || Name == "cosf" ||
           Name == "cosh" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "expf" ||
           Name == "exp2" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "fabsf" ||
           Name == "floor" || Name == "floorf" ||
           Name == "fmod" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "logf" ||
           Name == "log2" || Name == "log2f" ||
           Name == "log10" || Name == "log10f";
  case 'n':
    return Name == "nearbyint" || Name == "nearbyintf";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "rint" || Name == "rintf" ||
           Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinf" ||
           Name == "sinh" || Name == "sinhf" ||
           Name == "sqrt" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanf" ||
           Name == "tanh" || Name == "tanhf" ||
           Name == "trunc" || Name == "truncf";
  case '_':
    // Check for various function names that get used for the math functions
    // when the header files are preprocessed with the macro
    // __FINITE_MATH_ONLY__ enabled.
    // The '12' here is the length of the shortest name that can match.
    // We need to check the size before looking at Name[1] and Name[2]
    // so we may as well check a limit that will eliminate mismatches.
    if (Name.size() < 12 || Name[1] != '_')
      return false;
    switch (Name[2]) {
    default:
      return false;
    case 'a':
      return Name == "__acos_finite" || Name == "__acosf_finite" ||
             Name == "__asin_finite" || Name == "__asinf_finite" ||
             Name == "__atan2_finite" || Name == "__atan2f_finite";
    case 'c':
      return Name == "__cosh_finite" || Name == "__coshf_finite";
    case 'e':
      return Name == "__exp_finite" || Name == "__expf_finite" ||
             Name == "__exp2_finite" || Name == "__exp2f_finite";
    case 'l':
      return Name == "__log_finite" || Name == "__logf_finite" ||
             Name == "__log10_finite" || Name == "__log10f_finite";
    case 'p':
      return Name == "__pow_finite" || Name == "__powf_finite";
    case 's':
      return Name == "__sinh_finite" || Name == "__sinhf_finite";
    }
  }
}
namespace {

Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy() || Ty->isFloatTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
                               double W, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}
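// For example, on typical hosts ConstantFoldFP(log, -1.0, Ty) is rejected:
// the host log() sets EDOM (or raises an FP exception), which
// llvm_fenv_testexcept() reports, so nullptr is returned rather than a
// bogus folded value. (Illustrative example; host-libm behavior varies.)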
/// Attempt to fold an SSE floating point to integer conversion of a constant
/// floating point. If roundTowardZero is false, the default IEEE rounding is
/// used (toward nearest, ties to even). This matches the behavior of the
/// non-truncating SSE instructions in the default rounding mode. The desired
/// integer type Ty is used to select how many bits are available for the
/// result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
                                              : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr; // Can't constant fold this.
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}
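// For example, folding the truncating cvttsd2si of 3.7 converts with
// rmTowardZero and tolerates the inexact status, yielding 3; the
// non-truncating form rounds to nearest (ties to even) and yields 4.
// (Illustrative example; not from the original source.)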
double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isFloatTy())
    return Op->getValueAPF().convertToFloat();

  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}

static bool isManifestConstant(const Constant *c) {
  if (isa<ConstantData>(c)) {
    return true;
  } else if (isa<ConstantAggregate>(c) || isa<ConstantExpr>(c)) {
    for (const Value *subc : c->operand_values()) {
      if (!isManifestConstant(cast<Constant>(subc)))
        return false;
    }
    return true;
  }
  return false;
}

static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
    return true;
  }
  if (isa<UndefValue>(Op)) {
    C = nullptr;
    return true;
  }
  return false;
}
1684 static Constant
*ConstantFoldScalarCall1(StringRef Name
,
1685 Intrinsic::ID IntrinsicID
,
1687 ArrayRef
<Constant
*> Operands
,
1688 const TargetLibraryInfo
*TLI
,
1689 const CallBase
*Call
) {
1690 assert(Operands
.size() == 1 && "Wrong number of operands.");

  if (IntrinsicID == Intrinsic::is_constant) {
    // We know we have a "Constant" argument. But we want to only
    // return true for manifest constants, not those that depend on
    // constants with unknowable values, e.g. GlobalValue or BlockAddress.
    if (isManifestConstant(Operands[0]))
      return ConstantInt::getTrue(Ty->getContext());
    return nullptr;
  }
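  // e.g. @llvm.is.constant.i32(i32 7) folds to true here; a query on
  // ptrtoint(@g) is left unfolded so later passes can still resolve it, and
  // it should only be lowered to false at the end of the pipeline.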

  if (isa<UndefValue>(Operands[0])) {
    // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
    // ctpop() is between 0 and bitwidth, pick 0 for undef.
    if (IntrinsicID == Intrinsic::cos ||
        IntrinsicID == Intrinsic::ctpop)
      return Constant::getNullValue(Ty);
    if (IntrinsicID == Intrinsic::bswap ||
        IntrinsicID == Intrinsic::bitreverse ||
        IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group)
      return Operands[0];
  }
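  // e.g. llvm.cos.f64(double undef) folds to 0.0 (a value cos can produce),
  // and llvm.bswap.i32(i32 undef) folds to the undef operand itself.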

  if (isa<ConstantPointerNull>(Operands[0])) {
    // launder(null) == null == strip(null) iff in addrspace 0
    if (IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group) {
      // If instruction is not yet put in a basic block (e.g. when cloning
      // a function during inlining), Call's caller may not be available.
      // So check Call's BB first before querying Call->getCaller.
      const Function *Caller =
          Call->getParent() ? Call->getCaller() : nullptr;
      if (Caller &&
          !NullPointerIsDefined(
              Caller, Operands[0]->getType()->getPointerAddressSpace())) {
        return Operands[0];
      }
      return nullptr;
    }
  }
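  // e.g. in a function where null is not a defined pointer value (the usual
  // address-space-0 case),
  //   %p = call i8* @llvm.strip.invariant.group.p0i8(i8* null)
  // folds to i8* null.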

  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
    if (IntrinsicID == Intrinsic::convert_to_fp16) {
      APFloat Val(Op->getValueAPF());

      bool lost = false;
      Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

      return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
    }

    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
      return nullptr;

    // Use internal versions of these intrinsics.
    APFloat U = Op->getValueAPF();

    if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::round) {
      U.roundToIntegral(APFloat::rmNearestTiesToAway);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::ceil) {
      U.roundToIntegral(APFloat::rmTowardPositive);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::floor) {
      U.roundToIntegral(APFloat::rmTowardNegative);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::trunc) {
      U.roundToIntegral(APFloat::rmTowardZero);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::fabs) {
      U.clearSign();
      return ConstantFP::get(Ty->getContext(), U);
    }
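    // For example, on an argument of 2.5: nearbyint/rint give 2.0 (ties to
    // even), round gives 3.0 (ties away from zero), ceil gives 3.0, floor
    // 2.0, and trunc 2.0.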

    /// We only fold functions with finite arguments. Folding NaN and inf is
    /// likely to be aborted with an exception anyway, and some host libms
    /// have known errors raising exceptions.
    if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
      return nullptr;

    /// Currently APFloat versions of these functions do not exist, so we use
    /// the host native double versions. Float versions are not called
    /// directly but for all these it is true (float)(f((double)arg)) ==
    /// f(arg). Long double not supported yet.
    double V = getValueAsDouble(Op);

    switch (IntrinsicID) {
    default: break;
    case Intrinsic::log:
      return ConstantFoldFP(log, V, Ty);
    case Intrinsic::log2:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(Log2, V, Ty);
    case Intrinsic::log10:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log10, V, Ty);
    case Intrinsic::exp:
      return ConstantFoldFP(exp, V, Ty);
    case Intrinsic::exp2:
      // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
    case Intrinsic::sin:
      return ConstantFoldFP(sin, V, Ty);
    case Intrinsic::cos:
      return ConstantFoldFP(cos, V, Ty);
    case Intrinsic::sqrt:
      return ConstantFoldFP(sqrt, V, Ty);
    }

    if (!TLI)
      return nullptr;

    LibFunc Func = NotLibFunc;
    TLI->getLibFunc(Name, Func);
    switch (Func) {
    default:
      break;
    case LibFunc_acos:
    case LibFunc_acosf:
    case LibFunc_acos_finite:
    case LibFunc_acosf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(acos, V, Ty);
      break;
    case LibFunc_asin:
    case LibFunc_asinf:
    case LibFunc_asin_finite:
    case LibFunc_asinf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(asin, V, Ty);
      break;
    case LibFunc_atan:
    case LibFunc_atanf:
      if (TLI->has(Func))
        return ConstantFoldFP(atan, V, Ty);
      break;
    case LibFunc_ceil:
    case LibFunc_ceilf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_cos:
    case LibFunc_cosf:
      if (TLI->has(Func))
        return ConstantFoldFP(cos, V, Ty);
      break;
    case LibFunc_cosh:
    case LibFunc_coshf:
    case LibFunc_cosh_finite:
    case LibFunc_coshf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(cosh, V, Ty);
      break;
    case LibFunc_exp:
    case LibFunc_expf:
    case LibFunc_exp_finite:
    case LibFunc_expf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(exp, V, Ty);
      break;
    case LibFunc_exp2:
    case LibFunc_exp2f:
    case LibFunc_exp2_finite:
    case LibFunc_exp2f_finite:
      if (TLI->has(Func))
        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
        return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
      break;
    case LibFunc_fabs:
    case LibFunc_fabsf:
      if (TLI->has(Func)) {
        U.clearSign();
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_floor:
    case LibFunc_floorf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_log:
    case LibFunc_logf:
    case LibFunc_log_finite:
    case LibFunc_logf_finite:
      if (V > 0.0 && TLI->has(Func))
        return ConstantFoldFP(log, V, Ty);
      break;
    case LibFunc_log2:
    case LibFunc_log2f:
    case LibFunc_log2_finite:
    case LibFunc_log2f_finite:
      if (V > 0.0 && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(Log2, V, Ty);
      break;
    case LibFunc_log10:
    case LibFunc_log10f:
    case LibFunc_log10_finite:
    case LibFunc_log10f_finite:
      if (V > 0.0 && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(log10, V, Ty);
      break;
    case LibFunc_nearbyint:
    case LibFunc_nearbyintf:
    case LibFunc_rint:
    case LibFunc_rintf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_round:
    case LibFunc_roundf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_sin:
    case LibFunc_sinf:
      if (TLI->has(Func))
        return ConstantFoldFP(sin, V, Ty);
      break;
    case LibFunc_sinh:
    case LibFunc_sinhf:
    case LibFunc_sinh_finite:
    case LibFunc_sinhf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(sinh, V, Ty);
      break;
    case LibFunc_sqrt:
    case LibFunc_sqrtf:
      if (V >= 0.0 && TLI->has(Func))
        return ConstantFoldFP(sqrt, V, Ty);
      break;
    case LibFunc_tan:
    case LibFunc_tanf:
      if (TLI->has(Func))
        return ConstantFoldFP(tan, V, Ty);
      break;
    case LibFunc_tanh:
    case LibFunc_tanhf:
      if (TLI->has(Func))
        return ConstantFoldFP(tanh, V, Ty);
      break;
    case LibFunc_trunc:
    case LibFunc_truncf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    }
    return nullptr;
  }

  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
    switch (IntrinsicID) {
    case Intrinsic::bswap:
      return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
    case Intrinsic::ctpop:
      return ConstantInt::get(Ty, Op->getValue().countPopulation());
    case Intrinsic::bitreverse:
      return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
    case Intrinsic::convert_from_fp16: {
      APFloat Val(APFloat::IEEEhalf(), Op->getValue());

      bool lost = false;
      APFloat::opStatus status = Val.convert(
          Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);

      // Conversion is always precise.
      (void)status;
      assert(status == APFloat::opOK && !lost &&
             "Precision lost during fp16 constfolding");

      return ConstantFP::get(Ty->getContext(), Val);
    }
    default:
      return nullptr;
    }
  }
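  // e.g. llvm.bswap.i32(i32 0x12345678) folds to 0x78563412,
  // llvm.ctpop.i16(i16 255) to 8, and llvm.bitreverse.i8(i8 1) to 0x80.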

  // Support ConstantVector in case we have an Undef in the top.
  if (isa<ConstantVector>(Operands[0]) ||
      isa<ConstantDataVector>(Operands[0])) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/true);
      break;
    }
  }

  return nullptr;
}

static Constant *ConstantFoldScalarCall2(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 2 && "Wrong number of operands.");

  if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
      return nullptr;
    double Op1V = getValueAsDouble(Op1);

    if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (Op2->getType() != Op1->getType())
        return nullptr;

      double Op2V = getValueAsDouble(Op2);
      if (IntrinsicID == Intrinsic::pow) {
        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      }
      if (IntrinsicID == Intrinsic::copysign) {
        APFloat V1 = Op1->getValueAPF();
        const APFloat &V2 = Op2->getValueAPF();
        V1.copySign(V2);
        return ConstantFP::get(Ty->getContext(), V1);
      }

      if (IntrinsicID == Intrinsic::minnum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::maxnum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::minimum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), minimum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::maximum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), maximum(C1, C2));
      }
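      // The two families differ on NaN and signed zero: minnum(NaN, 1.0)
      // returns 1.0 (the NaN is dropped), while minimum(NaN, 1.0) propagates
      // the NaN; minimum/maximum also order -0.0 below +0.0.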

      if (!TLI)
        return nullptr;

      LibFunc Func = NotLibFunc;
      TLI->getLibFunc(Name, Func);
      switch (Func) {
      default:
        break;
      case LibFunc_pow:
      case LibFunc_powf:
      case LibFunc_pow_finite:
      case LibFunc_powf_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        break;
      case LibFunc_fmod:
      case LibFunc_fmodf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2_finite:
      case LibFunc_atan2f_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
        break;
      }
    } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
      if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
        return ConstantFP::get(Ty->getContext(),
                               APFloat((float)std::pow((float)Op1V,
                                                       (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
        return ConstantFP::get(Ty->getContext(),
                               APFloat((float)std::pow((float)Op1V,
                                                       (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
        return ConstantFP::get(Ty->getContext(),
                               APFloat((double)std::pow((double)Op1V,
                                                        (int)Op2C->getZExtValue())));
    }
    return nullptr;
  }

  if (Operands[0]->getType()->isIntegerTy() &&
      Operands[1]->getType()->isIntegerTy()) {
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    switch (IntrinsicID) {
    default: break;
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
      // X - undef -> { undef, false }
      // undef - X -> { undef, false }
      // X + undef -> { undef, false }
      // undef + X -> { undef, false }
      if (!C0 || !C1)
        return ConstantStruct::get(
            cast<StructType>(Ty),
            {UndefValue::get(Ty->getStructElementType(0)),
             Constant::getNullValue(Ty->getStructElementType(1))});
      LLVM_FALLTHROUGH;
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      // undef * X -> { 0, false }
      // X * undef -> { 0, false }
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);

      APInt Res;
      bool Overflow;
      switch (IntrinsicID) {
      default: llvm_unreachable("Invalid case");
      case Intrinsic::sadd_with_overflow:
        Res = C0->sadd_ov(*C1, Overflow);
        break;
      case Intrinsic::uadd_with_overflow:
        Res = C0->uadd_ov(*C1, Overflow);
        break;
      case Intrinsic::ssub_with_overflow:
        Res = C0->ssub_ov(*C1, Overflow);
        break;
      case Intrinsic::usub_with_overflow:
        Res = C0->usub_ov(*C1, Overflow);
        break;
      case Intrinsic::smul_with_overflow:
        Res = C0->smul_ov(*C1, Overflow);
        break;
      case Intrinsic::umul_with_overflow:
        Res = C0->umul_ov(*C1, Overflow);
        break;
      }
      Constant *Ops[] = {
        ConstantInt::get(Ty->getContext(), Res),
        ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
      };
      return ConstantStruct::get(cast<StructType>(Ty), Ops);
    }
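    // e.g. @llvm.sadd.with.overflow.i8(i8 127, i8 1) folds to
    // { i8 -128, i1 true }, while the unsigned variant on the same bits
    // gives { i8 -128, i1 false } since 127 + 1 fits in an unsigned byte.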
    case Intrinsic::uadd_sat:
    case Intrinsic::sadd_sat:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getAllOnesValue(Ty);
      if (IntrinsicID == Intrinsic::uadd_sat)
        return ConstantInt::get(Ty, C0->uadd_sat(*C1));
      else
        return ConstantInt::get(Ty, C0->sadd_sat(*C1));
    case Intrinsic::usub_sat:
    case Intrinsic::ssub_sat:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::usub_sat)
        return ConstantInt::get(Ty, C0->usub_sat(*C1));
      else
        return ConstantInt::get(Ty, C0->ssub_sat(*C1));
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      assert(C1 && "Must be constant int");

      // cttz(0, 1) and ctlz(0, 1) are undef.
      if (C1->isOneValue() && (!C0 || C0->isNullValue()))
        return UndefValue::get(Ty);
      if (!C0)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::cttz)
        return ConstantInt::get(Ty, C0->countTrailingZeros());
      else
        return ConstantInt::get(Ty, C0->countLeadingZeros());
    }

    return nullptr;
  }
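  // e.g. llvm.cttz.i32(i32 8, i1 false) folds to 3 and
  // llvm.ctlz.i32(i32 8, i1 false) to 28; a zero input with the
  // is-zero-undef flag set folds to undef per the check above.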

  // Support ConstantVector in case we have an Undef in the top.
  if ((isa<ConstantVector>(Operands[0]) ||
       isa<ConstantDataVector>(Operands[0])) &&
      // Check for default rounding mode.
      // FIXME: Support other rounding modes?
      isa<ConstantInt>(Operands[1]) &&
      cast<ConstantInt>(Operands[1])->getValue() == 4) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::x86_avx512_vcvtss2si32:
    case Intrinsic::x86_avx512_vcvtss2si64:
    case Intrinsic::x86_avx512_vcvtsd2si32:
    case Intrinsic::x86_avx512_vcvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/false);
      break;
    case Intrinsic::x86_avx512_cvttss2si:
    case Intrinsic::x86_avx512_cvttss2si64:
    case Intrinsic::x86_avx512_cvttsd2si:
    case Intrinsic::x86_avx512_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/false);
      break;
    }
  }
  return nullptr;
}

static Constant *ConstantFoldScalarCall3(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 3 && "Wrong number of operands.");

  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = Op1->getValueAPF();
          V.fusedMultiplyAdd(Op2->getValueAPF(), Op3->getValueAPF(),
                             APFloat::rmNearestTiesToEven);
          return ConstantFP::get(Ty->getContext(), V);
        }
        }
      }
    }
  }
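  // e.g. llvm.fmuladd.f64(2.0, 3.0, 1.0) folds to 7.0. Because
  // fusedMultiplyAdd rounds only once, the folded result can differ in the
  // last bit from a separately rounded multiply followed by an add.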

  if (const auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantInt>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::smul_fix:
        case Intrinsic::smul_fix_sat: {
          // This code performs rounding towards negative infinity in case the
          // result cannot be represented exactly for the given scale. Targets
          // that do care about rounding should use a target hook for
          // specifying how rounding should be done, and provide their own
          // folding to be consistent with rounding. This is the same approach
          // as used by DAGTypeLegalizer::ExpandIntRes_MULFIX.
          APInt Lhs = Op1->getValue();
          APInt Rhs = Op2->getValue();
          unsigned Scale = Op3->getValue().getZExtValue();
          unsigned Width = Lhs.getBitWidth();
          assert(Scale < Width && "Illegal scale.");
          unsigned ExtendedWidth = Width * 2;
          APInt Product = (Lhs.sextOrSelf(ExtendedWidth) *
                           Rhs.sextOrSelf(ExtendedWidth)).ashr(Scale);
          if (IntrinsicID == Intrinsic::smul_fix_sat) {
            APInt MaxValue =
                APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
            APInt MinValue =
                APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
            Product = APIntOps::smin(Product, MaxValue);
            Product = APIntOps::smax(Product, MinValue);
          }
          return ConstantInt::get(Ty->getContext(),
                                  Product.sextOrTrunc(Width));
        }
        }
      }
    }
  }
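  // Worked example with two fractional bits (Scale = 2): the i8 value 6
  // encodes 1.5, so smul.fix(i8 6, i8 6, i32 2) computes 6 * 6 = 36 in the
  // doubled width, shifts right by 2 to get 9, and 9 encodes 2.25 = 1.5 * 1.5.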

  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    if (!C2)
      return Operands[IsRight ? 1 : 0];
    if (!C0 && !C1)
      return UndefValue::get(Ty);

    // The shift amount is interpreted as modulo the bitwidth. If the shift
    // amount is effectively 0, avoid UB due to oversized inverse shift below.
    unsigned BitWidth = C2->getBitWidth();
    unsigned ShAmt = C2->urem(BitWidth);
    if (!ShAmt)
      return Operands[IsRight ? 1 : 0];

    // (C0 << ShlAmt) | (C1 >> LshrAmt)
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }
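  // Worked example for the funnel shift fold above: llvm.fshl.i8(i8 0x0F,
  // i8 0xF0, i8 4) shifts the 16-bit concatenation 0x0FF0 left by four and
  // keeps the high byte: (0x0F << 4) | (0xF0 >> 4) = 0xFF.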

  return nullptr;
}

static Constant *ConstantFoldScalarCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID,
                                        Type *Ty,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  if (Operands.size() == 1)
    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 2)
    return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 3)
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);

  return nullptr;
}

static Constant *ConstantFoldVectorCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID,
                                        VectorType *VTy,
                                        ArrayRef<Constant *> Operands,
                                        const DataLayout &DL,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  SmallVector<Constant *, 4> Result(VTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = VTy->getElementType();

  if (IntrinsicID == Intrinsic::masked_load) {
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];

    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, VTy, DL);

    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        if (PassthruElt)
          NewElements.push_back(PassthruElt);
        else if (VecElt)
          NewElements.push_back(VecElt);
        else
          return nullptr;
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != VTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }
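  // e.g. a masked load of <2 x i32> from a constant global with mask
  // <i1 true, i1 false> should fold to a vector of the loaded element 0 and
  // the passthru element 1, provided both are known constants.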

  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // Some intrinsics use a scalar type for certain arguments.
      if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) {
        Lane[J] = Operands[J];
        continue;
      }

      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}
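
// e.g. llvm.ctpop.v2i32(<2 x i32> <i32 7, i32 8>) is folded lane by lane
// through the scalar path above to <2 x i32> <i32 3, i32 1>.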

} // end anonymous namespace

Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
                                 ArrayRef<Constant *> Operands,
                                 const TargetLibraryInfo *TLI) {
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return nullptr;
  if (!F->hasName())
    return nullptr;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();

  if (auto *VTy = dyn_cast<VectorType>(Ty))
    return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
                                  F->getParent()->getDataLayout(), TLI, Call);

  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
                                Call);
}

bool llvm::isMathLibCallNoop(const CallBase *Call,
                             const TargetLibraryInfo *TLI) {
  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
  // (and to some extent ConstantFoldScalarCall).
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  Function *F = Call->getCalledFunction();
  if (!F)
    return false;

  LibFunc Func;
  if (!TLI || !TLI->getLibFunc(*F, Func))
    return false;
2502 if (ConstantFP
*OpC
= dyn_cast
<ConstantFP
>(Call
->getArgOperand(0))) {
2503 const APFloat
&Op
= OpC
->getValueAPF();
2511 case LibFunc_log10l
:
2513 case LibFunc_log10f
:
2514 return Op
.isNaN() || (!Op
.isZero() && !Op
.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-745.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(709.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-103.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(88.0f)) != APFloat::cmpGreaterThan;
        break;
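        // e.g. exp(709.0) is roughly 8.2e307 and still fits in a double, so
        // such a call cannot set errno and counts as a removable no-op;
        // exp(710.0) would overflow, so it is conservatively kept.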

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-1074.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(1023.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-149.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(127.0f)) != APFloat::cmpGreaterThan;
        break;

      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          double OpV = getValueAsDouble(OpC);
          return ConstantFoldFP(tan, OpV, Ty) != nullptr;
        }
        break;
      }

      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
        return Op.compare(APFloat(Op.getSemantics(), "-1")) !=
                   APFloat::cmpLessThan &&
               Op.compare(APFloat(Op.getSemantics(), "1")) !=
                   APFloat::cmpGreaterThan;

      case LibFunc_sinh:
      case LibFunc_cosh:
      case LibFunc_sinhf:
      case LibFunc_coshf:
      case LibFunc_sinhl:
      case LibFunc_coshl:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-710.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(710.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-89.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(89.0f)) != APFloat::cmpGreaterThan;
        break;

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
      // maybe others?
      default:
        break;
      }
    }
  }

  if (Call->getNumArgOperands() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          if (Ty == Op1C->getType()) {
            double Op0V = getValueAsDouble(Op0C);
            double Op1V = getValueAsDouble(Op1C);
            return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
          }
        }
        break;
      }

      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
        // fmod is a no-op unless the first operand is infinite or the second
        // is zero (a NaN input already yields NaN without raising anything).
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());

      default:
        break;
      }
    }
  }

  return false;
}