#include "llvm/Transforms/Utils/VNCoercion.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "vncoerce"
namespace llvm {
namespace VNCoercion {
/// Return true if coerceAvailableValueToLoadType will succeed.
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
                                     const DataLayout &DL) {
  Type *StoredTy = StoredVal->getType();
  if (StoredTy == LoadTy)
    return true;

  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() || StoredTy->isStructTy() ||
      StoredTy->isArrayTy())
    return false;

  uint64_t StoreSize = DL.getTypeSizeInBits(StoredTy);

  // The store size must be byte-aligned to support future type casts.
  if (llvm::alignTo(StoreSize, 8) != StoreSize)
    return false;

  // The store has to be at least as big as the load.
  if (StoreSize < DL.getTypeSizeInBits(LoadTy))
    return false;

  // Don't coerce non-integral pointers to integers or vice versa.
  if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()) !=
      DL.isNonIntegralPointerType(LoadTy->getScalarType())) {
    // As a special case, allow coercion of memset used to initialize
    // an array w/null. Despite non-integral pointers not generally having a
    // specific bit pattern, we do assume null is zero.
    if (auto *CI = dyn_cast<Constant>(StoredVal))
      return CI->isNullValue();

    return false;
  }

  return true;
}
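// Example: an 8-byte store such as `store i64 %v, i64* %p` can feed an i32,
// float, or pointer-sized load of the same address, since the store covers at
// least as many byte-aligned bits as the load. A store or load of a
// first-class aggregate such as {i32, i32} or [2 x i32] is rejected because
// it cannot be bitcast to an integer.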
template <class T, class HelperClass>
static T *coerceAvailableValueToLoadTypeHelper(T *StoredVal, Type *LoadedTy,
                                               HelperClass &Helper,
                                               const DataLayout &DL) {
  assert(canCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL) &&
         "precondition violation - materialization can't fail");
  if (auto *C = dyn_cast<Constant>(StoredVal))
    if (auto *FoldedStoredVal = ConstantFoldConstant(C, DL))
      StoredVal = FoldedStoredVal;

  // If this is already the right type, just return it.
  Type *StoredValTy = StoredVal->getType();

  uint64_t StoredValSize = DL.getTypeSizeInBits(StoredValTy);
  uint64_t LoadedValSize = DL.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoredValSize == LoadedValSize) {
    // Pointer to Pointer -> use bitcast.
    if (StoredValTy->isPtrOrPtrVectorTy() && LoadedTy->isPtrOrPtrVectorTy()) {
      StoredVal = Helper.CreateBitCast(StoredVal, LoadedTy);
    } else {
      // Convert source pointers to integers, which can be bitcast.
      if (StoredValTy->isPtrOrPtrVectorTy()) {
        StoredValTy = DL.getIntPtrType(StoredValTy);
        StoredVal = Helper.CreatePtrToInt(StoredVal, StoredValTy);
      }

      Type *TypeToCastTo = LoadedTy;
      if (TypeToCastTo->isPtrOrPtrVectorTy())
        TypeToCastTo = DL.getIntPtrType(TypeToCastTo);

      if (StoredValTy != TypeToCastTo)
        StoredVal = Helper.CreateBitCast(StoredVal, TypeToCastTo);

      // Cast to pointer if the load needs a pointer type.
      if (LoadedTy->isPtrOrPtrVectorTy())
        StoredVal = Helper.CreateIntToPtr(StoredVal, LoadedTy);
    }

    if (auto *C = dyn_cast<ConstantExpr>(StoredVal))
      if (auto *FoldedStoredVal = ConstantFoldConstant(C, DL))
        StoredVal = FoldedStoredVal;

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it. If the available value is too small, then we
  // can't do anything.
  assert(StoredValSize >= LoadedValSize &&
         "canCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPtrOrPtrVectorTy()) {
    StoredValTy = DL.getIntPtrType(StoredValTy);
    StoredVal = Helper.CreatePtrToInt(StoredVal, StoredValTy);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoredValSize);
    StoredVal = Helper.CreateBitCast(StoredVal, StoredValTy);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (DL.isBigEndian()) {
    uint64_t ShiftAmt = DL.getTypeStoreSizeInBits(StoredValTy) -
                        DL.getTypeStoreSizeInBits(LoadedTy);
    StoredVal = Helper.CreateLShr(
        StoredVal, ConstantInt::get(StoredVal->getType(), ShiftAmt));
  }

  // Truncate the integer to the right size now.
  Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadedValSize);
  StoredVal = Helper.CreateTruncOrBitCast(StoredVal, NewIntTy);

  if (LoadedTy != NewIntTy) {
    // If the result is a pointer, inttoptr.
    if (LoadedTy->isPtrOrPtrVectorTy())
      StoredVal = Helper.CreateIntToPtr(StoredVal, LoadedTy);
    else
      // Otherwise, bitcast.
      StoredVal = Helper.CreateBitCast(StoredVal, LoadedTy);
  }

  if (auto *C = dyn_cast<Constant>(StoredVal))
    if (auto *FoldedStoredVal = ConstantFoldConstant(C, DL))
      StoredVal = FoldedStoredVal;

  return StoredVal;
}
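// Example of the narrowing path above: with a stored i64 and a loaded i16,
// a little-endian target only truncates, while a big-endian target first
// shifts the value right by 64 - 16 = 48 bits so that the bytes the load
// would observe in memory end up in the low bits before the truncate.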
/// If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value. LoadedTy is the type of the load we want to replace.
/// IRB is IRBuilder used to insert new instructions.
///
/// If we can't do it, return null.
Value *coerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy,
                                      IRBuilder<> &IRB, const DataLayout &DL) {
  return coerceAvailableValueToLoadTypeHelper(StoredVal, LoadedTy, IRB, DL);
}
/// This function is called when we have a memdep query of a load that ends up
/// being a clobbering memory write (store, memset, memcpy, memmove). This
/// means that the write *may* provide bits used by the load but we can't be
/// sure because the pointers don't must-alias.
///
/// Check this case to see if there is anything more we can do before we give
/// up. This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int analyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const DataLayout &DL) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase =
      GetPointerBaseWithConstantOffset(WritePtr, StoreOffset, DL);
  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, DL);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias. AA must have gotten confused.
  // FIXME: Study to see if/when this happens. One case is forwarding a memset
  // to a load from the base of the memset.

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load. In this case, they really don't alias at all, AA
  // must have gotten confused.
  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;

  uint64_t StoreSize = WriteSizeInBits / 8; // Convert to bytes.
  LoadSize /= 8;

  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset + int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset + int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure)
    return -1;

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it. We could do something crazy in the future
  // (issue a smaller load and then merge the bits in) but this seems unlikely
  // to be valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset + StoreSize < LoadOffset + LoadSize)
    return -1;

  // Okay, we can do this transformation. Return the number of bytes into the
  // store that the load is.
  return LoadOffset - StoreOffset;
}
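// Worked example: an 8-byte write at byte offset 0 of some base and a 4-byte
// load at byte offset 4 of the same base overlap completely, so the function
// returns 4 -- the load reads bytes 4..7 of the stored value. If the load
// instead started at offset 8, the ranges would not overlap and -1 would be
// returned.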
/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
                                   StoreInst *DepSI, const DataLayout &DL) {
  auto *StoredVal = DepSI->getValueOperand();

  // Cannot handle reading from store of first-class aggregate yet.
  if (StoredVal->getType()->isStructTy() || StoredVal->getType()->isArrayTy())
    return -1;

  // Don't coerce non-integral pointers to integers or vice versa.
  if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()) !=
      DL.isNonIntegralPointerType(LoadTy->getScalarType())) {
    // Allow casts of zero values to null as a special case.
    auto *CI = dyn_cast<Constant>(StoredVal);
    if (!CI || !CI->isNullValue())
      return -1;
  }

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize =
      DL.getTypeSizeInBits(DepSI->getValueOperand()->getType());
  return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, StorePtr, StoreSize,
                                        DL);
}
/// This function is called when we have a
/// memdep query of a load that ends up being clobbered by another load. See if
/// the other load can feed into the second load.
int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI,
                                  const DataLayout &DL) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
    return -1;

  // Don't coerce non-integral pointers to integers or vice versa.
  if (DL.isNonIntegralPointerType(DepLI->getType()->getScalarType()) !=
      DL.isNonIntegralPointerType(LoadTy->getScalarType()))
    return -1;

  Value *DepPtr = DepLI->getPointerOperand();
  uint64_t DepSize = DL.getTypeSizeInBits(DepLI->getType());
  int R = analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL);
  if (R != -1)
    return R;

  // If we have a load/load clobber and DepLI can be widened to cover this
  // load, then we should widen it!
  int64_t LoadOffs = 0;
  const Value *LoadBase =
      GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, DL);
  unsigned LoadSize = DL.getTypeStoreSize(LoadTy);

  unsigned Size = MemoryDependenceResults::getLoadLoadClobberFullWidthSize(
      LoadBase, LoadOffs, LoadSize, DepLI);
  if (Size == 0)
    return -1;

  // Check non-obvious conditions enforced by MDA which we rely on for being
  // able to materialize this potentially available value.
  assert(DepLI->isSimple() && "Cannot widen volatile/atomic load!");
  assert(DepLI->getType()->isIntegerTy() && "Can't widen non-integer load");

  return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size * 8, DL);
}
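// Example: if DepLI is `load i32, i32* %p` and the query load reads an i16 at
// %p + 4, the i32 alone cannot supply the bytes, but memory dependence
// analysis may report that DepLI can legally be widened (say, to 8 bytes), in
// which case the write analysis is retried against the widened size and can
// return the byte offset 4 into the widened value.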
int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
                                     MemIntrinsic *MI, const DataLayout &DL) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (!SizeCst)
    return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue() * 8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset) {
    if (DL.isNonIntegralPointerType(LoadTy->getScalarType())) {
      auto *CI = dyn_cast<ConstantInt>(cast<MemSetInst>(MI)->getValue());
      if (!CI || !CI->isZero())
        return -1;
    }
    return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, DL);
  }

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory. In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (!Src)
    return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, DL));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                              MemSizeInBits, DL);
  if (Offset == -1)
    return Offset;

  // Don't coerce non-integral pointers to integers or vice versa, and the
  // memtransfer is implicitly a raw byte copy.
  if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
    // TODO: Can allow nullptrs from constant zeros
    return -1;

  unsigned AS = Src->getType()->getPointerAddressSpace();
  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src =
      ConstantExpr::getBitCast(Src, Type::getInt8PtrTy(Src->getContext(), AS));
  Constant *OffsetCst =
      ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()), Src,
                                       OffsetCst);
  Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
  if (ConstantFoldLoadFromConstPtr(Src, LoadTy, DL))
    return Offset;
  return -1;
}
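// Example: given `memcpy(%dst, @const_table, 64)` where @const_table is a
// constant global with a definitive initializer, a 4-byte load of %dst + 8
// is answered with offset 8, provided a load of @const_table + 8 at the
// load's type constant-folds. A memcpy from a non-constant source always
// yields -1.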
template <class T, class HelperClass>
static T *getStoreValueForLoadHelper(T *SrcVal, unsigned Offset, Type *LoadTy,
                                     HelperClass &Helper,
                                     const DataLayout &DL) {
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  // If two pointers are in the same address space, they have the same size,
  // so we don't need to do any truncation, etc. This avoids introducing
  // ptrtoint instructions for pointers that may be non-integral.
  if (SrcVal->getType()->isPointerTy() && LoadTy->isPointerTy() &&
      cast<PointerType>(SrcVal->getType())->getAddressSpace() ==
          cast<PointerType>(LoadTy)->getAddressSpace()) {
    return SrcVal;
  }

  uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy) + 7) / 8;
  // Compute which bits of the stored value are being used by the load. Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPtrOrPtrVectorTy())
    SrcVal = Helper.CreatePtrToInt(SrcVal, DL.getIntPtrType(SrcVal->getType()));
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Helper.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize * 8));

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (DL.isLittleEndian())
    ShiftAmt = Offset * 8;
  else
    ShiftAmt = (StoreSize - LoadSize - Offset) * 8;
  if (ShiftAmt)
    SrcVal = Helper.CreateLShr(SrcVal,
                               ConstantInt::get(SrcVal->getType(), ShiftAmt));

  if (LoadSize != StoreSize)
    SrcVal = Helper.CreateTruncOrBitCast(SrcVal,
                                         IntegerType::get(Ctx, LoadSize * 8));
  return SrcVal;
}
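// Endianness example for the shift above: with an 8-byte stored value, a
// 2-byte load, and Offset == 2, a little-endian target shifts right by
// 2 * 8 = 16 bits, while a big-endian target shifts right by
// (8 - 2 - 2) * 8 = 32 bits; either way the two bytes the load would read
// from memory land in the low bits before the final truncate.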
/// This function is called when we have a memdep query of a load that ends up
/// being a clobbering store. This means that the store provides bits used by
/// the load but the pointers don't must-alias. Check this case to see if
/// there is anything more we can do before we give up.
Value *getStoreValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy,
                            Instruction *InsertPt, const DataLayout &DL) {
  IRBuilder<> Builder(InsertPt);
  SrcVal = getStoreValueForLoadHelper(SrcVal, Offset, LoadTy, Builder, DL);
  return coerceAvailableValueToLoadTypeHelper(SrcVal, LoadTy, Builder, DL);
}

Constant *getConstantStoreValueForLoad(Constant *SrcVal, unsigned Offset,
                                       Type *LoadTy, const DataLayout &DL) {
  ConstantFolder F;
  SrcVal = getStoreValueForLoadHelper(SrcVal, Offset, LoadTy, F, DL);
  return coerceAvailableValueToLoadTypeHelper(SrcVal, LoadTy, F, DL);
}
/// This function is called when we have a memdep query of a load that ends up
/// being a clobbering load. This means that the load *may* provide bits used
/// by the load but we can't be sure because the pointers don't must-alias.
/// Check this case to see if there is anything more we can do before we give
/// up.
Value *getLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy,
                           Instruction *InsertPt, const DataLayout &DL) {
  // If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
  // widen SrcVal out to a larger load.
  unsigned SrcValStoreSize = DL.getTypeStoreSize(SrcVal->getType());
  unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
  if (Offset + LoadSize > SrcValStoreSize) {
    assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!");
    assert(SrcVal->getType()->isIntegerTy() && "Can't widen non-integer load");
    // If we have a load/load clobber and DepLI can be widened to cover this
    // load, then we should widen it to the next power of 2 size big enough!
    unsigned NewLoadSize = Offset + LoadSize;
    if (!isPowerOf2_32(NewLoadSize))
      NewLoadSize = NextPowerOf2(NewLoadSize);

    Value *PtrVal = SrcVal->getPointerOperand();
    // Insert the new load after the old load. This ensures that subsequent
    // memdep queries will find the new load. We can't easily remove the old
    // load completely because it is already in the value numbering table.
    IRBuilder<> Builder(SrcVal->getParent(), ++BasicBlock::iterator(SrcVal));
    Type *DestTy = IntegerType::get(LoadTy->getContext(), NewLoadSize * 8);
    Type *DestPTy =
        PointerType::get(DestTy, PtrVal->getType()->getPointerAddressSpace());
    Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc());
    PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
    LoadInst *NewLoad = Builder.CreateLoad(DestTy, PtrVal);
    NewLoad->takeName(SrcVal);
    NewLoad->setAlignment(SrcVal->getAlignment());

    LLVM_DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n");
    LLVM_DEBUG(dbgs() << "TO: " << *NewLoad << "\n");

    // Replace uses of the original load with the wider load. On a big endian
    // system, we need to shift down to get the relevant bits.
    Value *RV = NewLoad;
    if (DL.isBigEndian())
      RV = Builder.CreateLShr(RV, (NewLoadSize - SrcValStoreSize) * 8);
    RV = Builder.CreateTrunc(RV, SrcVal->getType());
    SrcVal->replaceAllUsesWith(RV);

    SrcVal = NewLoad;
  }

  return getStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, DL);
}
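// Widening example: if SrcVal is a 4-byte load and the query load reads 4
// bytes at Offset == 4, Offset + LoadSize == 8 exceeds the 4 bytes available,
// so SrcVal is rewidened as an 8-byte (next power of two) load of the same
// address, the old load's users are redirected to a truncation of the wide
// value, and the requested bytes are then extracted from the wide load.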
Constant *getConstantLoadValueForLoad(Constant *SrcVal, unsigned Offset,
                                      Type *LoadTy, const DataLayout &DL) {
  unsigned SrcValStoreSize = DL.getTypeStoreSize(SrcVal->getType());
  unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
  if (Offset + LoadSize > SrcValStoreSize)
    return nullptr;
  return getConstantStoreValueForLoad(SrcVal, Offset, LoadTy, DL);
}
template <class T, class HelperClass>
T *getMemInstValueForLoadHelper(MemIntrinsic *SrcInst, unsigned Offset,
                                Type *LoadTy, HelperClass &Helper,
                                const DataLayout &DL) {
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy) / 8;

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    T *Val = cast<T>(MSI->getValue());
    if (LoadSize != 1)
      Val =
          Helper.CreateZExtOrBitCast(Val, IntegerType::get(Ctx, LoadSize * 8));
    T *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize;) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet * 2 <= LoadSize) {
        T *ShVal = Helper.CreateShl(
            Val, ConstantInt::get(Val->getType(), NumBytesSet * 8));
        Val = Helper.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      T *ShVal = Helper.CreateShl(Val, ConstantInt::get(Val->getType(), 1 * 8));
      Val = Helper.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return coerceAvailableValueToLoadTypeHelper(Val, LoadTy, Helper, DL);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());
  unsigned AS = Src->getType()->getPointerAddressSpace();

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src =
      ConstantExpr::getBitCast(Src, Type::getInt8PtrTy(Src->getContext(), AS));
  Constant *OffsetCst =
      ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()), Src,
                                       OffsetCst);
  Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
  return ConstantFoldLoadFromConstPtr(Src, LoadTy, DL);
}
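// Splat example: for a memset of the (possibly non-constant) byte %x feeding a
// 4-byte load, %x is zero-extended to i32 and then doubled up --
// v = x | (x << 8) gives two bytes, v | (v << 16) gives all four -- so every
// byte of the result equals %x, which is exactly what the memset wrote,
// regardless of the offset into the memset region.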
/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
Value *getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                              Type *LoadTy, Instruction *InsertPt,
                              const DataLayout &DL) {
  IRBuilder<> Builder(InsertPt);
  return getMemInstValueForLoadHelper<Value, IRBuilder<>>(SrcInst, Offset,
                                                          LoadTy, Builder, DL);
}
Constant *getConstantMemInstValueForLoad(MemIntrinsic *SrcInst,
                                         unsigned Offset, Type *LoadTy,
                                         const DataLayout &DL) {
  // The only case analyzeLoadFromClobberingMemInst cannot be converted to a
  // constant is when it's a memset of a non-constant.
  if (auto *MSI = dyn_cast<MemSetInst>(SrcInst))
    if (!isa<Constant>(MSI->getValue()))
      return nullptr;
  ConstantFolder F;
  return getMemInstValueForLoadHelper<Constant, ConstantFolder>(SrcInst, Offset,
                                                                LoadTy, F, DL);
}
} // namespace VNCoercion
} // namespace llvm