//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
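/// For example (illustrative IR), given a constant global @g, a constant
/// expression like getelementptr ([4 x i32], [4 x i32]* @g, i64 0, i64 2)
/// is accepted, while an equivalent GetElementPtrInst instruction is not.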
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
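/// A sketch of the pattern this recognizes (illustrative IR; names are
/// hypothetical):
///   %buf = alloca [4 x i32]
///   %p = bitcast [4 x i32]* %buf to i8*
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p,
///       i8* bitcast ([4 x i32]* @cst to i8*), i64 16, i1 false)
///   ; ... only reads of %buf follow, where @cst is a constant global ...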
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto *Call = dyn_cast<CallBase>(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (Call->isCallee(&U))
          continue;

        unsigned DataOpNo = Call->getDataOperandNo(&U);
        bool IsArgOperand = Call->isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (Call->onlyReadsMemory() &&
            (Call->use_empty() || Call->doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && Call->isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (I->isLifetimeStartOrEnd()) {
        assert(I->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(I);
        continue;
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}
/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global.  If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}
/// Returns true if V is dereferenceable for size of alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, AI->getAlignment(),
                                            APInt(64, AllocaSize), DL);
}
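// Canonicalize the array size of an alloca. For instance (illustrative IR),
//   %buf = alloca i32, i32 8
// becomes an alloca of the array type plus a GEP to its first element:
//   %buf = alloca [8 x i32]
//   %buf.sub = getelementptr inbounds [8 x i32], [8 x i32]* %buf, i32 0, i32 0
// so later combines see a single scalar allocation.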
static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder.getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    if (C->getValue().getActiveBits() <= 64) {
      Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It(New);
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
        ++It;

      // Now that I is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = {NullIdx, NullIdx};
      Instruction *GEP = GetElementPtrInst::CreateInBounds(
          NewTy, New, Idx, New->getName() + ".sub");
      IC.InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return IC.replaceInstUsesWith(AI, GEP);
    }
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}
namespace {
// If I and V are pointers in different address space, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees bitcast or GEP, it will
// create new bitcast or GEP with the new pointer and use them in the load
// instruction.
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
} // end anonymous namespace
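// For example (illustrative): when an addrspace(0) alloca is replaced by a
// constant global in a different address space, PointerReplacer rebuilds each
// GEP/bitcast chain on top of the new pointer and repoints the loads at it,
// instead of introducing an addrspacecast of the global.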
void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    LLVM_DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}
Value *PointerReplacer::getReplacement(Value *V) {
  auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}
void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(I->getType(), V);
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}
void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}
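// An illustrative sketch of the copy-from-constant-global rewrite performed
// below (hypothetical IR):
//   %A = alloca [3 x i32]
//   call void @llvm.memcpy...(%A, @cst, i64 12, ...) ; @cst constant global
//   ; only reads of %A afterwards
// All users of %A can be redirected to @cst, and the alloca and memcpy erased.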
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together.  Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign &&
          isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
        LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        LLVM_DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        auto *SrcTy = TheSrc->getType();
        auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                        SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
        if (AI.getType()->getPointerAddressSpace() ==
            SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        }

        PointerReplacer PtrReplacer(*this);
        PtrReplacer.replacePointer(AI, Cast);
        ++NumGlobalCopies;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // dead allocas.
  return visitAllocSite(AI);
}
// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
}
/// Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
                                      const Twine &Suffix = "") {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  Value *NewPtr = nullptr;
  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType()->getPointerElementType() == NewTy &&
        NewPtr->getType()->getPointerAddressSpace() == AS))
    NewPtr = IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));

  LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
      NewTy, NewPtr, LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  copyMetadataForLoad(*NewLoad, LI);
  return NewLoad;
}
/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}
/// Returns true if an instruction represents a minmax pattern like:
///   select ((cmp load V1, load V2), V1, V2).
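/// For example (illustrative IR):
///   %v1 = load i32, i32* %p1
///   %v2 = load i32, i32* %p2
///   %cmp = icmp slt i32 %v1, %v2
///   %sel = select i1 %cmp, i32* %p1, i32* %p2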
static bool isMinMaxWithLoads(Value *V) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}
/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
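/// For example (illustrative IR, assuming 64-bit pointers):
///   %x = load i64, i64* %p
///   %y = inttoptr i64 %x to i8*
/// is better modeled as a direct load of the pointer:
///   %p.cast = bitcast i64* %p to i8**
///   %y = load i8*, i8** %p.cast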
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  // Do not perform canonicalization if minmax pattern is found (to avoid
  // infinite loop).
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.typeSizeEqualsStoreSize(Ty) &&
      !DL.isNonIntegralPointerType(Ty) &&
      !isMinMaxWithLoads(
          peekThroughBitcast(LI.getPointerOperand(), /*OneUseOnly=*/true))) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder.SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type have the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto* CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}
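// Unpack a load of an aggregate into loads of its elements. For instance
// (illustrative IR, assuming a struct with no padding):
//   %agg = load { i32, i32 }, { i32, i32 }* %p
// becomes per-element GEPs and scalar loads stitched back together with
// insertvalue.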
static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
                                             EltAlign, Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(
          AT->getElementType(), Ptr, MinAlign(Align, Offset), Name + ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}
// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false (constant global values and allocas fall into this category).
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as an
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}
// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// also search through non-zero constant indices if we kept track of the
// offsets those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
    GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}
// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
        ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}
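// In address spaces where null is not a valid pointer, a store to null or
// through a GEP whose base is null, e.g. the illustrative
//   store i32 %v, i32* null
// is undefined behavior and can be simplified.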
static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
    return false;

  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}
*InstCombiner::visitLoadInst(LoadInst
&LI
) {
949 Value
*Op
= LI
.getOperand(0);
951 // Try to canonicalize the loaded type.
952 if (Instruction
*Res
= combineLoadToOperationType(*this, LI
))
955 // Attempt to improve the alignment.
956 unsigned KnownAlign
= getOrEnforceKnownAlignment(
957 Op
, DL
.getPrefTypeAlignment(LI
.getType()), DL
, &LI
, &AC
, &DT
);
958 unsigned LoadAlign
= LI
.getAlignment();
959 unsigned EffectiveLoadAlign
=
960 LoadAlign
!= 0 ? LoadAlign
: DL
.getABITypeAlignment(LI
.getType());
962 if (KnownAlign
> EffectiveLoadAlign
)
963 LI
.setAlignment(KnownAlign
);
964 else if (LoadAlign
== 0)
965 LI
.setAlignment(EffectiveLoadAlign
);
967 // Replace GEP indices if possible.
968 if (Instruction
*NewGEPI
= replaceGEPIdxWithZero(*this, Op
, LI
)) {
969 Worklist
.Add(NewGEPI
);
973 if (Instruction
*Res
= unpackLoadToAggregate(*this, LI
))
976 // Do really simple store-to-load forwarding and load CSE, to catch cases
977 // where there are several consecutive memory accesses to the same location,
978 // separated by a few arithmetic operations.
979 BasicBlock::iterator
BBI(LI
);
980 bool IsLoadCSE
= false;
981 if (Value
*AvailableVal
= FindAvailableLoadedValue(
982 &LI
, LI
.getParent(), BBI
, DefMaxInstsToScan
, AA
, &IsLoadCSE
)) {
984 combineMetadataForCSE(cast
<LoadInst
>(AvailableVal
), &LI
, false);
986 return replaceInstUsesWith(
987 LI
, Builder
.CreateBitOrPointerCast(AvailableVal
, LI
.getType(),
988 LI
.getName() + ".cast"));
991 // None of the following transforms are legal for volatile/ordered atomic
992 // loads. Most of them do apply for unordered atomics.
993 if (!LI
.isUnordered()) return nullptr;
995 // load(gep null, ...) -> unreachable
996 // load null/undef -> unreachable
997 // TODO: Consider a target hook for valid address spaces for this xforms.
998 if (canSimplifyNullLoadOrGEP(LI
, Op
)) {
999 // Insert a new store to null instruction before the load to indicate
1000 // that this code is not reachable. We do this instead of inserting
1001 // an unreachable instruction directly because we cannot modify the
1003 StoreInst
*SI
= new StoreInst(UndefValue::get(LI
.getType()),
1004 Constant::getNullValue(Op
->getType()), &LI
);
1005 SI
->setDebugLoc(LI
.getDebugLoc());
1006 return replaceInstUsesWith(LI
, UndefValue::get(LI
.getType()));
1009 if (Op
->hasOneUse()) {
1010 // Change select and PHI nodes to select values instead of addresses: this
1011 // helps alias analysis out a lot, allows many others simplifications, and
1012 // exposes redundancy in the code.
1014 // Note that we cannot do the transformation unless we know that the
1015 // introduced loads cannot trap! Something like this is valid as long as
1016 // the condition is always false: load (select bool %C, int* null, int* %G),
1017 // but it would not be valid if we transformed it to load from null
1020 if (SelectInst
*SI
= dyn_cast
<SelectInst
>(Op
)) {
1021 // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
1022 unsigned Align
= LI
.getAlignment();
1023 if (isSafeToLoadUnconditionally(SI
->getOperand(1), LI
.getType(), Align
,
1025 isSafeToLoadUnconditionally(SI
->getOperand(2), LI
.getType(), Align
,
1028 Builder
.CreateLoad(LI
.getType(), SI
->getOperand(1),
1029 SI
->getOperand(1)->getName() + ".val");
1031 Builder
.CreateLoad(LI
.getType(), SI
->getOperand(2),
1032 SI
->getOperand(2)->getName() + ".val");
1033 assert(LI
.isUnordered() && "implied by above");
1034 V1
->setAlignment(Align
);
1035 V1
->setAtomic(LI
.getOrdering(), LI
.getSyncScopeID());
1036 V2
->setAlignment(Align
);
1037 V2
->setAtomic(LI
.getOrdering(), LI
.getSyncScopeID());
1038 return SelectInst::Create(SI
->getCondition(), V1
, V2
);
1041 // load (select (cond, null, P)) -> load P
1042 if (isa
<ConstantPointerNull
>(SI
->getOperand(1)) &&
1043 !NullPointerIsDefined(SI
->getFunction(),
1044 LI
.getPointerAddressSpace())) {
1045 LI
.setOperand(0, SI
->getOperand(2));
1049 // load (select (cond, P, null)) -> load P
1050 if (isa
<ConstantPointerNull
>(SI
->getOperand(2)) &&
1051 !NullPointerIsDefined(SI
->getFunction(),
1052 LI
.getPointerAddressSpace())) {
1053 LI
.setOperand(0, SI
->getOperand(1));
/// Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///   %E0 = extractelement <2 x double> %U, i32 0
///   %V0 = insertvalue [2 x double] undef, double %E0, 0
///   %E1 = extractelement <2 x double> %U, i32 1
///   %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}
/// Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
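/// For example (illustrative IR):
///   %f = bitcast i32 %x to float
///   store float %f, float* %p
/// becomes a store of the original integer through a cast pointer:
///   %p.cast = bitcast float* %p to i32*
///   store i32 %x, i32* %p.cast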
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}
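// Unpack a store of an aggregate into stores of its elements, mirroring
// unpackLoadToAggregate above. For instance (illustrative IR, assuming a
// struct with no padding):
//   store { i32, i32 } %agg, { i32, i32 }* %p
// becomes two extractvalues and two scalar stores through per-element GEPs.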
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
/// Converts store (bitcast (load (bitcast (select ...)))) to
/// store (load (select ...)), where select is minmax:
/// select ((cmp load V1, load V2), V1, V2).
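/// For example (illustrative IR, with %sel a minmax select of float pointers):
///   %ld.cast = bitcast float* %sel to i32*
///   %v = load i32, i32* %ld.cast
///   %dst.cast = bitcast float* %dst to i32*
///   store i32 %v, i32* %dst.cast
/// is rewritten to load and store float directly.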
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombiner &IC,
                                                StoreInst &SI) {
  // bitcast?
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
    return false;
  // load? integer?
  Value *LoadAddr;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return false;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  if (!isMinMaxWithLoads(LoadAddr))
    return false;

  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               peekThroughBitcast(SI->getPointerOperand()) != LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = combineLoadToNewType(
      IC, *LI, LoadAddr->getType()->getPointerElementType());
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  IC.replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
  IC.eraseInstFromFunction(*LI);
  return true;
}
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Try to canonicalize the stored type.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(Ptr))
    return eraseInstFromFunction(SI);

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1),
                                                           SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        eraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location.  Stores before it
      // are not valid to eliminate.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the second-to-last instruction in the basic block
  // (excluding debug info and bitcasts of pointers) and if the block ends with
  // an unconditional branch, try to move the store to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));

  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      mergeStoreIntoSuccessor(SI);

  return nullptr;
}
/// Try to transform:
///   if () { *P = v1; } else { *P = v2 }
/// or:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
bool InstCombiner::mergeStoreIntoSuccessor(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "This code has not been audited for volatile or ordered store case.");

  // Check if the successor block has exactly 2 incoming edges.
  BasicBlock *StoreBB = SI.getParent();
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
  if (!DestBB->hasNPredecessors(2))
    return false;

  // Capture the other block (the block that doesn't contain our store).
  pred_iterator PredIter = pred_begin(DestBB);
  if (*PredIter == StoreBB)
    ++PredIter;
  BasicBlock *OtherBB = *PredIter;

  // Bail out if all of the relevant blocks aren't distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI==OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  // The debug locations of the original instructions might differ. Merge them.
  DebugLoc MergedLoc = DILocation::getMergedLocation(SI.getDebugLoc(),
                                                     OtherStore->getDebugLoc());
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
    PN->setDebugLoc(MergedLoc);
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(), SI.getAlignment(),
                                   SI.getOrdering(), SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(MergedLoc);

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}