//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }
      if (auto *Call = dyn_cast<CallBase>(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (Call->isCallee(&U))
          continue;

        unsigned DataOpNo = Call->getDataOperandNo(&U);
        bool IsArgOperand = Call->isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (Call->onlyReadsMemory() &&
            (Call->use_empty() || Call->doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && Call->isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (I->isLifetimeStartOrEnd()) {
        assert(I->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(I);
        continue;
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}
/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global. If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}
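
// Illustrative IR sketch (hand-written, not taken from an existing test) of
// the pattern the two helpers above accept: an alloca whose only write is a
// memcpy from a constant global, so later code can redirect the alloca's
// readers to the global itself. Names such as @arr and %a are hypothetical.
//
//   @arr = private unnamed_addr constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
//   ...
//   %a = alloca [4 x i32], align 4
//   %dst = bitcast [4 x i32]* %a to i8*
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst,
//       i8* bitcast ([4 x i32]* @arr to i8*), i64 16, i1 false)
//   ; ... only loads of %a (or GEPs/bitcasts of it that are only loaded) follow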
/// Returns true if V is dereferenceable for size of alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (AllocaSize == 0)
    return false;
  return isDereferenceableAndAlignedPointer(V, Align(AI->getAlignment()),
                                            APInt(64, AllocaSize), DL);
}
static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder.getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    if (C->getValue().getActiveBits() <= 64) {
      Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(MaybeAlign(AI.getAlignment()));

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      BasicBlock::iterator It(New);
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
        ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = {NullIdx, NullIdx};
      Instruction *GEP = GetElementPtrInst::CreateInBounds(
          NewTy, New, Idx, New->getName() + ".sub");
      IC.InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return IC.replaceInstUsesWith(AI, GEP);
    }
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}
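
// Illustrative IR sketch (hand-written) of the constant-array-size
// canonicalization above; names and exact value numbering are hypothetical:
//
//   %buf = alloca i32, i32 4, align 4
// becomes roughly
//   %buf1 = alloca [4 x i32], align 4
//   %buf1.sub = getelementptr inbounds [4 x i32], [4 x i32]* %buf1, i64 0, i64 0
// with every user of %buf rewritten to use %buf1.sub instead.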
namespace {
// If I and V are pointers in different address space, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees bitcast or GEP, it will
// create new bitcast or GEP with the new pointer and use them in the load
// instruction.
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
} // end anonymous namespace
void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    LLVM_DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}
*PointerReplacer::getReplacement(Value
*V
) {
288 auto Loc
= WorkMap
.find(V
);
289 if (Loc
!= WorkMap
.end())
void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(I->getType(), V);
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}
void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}
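
// Illustrative sketch (hand-written; names and address spaces are
// hypothetical) of why PointerReplacer exists: when the copied-from constant
// global lives in a different address space, the alloca cannot simply be
// RAUW'd with an addrspacecast, so the loads are rebuilt against the new
// pointer instead:
//
//   %a = alloca [4 x i32]                                   ; addrspace(0)
//   ; ... memcpy into %a from @g, a constant in addrspace(4) ...
//   %p = getelementptr [4 x i32], [4 x i32]* %a, i64 0, i64 1
//   %v = load i32, i32* %p
// becomes, conceptually,
//   %p1 = getelementptr [4 x i32], [4 x i32] addrspace(4)* @g, i64 0, i64 1
//   %v = load i32, i32 addrspace(4)* %p1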
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(
          MaybeAlign(DL.getPrefTypeAlignment(AI.getAllocatedType())));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together. Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              MaybeAlign(DL.getPrefTypeAlignment(EntryAI->getAllocatedType())));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        const MaybeAlign MaxAlign(
            std::max(EntryAI->getAlignment(), AI.getAlignment()));
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation. If this is the case, we can change all users to use
    // the constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign &&
          isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
        LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        LLVM_DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        auto *SrcTy = TheSrc->getType();
        auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                        SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
        if (AI.getType()->getPointerAddressSpace() ==
            SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        }

        PointerReplacer PtrReplacer(*this);
        PtrReplacer.replacePointer(AI, Cast);
        ++NumGlobalCopies;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}
// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
}
/// Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
                                      const Twine &Suffix = "") {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  Value *NewPtr = nullptr;
  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType()->getPointerElementType() == NewTy &&
        NewPtr->getType()->getPointerAddressSpace() == AS))
    NewPtr = IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));

  LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
      NewTy, NewPtr, LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  copyMetadataForLoad(*NewLoad, LI);
  return NewLoad;
}
/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}
/// Returns true if instruction represents a minmax pattern like:
///   select ((cmp load V1, load V2), V1, V2).
static bool isMinMaxWithLoads(Value *V) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}
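
// Illustrative IR sketch (hand-written; %p1/%p2 are hypothetical names) of the
// minmax-over-loads shape isMinMaxWithLoads looks for: a select between two
// pointers whose compared operands are loads of those same pointers.
//
//   %v1 = load i32, i32* %p1
//   %v2 = load i32, i32* %p2
//   %cmp = icmp slt i32 %v1, %v2
//   %min.addr = select i1 %cmp, i32* %p1, i32* %p2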
/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  // Do not perform canonicalization if minmax pattern is found (to avoid
  // infinite loop).
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.typeSizeEqualsStoreSize(Ty) &&
      !DL.isNonIntegralPointerType(Ty) &&
      !isMinMaxWithLoads(
          peekThroughBitcast(LI.getPointerOperand(), /*OneUseOnly=*/true))) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder.SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type have the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}
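
// Illustrative IR sketch (hand-written; names are hypothetical) of the
// single-use no-op-cast case above: the load is rewritten to produce the cast
// destination type directly, so the cast disappears.
//
//   %iv = load i64, i64* %p
//   %pv = inttoptr i64 %iv to i8*
// becomes, on a target with 64-bit pointers,
//   %p.cast = bitcast i64* %p to i8**
//   %pv = load i8*, i8** %p.cast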
static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
                                             EltAlign, Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(
          AT->getElementType(), Ptr, MinAlign(Align, Offset), Name + ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}
// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false (constant global values and allocas fall into this category).
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}
// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// also search through non-zero constant indices if we kept track of the
// offsets those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
    GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}
// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
        ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}
static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
    return false;

  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
}
static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(MaybeAlign(KnownAlign));
  else if (LoadAlign == 0)
    LI.setAlignment(MaybeAlign(EffectiveLoadAlign));

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for these xforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable. We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    StoreInst *SI = new StoreInst(UndefValue::get(LI.getType()),
                                  Constant::getNullValue(Op->getType()), &LI);
    SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      const MaybeAlign Alignment(LI.getAlignment());
      if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                      Alignment, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
                                      Alignment, DL, SI)) {
        LoadInst *V1 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(1),
                               SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(2),
                               SI->getOperand(2)->getName() + ".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Alignment);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Alignment);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace())) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace())) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}
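
// Illustrative IR sketch (hand-written; names are hypothetical) of the
// load-over-select transform in visitLoadInst, legal only when both addresses
// are known safe to load from unconditionally:
//
//   %addr = select i1 %cond, i32* %p, i32* %q
//   %v = load i32, i32* %addr
// becomes
//   %p.val = load i32, i32* %p
//   %q.val = load i32, i32* %q
//   %v = select i1 %cond, i32 %p.val, i32 %q.val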
/// Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///   %E0 = extractelement <2 x double> %U, i32 0
///   %V0 = insertvalue [2 x double] undef, double %E0, 0
///   %E1 = extractelement <2 x double> %U, i32 1
///   %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}
/// Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}
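
// Illustrative IR sketch (hand-written) of the aggregate-store unpacking
// above, for a padding-free struct value %agg = { i32 %x, i32 %y }; names are
// hypothetical:
//
//   store { i32, i32 } %agg, { i32, i32 }* %p
// becomes
//   %p0 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 0
//   store i32 %x, i32* %p0
//   %p1 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 1
//   store i32 %y, i32* %p1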
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
/// Converts store (bitcast (load (bitcast (select ...)))) to
/// store (load (select ...)), where select is minmax:
/// select ((cmp load V1, load V2), V1, V2).
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombiner &IC,
                                                StoreInst &SI) {
  // bitcast?
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
    return false;
  // load? integer?
  Value *LoadAddr;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return false;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  if (!isMinMaxWithLoads(LoadAddr))
    return false;

  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               peekThroughBitcast(SI->getPointerOperand()) != LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = combineLoadToNewType(
      IC, *LI, LoadAddr->getType()->getPointerElementType());
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  IC.replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
  IC.eraseInstFromFunction(*LI);
  return true;
}
*InstCombiner::visitStoreInst(StoreInst
&SI
) {
1351 Value
*Val
= SI
.getOperand(0);
1352 Value
*Ptr
= SI
.getOperand(1);
1354 // Try to canonicalize the stored type.
1355 if (combineStoreToValueType(*this, SI
))
1356 return eraseInstFromFunction(SI
);
1358 // Attempt to improve the alignment.
1359 const Align KnownAlign
= Align(getOrEnforceKnownAlignment(
1360 Ptr
, DL
.getPrefTypeAlignment(Val
->getType()), DL
, &SI
, &AC
, &DT
));
1361 const MaybeAlign StoreAlign
= MaybeAlign(SI
.getAlignment());
1362 const Align EffectiveStoreAlign
=
1363 StoreAlign
? *StoreAlign
: Align(DL
.getABITypeAlignment(Val
->getType()));
1365 if (KnownAlign
> EffectiveStoreAlign
)
1366 SI
.setAlignment(KnownAlign
);
1367 else if (!StoreAlign
)
1368 SI
.setAlignment(EffectiveStoreAlign
);
1370 // Try to canonicalize the stored type.
1371 if (unpackStoreToAggregate(*this, SI
))
1372 return eraseInstFromFunction(SI
);
1374 if (removeBitcastsFromLoadStoreOnMinMax(*this, SI
))
1375 return eraseInstFromFunction(SI
);
1377 // Replace GEP indices if possible.
1378 if (Instruction
*NewGEPI
= replaceGEPIdxWithZero(*this, Ptr
, SI
)) {
1379 Worklist
.Add(NewGEPI
);
1383 // Don't hack volatile/ordered stores.
1384 // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
1385 if (!SI
.isUnordered()) return nullptr;
1387 // If the RHS is an alloca with a single use, zapify the store, making the
1389 if (Ptr
->hasOneUse()) {
1390 if (isa
<AllocaInst
>(Ptr
))
1391 return eraseInstFromFunction(SI
);
1392 if (GetElementPtrInst
*GEP
= dyn_cast
<GetElementPtrInst
>(Ptr
)) {
1393 if (isa
<AllocaInst
>(GEP
->getOperand(0))) {
1394 if (GEP
->getOperand(0)->hasOneUse())
1395 return eraseInstFromFunction(SI
);
1400 // If we have a store to a location which is known constant, we can conclude
1401 // that the store must be storing the constant value (else the memory
1402 // wouldn't be constant), and this must be a noop.
1403 if (AA
->pointsToConstantMemory(Ptr
))
1404 return eraseInstFromFunction(SI
);
1406 // Do really simple DSE, to catch cases where there are several consecutive
1407 // stores to the same location, separated by a few arithmetic operations. This
1408 // situation often occurs with bitfield accesses.
1409 BasicBlock::iterator
BBI(SI
);
1410 for (unsigned ScanInsts
= 6; BBI
!= SI
.getParent()->begin() && ScanInsts
;
1413 // Don't count debug info directives, lest they affect codegen,
1414 // and we skip pointer-to-pointer bitcasts, which are NOPs.
1415 if (isa
<DbgInfoIntrinsic
>(BBI
) ||
1416 (isa
<BitCastInst
>(BBI
) && BBI
->getType()->isPointerTy())) {
1421 if (StoreInst
*PrevSI
= dyn_cast
<StoreInst
>(BBI
)) {
1422 // Prev store isn't volatile, and stores to the same location?
1423 if (PrevSI
->isUnordered() && equivalentAddressValues(PrevSI
->getOperand(1),
1424 SI
.getOperand(1))) {
1427 eraseInstFromFunction(*PrevSI
);
1433 // If this is a load, we have to stop. However, if the loaded value is from
1434 // the pointer we're loading and is producing the pointer we're storing,
1435 // then *this* store is dead (X = load P; store X -> P).
1436 if (LoadInst
*LI
= dyn_cast
<LoadInst
>(BBI
)) {
1437 if (LI
== Val
&& equivalentAddressValues(LI
->getOperand(0), Ptr
)) {
1438 assert(SI
.isUnordered() && "can't eliminate ordering operation");
1439 return eraseInstFromFunction(SI
);
1442 // Otherwise, this is a load from some other location. Stores before it
1447 // Don't skip over loads, throws or things that can modify memory.
1448 if (BBI
->mayWriteToMemory() || BBI
->mayReadFromMemory() || BBI
->mayThrow())
1452 // store X, null -> turns into 'unreachable' in SimplifyCFG
1453 // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
1454 if (canSimplifyNullStoreOrGEP(SI
)) {
1455 if (!isa
<UndefValue
>(Val
)) {
1456 SI
.setOperand(0, UndefValue::get(Val
->getType()));
1457 if (Instruction
*U
= dyn_cast
<Instruction
>(Val
))
1458 Worklist
.Add(U
); // Dropped a use.
1460 return nullptr; // Do not modify these!
1463 // store undef, Ptr -> noop
1464 if (isa
<UndefValue
>(Val
))
1465 return eraseInstFromFunction(SI
);
1467 // If this store is the second-to-last instruction in the basic block
1468 // (excluding debug info and bitcasts of pointers) and if the block ends with
1469 // an unconditional branch, try to move the store to the successor block.
1470 BBI
= SI
.getIterator();
1473 } while (isa
<DbgInfoIntrinsic
>(BBI
) ||
1474 (isa
<BitCastInst
>(BBI
) && BBI
->getType()->isPointerTy()));
1476 if (BranchInst
*BI
= dyn_cast
<BranchInst
>(BBI
))
1477 if (BI
->isUnconditional())
1478 mergeStoreIntoSuccessor(SI
);
/// Try to transform:
///   if () { *P = v1; } else { *P = v2 }
/// or:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
bool InstCombiner::mergeStoreIntoSuccessor(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "This code has not been audited for volatile or ordered store case.");

  // Check if the successor block has exactly 2 incoming edges.
  BasicBlock *StoreBB = SI.getParent();
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
  if (!DestBB->hasNPredecessors(2))
    return false;

  // Capture the other block (the block that doesn't contain our store).
  pred_iterator PredIter = pred_begin(DestBB);
  if (*PredIter == StoreBB)
    ++PredIter;
  BasicBlock *OtherBB = *PredIter;

  // Bail out if all of the relevant blocks aren't distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  // The debug locations of the original instructions might differ. Merge them.
  DebugLoc MergedLoc = DILocation::getMergedLocation(SI.getDebugLoc(),
                                                     OtherStore->getDebugLoc());
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
    PN->setDebugLoc(MergedLoc);
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(),
                                   MaybeAlign(SI.getAlignment()),
                                   SI.getOrdering(), SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(MergedLoc);

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}
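
// Illustrative IR sketch (hand-written; block and value names are
// hypothetical) of the if/then/else case handled by mergeStoreIntoSuccessor:
// both predecessors store to the same pointer, so the stored values are merged
// with a phi and a single store in the successor:
//
//   then:                                else:
//     store i32 %v1, i32* %p               store i32 %v2, i32* %p
//     br label %join                       br label %join
//   join:
//     ...
// becomes
//   join:
//     %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
//     store i32 %storemerge, i32* %p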