//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
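///
/// As an illustrative sketch (the names @g and %a below are made up, not from
/// this file), the shape being matched is:
///
///   @g = private unnamed_addr constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
///   ...
///   %a = alloca [4 x i32]
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a.i8, i8* %g.i8, i64 16, i1 false)
///   ; ... only reads of %a from here on ...
///
/// in which case every use of %a can be rewritten to use @g directly.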
static bool
isOnlyCopiedFromConstantMemory(AAResults *AA,
                               Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto *Call = dyn_cast<CallBase>(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (Call->isCallee(&U))
          continue;

        unsigned DataOpNo = Call->getDataOperandNo(&U);
        bool IsArgOperand = Call->isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (Call->onlyReadsMemory() &&
            (Call->use_empty() || Call->doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && Call->isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (I->isLifetimeStartOrEnd()) {
        assert(I->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(I);
        continue;
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!AA->pointsToConstantMemory(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}
/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global.  If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantMemory(AA, AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}
/// Returns true if V is dereferenceable for size of alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, Align(AI->getAlignment()),
                                            APInt(64, AllocaSize), DL);
}
static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC,
                                            AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    return IC.replaceOperand(AI, 0, IC.Builder.getInt32(1));
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    if (C->getValue().getActiveBits() <= 64) {
      Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(AI.getAlign());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It(New);
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
        ++It;

      // Now that I is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = {NullIdx, NullIdx};
      Instruction *NewI = GetElementPtrInst::CreateInBounds(
          NewTy, New, Idx, New->getName() + ".sub");
      IC.InsertNewInstBefore(NewI, *It);

      // Gracefully handle allocas in other address spaces.
      if (AI.getType()->getPointerAddressSpace() !=
          NewI->getType()->getPointerAddressSpace()) {
        NewI =
            CastInst::CreatePointerBitCastOrAddrSpaceCast(NewI, AI.getType());
        IC.InsertNewInstBefore(NewI, *It);
      }

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return IC.replaceInstUsesWith(AI, NewI);
    }
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    return IC.replaceOperand(AI, 0, V);
  }

  return nullptr;
}
namespace {
// If I and V are pointers in different address space, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address space may be disjoint depending on target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees bitcast or GEP, it will
// create new bitcast or GEP with the new pointer and use them in the load
// instruction.
class PointerReplacer {
public:
  PointerReplacer(InstCombinerImpl &IC) : IC(IC) {}

  bool collectUsers(Instruction &I);
  void replacePointer(Instruction &I, Value *V);

private:
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallSetVector<Instruction *, 4> Worklist;
  MapVector<Value *, Value *> WorkMap;
  InstCombinerImpl &IC;
};
} // end anonymous namespace
bool PointerReplacer::collectUsers(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = cast<Instruction>(&*U);
    if (auto *Load = dyn_cast<LoadInst>(Inst)) {
      if (Load->isVolatile())
        return false;
      Worklist.insert(Load);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Worklist.insert(Inst);
      if (!collectUsers(*Inst))
        return false;
    } else if (auto *MI = dyn_cast<MemTransferInst>(Inst)) {
      if (MI->isVolatile())
        return false;
      Worklist.insert(Inst);
    } else if (Inst->isLifetimeStartOrEnd()) {
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *U << '\n');
      return false;
    }
  }

  return true;
}
Value *PointerReplacer::getReplacement(Value *V) { return WorkMap.lookup(V); }
void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),
                              LT->getAlign(), LT->getOrdering(),
                              LT->getSyncScopeID());
    NewI->takeName(LT);
    copyMetadataForLoad(*NewI, *LT);

    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else if (auto *MemCpy = dyn_cast<MemTransferInst>(I)) {
    auto *SrcV = getReplacement(MemCpy->getRawSource());
    // The pointer may appear in the destination of a copy, but we don't want to
    // replace it.
    if (!SrcV) {
      assert(getReplacement(MemCpy->getRawDest()) &&
             "destination not in replace list");
      return;
    }

    IC.Builder.SetInsertPoint(MemCpy);
    auto *NewI = IC.Builder.CreateMemTransferInst(
        MemCpy->getIntrinsicID(), MemCpy->getRawDest(), MemCpy->getDestAlign(),
        SrcV, MemCpy->getSourceAlign(), MemCpy->getLength(),
        MemCpy->isVolatile());
    AAMDNodes AAMD;
    MemCpy->getAAMetadata(AAMD);
    if (AAMD)
      NewI->setAAMetadata(AAMD);

    IC.eraseInstFromFunction(*MemCpy);
    WorkMap[MemCpy] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}
void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;

  for (Instruction *Workitem : Worklist)
    replace(Workitem);
}
Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // Move all alloca's of zero byte objects to the entry block and merge them
    // together.  Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()).getKnownMinSize() == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation())
        return replaceOperand(AI, 0,
                              ConstantInt::get(AI.getArraySize()->getType(), 1));

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType())
                    .getKnownMinSize() != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        const Align MaxAlign = std::max(EntryAI->getAlign(), AI.getAlign());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  // Check to see if this allocation is only modified by a memcpy/memmove from
  // a constant whose alignment is equal to or exceeds that of the allocation.
  // If this is the case, we can change all users to use the constant global
  // instead.  This is commonly produced by the CFE by constructs like "void
  // foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A' is only subsequently
  // read.
  SmallVector<Instruction *, 4> ToDelete;
  if (MemTransferInst *Copy = isOnlyCopiedFromConstantMemory(AA, &AI, ToDelete)) {
    Value *TheSrc = Copy->getSource();
    Align AllocaAlign = AI.getAlign();
    Align SourceAlign = getOrEnforceKnownAlignment(
        TheSrc, AllocaAlign, DL, &AI, &AC, &DT);
    if (AllocaAlign <= SourceAlign &&
        isDereferenceableForAllocaSize(TheSrc, &AI, DL) &&
        !isa<Instruction>(TheSrc)) {
      // FIXME: Can we sink instructions without violating dominance when TheSrc
      // is an instruction instead of a constant or argument?
      LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
      LLVM_DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
      unsigned SrcAddrSpace = TheSrc->getType()->getPointerAddressSpace();
      auto *DestTy = PointerType::get(AI.getAllocatedType(), SrcAddrSpace);
      if (AI.getType()->getAddressSpace() == SrcAddrSpace) {
        for (Instruction *Delete : ToDelete)
          eraseInstFromFunction(*Delete);

        Value *Cast = Builder.CreateBitCast(TheSrc, DestTy);
        Instruction *NewI = replaceInstUsesWith(AI, Cast);
        eraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }

      PointerReplacer PtrReplacer(*this);
      if (PtrReplacer.collectUsers(AI)) {
        for (Instruction *Delete : ToDelete)
          eraseInstFromFunction(*Delete);

        Value *Cast = Builder.CreateBitCast(TheSrc, DestTy);
        PtrReplacer.replacePointer(AI, Cast);
        ++NumGlobalCopies;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}
// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
}
/// Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombinerImpl currently is using.
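///
/// As a sketch (the pointer name and types are made up), with NewTy == i32:
///
///   %v = load float, float* %p
///
/// becomes
///
///   %p.cast = bitcast float* %p to i32*
///   %v = load i32, i32* %p.cast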
LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                                 const Twine &Suffix) {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  Type *NewPtrTy = NewTy->getPointerTo(AS);
  Value *NewPtr = nullptr;
  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType() == NewPtrTy))
    NewPtr = Builder.CreateBitCast(Ptr, NewPtrTy);

  LoadInst *NewLoad = Builder.CreateAlignedLoad(
      NewTy, NewPtr, LI.getAlign(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  copyMetadataForLoad(*NewLoad, LI);
  return NewLoad;
}
/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI,
                                         Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlign(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_noundef:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}
/// Returns true if instruction represents a minmax pattern like:
///   select ((cmp load V1, load V2), V1, V2).
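///
/// An illustrative IR shape of that pattern (names made up):
///
///   %v1 = load i32, i32* %p1
///   %v2 = load i32, i32* %p2
///   %cmp = icmp slt i32 %v1, %v2
///   %sel = select i1 %cmp, i32* %p1, i32* %p2  ; address of the smaller value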
static bool isMinMaxWithLoads(Value *V, Type *&LoadTy) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = InstCombiner::peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  LoadTy = L1->getType();
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}
/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
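///
/// A sketch of the rewrite (names made up): with a single bitcast user
///
///   %x = load i32, i32* %p
///   %f = bitcast i32 %x to float
///
/// the load is changed to produce the used type directly:
///
///   %p.cast = bitcast i32* %p to float*
///   %f = load float, float* %p.cast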
static Instruction *combineLoadToOperationType(InstCombinerImpl &IC,
                                               LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  const DataLayout &DL = IC.getDataLayout();

  // Fold away bit casts of the loaded value by loading the desired type.
  // Note that we should not do this for pointer<->integer casts,
  // because that would result in type punning.
  if (LI.hasOneUse()) {
    // Don't transform when the type is x86_amx, it makes the pass that lower
    // x86_amx type happy.
    if (auto *BC = dyn_cast<BitCastInst>(LI.user_back())) {
      assert(!LI.getType()->isX86_AMXTy() &&
             "load from x86_amx* should not happen!");
      if (BC->getType()->isX86_AMXTy())
        return nullptr;
    }

    if (auto* CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL) && LI.getType()->isPtrOrPtrVectorTy() ==
                                    CI->getDestTy()->isPtrOrPtrVectorTy())
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = IC.combineLoadToNewType(LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }
  }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}
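// Unpack a load of a small aggregate into loads of its elements reassembled
// with insertvalue. As an illustrative sketch (names made up, assuming the
// struct has no padding), %s = load {i32, i32}, {i32, i32}* %p becomes roughly:
//
//   %p0 = getelementptr inbounds {i32, i32}, {i32, i32}* %p, i32 0, i32 0
//   %e0 = load i32, i32* %p0
//   %p1 = getelementptr inbounds {i32, i32}, {i32, i32}* %p, i32 0, i32 1
//   %e1 = load i32, i32* %p1
//   %s0 = insertvalue {i32, i32} undef, i32 %e0, 0
//   %s  = insertvalue {i32, i32} %s0, i32 %e1, 1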
static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = IC.combineLoadToNewType(LI, ST->getTypeAtIndex(0U),
                                                  ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    const auto Align = LI.getAlign();
    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(
          ST->getElementType(i), Ptr,
          commonAlignment(Align, SL->getElementOffset(i)), Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = IC.combineLoadToNewType(LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    const auto Align = LI.getAlign();

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
                                             commonAlignment(Align, Offset),
                                             Name + ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}
// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false (constant global values and allocas fall into this category).
//
// FIXME: This should probably live in ValueTracking (or similar).
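//
// For example, with MaxSize == 8, a plain 'alloca i32' or a constant global
// with a 4-byte initializer satisfies this, while an 'alloca [4 x i64]' or a
// pointer whose underlying object cannot be identified does not.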
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as an
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}
// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// also search through non-zero constant indices if we kept track of the
// offsets those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC,
                                     GetElementPtrInst *GEPI, Instruction *MemI,
                                     unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *SourceElementType = GEPI->getSourceElementType();
  // Size information about scalable vectors is not available, so we cannot
  // deduce whether indexing at n is undefined behaviour or not. Bail out.
  if (isa<ScalableVectorType>(SourceElementType))
    return false;

  Type *AllocTy = GetElementPtrInst::getIndexedType(SourceElementType, Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedSize();

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}
// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
                          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}
static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
    return false;

  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
}
static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}
Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  Align KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlign(LI.getType()), DL, &LI, &AC, &DT);
  if (KnownAlign > LI.getAlign())
    LI.setAlignment(KnownAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.push(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(&LI, *AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads.  Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable.  We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    StoreInst *SI = new StoreInst(PoisonValue::get(LI.getType()),
                                  Constant::getNullValue(Op->getType()), &LI);
    SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, PoisonValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      Align Alignment = LI.getAlign();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                      Alignment, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
                                      Alignment, DL, SI)) {
        LoadInst *V1 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(1),
                               SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(2),
                               SI->getOperand(2)->getName() + ".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Alignment);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Alignment);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(2));

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(1));
    }
  }

  return nullptr;
}
/// Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///   %E0 = extractelement <2 x double> %U, i32 0
///   %V0 = insertvalue [2 x double] undef, double %E0, 0
///   %E1 = extractelement <2 x double> %U, i32 1
///   %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!match(V, m_Undef()) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}
/// Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
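///
/// An illustrative sketch of the main case handled below (names made up):
///
///   %f = bitcast i32 %x to float
///   store float %f, float* %p
///
/// becomes
///
///   %p.cast = bitcast float* %p to i32*
///   store i32 %x, i32* %p.cast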
static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    assert(!BC->getType()->isX86_AMXTy() &&
           "store to x86_amx* should not happen!");
    V = BC->getOperand(0);
    // Don't transform when the type is x86_amx, it makes the pass that lower
    // x86_amx type happy.
    if (V->getType()->isX86_AMXTy())
      return false;
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}
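// Unpack a store of a small aggregate into stores of its elements, the
// counterpart of unpackLoadToAggregate above. As a sketch (names made up,
// assuming no struct padding), store {i32, i32} %v, {i32, i32}* %p becomes
// roughly:
//
//   %v0 = extractvalue {i32, i32} %v, 0
//   %p0 = getelementptr inbounds {i32, i32}, {i32, i32}* %p, i32 0, i32 0
//   store i32 %v0, i32* %p0
//   ... and likewise for element 1.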
static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    const auto Align = SI.getAlign();

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = commonAlignment(Align, SL->getElementOffset(i));
      llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    const auto Align = SI.getAlign();

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = commonAlignment(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
/// Converts store (bitcast (load (bitcast (select ...)))) to
/// store (load (select ...)), where select is minmax:
/// select ((cmp load V1, load V2), V1, V2).
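///
/// An illustrative sketch (names made up), for a float min stored through
/// integer bitcasts:
///
///   %sel = select i1 %cmp, float* %p1, float* %p2
///   %sel.i = bitcast float* %sel to i32*
///   %v = load i32, i32* %sel.i
///   %out.i = bitcast float* %out to i32*
///   store i32 %v, i32* %out.i
///
/// is rewritten to load and store the original float type:
///
///   %v = load float, float* %sel
///   store float %v, float* %out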
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombinerImpl &IC,
                                                StoreInst &SI) {
  // bitcast?
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
    return false;
  // load? integer?
  Value *LoadAddr;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return false;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  Type *CmpLoadTy;
  if (!isMinMaxWithLoads(LoadAddr, CmpLoadTy))
    return false;

  // Make sure the type would actually change.
  // This condition can be hit with chains of bitcasts.
  if (LI->getType() == CmpLoadTy)
    return false;

  // Make sure we're not changing the size of the load/store.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(LI->getType()) !=
      DL.getTypeStoreSizeInBits(CmpLoadTy))
    return false;

  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               InstCombiner::peekThroughBitcast(SI->getPointerOperand()) !=
                   LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = IC.combineLoadToNewType(*LI, CmpLoadTy);
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  IC.replaceInstUsesWith(*LI, PoisonValue::get(LI->getType()));
  IC.eraseInstFromFunction(*LI);
  return true;
}
Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  const Align KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlign(Val->getType()), DL, &SI, &AC, &DT);
  if (KnownAlign > SI.getAlign())
    SI.setAlignment(KnownAlign);

  // Try to canonicalize the stored type.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.push(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(Ptr))
    return eraseInstFromFunction(SI);

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (BBI->isDebugOrPseudoInst() ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1),
                                                           SI.getOperand(1))) {
        ++NumDeadStore;
        // Manually add back the original store to the worklist now, so it will
        // be processed after the operands of the removed store, as this may
        // expose additional DSE opportunities.
        Worklist.push(&SI);
        eraseInstFromFunction(*PrevSI);
        return nullptr;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<PoisonValue>(Val))
      return replaceOperand(SI, 0, PoisonValue::get(Val->getType()));
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  return nullptr;
}
/// Try to transform:
///   if () { *P = v1; } else { *P = v2 }
/// or:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {
  if (!SI.isUnordered())
    return false; // This code has not been audited for volatile/ordered case.

  // Check if the successor block has exactly 2 incoming edges.
  BasicBlock *StoreBB = SI.getParent();
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
  if (!DestBB->hasNPredecessors(2))
    return false;

  // Capture the other block (the block that doesn't contain our store).
  pred_iterator PredIter = pred_begin(DestBB);
  if (*PredIter == StoreBB)
    ++PredIter;
  BasicBlock *OtherBB = *PredIter;

  // Bail out if all of the relevant blocks aren't distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI==OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  // The debug locations of the original instructions might differ. Merge them.
  DebugLoc MergedLoc = DILocation::getMergedLocation(SI.getDebugLoc(),
                                                     OtherStore->getDebugLoc());
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
    PN->setDebugLoc(MergedLoc);
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI =
      new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),
                    SI.getOrdering(), SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(MergedLoc);

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}