//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// This transformation implements the well known scalar replacement of
/// aggregates transformation. It tries to identify promotable elements of an
/// aggregate alloca, and promote them to registers. It will also try to
/// convert uses of an element (or set of elements) of an alloca into a vector
/// or bitfield-style integer scalar if appropriate.
///
/// It works to do this with minimal slicing of the alloca so that regions
/// which are merely transferred in and out of external memory remain unchanged
/// and are not decomposed to scalar code.
///
/// Because this also performs alloca promotion, it can be thought of as also
/// serving the purpose of SSA formation. The algorithm iterates on the
/// function until all opportunities for promotion have been realized.
///
//===----------------------------------------------------------------------===//
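// For illustration only (a sketch, not code from this pass): SROA aims to turn
//   %agg = alloca { i32, i32 }
//   %f0  = getelementptr inbounds { i32, i32 }, { i32, i32 }* %agg, i32 0, i32 0
//   store i32 %x, i32* %f0
//   %v   = load i32, i32* %f0
// into a direct SSA use of %x, deleting the alloca once every slice promotes.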
#include "llvm/Transforms/Scalar/SROA.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFolder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <chrono>
#include <tuple>
#include <utility>

#ifndef NDEBUG
// We only use this for a debug check.
#include <random>
#endif
using namespace llvm;
using namespace llvm::sroa;

#define DEBUG_TYPE "sroa"

STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
STATISTIC(NumDeleted, "Number of instructions deleted");
STATISTIC(NumVectorized, "Number of vectorized aggregates");

/// Hidden option to enable randomly shuffling the slices to help uncover
/// instability in their order.
static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
                                             cl::init(false), cl::Hidden);
/// Hidden option to experiment with completely strict handling of inbounds
/// GEPs.
static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false),
                                        cl::Hidden);
namespace {

/// A custom IRBuilder inserter which prefixes all names, but only in
/// Assert builds.
class IRBuilderPrefixedInserter : public IRBuilderDefaultInserter {
  std::string Prefix;

  const Twine getNameWithPrefix(const Twine &Name) const {
    return Name.isTriviallyEmpty() ? Name : Prefix + Name;
  }

public:
  void SetNamePrefix(const Twine &P) { Prefix = P.str(); }

protected:
  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const {
    IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB,
                                           InsertPt);
  }
};

/// Provide a type for IRBuilder that drops names in release builds.
using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>;
/// A used slice of an alloca.
///
/// This structure represents a slice of an alloca used by some instruction. It
/// stores both the begin and end offsets of this use, a pointer to the use
/// itself, and a flag indicating whether we can classify the use as splittable
/// or not when forming partitions of the alloca.
class Slice {
  /// The beginning offset of the range.
  uint64_t BeginOffset = 0;

  /// The ending offset, not included in the range.
  uint64_t EndOffset = 0;

  /// Storage for both the use of this slice and whether it can be
  /// split.
  PointerIntPair<Use *, 1, bool> UseAndIsSplittable;

public:
  Slice() = default;

  Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
      : BeginOffset(BeginOffset), EndOffset(EndOffset),
        UseAndIsSplittable(U, IsSplittable) {}

  uint64_t beginOffset() const { return BeginOffset; }
  uint64_t endOffset() const { return EndOffset; }

  bool isSplittable() const { return UseAndIsSplittable.getInt(); }
  void makeUnsplittable() { UseAndIsSplittable.setInt(false); }

  Use *getUse() const { return UseAndIsSplittable.getPointer(); }

  bool isDead() const { return getUse() == nullptr; }
  void kill() { UseAndIsSplittable.setPointer(nullptr); }

  /// Support for ordering ranges.
  ///
  /// This provides an ordering over ranges such that start offsets are
  /// always increasing, and within equal start offsets, the end offsets are
  /// decreasing. Thus the spanning range comes first in a cluster with the
  /// same start position.
  bool operator<(const Slice &RHS) const {
    if (beginOffset() < RHS.beginOffset())
      return true;
    if (beginOffset() > RHS.beginOffset())
      return false;
    if (isSplittable() != RHS.isSplittable())
      return !isSplittable();
    if (endOffset() > RHS.endOffset())
      return true;
    return false;
  }

  /// Support comparison with a single offset to allow binary searches.
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
                                              uint64_t RHSOffset) {
    return LHS.beginOffset() < RHSOffset;
  }
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
                                              const Slice &RHS) {
    return LHSOffset < RHS.beginOffset();
  }

  bool operator==(const Slice &RHS) const {
    return isSplittable() == RHS.isSplittable() &&
           beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
  }
  bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
};

} // end anonymous namespace
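// Illustrative sketch (not from the original source) of the ordering defined
// above: the slices [0,8) splittable, [0,16) unsplittable, [0,4) unsplittable,
// and [4,12) splittable sort as
//   [0,16) unsplittable, [0,4) unsplittable, [0,8) splittable, [4,12) splittable
// i.e. by begin offset, unsplittable before splittable at the same begin, and
// longer (spanning) slices before shorter ones within each group.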
/// Representation of the alloca slices.
///
/// This class represents the slices of an alloca which are formed by its
/// various uses. If a pointer escapes, we can't fully build a representation
/// for the slices used and we reflect that in this structure. The uses are
/// stored, sorted by increasing beginning offset and with unsplittable slices
/// starting at a particular offset before splittable slices.
class llvm::sroa::AllocaSlices {
public:
  /// Construct the slices of a particular alloca.
  AllocaSlices(const DataLayout &DL, AllocaInst &AI);

  /// Test whether a pointer to the allocation escapes our analysis.
  ///
  /// If this is true, the slices are never fully built and should be
  /// ignored.
  bool isEscaped() const { return PointerEscapingInstr; }

  /// Support for iterating over the slices.
  using iterator = SmallVectorImpl<Slice>::iterator;
  using range = iterator_range<iterator>;

  iterator begin() { return Slices.begin(); }
  iterator end() { return Slices.end(); }

  using const_iterator = SmallVectorImpl<Slice>::const_iterator;
  using const_range = iterator_range<const_iterator>;

  const_iterator begin() const { return Slices.begin(); }
  const_iterator end() const { return Slices.end(); }

  /// Erase a range of slices.
  void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); }

  /// Insert new slices for this alloca.
  ///
  /// This moves the slices into the alloca's slices collection, and re-sorts
  /// everything so that the usual ordering properties of the alloca's slices
  /// hold.
  void insert(ArrayRef<Slice> NewSlices) {
    int OldSize = Slices.size();
    Slices.append(NewSlices.begin(), NewSlices.end());
    auto SliceI = Slices.begin() + OldSize;
    llvm::sort(SliceI, Slices.end());
    std::inplace_merge(Slices.begin(), SliceI, Slices.end());
  }

  // Forward declare the iterator and range accessor for walking the
  // partitions.
  class partition_iterator;
  iterator_range<partition_iterator> partitions();

  /// Access the dead users for this alloca.
  ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }

  /// Access the dead operands referring to this alloca.
  ///
  /// These are operands which cannot actually be used to refer to the
  /// alloca as they are outside its range and the user doesn't correct for
  /// that. These mostly consist of PHI node inputs and the like which we just
  /// need to replace with undef.
  ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
  void printSlice(raw_ostream &OS, const_iterator I,
                  StringRef Indent = "  ") const;
  void printUse(raw_ostream &OS, const_iterator I,
                StringRef Indent = "  ") const;
  void print(raw_ostream &OS) const;
  void dump(const_iterator I) const;
  void dump() const;
#endif

private:
  template <typename DerivedT, typename RetT = void> class BuilderBase;
  class SliceBuilder;

  friend class AllocaSlices::SliceBuilder;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Handle to alloca instruction to simplify method interfaces.
  AllocaInst &AI;
#endif

  /// The instruction responsible for this alloca not having a known set
  /// of slices.
  ///
  /// When an instruction (potentially) escapes the pointer to the alloca, we
  /// store a pointer to that here and abort trying to form slices of the
  /// alloca. This will be null if the alloca slices are analyzed successfully.
  Instruction *PointerEscapingInstr;

  /// The slices of the alloca.
  ///
  /// We store a vector of the slices formed by uses of the alloca here. This
  /// vector is sorted by increasing begin offset, and then the unsplittable
  /// slices before the splittable ones. See the Slice inner class for more
  /// details.
  SmallVector<Slice, 8> Slices;

  /// Instructions which will become dead if we rewrite the alloca.
  ///
  /// Note that these are not separated by slice. This is because we expect an
  /// alloca to be completely rewritten or not rewritten at all. If rewritten,
  /// all these instructions can simply be removed and replaced with undef as
  /// they come from outside of the allocated space.
  SmallVector<Instruction *, 8> DeadUsers;

  /// Operands which will become dead if we rewrite the alloca.
  ///
  /// These are operands that in their particular use can be replaced with
  /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
  /// to PHI nodes and the like. They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere) and nor is the PHI, but we
  /// want to swap this particular input for undef to simplify the use lists of
  /// the alloca.
  SmallVector<Use *, 8> DeadOperands;
};
/// A partition of the slices.
///
/// An ephemeral representation for a range of slices which can be viewed as
/// a partition of the alloca. This range represents a span of the alloca's
/// memory which cannot be split, and provides access to all of the slices
/// overlapping some part of the partition.
///
/// Objects of this type are produced by traversing the alloca's slices, but
/// are only ephemeral and not persistent.
class llvm::sroa::Partition {
private:
  friend class AllocaSlices;
  friend class AllocaSlices::partition_iterator;

  using iterator = AllocaSlices::iterator;

  /// The beginning and ending offsets of the alloca for this
  /// partition.
  uint64_t BeginOffset, EndOffset;

  /// The start and end iterators of this partition.
  iterator SI, SJ;

  /// A collection of split slice tails overlapping the partition.
  SmallVector<Slice *, 4> SplitTails;

  /// Raw constructor builds an empty partition starting and ending at
  /// the given iterator.
  Partition(iterator SI) : SI(SI), SJ(SI) {}

public:
  /// The start offset of this partition.
  ///
  /// All of the contained slices start at or after this offset.
  uint64_t beginOffset() const { return BeginOffset; }

  /// The end offset of this partition.
  ///
  /// All of the contained slices end at or before this offset.
  uint64_t endOffset() const { return EndOffset; }

  /// The size of the partition.
  ///
  /// Note that this can never be zero.
  uint64_t size() const {
    assert(BeginOffset < EndOffset && "Partitions must span some bytes!");
    return EndOffset - BeginOffset;
  }

  /// Test whether this partition contains no slices, and merely spans
  /// a region occupied by split slices.
  bool empty() const { return SI == SJ; }

  /// \name Iterate slices that start within the partition.
  /// These may be splittable or unsplittable. They have a begin offset >= the
  /// partition begin offset.
  /// @{
  // FIXME: We should probably define a "concat_iterator" helper and use that
  // to stitch together pointee_iterators over the split tails and the
  // contiguous iterators of the partition. That would give a much nicer
  // interface here. We could then additionally expose filtered iterators for
  // split, unsplit, and unsplittable splices based on the usage patterns.
  iterator begin() const { return SI; }
  iterator end() const { return SJ; }
  /// @}

  /// Get the sequence of split slice tails.
  ///
  /// These tails are of slices which start before this partition but are
  /// split and overlap into the partition. We accumulate these while forming
  /// partitions.
  ArrayRef<Slice *> splitSliceTails() const { return SplitTails; }
};
/// An iterator over partitions of the alloca's slices.
///
/// This iterator implements the core algorithm for partitioning the alloca's
/// slices. It is a forward iterator as we don't support backtracking for
/// efficiency reasons, and re-use a single storage area to maintain the
/// current set of split slices.
///
/// It is templated on the slice iterator type to use so that it can operate
/// with either const or non-const slice iterators.
class AllocaSlices::partition_iterator
    : public iterator_facade_base<partition_iterator, std::forward_iterator_tag,
                                  Partition> {
  friend class AllocaSlices;

  /// Most of the state for walking the partitions is held in a class
  /// with a nice interface for examining them.
  Partition P;

  /// We need to keep the end of the slices to know when to stop.
  AllocaSlices::iterator SE;

  /// We also need to keep track of the maximum split end offset seen.
  /// FIXME: Do we really?
  uint64_t MaxSplitSliceEndOffset = 0;
  /// Sets the partition to be empty at given iterator, and sets the
  /// end iterator.
  partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE)
      : P(SI), SE(SE) {
    // If not already at the end, advance our state to form the initial
    // partition.
    if (SI != SE)
      advance();
  }

  /// Advance the iterator to the next partition.
  ///
  /// Requires that the iterator not be at the end of the slices.
  void advance() {
    assert((P.SI != SE || !P.SplitTails.empty()) &&
           "Cannot advance past the end of the slices!");
    // Clear out any split uses which have ended.
    if (!P.SplitTails.empty()) {
      if (P.EndOffset >= MaxSplitSliceEndOffset) {
        // If we've finished all splits, this is easy.
        P.SplitTails.clear();
        MaxSplitSliceEndOffset = 0;
      } else {
        // Remove the uses which have ended in the prior partition. This
        // cannot change the max split slice end because we just checked that
        // the prior partition ended prior to that max.
        P.SplitTails.erase(llvm::remove_if(P.SplitTails,
                                           [&](Slice *S) {
                                             return S->endOffset() <=
                                                    P.EndOffset;
                                           }),
                           P.SplitTails.end());
        assert(llvm::any_of(P.SplitTails,
                            [&](Slice *S) {
                              return S->endOffset() == MaxSplitSliceEndOffset;
                            }) &&
               "Could not find the current max split slice offset!");
        assert(llvm::all_of(P.SplitTails,
                            [&](Slice *S) {
                              return S->endOffset() <= MaxSplitSliceEndOffset;
                            }) &&
               "Max split slice end offset is not actually the max!");
      }
    }

    // If P.SI is already at the end, then we've cleared the split tail and
    // now have an end iterator.
    if (P.SI == SE) {
      assert(P.SplitTails.empty() && "Failed to clear the split slices!");
      return;
    }
    // If we had a non-empty partition previously, set up the state for
    // subsequent partitions.
    if (P.SI != P.SJ) {
      // Accumulate all the splittable slices which started in the old
      // partition into the split list.
      for (Slice &S : P)
        if (S.isSplittable() && S.endOffset() > P.EndOffset) {
          P.SplitTails.push_back(&S);
          MaxSplitSliceEndOffset =
              std::max(S.endOffset(), MaxSplitSliceEndOffset);
        }

      // Start from the end of the previous partition.
      P.SI = P.SJ;

      // If P.SI is now at the end, we at most have a tail of split slices.
      if (P.SI == SE) {
        P.BeginOffset = P.EndOffset;
        P.EndOffset = MaxSplitSliceEndOffset;
        return;
      }

      // If we have split slices and the next slice is after a gap and is
      // not splittable immediately form an empty partition for the split
      // slices up until the next slice begins.
      if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset &&
          !P.SI->isSplittable()) {
        P.BeginOffset = P.EndOffset;
        P.EndOffset = P.SI->beginOffset();
        return;
      }
    }
    // OK, we need to consume new slices. Set the end offset based on the
    // current slice, and step SJ past it. The beginning offset of the
    // partition is the beginning offset of the next slice unless we have
    // pre-existing split slices that are continuing, in which case we begin
    // at the prior end offset.
    P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset;
    P.EndOffset = P.SI->endOffset();
    ++P.SJ;

    // There are two strategies to form a partition based on whether the
    // partition starts with an unsplittable slice or a splittable slice.
    if (!P.SI->isSplittable()) {
      // When we're forming an unsplittable region, it must always start at
      // the first slice and will extend through its end.
      assert(P.BeginOffset == P.SI->beginOffset());

      // Form a partition including all of the overlapping slices with this
      // unsplittable slice.
      while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
        if (!P.SJ->isSplittable())
          P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
        ++P.SJ;
      }

      // We have a partition across a set of overlapping unsplittable
      // slices.
      return;
    }

    // If we're starting with a splittable slice, then we need to form
    // a synthetic partition spanning it and any other overlapping splittable
    // slices.
    assert(P.SI->isSplittable() && "Forming a splittable partition!");

    // Collect all of the overlapping splittable slices.
    while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset &&
           P.SJ->isSplittable()) {
      P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
      ++P.SJ;
    }

    // Back up P.EndOffset if we ended the span early when encountering an
    // unsplittable slice. This synthesizes the early end offset of
    // a partition spanning only splittable slices.
    if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
      assert(!P.SJ->isSplittable());
      P.EndOffset = P.SJ->beginOffset();
    }
  }
public:
  bool operator==(const partition_iterator &RHS) const {
    assert(SE == RHS.SE &&
           "End iterators don't match between compared partition iterators!");

    // The observed positions of partitions is marked by the P.SI iterator and
    // the emptiness of the split slices. The latter is only relevant when
    // P.SI == SE, as the end iterator will additionally have an empty split
    // slices list, but the prior may have the same P.SI and a tail of split
    // slices.
    if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) {
      assert(P.SJ == RHS.P.SJ &&
             "Same set of slices formed two different sized partitions!");
      assert(P.SplitTails.size() == RHS.P.SplitTails.size() &&
             "Same slice position with differently sized non-empty split "
             "slice tails!");
      return true;
    }
    return false;
  }

  partition_iterator &operator++() {
    advance();
    return *this;
  }

  Partition &operator*() { return P; }
};
/// A forward range over the partitions of the alloca's slices.
///
/// This accesses an iterator range over the partitions of the alloca's
/// slices. It computes these partitions on the fly based on the overlapping
/// offsets of the slices and the ability to split them. It will visit "empty"
/// partitions to cover regions of the alloca only accessed via split
/// slices.
iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() {
  return make_range(partition_iterator(begin(), end()),
                    partition_iterator(end(), end()));
}
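// Illustrative sketch (not from the original source): given slices sorted as
//   [0,8) unsplittable, [0,16) splittable, [12,16) unsplittable
// the iterator above yields three partitions: [0,8), then an "empty" [8,12)
// partition covered only by the split tail of [0,16), and finally [12,16).
// The overlapping tail of [0,16) is reported through splitSliceTails() rather
// than begin()/end() for the later partitions it spans.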
static Value *foldSelectInst(SelectInst &SI) {
  // If the condition being selected on is a constant or the same value is
  // being selected between, fold the select.  Yes this does (rarely) happen.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
    return SI.getOperand(1 + CI->isZero());
  if (SI.getOperand(1) == SI.getOperand(2))
    return SI.getOperand(1);

  return nullptr;
}
/// A helper that folds a PHI node or a select.
static Value *foldPHINodeOrSelectInst(Instruction &I) {
  if (PHINode *PN = dyn_cast<PHINode>(&I)) {
    // If PN merges together the same value, return that value.
    return PN->hasConstantValue();
  }
  return foldSelectInst(cast<SelectInst>(I));
}
/// Builder for the alloca slices.
///
/// This class builds a set of alloca slices by recursively visiting the uses
/// of an alloca and making a slice for each load and store at each offset.
class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
  friend class PtrUseVisitor<SliceBuilder>;
  friend class InstVisitor<SliceBuilder>;

  using Base = PtrUseVisitor<SliceBuilder>;

  const uint64_t AllocSize;
  AllocaSlices &AS;

  SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
  SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;

  /// Set to de-duplicate dead instructions found in the use walk.
  SmallPtrSet<Instruction *, 4> VisitedDeadInsts;

public:
  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS)
      : PtrUseVisitor<SliceBuilder>(DL),
        AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), AS(AS) {}

private:
  void markAsDead(Instruction &I) {
    if (VisitedDeadInsts.insert(&I).second)
      AS.DeadUsers.push_back(&I);
  }
  void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
                 bool IsSplittable = false) {
    // Completely skip uses which have a zero size or start either before or
    // past the end of the allocation.
    if (Size == 0 || Offset.uge(AllocSize)) {
      LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @"
                        << Offset
                        << " which has zero size or starts outside of the "
                        << AllocSize << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << I << "\n");
      return markAsDead(I);
    }

    uint64_t BeginOffset = Offset.getZExtValue();
    uint64_t EndOffset = BeginOffset + Size;

    // Clamp the end offset to the end of the allocation. Note that this is
    // formulated to handle even the case where "BeginOffset + Size" overflows.
    // This may appear superficially to be something we could ignore entirely,
    // but that is not so! There may be widened loads or PHI-node uses where
    // some instructions are dead but not others. We can't completely ignore
    // them, and so have to record at least the information here.
    assert(AllocSize >= BeginOffset); // Established above.
    if (Size > AllocSize - BeginOffset) {
      LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @"
                        << Offset << " to remain within the " << AllocSize
                        << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << I << "\n");
      EndOffset = AllocSize;
    }

    AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
  }
  void visitBitCastInst(BitCastInst &BC) {
    if (BC.use_empty())
      return markAsDead(BC);

    return Base::visitBitCastInst(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    if (ASC.use_empty())
      return markAsDead(ASC);

    return Base::visitAddrSpaceCastInst(ASC);
  }
  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    if (GEPI.use_empty())
      return markAsDead(GEPI);

    if (SROAStrictInbounds && GEPI.isInBounds()) {
      // FIXME: This is a manually un-factored variant of the basic code inside
      // of GEPs with checking of the inbounds invariant specified in the
      // langref in a very strict sense. If we ever want to enable
      // SROAStrictInbounds, this code should be factored cleanly into
      // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
      // by writing out the code here where we have the underlying allocation
      // size readily available.
      APInt GEPOffset = Offset;
      const DataLayout &DL = GEPI.getModule()->getDataLayout();
      for (gep_type_iterator GTI = gep_type_begin(GEPI),
                             GTE = gep_type_end(GEPI);
           GTI != GTE; ++GTI) {
        ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
        if (!OpC)
          break;

        // Handle a struct index, which adds its field offset to the pointer.
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          unsigned ElementIdx = OpC->getZExtValue();
          const StructLayout *SL = DL.getStructLayout(STy);
          GEPOffset +=
              APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
        } else {
          // For array or vector indices, scale the index by the size of the
          // types.
          APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
          GEPOffset += Index * APInt(Offset.getBitWidth(),
                                     DL.getTypeAllocSize(GTI.getIndexedType()));
        }

        // If this index has computed an intermediate pointer which is not
        // inbounds, then the result of the GEP is a poison value and we can
        // delete it and all uses.
        if (GEPOffset.ugt(AllocSize))
          return markAsDead(GEPI);
      }
    }

    return Base::visitGetElementPtrInst(GEPI);
  }
  void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
                         uint64_t Size, bool IsVolatile) {
    // We allow splitting of non-volatile loads and stores where the type is an
    // integer type. These may be used to implement 'memcpy' or other "transfer
    // of bits" patterns.
    bool IsSplittable = Ty->isIntegerTy() && !IsVolatile;

    insertUse(I, Offset, Size, IsSplittable);
  }

  void visitLoadInst(LoadInst &LI) {
    assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
           "All simple FCA loads should have been pre-split");

    if (!IsOffsetKnown)
      return PI.setAborted(&LI);

    if (LI.isVolatile() &&
        LI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
      return PI.setAborted(&LI);

    uint64_t Size = DL.getTypeStoreSize(LI.getType());
    return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
  }
  void visitStoreInst(StoreInst &SI) {
    Value *ValOp = SI.getValueOperand();
    if (ValOp == *U)
      return PI.setEscapedAndAborted(&SI);
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    if (SI.isVolatile() &&
        SI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
      return PI.setAborted(&SI);

    uint64_t Size = DL.getTypeStoreSize(ValOp->getType());

    // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so simply
    // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse. We also try to handle cases which might run the
    // risk of overflow.
    // FIXME: We should instead consider the pointer to have escaped if this
    // function is being instrumented for addressing bugs or race conditions.
    if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
      LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @"
                        << Offset << " which extends past the end of the "
                        << AllocSize << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << SI << "\n");
      return markAsDead(SI);
    }

    assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
           "All simple FCA stores should have been pre-split");
    handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
  }
  void visitMemSetInst(MemSetInst &II) {
    assert(II.getRawDest() == *U && "Pointer use is not the destination?");
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if ((Length && Length->getValue() == 0) ||
        (IsOffsetKnown && Offset.uge(AllocSize)))
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    // Don't replace this with a store with a different address space. TODO:
    // Use a store with the casted new alloca?
    if (II.isVolatile() && II.getDestAddressSpace() != DL.getAllocaAddrSpace())
      return PI.setAborted(&II);

    insertUse(II, Offset, Length ? Length->getLimitedValue()
                                 : AllocSize - Offset.getLimitedValue(),
              (bool)Length);
  }
  void visitMemTransferInst(MemTransferInst &II) {
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if (Length && Length->getValue() == 0)
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    // Because we can visit these intrinsics twice, also check to see if the
    // first time marked this instruction as dead. If so, skip it.
    if (VisitedDeadInsts.count(&II))
      return;

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    // Don't replace this with a load/store with a different address space.
    // TODO: Use a store with the casted new alloca?
    if (II.isVolatile() &&
        (II.getDestAddressSpace() != DL.getAllocaAddrSpace() ||
         II.getSourceAddressSpace() != DL.getAllocaAddrSpace()))
      return PI.setAborted(&II);

    // This side of the transfer is completely out-of-bounds, and so we can
    // nuke the entire transfer. However, we also need to nuke the other side
    // if already added to our partitions.
    // FIXME: Yet another place we really should bypass this when
    // instrumenting for ASan.
    if (Offset.uge(AllocSize)) {
      SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
          MemTransferSliceMap.find(&II);
      if (MTPI != MemTransferSliceMap.end())
        AS.Slices[MTPI->second].kill();
      return markAsDead(II);
    }

    uint64_t RawOffset = Offset.getLimitedValue();
    uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset;

    // Check for the special case where the same exact value is used for both
    // source and dest.
    if (*U == II.getRawDest() && *U == II.getRawSource()) {
      // For non-volatile transfers this is a no-op.
      if (!II.isVolatile())
        return markAsDead(II);

      return insertUse(II, Offset, Size, /*IsSplittable=*/false);
    }

    // If we have seen both source and destination for a mem transfer, then
    // they both point to the same alloca.
    bool Inserted;
    SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
    std::tie(MTPI, Inserted) =
        MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size()));
    unsigned PrevIdx = MTPI->second;
    if (!Inserted) {
      Slice &PrevP = AS.Slices[PrevIdx];

      // Check if the begin offsets match and this is a non-volatile transfer.
      // In that case, we can completely elide the transfer.
      if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
        PrevP.kill();
        return markAsDead(II);
      }

      // Otherwise we have an offset transfer within the same alloca. We can't
      // split those.
      PrevP.makeUnsplittable();
    }

    // Insert the use now that we've fixed up the splittable nature.
    insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);

    // Check that we ended up with a valid index in the map.
    assert(AS.Slices[PrevIdx].getUse()->getUser() == &II &&
           "Map index doesn't point back to a slice with this user.");
  }
  // Disable SRoA for any intrinsics except for lifetime invariants.
  // FIXME: What about debug intrinsics? This matches old behavior, but
  // doesn't make sense.
  void visitIntrinsicInst(IntrinsicInst &II) {
    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    if (II.isLifetimeStartOrEnd()) {
      ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
      uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
                               Length->getLimitedValue());
      insertUse(II, Offset, Size, true);
      return;
    }

    Base::visitIntrinsicInst(II);
  }
  Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
    // We consider any PHI or select that results in a direct load or store of
    // the same offset to be a viable use for slicing purposes. These uses
    // are considered unsplittable and the size is the maximum loaded or stored
    // size.
    SmallPtrSet<Instruction *, 4> Visited;
    SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
    Visited.insert(Root);
    Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
    const DataLayout &DL = Root->getModule()->getDataLayout();
    // If there are no loads or stores, the access is dead. We mark that as
    // a size zero access.
    Size = 0;
    do {
      Instruction *I, *UsedI;
      std::tie(UsedI, I) = Uses.pop_back_val();

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        Size = std::max(Size,
                        DL.getTypeStoreSize(LI->getType()).getFixedSize());
        continue;
      }
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        Value *Op = SI->getOperand(0);
        if (Op == UsedI)
          return SI;
        Size = std::max(Size,
                        DL.getTypeStoreSize(Op->getType()).getFixedSize());
        continue;
      }

      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        if (!GEP->hasAllZeroIndices())
          return GEP;
      } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
                 !isa<SelectInst>(I) && !isa<AddrSpaceCastInst>(I)) {
        return I;
      }

      for (User *U : I->users())
        if (Visited.insert(cast<Instruction>(U)).second)
          Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
    } while (!Uses.empty());

    return nullptr;
  }
  void visitPHINodeOrSelectInst(Instruction &I) {
    assert(isa<PHINode>(I) || isa<SelectInst>(I));
    if (I.use_empty())
      return markAsDead(I);

    // TODO: We could use SimplifyInstruction here to fold PHINodes and
    // SelectInsts. However, doing so requires changing the current
    // dead-operand-tracking mechanism. For instance, suppose neither loading
    // from %U nor %other traps. Then "load (select undef, %U, %other)" does not
    // trap either. However, if we simply replace %U with undef using the
    // current dead-operand-tracking mechanism, "load (select undef, undef,
    // %other)" may trap because the select may return the first operand
    // "undef".
    if (Value *Result = foldPHINodeOrSelectInst(I)) {
      if (Result == *U)
        // If the result of the constant fold will be the pointer, recurse
        // through the PHI/select as if we had RAUW'ed it.
        enqueueUsers(I);
      else
        // Otherwise the operand to the PHI/select is dead, and we can replace
        // it with undef.
        AS.DeadOperands.push_back(U);

      return;
    }

    if (!IsOffsetKnown)
      return PI.setAborted(&I);

    // See if we already have computed info on this node.
    uint64_t &Size = PHIOrSelectSizes[&I];
    if (!Size) {
      // This is a new PHI/Select, check for an unsafe use of it.
      if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size))
        return PI.setAborted(UnsafeI);
    }

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with undef.
    // FIXME: This should instead be escaped in the event we're instrumenting
    // for address sanitization.
    if (Offset.uge(AllocSize)) {
      AS.DeadOperands.push_back(U);
      return;
    }

    insertUse(I, Offset, Size);
  }

  void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); }

  void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); }

  /// Disable SROA entirely if there are unhandled users of the alloca.
  void visitInstruction(Instruction &I) { PI.setAborted(&I); }
};
AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
    :
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
      AI(AI),
#endif
      PointerEscapingInstr(nullptr) {
  SliceBuilder PB(DL, AI, *this);
  SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
  if (PtrI.isEscaped() || PtrI.isAborted()) {
    // FIXME: We should sink the escape vs. abort info into the caller nicely,
    // possibly by just storing the PtrInfo in the AllocaSlices.
    PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
                                                  : PtrI.getAbortingInst();
    assert(PointerEscapingInstr && "Did not track a bad instruction");
    return;
  }

  Slices.erase(
      llvm::remove_if(Slices, [](const Slice &S) { return S.isDead(); }),
      Slices.end());

#ifndef NDEBUG
  if (SROARandomShuffleSlices) {
    std::mt19937 MT(static_cast<unsigned>(
        std::chrono::system_clock::now().time_since_epoch().count()));
    std::shuffle(Slices.begin(), Slices.end(), MT);
  }
#endif

  // Sort the uses. This arranges for the offsets to be in ascending order,
  // and the sizes to be in descending order.
  llvm::sort(Slices);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

void AllocaSlices::print(raw_ostream &OS, const_iterator I,
                         StringRef Indent) const {
  printSlice(OS, I, Indent);
  OS << "\n";
  printUse(OS, I, Indent);
}

void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
                              StringRef Indent) const {
  OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
     << " slice #" << (I - begin())
     << (I->isSplittable() ? " (splittable)" : "");
}

void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
                            StringRef Indent) const {
  OS << Indent << "  used by: " << *I->getUse()->getUser() << "\n";
}

void AllocaSlices::print(raw_ostream &OS) const {
  if (PointerEscapingInstr) {
    OS << "Can't analyze slices for alloca: " << AI << "\n"
       << "  A pointer to this alloca escaped by:\n"
       << "  " << *PointerEscapingInstr << "\n";
    return;
  }

  OS << "Slices of alloca: " << AI << "\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I)
    print(OS, I);
}

LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
  print(dbgs(), I);
}

LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }

#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Walk the range of a partitioning looking for a common type to cover this
/// sequence of slices.
static Type *findCommonType(AllocaSlices::const_iterator B,
                            AllocaSlices::const_iterator E,
                            uint64_t EndOffset) {
  Type *Ty = nullptr;
  bool TyIsCommon = true;
  IntegerType *ITy = nullptr;

  // Note that we need to look at *every* alloca slice's Use to ensure we
  // always get consistent results regardless of the order of slices.
  for (AllocaSlices::const_iterator I = B; I != E; ++I) {
    Use *U = I->getUse();
    if (isa<IntrinsicInst>(*U->getUser()))
      continue;
    if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
      continue;

    Type *UserTy = nullptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
      UserTy = LI->getType();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
      UserTy = SI->getValueOperand()->getType();
    }

    if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
      // If the type is larger than the partition, skip it. We only encounter
      // this for split integer operations where we want to use the type of the
      // entity causing the split. Also skip if the type is not a byte width
      // multiple.
      if (UserITy->getBitWidth() % 8 != 0 ||
          UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
        continue;

      // Track the largest bitwidth integer type used in this way in case there
      // is no common type.
      if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
        ITy = UserITy;
    }

    // To avoid depending on the order of slices, Ty and TyIsCommon must not
    // depend on types skipped above.
    if (!UserTy || (Ty && Ty != UserTy))
      TyIsCommon = false; // Give up on anything but an iN type.
    else
      Ty = UserTy;
  }

  return TyIsCommon ? Ty : ITy;
}
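// Illustrative sketch (assumed types, not from this file): for an 8-byte
// partition whose users load it both as i64 and as double, no common type
// exists, so TyIsCommon becomes false and i64 is returned as the widest
// byte-width integer fallback. If every covering user agreed on double, that
// common type would be returned instead.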
/// PHI instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers in the pred blocks and then PHI the
/// results, allowing the load of the alloca to be promoted.
///   %P2 = phi [i32* %Alloca, i32* %Other]
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   %V2 = load i32* %Other
///   %V = phi [i32 %V1, i32 %V2]
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
///
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Util/Local.h
static bool isSafePHIToSpeculate(PHINode &PN) {
  const DataLayout &DL = PN.getModule()->getDataLayout();

  // For now, we can only do this promotion if the load is in the same block
  // as the PHI, and if there are no stores between the phi and load.
  // TODO: Allow recursive phi users.
  // TODO: Allow stores.
  BasicBlock *BB = PN.getParent();
  MaybeAlign MaxAlign;
  uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType());
  APInt MaxSize(APWidth, 0);
  bool HaveLoad = false;
  for (User *U : PN.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // For now we only allow loads in the same block as the PHI. This is
    // a common case that happens when instcombine merges two loads through
    // a PHI.
    if (LI->getParent() != BB)
      return false;

    // Ensure that there are no instructions between the PHI and the load that
    // could store.
    for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
      if (BBI->mayWriteToMemory())
        return false;

    uint64_t Size = DL.getTypeStoreSize(LI->getType());
    MaxAlign = std::max(MaxAlign, MaybeAlign(LI->getAlignment()));
    MaxSize = MaxSize.ult(Size) ? APInt(APWidth, Size) : MaxSize;
    HaveLoad = true;
  }

  if (!HaveLoad)
    return false;

  // We can only transform this if it is safe to push the loads into the
  // predecessor blocks. The only thing to watch out for is that we can't put
  // a possibly trapping load in the predecessor if it is a critical edge.
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    Instruction *TI = PN.getIncomingBlock(Idx)->getTerminator();
    Value *InVal = PN.getIncomingValue(Idx);

    // If the value is produced by the terminator of the predecessor (an
    // invoke) or it has side-effects, there is no valid place to put a load
    // in the predecessor.
    if (TI == InVal || TI->mayHaveSideEffects())
      return false;

    // If the predecessor has a single successor, then the edge isn't
    // critical.
    if (TI->getNumSuccessors() == 1)
      continue;

    // If this pointer is always safe to load, or if we can prove that there
    // is already a load in the block, then we can move the load to the pred
    // block.
    if (isSafeToLoadUnconditionally(InVal, MaxAlign, MaxSize, DL, TI))
      continue;

    return false;
  }

  return true;
}
static void speculatePHINodeLoads(PHINode &PN) {
  LLVM_DEBUG(dbgs() << "    original: " << PN << "\n");

  LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
  Type *LoadTy = SomeLoad->getType();
  IRBuilderTy PHIBuilder(&PN);
  PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
                                        PN.getName() + ".sroa.speculated");

  // Get the AA tags and alignment to use from one of the loads. It does not
  // matter which one we get and if any differ.
  AAMDNodes AATags;
  SomeLoad->getAAMetadata(AATags);
  const MaybeAlign Align = MaybeAlign(SomeLoad->getAlignment());

  // Rewrite all loads of the PN to use the new PHI.
  while (!PN.use_empty()) {
    LoadInst *LI = cast<LoadInst>(PN.user_back());
    LI->replaceAllUsesWith(NewPN);
    LI->eraseFromParent();
  }

  // Inject loads into all of the pred blocks.
  DenseMap<BasicBlock *, Value *> InjectedLoads;
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    BasicBlock *Pred = PN.getIncomingBlock(Idx);
    Value *InVal = PN.getIncomingValue(Idx);

    // A PHI node is allowed to have multiple (duplicated) entries for the same
    // basic block, as long as the value is the same. So if we already injected
    // a load in the predecessor, then we should reuse the same load for all
    // duplicated entries.
    if (Value *V = InjectedLoads.lookup(Pred)) {
      NewPN->addIncoming(V, Pred);
      continue;
    }

    Instruction *TI = Pred->getTerminator();
    IRBuilderTy PredBuilder(TI);

    LoadInst *Load = PredBuilder.CreateLoad(
        LoadTy, InVal,
        (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
    ++NumLoadsSpeculated;
    Load->setAlignment(Align);
    if (AATags)
      Load->setAAMetadata(AATags);
    NewPN->addIncoming(Load, Pred);
    InjectedLoads[Pred] = Load;
  }

  LLVM_DEBUG(dbgs() << "          speculated to: " << *NewPN << "\n");
  PN.eraseFromParent();
}
/// Select instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers and then select between the result,
/// allowing the load of the alloca to be promoted.
///   %P2 = select i1 %cond, i32* %Alloca, i32* %Other
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   %V2 = load i32* %Other
///   %V = select i1 %cond, i32 %V1, i32 %V2
///
/// We can do this to a select if its only uses are loads and if the operand
/// to the select can be loaded unconditionally.
static bool isSafeSelectToSpeculate(SelectInst &SI) {
  Value *TValue = SI.getTrueValue();
  Value *FValue = SI.getFalseValue();
  const DataLayout &DL = SI.getModule()->getDataLayout();

  for (User *U : SI.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // Both operands to the select need to be dereferenceable, either
    // absolutely (e.g. allocas) or at this point because we can see other
    // accesses to it.
    if (!isSafeToLoadUnconditionally(TValue, LI->getType(),
                                     MaybeAlign(LI->getAlignment()), DL, LI))
      return false;
    if (!isSafeToLoadUnconditionally(FValue, LI->getType(),
                                     MaybeAlign(LI->getAlignment()), DL, LI))
      return false;
  }

  return true;
}
static void speculateSelectInstLoads(SelectInst &SI) {
  LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");

  IRBuilderTy IRB(&SI);
  Value *TV = SI.getTrueValue();
  Value *FV = SI.getFalseValue();
  // Replace the loads of the select with a select of two loads.
  while (!SI.use_empty()) {
    LoadInst *LI = cast<LoadInst>(SI.user_back());
    assert(LI->isSimple() && "We only speculate simple loads");

    IRB.SetInsertPoint(LI);
    LoadInst *TL = IRB.CreateLoad(LI->getType(), TV,
                                  LI->getName() + ".sroa.speculate.load.true");
    LoadInst *FL = IRB.CreateLoad(LI->getType(), FV,
                                  LI->getName() + ".sroa.speculate.load.false");
    NumLoadsSpeculated += 2;

    // Transfer alignment and AA info if present.
    TL->setAlignment(MaybeAlign(LI->getAlignment()));
    FL->setAlignment(MaybeAlign(LI->getAlignment()));

    AAMDNodes Tags;
    LI->getAAMetadata(Tags);
    if (Tags) {
      TL->setAAMetadata(Tags);
      FL->setAAMetadata(Tags);
    }

    Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
                                LI->getName() + ".sroa.speculated");

    LLVM_DEBUG(dbgs() << "          speculated to: " << *V << "\n");
    LI->replaceAllUsesWith(V);
    LI->eraseFromParent();
  }
  SI.eraseFromParent();
}
/// Build a GEP out of a base pointer and indices.
///
/// This will return the BasePtr if that is valid, or build a new GEP
/// instruction using the IRBuilder if GEP-ing is needed.
static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
                       SmallVectorImpl<Value *> &Indices, Twine NamePrefix) {
  if (Indices.empty())
    return BasePtr;

  // A single zero index is a no-op, so check for this and avoid building a GEP
  // in that case.
  if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
    return BasePtr;

  return IRB.CreateInBoundsGEP(BasePtr->getType()->getPointerElementType(),
                               BasePtr, Indices, NamePrefix + "sroa_idx");
}
/// Get a natural GEP off of the BasePtr walking through Ty toward
/// TargetTy without changing the offset of the pointer.
///
/// This routine assumes we've already established a properly offset GEP with
/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
/// zero-indices down through type layers until we find one the same as
/// TargetTy. If we can't find one with the same type, we at least try to use
/// one with the same size. If none of that works, we just produce the GEP as
/// indicated by Indices to have the correct offset.
static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
                                    Value *BasePtr, Type *Ty, Type *TargetTy,
                                    SmallVectorImpl<Value *> &Indices,
                                    Twine NamePrefix) {
  if (Ty == TargetTy)
    return buildGEP(IRB, BasePtr, Indices, NamePrefix);

  // Offset size to use for the indices.
  unsigned OffsetSize = DL.getIndexTypeSizeInBits(BasePtr->getType());

  // See if we can descend into a struct and locate a field with the correct
  // type.
  unsigned NumLayers = 0;
  Type *ElementTy = Ty;
  do {
    if (ElementTy->isPointerTy())
      break;

    if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
      ElementTy = ArrayTy->getElementType();
      Indices.push_back(IRB.getIntN(OffsetSize, 0));
    } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
      ElementTy = VectorTy->getElementType();
      Indices.push_back(IRB.getInt32(0));
    } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
      if (STy->element_begin() == STy->element_end())
        break; // Nothing left to descend into.
      ElementTy = *STy->element_begin();
      Indices.push_back(IRB.getInt32(0));
    } else {
      break;
    }
    ++NumLayers;
  } while (ElementTy != TargetTy);
  if (ElementTy != TargetTy)
    Indices.erase(Indices.end() - NumLayers, Indices.end());

  return buildGEP(IRB, BasePtr, Indices, NamePrefix);
}
/// Recursively compute indices for a natural GEP.
///
/// This is the recursive step for getNaturalGEPWithOffset that walks down the
/// element types adding appropriate indices for the GEP.
static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
                                       Value *Ptr, Type *Ty, APInt &Offset,
                                       Type *TargetTy,
                                       SmallVectorImpl<Value *> &Indices,
                                       Twine NamePrefix) {
  if (Offset == 0)
    return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices,
                                 NamePrefix);

  // We can't recurse through pointer types.
  if (Ty->isPointerTy())
    return nullptr;

  // We try to analyze GEPs over vectors here, but note that these GEPs are
  // extremely poorly defined currently. The long-term goal is to remove GEPing
  // over a vector from the IR completely.
  if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
    unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
    if (ElementSizeInBits % 8 != 0) {
      // GEPs over non-multiple of 8 size vector elements are invalid.
      return nullptr;
    }
    APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
    APInt NumSkippedElements = Offset.sdiv(ElementSize);
    if (NumSkippedElements.ugt(VecTy->getNumElements()))
      return nullptr;
    Offset -= NumSkippedElements * ElementSize;
    Indices.push_back(IRB.getInt(NumSkippedElements));
    return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
                                    Offset, TargetTy, Indices, NamePrefix);
  }

  if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
    Type *ElementTy = ArrTy->getElementType();
    APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
    APInt NumSkippedElements = Offset.sdiv(ElementSize);
    if (NumSkippedElements.ugt(ArrTy->getNumElements()))
      return nullptr;

    Offset -= NumSkippedElements * ElementSize;
    Indices.push_back(IRB.getInt(NumSkippedElements));
    return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                    Indices, NamePrefix);
  }

  StructType *STy = dyn_cast<StructType>(Ty);
  if (!STy)
    return nullptr;

  const StructLayout *SL = DL.getStructLayout(STy);
  uint64_t StructOffset = Offset.getZExtValue();
  if (StructOffset >= SL->getSizeInBytes())
    return nullptr;
  unsigned Index = SL->getElementContainingOffset(StructOffset);
  Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
  Type *ElementTy = STy->getElementType(Index);
  if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
    return nullptr; // The offset points into alignment padding.

  Indices.push_back(IRB.getInt32(Index));
  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                  Indices, NamePrefix);
}
/// Get a natural GEP from a base pointer to a particular offset and
/// resulting in a particular type.
///
/// The goal is to produce a "natural" looking GEP that works with the existing
/// composite types to arrive at the appropriate offset and element type for
/// a pointer. TargetTy is the element type the returned GEP should point-to if
/// possible. We recurse by decreasing Offset, adding the appropriate index to
/// Indices, and setting Ty to the result subtype.
///
/// If no natural GEP can be constructed, this function returns null.
static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
                                      Value *Ptr, APInt Offset, Type *TargetTy,
                                      SmallVectorImpl<Value *> &Indices,
                                      Twine NamePrefix) {
  PointerType *Ty = cast<PointerType>(Ptr->getType());

  // Don't consider any GEPs through an i8* as natural unless the TargetTy is
  // an i8.
  if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
    return nullptr;

  Type *ElementTy = Ty->getElementType();
  if (!ElementTy->isSized())
    return nullptr; // We can't GEP through an unsized element.
  APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
  if (ElementSize == 0)
    return nullptr; // Zero-length arrays can't help us build a natural GEP.
  APInt NumSkippedElements = Offset.sdiv(ElementSize);

  Offset -= NumSkippedElements * ElementSize;
  Indices.push_back(IRB.getInt(NumSkippedElements));
  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                  Indices, NamePrefix);
}
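// Illustrative sketch (assumed types, not from this file): with a base pointer
// of type [10 x { i32, float }]* and a byte offset of 20 targeting float, the
// helpers above produce the "natural" GEP indices [0, 2, 1], because
// 20 = 2 * 8 (the third array element) + 4 (the float field at offset 4).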
/// Compute an adjusted pointer from Ptr by Offset bytes where the
/// resulting pointer has PointerTy.
///
/// This tries very hard to compute a "natural" GEP which arrives at the offset
/// and produces the pointer type desired. Where it cannot, it will try to use
/// the natural GEP to arrive at the offset and bitcast to the type. Where that
/// fails, it will try to use an existing i8* and GEP to the byte offset and
/// bitcast to the type.
///
/// The strategy for finding the more natural GEPs is to peel off layers of the
/// pointer, walking back through bit casts and GEPs, searching for a base
/// pointer from which we can compute a natural GEP with the desired
/// properties. The algorithm tries to fold as many constant indices into
/// a single GEP as possible, thus making each GEP more independent of the
/// surrounding code.
static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
                             APInt Offset, Type *PointerTy, Twine NamePrefix) {
  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(Ptr);
  SmallVector<Value *, 4> Indices;

  // We may end up computing an offset pointer that has the wrong type. If we
  // never are able to compute one directly that has the correct type, we'll
  // fall back to it, so keep it and the base it was computed from around here.
  Value *OffsetPtr = nullptr;
  Value *OffsetBasePtr;

  // Remember any i8 pointer we come across to re-use if we need to do a raw
  // byte offset.
  Value *Int8Ptr = nullptr;
  APInt Int8PtrOffset(Offset.getBitWidth(), 0);

  PointerType *TargetPtrTy = cast<PointerType>(PointerTy);
  Type *TargetTy = TargetPtrTy->getElementType();

  // As `addrspacecast` is not handled here, `Ptr` (the storage pointer) may
  // have a different address space from the expected `PointerTy` (the pointer
  // to be used). Adjust the pointer type based on the original storage
  // pointer.
  auto AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
  PointerTy = TargetTy->getPointerTo(AS);

  do {
    // First fold any existing GEPs into the offset.
    while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
      APInt GEPOffset(Offset.getBitWidth(), 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset))
        break;
      Offset += GEPOffset;
      Ptr = GEP->getPointerOperand();
      if (!Visited.insert(Ptr).second)
        break;
    }

    // See if we can perform a natural GEP here.
    Indices.clear();
    if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
                                           Indices, NamePrefix)) {
      // If we have a new natural pointer at the offset, clear out any old
      // offset pointer we computed. Unless it is the base pointer or
      // a non-instruction, we built a GEP we don't need. Zap it.
      if (OffsetPtr && OffsetPtr != OffsetBasePtr)
        if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) {
          assert(I->use_empty() && "Built a GEP with uses some how!");
          I->eraseFromParent();
        }
      OffsetPtr = P;
      OffsetBasePtr = Ptr;
      // If we also found a pointer of the right type, we're done.
      if (P->getType() == PointerTy)
        break;
    }

    // Stash this pointer if we've found an i8*.
    if (Ptr->getType()->isIntegerTy(8)) {
      Int8Ptr = Ptr;
      Int8PtrOffset = Offset;
    }

    // Peel off a layer of the pointer and update the offset appropriately.
    if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
      Ptr = cast<Operator>(Ptr)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
      if (GA->isInterposable())
        break;
      Ptr = GA->getAliasee();
    } else {
      break;
    }
    assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(Ptr).second);

  if (!OffsetPtr) {
    if (!Int8Ptr) {
      Int8Ptr = IRB.CreateBitCast(
          Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
          NamePrefix + "sroa_raw_cast");
      Int8PtrOffset = Offset;
    }

    OffsetPtr = Int8PtrOffset == 0
                    ? Int8Ptr
                    : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr,
                                            IRB.getInt(Int8PtrOffset),
                                            NamePrefix + "sroa_raw_idx");
  }
  Ptr = OffsetPtr;

  // On the off chance we were targeting i8*, guard the bitcast here.
  if (cast<PointerType>(Ptr->getType()) != TargetPtrTy) {
    Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, TargetPtrTy,
                                                  NamePrefix + "sroa_cast");
  }

  return Ptr;
}

/// Compute the adjusted alignment for a load or store from an offset.
static unsigned getAdjustedAlignment(Instruction *I, uint64_t Offset,
                                     const DataLayout &DL) {
  unsigned Alignment;
  Type *Ty;
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlignment();
    Ty = LI->getType();
  } else if (auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlignment();
    Ty = SI->getValueOperand()->getType();
  } else {
    llvm_unreachable("Only loads and stores are allowed!");
  }

  if (!Alignment)
    Alignment = DL.getABITypeAlignment(Ty);

  return MinAlign(Alignment, Offset);
}

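// Illustrative note (added commentary): MinAlign conservatively combines the
// original alignment with the byte offset. For example, an access originally
// aligned to 8 bytes but performed at byte offset 4 gets MinAlign(8, 4) == 4.
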
/// Test whether we can convert a value from the old to the new type.
///
/// This predicate should be used to guard calls to convertValue in order to
/// ensure that we only try to convert viable values. The strategy is that we
/// will peel off single element struct and array wrappings to get to an
/// underlying value, and convert that value.
static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
  if (OldTy == NewTy)
    return true;

  // For integer types, we can't handle any bit-width differences. This would
  // break both vector conversions with extension and introduce endianness
  // issues when in conjunction with loads and stores.
  if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) {
    assert(cast<IntegerType>(OldTy)->getBitWidth() !=
               cast<IntegerType>(NewTy)->getBitWidth() &&
           "We can't have the same bitwidth for different int types");
    return false;
  }

  if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
    return false;
  if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
    return false;

  // We can convert pointers to integers and vice-versa. Same for vectors
  // of pointers and integers.
  OldTy = OldTy->getScalarType();
  NewTy = NewTy->getScalarType();
  if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
    if (NewTy->isPointerTy() && OldTy->isPointerTy()) {
      return cast<PointerType>(NewTy)->getPointerAddressSpace() ==
             cast<PointerType>(OldTy)->getPointerAddressSpace();
    }

    // We can convert integers to integral pointers, but not to non-integral
    // pointers.
    if (OldTy->isIntegerTy())
      return !DL.isNonIntegralPointerType(NewTy);

    // We can convert integral pointers to integers, but non-integral pointers
    // need to remain pointers.
    if (!DL.isNonIntegralPointerType(OldTy))
      return NewTy->isIntegerTy();

    return false;
  }

  return true;
}

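// Illustrative note (added commentary): under a typical 64-bit DataLayout this
// predicate accepts pairs such as i64 <-> double, i64 <-> i8* (for integral
// pointers), and <2 x i32> <-> i64, while rejecting i32 <-> i64 (mismatched
// bit widths) and pointers in different address spaces.
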
/// Generic routine to convert an SSA value to a value of a different
/// type.
///
/// This will try various different casting techniques, such as bitcasts,
/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
/// two types for viability with this routine.
static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
                           Type *NewTy) {
  Type *OldTy = V->getType();
  assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type");

  if (OldTy == NewTy)
    return V;

  assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
         "Integer types must be the exact same to convert.");

  // See if we need inttoptr for this type pair. A cast involving both scalars
  // and vectors requires an additional bitcast.
  if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
    // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
    if (OldTy->isVectorTy() && !NewTy->isVectorTy())
      return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
                                NewTy);

    // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
    if (!OldTy->isVectorTy() && NewTy->isVectorTy())
      return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
                                NewTy);

    return IRB.CreateIntToPtr(V, NewTy);
  }

  // See if we need ptrtoint for this type pair. A cast involving both scalars
  // and vectors requires an additional bitcast.
  if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
    // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
    if (OldTy->isVectorTy() && !NewTy->isVectorTy())
      return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
                               NewTy);

    // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
    if (!OldTy->isVectorTy() && NewTy->isVectorTy())
      return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
                               NewTy);

    return IRB.CreatePtrToInt(V, NewTy);
  }

  return IRB.CreateBitCast(V, NewTy);
}

/// Test whether the given slice use can be promoted to a vector.
///
/// This function is called to test each entry in a partition which is slated
/// for a single slice.
static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
                                            VectorType *Ty,
                                            uint64_t ElementSize,
                                            const DataLayout &DL) {
  // First validate the slice offsets.
  uint64_t BeginOffset =
      std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
  uint64_t BeginIndex = BeginOffset / ElementSize;
  if (BeginIndex * ElementSize != BeginOffset ||
      BeginIndex >= Ty->getNumElements())
    return false;
  uint64_t EndOffset =
      std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
  uint64_t EndIndex = EndOffset / ElementSize;
  if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
    return false;

  assert(EndIndex > BeginIndex && "Empty vector!");
  uint64_t NumElements = EndIndex - BeginIndex;
  Type *SliceTy = (NumElements == 1)
                      ? Ty->getElementType()
                      : VectorType::get(Ty->getElementType(), NumElements);

  Type *SplitIntTy =
      Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);

  Use *U = S.getUse();

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
    if (MI->isVolatile())
      return false;
    if (!S.isSplittable())
      return false; // Skip any unsplittable intrinsics.
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
    if (!II->isLifetimeStartOrEnd())
      return false;
  } else if (U->get()->getType()->getPointerElementType()->isStructTy()) {
    // Disable vector promotion when there are loads or stores of an FCA.
    return false;
  } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
    if (LI->isVolatile())
      return false;
    Type *LTy = LI->getType();
    if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
      assert(LTy->isIntegerTy());
      LTy = SplitIntTy;
    }
    if (!canConvertValue(DL, SliceTy, LTy))
      return false;
  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
    if (SI->isVolatile())
      return false;
    Type *STy = SI->getValueOperand()->getType();
    if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
      assert(STy->isIntegerTy());
      STy = SplitIntTy;
    }
    if (!canConvertValue(DL, STy, SliceTy))
      return false;
  } else {
    return false;
  }

  return true;
}

/// Test whether the given alloca partitioning and range of slices can be
/// promoted to a vector.
///
/// This is a quick test to check whether we can rewrite a particular alloca
/// partition (and its newly formed alloca) into a vector alloca with only
/// whole-vector loads and stores such that it could be promoted to a vector
/// SSA value. We only can ensure this for a limited set of operations, and we
/// don't want to do the rewrites unless we are confident that the result will
/// be promotable, so we have an early test here.
static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
  // Collect the candidate types for vector-based promotion. Also track whether
  // we have different element types.
  SmallVector<VectorType *, 4> CandidateTys;
  Type *CommonEltTy = nullptr;
  bool HaveCommonEltTy = true;
  auto CheckCandidateType = [&](Type *Ty) {
    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      // Return if bitcast to vectors is different for total size in bits.
      if (!CandidateTys.empty()) {
        VectorType *V = CandidateTys[0];
        if (DL.getTypeSizeInBits(VTy) != DL.getTypeSizeInBits(V)) {
          CandidateTys.clear();
          return;
        }
      }
      CandidateTys.push_back(VTy);
      if (!CommonEltTy)
        CommonEltTy = VTy->getElementType();
      else if (CommonEltTy != VTy->getElementType())
        HaveCommonEltTy = false;
    }
  };
  // Consider any loads or stores that are the exact size of the slice.
  for (const Slice &S : P)
    if (S.beginOffset() == P.beginOffset() &&
        S.endOffset() == P.endOffset()) {
      if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser()))
        CheckCandidateType(LI->getType());
      else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser()))
        CheckCandidateType(SI->getValueOperand()->getType());
    }

  // If we didn't find a vector type, nothing to do here.
  if (CandidateTys.empty())
    return nullptr;

  // Remove non-integer vector types if we had multiple common element types.
  // FIXME: It'd be nice to replace them with integer vector types, but we can't
  // do that until all the backends are known to produce good code for all
  // integer vector types.
  if (!HaveCommonEltTy) {
    CandidateTys.erase(
        llvm::remove_if(CandidateTys,
                        [](VectorType *VTy) {
                          return !VTy->getElementType()->isIntegerTy();
                        }),
        CandidateTys.end());

    // If there were no integer vector types, give up.
    if (CandidateTys.empty())
      return nullptr;

    // Rank the remaining candidate vector types. This is easy because we know
    // they're all integer vectors. We sort by ascending number of elements.
    auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) {
      (void)DL;
      assert(DL.getTypeSizeInBits(RHSTy) == DL.getTypeSizeInBits(LHSTy) &&
             "Cannot have vector types of different sizes!");
      assert(RHSTy->getElementType()->isIntegerTy() &&
             "All non-integer types eliminated!");
      assert(LHSTy->getElementType()->isIntegerTy() &&
             "All non-integer types eliminated!");
      return RHSTy->getNumElements() < LHSTy->getNumElements();
    };
    llvm::sort(CandidateTys, RankVectorTypes);
    CandidateTys.erase(
        std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes),
        CandidateTys.end());
  } else {
// The only way to have the same element type in every vector type is to
// have the same vector type. Check that and remove all but one.
#ifndef NDEBUG
    for (VectorType *VTy : CandidateTys) {
      assert(VTy->getElementType() == CommonEltTy &&
             "Unaccounted for element type!");
      assert(VTy == CandidateTys[0] &&
             "Different vector types with the same element type!");
    }
#endif
    CandidateTys.resize(1);
  }

  // Try each vector type, and return the one which works.
  auto CheckVectorTypeForPromotion = [&](VectorType *VTy) {
    uint64_t ElementSize = DL.getTypeSizeInBits(VTy->getElementType());

    // While the definition of LLVM vectors is bitpacked, we don't support sizes
    // that aren't byte sized.
    if (ElementSize % 8)
      return false;
    assert((DL.getTypeSizeInBits(VTy) % 8) == 0 &&
           "vector size not a multiple of element size?");
    ElementSize /= 8;

    for (const Slice &S : P)
      if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL))
        return false;

    for (const Slice *S : P.splitSliceTails())
      if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL))
        return false;

    return true;
  };
  for (VectorType *VTy : CandidateTys)
    if (CheckVectorTypeForPromotion(VTy))
      return VTy;

  return nullptr;
}

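// Illustrative note (added commentary): for a hypothetical partition whose
// whole-partition accesses are <4 x float> loads/stores and whose remaining
// slices touch single float elements, the routine above returns <4 x float>.
// If same-sized candidates with different element types show up (say
// <4 x float> and <2 x i64>), only the integer vector candidates are kept and
// tried for promotion.
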
/// Test whether a slice of an alloca is valid for integer widening.
///
/// This implements the necessary checking for the \c isIntegerWideningViable
/// test below on a single slice of the alloca.
static bool isIntegerWideningViableForSlice(const Slice &S,
                                            uint64_t AllocBeginOffset,
                                            Type *AllocaTy,
                                            const DataLayout &DL,
                                            bool &WholeAllocaOp) {
  uint64_t Size = DL.getTypeStoreSize(AllocaTy);

  uint64_t RelBegin = S.beginOffset() - AllocBeginOffset;
  uint64_t RelEnd = S.endOffset() - AllocBeginOffset;

  // We can't reasonably handle cases where the load or store extends past
  // the end of the alloca's type and into its padding.
  if (RelEnd > Size)
    return false;

  Use *U = S.getUse();

  if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
    if (LI->isVolatile())
      return false;
    // We can't handle loads that extend past the allocated memory.
    if (DL.getTypeStoreSize(LI->getType()) > Size)
      return false;
    // So far, AllocaSliceRewriter does not support widening split slice tails
    // in rewriteIntegerLoad.
    if (S.beginOffset() < AllocBeginOffset)
      return false;
    // Note that we don't count vector loads or stores as whole-alloca
    // operations which enable integer widening because we would prefer to use
    // vector widening instead.
    if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size)
      WholeAllocaOp = true;
    if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
      if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
        return false;
    } else if (RelBegin != 0 || RelEnd != Size ||
               !canConvertValue(DL, AllocaTy, LI->getType())) {
      // Non-integer loads need to be convertible from the alloca type so that
      // they are promotable.
      return false;
    }
  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
    Type *ValueTy = SI->getValueOperand()->getType();
    if (SI->isVolatile())
      return false;
    // We can't handle stores that extend past the allocated memory.
    if (DL.getTypeStoreSize(ValueTy) > Size)
      return false;
    // So far, AllocaSliceRewriter does not support widening split slice tails
    // in rewriteIntegerStore.
    if (S.beginOffset() < AllocBeginOffset)
      return false;
    // Note that we don't count vector loads or stores as whole-alloca
    // operations which enable integer widening because we would prefer to use
    // vector widening instead.
    if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size)
      WholeAllocaOp = true;
    if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
      if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
        return false;
    } else if (RelBegin != 0 || RelEnd != Size ||
               !canConvertValue(DL, ValueTy, AllocaTy)) {
      // Non-integer stores need to be convertible to the alloca type so that
      // they are promotable.
      return false;
    }
  } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
    if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
      return false;
    if (!S.isSplittable())
      return false; // Skip any unsplittable intrinsics.
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
    if (!II->isLifetimeStartOrEnd())
      return false;
  } else {
    return false;
  }

  return true;
}

/// Test whether the given alloca partition's integer operations can be
/// widened to promotable ones.
///
/// This is a quick test to check whether we can rewrite the integer loads and
/// stores to a particular alloca into wider loads and stores and be able to
/// promote the resulting alloca.
static bool isIntegerWideningViable(Partition &P, Type *AllocaTy,
                                    const DataLayout &DL) {
  uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy);
  // Don't create integer types larger than the maximum bitwidth.
  if (SizeInBits > IntegerType::MAX_INT_BITS)
    return false;

  // Don't try to handle allocas with bit-padding.
  if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy))
    return false;

  // We need to ensure that an integer type with the appropriate bitwidth can
  // be converted to the alloca type, whatever that is. We don't want to force
  // the alloca itself to have an integer type if there is a more suitable one.
  Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
  if (!canConvertValue(DL, AllocaTy, IntTy) ||
      !canConvertValue(DL, IntTy, AllocaTy))
    return false;

  // While examining uses, we ensure that the alloca has a covering load or
  // store. We don't want to widen the integer operations only to fail to
  // promote due to some other unsplittable entry (which we may make splittable
  // later). However, if there are only splittable uses, go ahead and assume
  // that we cover the alloca.
  // FIXME: We shouldn't consider split slices that happen to start in the
  // partition here...
  bool WholeAllocaOp =
      P.begin() != P.end() ? false : DL.isLegalInteger(SizeInBits);

  for (const Slice &S : P)
    if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL,
                                         WholeAllocaOp))
      return false;

  for (const Slice *S : P.splitSliceTails())
    if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL,
                                         WholeAllocaOp))
      return false;

  return WholeAllocaOp;
}

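// Illustrative note (added commentary): a hypothetical i64 alloca accessed by
// one covering i64 load and by i32 stores to each half is reported viable
// here; the narrow stores are later rewritten as a load of the whole i64, an
// insertInteger of the 32 stored bits, and a store of the result back.
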
static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
                             IntegerType *Ty, uint64_t Offset,
                             const Twine &Name) {
  LLVM_DEBUG(dbgs() << "       start: " << *V << "\n");
  IntegerType *IntTy = cast<IntegerType>(V->getType());
  assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
         "Element extends past full value");
  uint64_t ShAmt = 8 * Offset;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
  if (ShAmt) {
    V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
    LLVM_DEBUG(dbgs() << "     shifted: " << *V << "\n");
  }
  assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot extract to a larger integer!");
  if (Ty != IntTy) {
    V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
    LLVM_DEBUG(dbgs() << "     trunced: " << *V << "\n");
  }
  return V;
}

static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
                            Value *V, uint64_t Offset, const Twine &Name) {
  IntegerType *IntTy = cast<IntegerType>(Old->getType());
  IntegerType *Ty = cast<IntegerType>(V->getType());
  assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot insert a larger integer!");
  LLVM_DEBUG(dbgs() << "       start: " << *V << "\n");
  if (Ty != IntTy) {
    V = IRB.CreateZExt(V, IntTy, Name + ".ext");
    LLVM_DEBUG(dbgs() << "    extended: " << *V << "\n");
  }
  assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
         "Element store outside of alloca store");
  uint64_t ShAmt = 8 * Offset;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
  if (ShAmt) {
    V = IRB.CreateShl(V, ShAmt, Name + ".shift");
    LLVM_DEBUG(dbgs() << "     shifted: " << *V << "\n");
  }

  if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
    APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
    Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
    LLVM_DEBUG(dbgs() << "      masked: " << *Old << "\n");
    V = IRB.CreateOr(Old, V, Name + ".insert");
    LLVM_DEBUG(dbgs() << "    inserted: " << *V << "\n");
  }
  return V;
}

static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex,
                            unsigned EndIndex, const Twine &Name) {
  VectorType *VecTy = cast<VectorType>(V->getType());
  unsigned NumElements = EndIndex - BeginIndex;
  assert(NumElements <= VecTy->getNumElements() && "Too many elements!");

  if (NumElements == VecTy->getNumElements())
    return V;

  if (NumElements == 1) {
    V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
                                 Name + ".extract");
    LLVM_DEBUG(dbgs() << "     extract: " << *V << "\n");
    return V;
  }

  SmallVector<Constant *, 8> Mask;
  Mask.reserve(NumElements);
  for (unsigned i = BeginIndex; i != EndIndex; ++i)
    Mask.push_back(IRB.getInt32(i));
  V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
                              ConstantVector::get(Mask), Name + ".extract");
  LLVM_DEBUG(dbgs() << "     shuffle: " << *V << "\n");
  return V;
}

static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
                           unsigned BeginIndex, const Twine &Name) {
  VectorType *VecTy = cast<VectorType>(Old->getType());
  assert(VecTy && "Can only insert a vector into a vector");

  VectorType *Ty = dyn_cast<VectorType>(V->getType());
  if (!Ty) {
    // Single element to insert.
    V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
                                Name + ".insert");
    LLVM_DEBUG(dbgs() << "     insert: " << *V << "\n");
    return V;
  }

  assert(Ty->getNumElements() <= VecTy->getNumElements() &&
         "Too many elements!");
  if (Ty->getNumElements() == VecTy->getNumElements()) {
    assert(V->getType() == VecTy && "Vector type mismatch");
    return V;
  }
  unsigned EndIndex = BeginIndex + Ty->getNumElements();

  // When inserting a smaller vector into the larger to store, we first
  // use a shuffle vector to widen it with undef elements, and then
  // a second shuffle vector to select between the loaded vector and the
  // incoming vector.
  SmallVector<Constant *, 8> Mask;
  Mask.reserve(VecTy->getNumElements());
  for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
    if (i >= BeginIndex && i < EndIndex)
      Mask.push_back(IRB.getInt32(i - BeginIndex));
    else
      Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
  V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
                              ConstantVector::get(Mask), Name + ".expand");
  LLVM_DEBUG(dbgs() << "    shuffle: " << *V << "\n");

  Mask.clear();
  for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
    Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));

  V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend");

  LLVM_DEBUG(dbgs() << "    blend: " << *V << "\n");
  return V;
}

/// Visitor to rewrite instructions using a particular slice of an alloca
/// to use a new alloca.
///
/// Also implements the rewriting to vector-based accesses when the partition
/// passes the isVectorPromotionViable predicate. Most of the rewriting logic
/// lives here.
class llvm::sroa::AllocaSliceRewriter
    : public InstVisitor<AllocaSliceRewriter, bool> {
  // Befriend the base class so it can delegate to private visit methods.
  friend class InstVisitor<AllocaSliceRewriter, bool>;

  using Base = InstVisitor<AllocaSliceRewriter, bool>;

  const DataLayout &DL;
  AllocaSlices &AS;
  SROA &Pass;
  AllocaInst &OldAI, &NewAI;
  const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
  Type *NewAllocaTy;

  // This is a convenience and flag variable that will be null unless the new
  // alloca's integer operations should be widened to this integer type due to
  // passing isIntegerWideningViable above. If it is non-null, the desired
  // integer type will be stored here for easy access during rewriting.
  IntegerType *IntTy;

  // If we are rewriting an alloca partition which can be written as pure
  // vector operations, we stash extra information here. When VecTy is
  // non-null, we have some strict guarantees about the rewritten alloca:
  //   - The new alloca is exactly the size of the vector type here.
  //   - The accesses all either map to the entire vector or to a single
  //     element.
  //   - The set of accessing instructions is only one of those handled above
  //     in isVectorPromotionViable. Generally these are the same access kinds
  //     which are promotable via mem2reg.
  VectorType *VecTy;
  Type *ElementTy;
  uint64_t ElementSize;

  // The original offset of the slice currently being rewritten relative to
  // the original alloca.
  uint64_t BeginOffset = 0;
  uint64_t EndOffset = 0;

  // The new offsets of the slice currently being rewritten relative to the
  // original alloca.
  uint64_t NewBeginOffset, NewEndOffset;

  uint64_t SliceSize;
  bool IsSplittable = false;
  bool IsSplit = false;
  Use *OldUse = nullptr;
  Instruction *OldPtr = nullptr;

  // Track post-rewrite users which are PHI nodes and Selects.
  SmallSetVector<PHINode *, 8> &PHIUsers;
  SmallSetVector<SelectInst *, 8> &SelectUsers;

  // Utility IR builder, whose name prefix is setup for each visited use, and
  // the insertion point is set to point to the user.
  IRBuilderTy IRB;

public:
  AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass,
                      AllocaInst &OldAI, AllocaInst &NewAI,
                      uint64_t NewAllocaBeginOffset,
                      uint64_t NewAllocaEndOffset, bool IsIntegerPromotable,
                      VectorType *PromotableVecTy,
                      SmallSetVector<PHINode *, 8> &PHIUsers,
                      SmallSetVector<SelectInst *, 8> &SelectUsers)
      : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
        NewAllocaBeginOffset(NewAllocaBeginOffset),
        NewAllocaEndOffset(NewAllocaEndOffset),
        NewAllocaTy(NewAI.getAllocatedType()),
        IntTy(IsIntegerPromotable
                  ? Type::getIntNTy(
                        NewAI.getContext(),
                        DL.getTypeSizeInBits(NewAI.getAllocatedType()))
                  : nullptr),
        VecTy(PromotableVecTy),
        ElementTy(VecTy ? VecTy->getElementType() : nullptr),
        ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
        PHIUsers(PHIUsers), SelectUsers(SelectUsers),
        IRB(NewAI.getContext(), ConstantFolder()) {
    if (VecTy) {
      assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 &&
             "Only multiple-of-8 sized vector elements are viable");
    }
    assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy));
  }

  bool visit(AllocaSlices::const_iterator I) {
    bool CanSROA = true;
    BeginOffset = I->beginOffset();
    EndOffset = I->endOffset();
    IsSplittable = I->isSplittable();
    IsSplit =
        BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
    LLVM_DEBUG(dbgs() << "  rewriting " << (IsSplit ? "split " : ""));
    LLVM_DEBUG(AS.printSlice(dbgs(), I, ""));
    LLVM_DEBUG(dbgs() << "\n");

    // Compute the intersecting offset range.
    assert(BeginOffset < NewAllocaEndOffset);
    assert(EndOffset > NewAllocaBeginOffset);
    NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
    NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);

    SliceSize = NewEndOffset - NewBeginOffset;

    OldUse = I->getUse();
    OldPtr = cast<Instruction>(OldUse->get());

    Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
    IRB.SetInsertPoint(OldUserI);
    IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
    IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");

    CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
    if (VecTy || IntTy)
      assert(CanSROA);
    return CanSROA;
  }

private:
  // Make sure the other visit overloads are visible.
  using Base::visit;

  // Every instruction which can end up as a user must have a rewrite rule.
  bool visitInstruction(Instruction &I) {
    LLVM_DEBUG(dbgs() << "    !!!! Cannot rewrite: " << I << "\n");
    llvm_unreachable("No rewrite rule for this instruction!");
  }

  Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
    // Note that the offset computation can use BeginOffset or NewBeginOffset
    // interchangeably for unsplit slices.
    assert(IsSplit || BeginOffset == NewBeginOffset);
    uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;

#ifndef NDEBUG
    StringRef OldName = OldPtr->getName();
    // Skip through the last '.sroa.' component of the name.
    size_t LastSROAPrefix = OldName.rfind(".sroa.");
    if (LastSROAPrefix != StringRef::npos) {
      OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
      // Look for an SROA slice index.
      size_t IndexEnd = OldName.find_first_not_of("0123456789");
      if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
        // Strip the index and look for the offset.
        OldName = OldName.substr(IndexEnd + 1);
        size_t OffsetEnd = OldName.find_first_not_of("0123456789");
        if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
          // Strip the offset.
          OldName = OldName.substr(OffsetEnd + 1);
      }
    }
    // Strip any SROA suffixes as well.
    OldName = OldName.substr(0, OldName.find(".sroa_"));
#endif

    return getAdjustedPtr(IRB, DL, &NewAI,
                          APInt(DL.getIndexTypeSizeInBits(PointerTy), Offset),
                          PointerTy,
#ifndef NDEBUG
                          Twine(OldName) + "."
#else
                          Twine()
#endif
                          );
  }

  /// Compute suitable alignment to access this slice of the *new*
  /// alloca.
  ///
  /// You can optionally pass a type to this routine and if that type's ABI
  /// alignment is itself suitable, this will return zero.
  unsigned getSliceAlign(Type *Ty = nullptr) {
    unsigned NewAIAlign = NewAI.getAlignment();
    if (!NewAIAlign)
      NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
    unsigned Align =
        MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset);
    return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 0 : Align;
  }

  unsigned getIndex(uint64_t Offset) {
    assert(VecTy && "Can only call getIndex when rewriting a vector");
    uint64_t RelOffset = Offset - NewAllocaBeginOffset;
    assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
    uint32_t Index = RelOffset / ElementSize;
    assert(Index * ElementSize == RelOffset);
    return Index;
  }

  void deleteIfTriviallyDead(Value *V) {
    Instruction *I = cast<Instruction>(V);
    if (isInstructionTriviallyDead(I))
      Pass.DeadInsts.insert(I);
  }

*rewriteVectorizedLoadInst() {
2460 unsigned BeginIndex
= getIndex(NewBeginOffset
);
2461 unsigned EndIndex
= getIndex(NewEndOffset
);
2462 assert(EndIndex
> BeginIndex
&& "Empty vector!");
2464 Value
*V
= IRB
.CreateAlignedLoad(NewAI
.getAllocatedType(), &NewAI
,
2465 NewAI
.getAlignment(), "load");
2466 return extractVector(IRB
, V
, BeginIndex
, EndIndex
, "vec");
2469 Value
*rewriteIntegerLoad(LoadInst
&LI
) {
2470 assert(IntTy
&& "We cannot insert an integer to the alloca");
2471 assert(!LI
.isVolatile());
2472 Value
*V
= IRB
.CreateAlignedLoad(NewAI
.getAllocatedType(), &NewAI
,
2473 NewAI
.getAlignment(), "load");
2474 V
= convertValue(DL
, IRB
, V
, IntTy
);
2475 assert(NewBeginOffset
>= NewAllocaBeginOffset
&& "Out of bounds offset");
2476 uint64_t Offset
= NewBeginOffset
- NewAllocaBeginOffset
;
2477 if (Offset
> 0 || NewEndOffset
< NewAllocaEndOffset
) {
2478 IntegerType
*ExtractTy
= Type::getIntNTy(LI
.getContext(), SliceSize
* 8);
2479 V
= extractInteger(DL
, IRB
, V
, ExtractTy
, Offset
, "extract");
2481 // It is possible that the extracted type is not the load type. This
2482 // happens if there is a load past the end of the alloca, and as
2483 // a consequence the slice is narrower but still a candidate for integer
2484 // lowering. To handle this case, we just zero extend the extracted
2486 assert(cast
<IntegerType
>(LI
.getType())->getBitWidth() >= SliceSize
* 8 &&
2487 "Can only handle an extract for an overly wide load");
2488 if (cast
<IntegerType
>(LI
.getType())->getBitWidth() > SliceSize
* 8)
2489 V
= IRB
.CreateZExt(V
, LI
.getType());
2493 bool visitLoadInst(LoadInst
&LI
) {
2494 LLVM_DEBUG(dbgs() << " original: " << LI
<< "\n");
2495 Value
*OldOp
= LI
.getOperand(0);
2496 assert(OldOp
== OldPtr
);
2499 LI
.getAAMetadata(AATags
);
2501 unsigned AS
= LI
.getPointerAddressSpace();
2503 Type
*TargetTy
= IsSplit
? Type::getIntNTy(LI
.getContext(), SliceSize
* 8)
2505 const bool IsLoadPastEnd
= DL
.getTypeStoreSize(TargetTy
) > SliceSize
;
2506 bool IsPtrAdjusted
= false;
2509 V
= rewriteVectorizedLoadInst();
2510 } else if (IntTy
&& LI
.getType()->isIntegerTy()) {
2511 V
= rewriteIntegerLoad(LI
);
2512 } else if (NewBeginOffset
== NewAllocaBeginOffset
&&
2513 NewEndOffset
== NewAllocaEndOffset
&&
2514 (canConvertValue(DL
, NewAllocaTy
, TargetTy
) ||
2515 (IsLoadPastEnd
&& NewAllocaTy
->isIntegerTy() &&
2516 TargetTy
->isIntegerTy()))) {
2517 LoadInst
*NewLI
= IRB
.CreateAlignedLoad(NewAI
.getAllocatedType(), &NewAI
,
2518 NewAI
.getAlignment(),
2519 LI
.isVolatile(), LI
.getName());
2521 NewLI
->setAAMetadata(AATags
);
2522 if (LI
.isVolatile())
2523 NewLI
->setAtomic(LI
.getOrdering(), LI
.getSyncScopeID());
2525 // Any !nonnull metadata or !range metadata on the old load is also valid
2526 // on the new load. This is even true in some cases even when the loads
2527 // are different types, for example by mapping !nonnull metadata to
2528 // !range metadata by modeling the null pointer constant converted to the
2530 // FIXME: Add support for range metadata here. Currently the utilities
2531 // for this don't propagate range metadata in trivial cases from one
2532 // integer load to another, don't handle non-addrspace-0 null pointers
2533 // correctly, and don't have any support for mapping ranges as the
2534 // integer type becomes winder or narrower.
2535 if (MDNode
*N
= LI
.getMetadata(LLVMContext::MD_nonnull
))
2536 copyNonnullMetadata(LI
, N
, *NewLI
);
2538 // Try to preserve nonnull metadata
2541 // If this is an integer load past the end of the slice (which means the
2542 // bytes outside the slice are undef or this load is dead) just forcibly
2543 // fix the integer size with correct handling of endianness.
2544 if (auto *AITy
= dyn_cast
<IntegerType
>(NewAllocaTy
))
2545 if (auto *TITy
= dyn_cast
<IntegerType
>(TargetTy
))
2546 if (AITy
->getBitWidth() < TITy
->getBitWidth()) {
2547 V
= IRB
.CreateZExt(V
, TITy
, "load.ext");
2548 if (DL
.isBigEndian())
2549 V
= IRB
.CreateShl(V
, TITy
->getBitWidth() - AITy
->getBitWidth(),
2553 Type
*LTy
= TargetTy
->getPointerTo(AS
);
2554 LoadInst
*NewLI
= IRB
.CreateAlignedLoad(
2555 TargetTy
, getNewAllocaSlicePtr(IRB
, LTy
), getSliceAlign(TargetTy
),
2556 LI
.isVolatile(), LI
.getName());
2558 NewLI
->setAAMetadata(AATags
);
2559 if (LI
.isVolatile())
2560 NewLI
->setAtomic(LI
.getOrdering(), LI
.getSyncScopeID());
2563 IsPtrAdjusted
= true;
2565 V
= convertValue(DL
, IRB
, V
, TargetTy
);
2568 assert(!LI
.isVolatile());
2569 assert(LI
.getType()->isIntegerTy() &&
2570 "Only integer type loads and stores are split");
2571 assert(SliceSize
< DL
.getTypeStoreSize(LI
.getType()) &&
2572 "Split load isn't smaller than original load");
2573 assert(DL
.typeSizeEqualsStoreSize(LI
.getType()) &&
2574 "Non-byte-multiple bit width");
2575 // Move the insertion point just past the load so that we can refer to it.
2576 IRB
.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI
)));
2577 // Create a placeholder value with the same type as LI to use as the
2578 // basis for the new value. This allows us to replace the uses of LI with
2579 // the computed value, and then replace the placeholder with LI, leaving
2580 // LI only used for this computation.
2581 Value
*Placeholder
= new LoadInst(
2582 LI
.getType(), UndefValue::get(LI
.getType()->getPointerTo(AS
)));
2583 V
= insertInteger(DL
, IRB
, Placeholder
, V
, NewBeginOffset
- BeginOffset
,
2585 LI
.replaceAllUsesWith(V
);
2586 Placeholder
->replaceAllUsesWith(&LI
);
2587 Placeholder
->deleteValue();
2589 LI
.replaceAllUsesWith(V
);
2592 Pass
.DeadInsts
.insert(&LI
);
2593 deleteIfTriviallyDead(OldOp
);
2594 LLVM_DEBUG(dbgs() << " to: " << *V
<< "\n");
2595 return !LI
.isVolatile() && !IsPtrAdjusted
;
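  // Illustrative note (added commentary): for split loads, the placeholder
  // created above stands in for the bits this slice does not cover. The
  // slice's bits are inserted into it, all uses of the original load are
  // redirected to the combined value, and the placeholder is then swapped
  // back for the original load and deleted, leaving the original load live
  // only as an input to that combination.
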
  bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
                                  AAMDNodes AATags) {
    if (V->getType() != VecTy) {
      unsigned BeginIndex = getIndex(NewBeginOffset);
      unsigned EndIndex = getIndex(NewEndOffset);
      assert(EndIndex > BeginIndex && "Empty vector!");
      unsigned NumElements = EndIndex - BeginIndex;
      assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
      Type *SliceTy = (NumElements == 1)
                          ? ElementTy
                          : VectorType::get(ElementTy, NumElements);
      if (V->getType() != SliceTy)
        V = convertValue(DL, IRB, V, SliceTy);

      // Mix in the existing elements.
      Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                         NewAI.getAlignment(), "load");
      V = insertVector(IRB, Old, V, BeginIndex, "vec");
    }
    StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
    if (AATags)
      Store->setAAMetadata(AATags);
    Pass.DeadInsts.insert(&SI);

    LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
    return true;
  }

  bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) {
    assert(IntTy && "We cannot extract an integer from the alloca");
    assert(!SI.isVolatile());
    if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
      Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                         NewAI.getAlignment(), "oldload");
      Old = convertValue(DL, IRB, Old, IntTy);
      assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
      uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
      V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert");
    }
    V = convertValue(DL, IRB, V, NewAllocaTy);
    StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
    Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
                             LLVMContext::MD_access_group});
    if (AATags)
      Store->setAAMetadata(AATags);
    Pass.DeadInsts.insert(&SI);
    LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
    return true;
  }

  bool visitStoreInst(StoreInst &SI) {
    LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");
    Value *OldOp = SI.getOperand(1);
    assert(OldOp == OldPtr);

    AAMDNodes AATags;
    SI.getAAMetadata(AATags);

    Value *V = SI.getValueOperand();

    // Strip all inbounds GEPs and pointer casts to try to dig out any root
    // alloca that should be re-examined after promoting this alloca.
    if (V->getType()->isPointerTy())
      if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
        Pass.PostPromotionWorklist.insert(AI);

    if (SliceSize < DL.getTypeStoreSize(V->getType())) {
      assert(!SI.isVolatile());
      assert(V->getType()->isIntegerTy() &&
             "Only integer type loads and stores are split");
      assert(DL.typeSizeEqualsStoreSize(V->getType()) &&
             "Non-byte-multiple bit width");
      IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8);
      V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset,
                         "extract");
    }

    if (VecTy)
      return rewriteVectorizedStoreInst(V, SI, OldOp, AATags);
    if (IntTy && V->getType()->isIntegerTy())
      return rewriteIntegerStore(V, SI, AATags);

    const bool IsStorePastEnd = DL.getTypeStoreSize(V->getType()) > SliceSize;
    StoreInst *NewSI;
    if (NewBeginOffset == NewAllocaBeginOffset &&
        NewEndOffset == NewAllocaEndOffset &&
        (canConvertValue(DL, V->getType(), NewAllocaTy) ||
         (IsStorePastEnd && NewAllocaTy->isIntegerTy() &&
          V->getType()->isIntegerTy()))) {
      // If this is an integer store past the end of slice (and thus the bytes
      // past that point are irrelevant or this is unreachable), truncate the
      // value prior to storing.
      if (auto *VITy = dyn_cast<IntegerType>(V->getType()))
        if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
          if (VITy->getBitWidth() > AITy->getBitWidth()) {
            if (DL.isBigEndian())
              V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(),
                                 "endian_shift");
            V = IRB.CreateTrunc(V, AITy, "load.trunc");
          }

      V = convertValue(DL, IRB, V, NewAllocaTy);
      NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
                                     SI.isVolatile());
    } else {
      unsigned AS = SI.getPointerAddressSpace();
      Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS));
      NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
                                     SI.isVolatile());
    }
    NewSI->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
                             LLVMContext::MD_access_group});
    if (AATags)
      NewSI->setAAMetadata(AATags);
    if (SI.isVolatile())
      NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
    Pass.DeadInsts.insert(&SI);
    deleteIfTriviallyDead(OldOp);

    LLVM_DEBUG(dbgs() << "          to: " << *NewSI << "\n");
    return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile();
  }

  /// Compute an integer value from splatting an i8 across the given
  /// number of bytes.
  ///
  /// Note that this routine assumes an i8 is a byte. If that isn't true, don't
  /// call this routine.
  /// FIXME: Heed the advice above.
  ///
  /// \param V The i8 value to splat.
  /// \param Size The number of bytes in the output (assuming i8 is one byte)
  Value *getIntegerSplat(Value *V, unsigned Size) {
    assert(Size > 0 && "Expected a positive number of bytes.");
    IntegerType *VTy = cast<IntegerType>(V->getType());
    assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte");
    if (Size == 1)
      return V;

    Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8);
    V = IRB.CreateMul(
        IRB.CreateZExt(V, SplatIntTy, "zext"),
        ConstantExpr::getUDiv(
            Constant::getAllOnesValue(SplatIntTy),
            ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()),
                                  SplatIntTy)),
        "isplat");
    return V;
  }

  /// Compute a vector splat for a given element value.
  Value *getVectorSplat(Value *V, unsigned NumElements) {
    V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
    LLVM_DEBUG(dbgs() << "       splat: " << *V << "\n");
    return V;
  }

  bool visitMemSetInst(MemSetInst &II) {
    LLVM_DEBUG(dbgs() << "    original: " << II << "\n");
    assert(II.getRawDest() == OldPtr);

    AAMDNodes AATags;
    II.getAAMetadata(AATags);

    // If the memset has a variable size, it cannot be split, just adjust the
    // pointer to the new alloca.
    if (!isa<Constant>(II.getLength())) {
      assert(!IsSplit);
      assert(NewBeginOffset == BeginOffset);
      II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType()));
      II.setDestAlignment(getSliceAlign());

      deleteIfTriviallyDead(OldPtr);
      return false;
    }

    // Record this instruction for deletion.
    Pass.DeadInsts.insert(&II);

    Type *AllocaTy = NewAI.getAllocatedType();
    Type *ScalarTy = AllocaTy->getScalarType();

    const bool CanContinue = [&]() {
      if (VecTy || IntTy)
        return true;
      if (BeginOffset > NewAllocaBeginOffset ||
          EndOffset < NewAllocaEndOffset)
        return false;
      auto *C = cast<ConstantInt>(II.getLength());
      if (C->getBitWidth() > 64)
        return false;
      const auto Len = C->getZExtValue();
      auto *Int8Ty = IntegerType::getInt8Ty(NewAI.getContext());
      auto *SrcTy = VectorType::get(Int8Ty, Len);
      return canConvertValue(DL, SrcTy, AllocaTy) &&
             DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy));
    }();

    // If this doesn't map cleanly onto the alloca type, and that type isn't
    // a single value type, just emit a memset.
    if (!CanContinue) {
      Type *SizeTy = II.getLength()->getType();
      Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
      CallInst *New = IRB.CreateMemSet(
          getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size,
          getSliceAlign(), II.isVolatile());
      if (AATags)
        New->setAAMetadata(AATags);
      LLVM_DEBUG(dbgs() << "          to: " << *New << "\n");
      return false;
    }

    // If we can represent this as a simple value, we have to build the actual
    // value to store, which requires expanding the byte present in memset to
    // a sensible representation for the alloca type. This is essentially
    // splatting the byte to a sufficiently wide integer, splatting it across
    // any desired vector width, and bitcasting to the final type.
    Value *V;

    if (VecTy) {
      // If this is a memset of a vectorized alloca, insert it.
      assert(ElementTy == ScalarTy);

      unsigned BeginIndex = getIndex(NewBeginOffset);
      unsigned EndIndex = getIndex(NewEndOffset);
      assert(EndIndex > BeginIndex && "Empty vector!");
      unsigned NumElements = EndIndex - BeginIndex;
      assert(NumElements <= VecTy->getNumElements() && "Too many elements!");

      Value *Splat =
          getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
      Splat = convertValue(DL, IRB, Splat, ElementTy);
      if (NumElements > 1)
        Splat = getVectorSplat(Splat, NumElements);

      Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                         NewAI.getAlignment(), "oldload");
      V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
    } else if (IntTy) {
      // If this is a memset on an alloca where we can widen stores, insert the
      // splat integer.
      assert(!II.isVolatile());

      uint64_t Size = NewEndOffset - NewBeginOffset;
      V = getIntegerSplat(II.getValue(), Size);

      if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
                    EndOffset != NewAllocaEndOffset)) {
        Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                           NewAI.getAlignment(), "oldload");
        Old = convertValue(DL, IRB, Old, IntTy);
        uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
        V = insertInteger(DL, IRB, Old, V, Offset, "insert");
      } else {
        assert(V->getType() == IntTy &&
               "Wrong type for an alloca wide integer!");
      }
      V = convertValue(DL, IRB, V, AllocaTy);
    } else {
      // Established these invariants above.
      assert(NewBeginOffset == NewAllocaBeginOffset);
      assert(NewEndOffset == NewAllocaEndOffset);

      V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
      if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
        V = getVectorSplat(V, AllocaVecTy->getNumElements());

      V = convertValue(DL, IRB, V, AllocaTy);
    }

    StoreInst *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
                                            II.isVolatile());
    if (AATags)
      New->setAAMetadata(AATags);
    LLVM_DEBUG(dbgs() << "          to: " << *New << "\n");
    return !II.isVolatile();
  }

  bool visitMemTransferInst(MemTransferInst &II) {
    // Rewriting of memory transfer instructions can be a bit tricky. We break
    // them into two categories: split intrinsics and unsplit intrinsics.

    LLVM_DEBUG(dbgs() << "    original: " << II << "\n");

    AAMDNodes AATags;
    II.getAAMetadata(AATags);

    bool IsDest = &II.getRawDestUse() == OldUse;
    assert((IsDest && II.getRawDest() == OldPtr) ||
           (!IsDest && II.getRawSource() == OldPtr));

    unsigned SliceAlign = getSliceAlign();

    // For unsplit intrinsics, we simply modify the source and destination
    // pointers in place. This isn't just an optimization, it is a matter of
    // correctness. With unsplit intrinsics we may be dealing with transfers
    // within a single alloca before SROA ran, or with transfers that have
    // a variable length. We may also be dealing with memmove instead of
    // memcpy, and so simply updating the pointers is all that is necessary for
    // us to update both source and dest of a single call.
    if (!IsSplittable) {
      Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
      if (IsDest) {
        II.setDest(AdjustedPtr);
        II.setDestAlignment(SliceAlign);
      } else {
        II.setSource(AdjustedPtr);
        II.setSourceAlignment(SliceAlign);
      }

      LLVM_DEBUG(dbgs() << "          to: " << II << "\n");
      deleteIfTriviallyDead(OldPtr);
      return false;
    }
    // For split transfer intrinsics we have an incredibly useful assurance:
    // the source and destination do not reside within the same alloca, and at
    // least one of them does not escape. This means that we can replace
    // memmove with memcpy, and we don't need to worry about all manner of
    // downsides to splitting and transforming the operations.

    // If this doesn't map cleanly onto the alloca type, and that type isn't
    // a single value type, just emit a memcpy.
    bool EmitMemCpy =
        !VecTy && !IntTy &&
        (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset ||
         SliceSize != DL.getTypeStoreSize(NewAI.getAllocatedType()) ||
         !NewAI.getAllocatedType()->isSingleValueType());

    // If we're just going to emit a memcpy, the alloca hasn't changed, and the
    // size hasn't been shrunk based on analysis of the viable range, this is
    // just a no-op.
    if (EmitMemCpy && &OldAI == &NewAI) {
      // Ensure the start lines up.
      assert(NewBeginOffset == BeginOffset);

      // Rewrite the size as needed.
      if (NewEndOffset != EndOffset)
        II.setLength(ConstantInt::get(II.getLength()->getType(),
                                      NewEndOffset - NewBeginOffset));
      return false;
    }
    // Record this instruction for deletion.
    Pass.DeadInsts.insert(&II);

    // Strip all inbounds GEPs and pointer casts to try to dig out any root
    // alloca that should be re-examined after rewriting this instruction.
    Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
    if (AllocaInst *AI =
            dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) {
      assert(AI != &OldAI && AI != &NewAI &&
             "Splittable transfers cannot reach the same alloca on both ends.");
      Pass.Worklist.insert(AI);
    }

    Type *OtherPtrTy = OtherPtr->getType();
    unsigned OtherAS = OtherPtrTy->getPointerAddressSpace();

    // Compute the relative offset for the other pointer within the transfer.
    unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS);
    APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset);
    unsigned OtherAlign =
        IsDest ? II.getSourceAlignment() : II.getDestAlignment();
    OtherAlign = MinAlign(OtherAlign ? OtherAlign : 1,
                          OtherOffset.zextOrTrunc(64).getZExtValue());

    if (EmitMemCpy) {
      // Compute the other pointer, folding as much as possible to produce
      // a single, simple GEP in most cases.
      OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
                                OtherPtr->getName() + ".");

      Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
      Type *SizeTy = II.getLength()->getType();
      Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);

      Value *DestPtr, *SrcPtr;
      unsigned DestAlign, SrcAlign;
      // Note: IsDest is true iff we're copying into the new alloca slice
      if (IsDest) {
        DestPtr = OurPtr;
        DestAlign = SliceAlign;
        SrcPtr = OtherPtr;
        SrcAlign = OtherAlign;
      } else {
        DestPtr = OtherPtr;
        DestAlign = OtherAlign;
        SrcPtr = OurPtr;
        SrcAlign = SliceAlign;
      }
      CallInst *New = IRB.CreateMemCpy(DestPtr, DestAlign, SrcPtr, SrcAlign,
                                       Size, II.isVolatile());
      if (AATags)
        New->setAAMetadata(AATags);
      LLVM_DEBUG(dbgs() << "          to: " << *New << "\n");
      return false;
    }

    bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
                         NewEndOffset == NewAllocaEndOffset;
    uint64_t Size = NewEndOffset - NewBeginOffset;
    unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0;
    unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
    unsigned NumElements = EndIndex - BeginIndex;
    IntegerType *SubIntTy =
        IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr;

    // Reset the other pointer type to match the register type we're going to
    // use, but using the address space of the original other pointer.
    Type *OtherTy;
    if (VecTy && !IsWholeAlloca) {
      if (NumElements == 1)
        OtherTy = VecTy->getElementType();
      else
        OtherTy = VectorType::get(VecTy->getElementType(), NumElements);
    } else if (IntTy && !IsWholeAlloca) {
      OtherTy = SubIntTy;
    } else {
      OtherTy = NewAllocaTy;
    }
    OtherPtrTy = OtherTy->getPointerTo(OtherAS);

    Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
                                   OtherPtr->getName() + ".");
    unsigned SrcAlign = OtherAlign;
    Value *DstPtr = &NewAI;
    unsigned DstAlign = SliceAlign;
    if (!IsDest) {
      std::swap(SrcPtr, DstPtr);
      std::swap(SrcAlign, DstAlign);
    }

    Value *Src;
    if (VecTy && !IsWholeAlloca && !IsDest) {
      Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                  NewAI.getAlignment(), "load");
      Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
    } else if (IntTy && !IsWholeAlloca && !IsDest) {
      Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                  NewAI.getAlignment(), "load");
      Src = convertValue(DL, IRB, Src, IntTy);
      uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
      Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
    } else {
      LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign,
                                             II.isVolatile(), "copyload");
      if (AATags)
        Load->setAAMetadata(AATags);
      Src = Load;
    }

    if (VecTy && !IsWholeAlloca && IsDest) {
      Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                         NewAI.getAlignment(), "oldload");
      Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
    } else if (IntTy && !IsWholeAlloca && IsDest) {
      Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                         NewAI.getAlignment(), "oldload");
      Old = convertValue(DL, IRB, Old, IntTy);
      uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
      Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
      Src = convertValue(DL, IRB, Src, NewAllocaTy);
    }

    StoreInst *Store = cast<StoreInst>(
        IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
    if (AATags)
      Store->setAAMetadata(AATags);
    LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
    return !II.isVolatile();
  }

&II
) {
3071 assert(II
.isLifetimeStartOrEnd());
3072 LLVM_DEBUG(dbgs() << " original: " << II
<< "\n");
3073 assert(II
.getArgOperand(1) == OldPtr
);
3075 // Record this instruction for deletion.
3076 Pass
.DeadInsts
.insert(&II
);
3078 // Lifetime intrinsics are only promotable if they cover the whole alloca.
3079 // Therefore, we drop lifetime intrinsics which don't cover the whole
3081 // (In theory, intrinsics which partially cover an alloca could be
3082 // promoted, but PromoteMemToReg doesn't handle that case.)
3083 // FIXME: Check whether the alloca is promotable before dropping the
3084 // lifetime intrinsics?
3085 if (NewBeginOffset
!= NewAllocaBeginOffset
||
3086 NewEndOffset
!= NewAllocaEndOffset
)
3090 ConstantInt::get(cast
<IntegerType
>(II
.getArgOperand(0)->getType()),
3091 NewEndOffset
- NewBeginOffset
);
3092 // Lifetime intrinsics always expect an i8* so directly get such a pointer
3093 // for the new alloca slice.
3094 Type
*PointerTy
= IRB
.getInt8PtrTy(OldPtr
->getType()->getPointerAddressSpace());
3095 Value
*Ptr
= getNewAllocaSlicePtr(IRB
, PointerTy
);
3097 if (II
.getIntrinsicID() == Intrinsic::lifetime_start
)
3098 New
= IRB
.CreateLifetimeStart(Ptr
, Size
);
3100 New
= IRB
.CreateLifetimeEnd(Ptr
, Size
);
3103 LLVM_DEBUG(dbgs() << " to: " << *New
<< "\n");
3108 void fixLoadStoreAlign(Instruction
&Root
) {
3109 // This algorithm implements the same visitor loop as
3110 // hasUnsafePHIOrSelectUse, and fixes the alignment of each load
3112 SmallPtrSet
<Instruction
*, 4> Visited
;
3113 SmallVector
<Instruction
*, 4> Uses
;
3114 Visited
.insert(&Root
);
3115 Uses
.push_back(&Root
);
3117 Instruction
*I
= Uses
.pop_back_val();
3119 if (LoadInst
*LI
= dyn_cast
<LoadInst
>(I
)) {
3120 unsigned LoadAlign
= LI
->getAlignment();
3122 LoadAlign
= DL
.getABITypeAlignment(LI
->getType());
3123 LI
->setAlignment(MaybeAlign(std::min(LoadAlign
, getSliceAlign())));
3126 if (StoreInst
*SI
= dyn_cast
<StoreInst
>(I
)) {
3127 unsigned StoreAlign
= SI
->getAlignment();
3129 Value
*Op
= SI
->getOperand(0);
3130 StoreAlign
= DL
.getABITypeAlignment(Op
->getType());
3132 SI
->setAlignment(MaybeAlign(std::min(StoreAlign
, getSliceAlign())));
3136 assert(isa
<BitCastInst
>(I
) || isa
<AddrSpaceCastInst
>(I
) ||
3137 isa
<PHINode
>(I
) || isa
<SelectInst
>(I
) ||
3138 isa
<GetElementPtrInst
>(I
));
3139 for (User
*U
: I
->users())
3140 if (Visited
.insert(cast
<Instruction
>(U
)).second
)
3141 Uses
.push_back(cast
<Instruction
>(U
));
3142 } while (!Uses
.empty());
  bool visitPHINode(PHINode &PN) {
    LLVM_DEBUG(dbgs() << "    original: " << PN << "\n");
    assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable");
    assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable");

    // We would like to compute a new pointer in only one place, but have it be
    // as local as possible to the PHI. To do that, we re-use the location of
    // the old pointer, which necessarily must be in the right position to
    // dominate the PHI.
    IRBuilderTy PtrBuilder(IRB);
    if (isa<PHINode>(OldPtr))
      PtrBuilder.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt());
    else
      PtrBuilder.SetInsertPoint(OldPtr);
    PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc());

    Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType());
    // Replace the operands which were using the old pointer.
    std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);

    LLVM_DEBUG(dbgs() << "          to: " << PN << "\n");
    deleteIfTriviallyDead(OldPtr);

    // Fix the alignment of any loads or stores using this PHI node.
    fixLoadStoreAlign(PN);

    // PHIs can't be promoted on their own, but often can be speculated. We
    // check the speculation outside of the rewriter so that we see the
    // fully-rewritten alloca.
    PHIUsers.insert(&PN);
    return true;
  }
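  // For illustration (hypothetical IR): a PHI over the alloca pointer such as
  //   %p = phi i32* [ %a.sroa.0, %then ], [ %other, %else ]
  //   %v = load i32, i32* %p
  // is only recorded in PHIUsers here; if isSafePHIToSpeculate later agrees,
  // speculatePHINodeLoads hoists a load into each predecessor and replaces
  // the pointer PHI with a PHI over the loaded values.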
  bool visitSelectInst(SelectInst &SI) {
    LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");
    assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
           "Pointer isn't an operand!");
    assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
    assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");

    Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
    // Replace the operands which were using the old pointer.
    if (SI.getOperand(1) == OldPtr)
      SI.setOperand(1, NewPtr);
    if (SI.getOperand(2) == OldPtr)
      SI.setOperand(2, NewPtr);

    LLVM_DEBUG(dbgs() << "          to: " << SI << "\n");
    deleteIfTriviallyDead(OldPtr);

    // Fix the alignment of any loads or stores using this select.
    fixLoadStoreAlign(SI);

    // Selects can't be promoted on their own, but often can be speculated. We
    // check the speculation outside of the rewriter so that we see the
    // fully-rewritten alloca.
    SelectUsers.insert(&SI);
    return true;
  }
};
/// Visitor to rewrite aggregate loads and stores as scalar.
///
/// This pass aggressively rewrites all aggregate loads and stores on
/// a particular pointer (or any pointer derived from it which we can identify)
/// with scalar loads and stores.
class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
  // Befriend the base class so it can delegate to private visit methods.
  friend class InstVisitor<AggLoadStoreRewriter, bool>;

  /// Queue of pointer uses to analyze and potentially rewrite.
  SmallVector<Use *, 8> Queue;

  /// Set to prevent us from cycling with phi nodes and loops.
  SmallPtrSet<User *, 8> Visited;

  /// The current pointer use being rewritten. This is used to dig up the used
  /// value (as opposed to the user).
  Use *U;

  /// Used to calculate offsets, and hence alignment, of subobjects.
  const DataLayout &DL;

public:
  AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {}

  /// Rewrite loads and stores through a pointer and all pointers derived from
  /// it.
  bool rewrite(Instruction &I) {
    LLVM_DEBUG(dbgs() << "  Rewriting FCA loads and stores...\n");
    enqueueUsers(I);
    bool Changed = false;
    while (!Queue.empty()) {
      U = Queue.pop_back_val();
      Changed |= visit(cast<Instruction>(U->getUser()));
    }
    return Changed;
  }

private:
  /// Enqueue all the users of the given instruction for further processing.
  /// This uses a set to de-duplicate users.
  void enqueueUsers(Instruction &I) {
    for (Use &U : I.uses())
      if (Visited.insert(U.getUser()).second)
        Queue.push_back(&U);
  }

  // Conservative default is to not rewrite anything.
  bool visitInstruction(Instruction &I) { return false; }
  /// Generic recursive split emission class.
  template <typename Derived> class OpSplitter {
  protected:
    /// The builder used to form new instructions.
    IRBuilderTy IRB;

    /// The indices to be used with insert- or extractvalue to select the
    /// appropriate value within the aggregate.
    SmallVector<unsigned, 4> Indices;

    /// The indices to a GEP instruction which will move Ptr to the correct
    /// slot within the aggregate.
    SmallVector<Value *, 4> GEPIndices;

    /// The base pointer of the original op, used as a base for GEPing the
    /// split operations.
    Value *Ptr;

    /// The base pointee type being GEPed into.
    Type *BaseTy;

    /// Known alignment of the base pointer.
    unsigned BaseAlign;

    /// Used to calculate the offset of each component so we can correctly
    /// deduce alignments.
    const DataLayout &DL;

    /// Initialize the splitter with an insertion point, Ptr and start with a
    /// single zero GEP index.
    OpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
               unsigned BaseAlign, const DataLayout &DL)
        : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr),
          BaseTy(BaseTy), BaseAlign(BaseAlign), DL(DL) {}

  public:
    /// Generic recursive split emission routine.
    ///
    /// This method recursively splits an aggregate op (load or store) into
    /// scalar or vector ops. It splits recursively until it hits a single
    /// value and emits that single value operation via the template argument.
    ///
    /// The logic of this routine relies on GEPs and insertvalue and
    /// extractvalue all operating with the same fundamental index list, merely
    /// formatted differently (GEPs need actual values).
    ///
    /// \param Ty  The type being split recursively into smaller ops.
    /// \param Agg The aggregate value being built up or stored, depending on
    /// whether this is splitting a load or a store respectively.
    void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
      if (Ty->isSingleValueType()) {
        unsigned Offset = DL.getIndexedOffsetInType(BaseTy, GEPIndices);
        return static_cast<Derived *>(this)->emitFunc(
            Ty, Agg, MinAlign(BaseAlign, Offset), Name);
      }

      if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
        unsigned OldSize = Indices.size();
        for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
             ++Idx) {
          assert(Indices.size() == OldSize && "Did not return to the old size");
          Indices.push_back(Idx);
          GEPIndices.push_back(IRB.getInt32(Idx));
          emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
          GEPIndices.pop_back();
          Indices.pop_back();
        }
        return;
      }

      if (StructType *STy = dyn_cast<StructType>(Ty)) {
        unsigned OldSize = Indices.size();
        for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
             ++Idx) {
          assert(Indices.size() == OldSize && "Did not return to the old size");
          Indices.push_back(Idx);
          GEPIndices.push_back(IRB.getInt32(Idx));
          emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
          GEPIndices.pop_back();
          Indices.pop_back();
        }
        return;
      }

      llvm_unreachable("Only arrays and structs are aggregate loadable types");
    }
  };
  struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
    AAMDNodes AATags;

    LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
                   AAMDNodes AATags, unsigned BaseAlign, const DataLayout &DL)
        : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign,
                                     DL),
          AATags(AATags) {}

    /// Emit a leaf load of a single value. This is called at the leaves of the
    /// recursive emission to actually load values.
    void emitFunc(Type *Ty, Value *&Agg, unsigned Align, const Twine &Name) {
      assert(Ty->isSingleValueType());
      // Load the single value and insert it using the indices.
      Value *GEP =
          IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
      LoadInst *Load = IRB.CreateAlignedLoad(Ty, GEP, Align, Name + ".load");
      if (AATags)
        Load->setAAMetadata(AATags);
      Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
      LLVM_DEBUG(dbgs() << "          to: " << *Load << "\n");
    }
  };

  bool visitLoadInst(LoadInst &LI) {
    assert(LI.getPointerOperand() == *U);
    if (!LI.isSimple() || LI.getType()->isSingleValueType())
      return false;

    // We have an aggregate being loaded, split it apart.
    LLVM_DEBUG(dbgs() << "    original: " << LI << "\n");
    AAMDNodes AATags;
    LI.getAAMetadata(AATags);
    LoadOpSplitter Splitter(&LI, *U, LI.getType(), AATags,
                            getAdjustedAlignment(&LI, 0, DL), DL);
    Value *V = UndefValue::get(LI.getType());
    Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
    LI.replaceAllUsesWith(V);
    LI.eraseFromParent();
    return true;
  }
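  // A minimal sketch of the rewrite (hypothetical IR): a first-class aggregate
  // load such as
  //   %pair = load { i32, float }, { i32, float }* %p
  // becomes a per-element sequence of GEPs, scalar loads and insertvalues:
  //   %gep0 = getelementptr inbounds { i32, float }, { i32, float }* %p, i32 0, i32 0
  //   %elt0 = load i32, i32* %gep0
  //   %tmp0 = insertvalue { i32, float } undef, i32 %elt0, 0
  //   %gep1 = getelementptr inbounds { i32, float }, { i32, float }* %p, i32 0, i32 1
  //   %elt1 = load float, float* %gep1
  //   %pair.fca = insertvalue { i32, float } %tmp0, float %elt1, 1
  // Names here are illustrative; the real ones derive from the load's name.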
  struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
    StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
                    AAMDNodes AATags, unsigned BaseAlign, const DataLayout &DL)
        : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign,
                                      DL),
          AATags(AATags) {}
    AAMDNodes AATags;

    /// Emit a leaf store of a single value. This is called at the leaves of
    /// the recursive emission to actually produce stores.
    void emitFunc(Type *Ty, Value *&Agg, unsigned Align, const Twine &Name) {
      assert(Ty->isSingleValueType());
      // Extract the single value and store it using the indices.
      //
      // The gep and extractvalue values are factored out of the CreateStore
      // call to make the output independent of the argument evaluation order.
      Value *ExtractValue =
          IRB.CreateExtractValue(Agg, Indices, Name + ".extract");
      Value *InBoundsGEP =
          IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
      StoreInst *Store =
          IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Align);
      if (AATags)
        Store->setAAMetadata(AATags);
      LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
    }
  };

  bool visitStoreInst(StoreInst &SI) {
    if (!SI.isSimple() || SI.getPointerOperand() != *U)
      return false;
    Value *V = SI.getValueOperand();
    if (V->getType()->isSingleValueType())
      return false;

    // We have an aggregate being stored, split it apart.
    LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");
    AAMDNodes AATags;
    SI.getAAMetadata(AATags);
    StoreOpSplitter Splitter(&SI, *U, V->getType(), AATags,
                             getAdjustedAlignment(&SI, 0, DL), DL);
    Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
    SI.eraseFromParent();
    return true;
  }

  bool visitBitCastInst(BitCastInst &BC) {
    enqueueUsers(BC);
    return false;
  }

  bool visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    enqueueUsers(ASC);
    return false;
  }

  bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    enqueueUsers(GEPI);
    return false;
  }

  bool visitPHINode(PHINode &PN) {
    enqueueUsers(PN);
    return false;
  }

  bool visitSelectInst(SelectInst &SI) {
    enqueueUsers(SI);
    return false;
  }
};

} // end anonymous namespace
/// Strip aggregate type wrapping.
///
/// This removes no-op aggregate types wrapping an underlying type. It will
/// strip as many layers of types as it can without changing either the type
/// size or the allocated size.
static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
  if (Ty->isSingleValueType())
    return Ty;

  uint64_t AllocSize = DL.getTypeAllocSize(Ty);
  uint64_t TypeSize = DL.getTypeSizeInBits(Ty);

  Type *InnerTy;
  if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
    InnerTy = ArrTy->getElementType();
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    unsigned Index = SL->getElementContainingOffset(0);
    InnerTy = STy->getElementType(Index);
  } else {
    return Ty;
  }

  if (AllocSize > DL.getTypeAllocSize(InnerTy) ||
      TypeSize > DL.getTypeSizeInBits(InnerTy))
    return Ty;

  return stripAggregateTypeWrapping(DL, InnerTy);
}
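// For example (assuming the usual data layout), a type like { { float } } is
// stripped down to float because neither the allocated size nor the bit size
// changes, whereas { i32, i8 } is left alone since its candidate inner element
// (the i32 at offset 0) is smaller than the wrapping struct.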
/// Try to find a partition of the aggregate type passed in for a given
/// offset and size.
///
/// This recurses through the aggregate type and tries to compute a subtype
/// based on the offset and size. When the offset and size span a sub-section
/// of an array, it will even compute a new array type for that sub-section,
/// and the same for structs.
///
/// Note that this routine is very strict and tries to find a partition of the
/// type which produces the *exact* right offset and size. It is not forgiving
/// when the size or offset cause either end of type-based partition to be off.
/// Also, this is a best-effort routine. It is reasonable to give up and not
/// return a type if necessary.
static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset,
                              uint64_t Size) {
  if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size)
    return stripAggregateTypeWrapping(DL, Ty);
  if (Offset > DL.getTypeAllocSize(Ty) ||
      (DL.getTypeAllocSize(Ty) - Offset) < Size)
    return nullptr;

  if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
    Type *ElementTy = SeqTy->getElementType();
    uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
    uint64_t NumSkippedElements = Offset / ElementSize;
    if (NumSkippedElements >= SeqTy->getNumElements())
      return nullptr;
    Offset -= NumSkippedElements * ElementSize;

    // First check if we need to recurse.
    if (Offset > 0 || Size < ElementSize) {
      // Bail if the partition ends in a different array element.
      if ((Offset + Size) > ElementSize)
        return nullptr;
      // Recurse through the element type trying to peel off offset bytes.
      return getTypePartition(DL, ElementTy, Offset, Size);
    }
    assert(Offset == 0);

    if (Size == ElementSize)
      return stripAggregateTypeWrapping(DL, ElementTy);
    assert(Size > ElementSize);
    uint64_t NumElements = Size / ElementSize;
    if (NumElements * ElementSize != Size)
      return nullptr;
    return ArrayType::get(ElementTy, NumElements);
  }

  StructType *STy = dyn_cast<StructType>(Ty);
  if (!STy)
    return nullptr;

  const StructLayout *SL = DL.getStructLayout(STy);
  if (Offset >= SL->getSizeInBytes())
    return nullptr;
  uint64_t EndOffset = Offset + Size;
  if (EndOffset > SL->getSizeInBytes())
    return nullptr;

  unsigned Index = SL->getElementContainingOffset(Offset);
  Offset -= SL->getElementOffset(Index);

  Type *ElementTy = STy->getElementType(Index);
  uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
  if (Offset >= ElementSize)
    return nullptr; // The offset points into alignment padding.

  // See if any partition must be contained by the element.
  if (Offset > 0 || Size < ElementSize) {
    if ((Offset + Size) > ElementSize)
      return nullptr;
    return getTypePartition(DL, ElementTy, Offset, Size);
  }
  assert(Offset == 0);

  if (Size == ElementSize)
    return stripAggregateTypeWrapping(DL, ElementTy);

  StructType::element_iterator EI = STy->element_begin() + Index,
                               EE = STy->element_end();
  if (EndOffset < SL->getSizeInBytes()) {
    unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
    if (Index == EndIndex)
      return nullptr; // Within a single element and its padding.

    // Don't try to form "natural" types if the elements don't line up with the
    // expected size.
    // FIXME: We could potentially recurse down through the last element in the
    // sub-struct to find a natural end point.
    if (SL->getElementOffset(EndIndex) != EndOffset)
      return nullptr;

    assert(Index < EndIndex);
    EE = STy->element_begin() + EndIndex;
  }

  // Try to build up a sub-structure.
  StructType *SubTy =
      StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked());
  const StructLayout *SubSL = DL.getStructLayout(SubTy);
  if (Size != SubSL->getSizeInBytes())
    return nullptr; // The sub-struct doesn't have quite the size needed.

  return SubTy;
}
/// Pre-split loads and stores to simplify rewriting.
///
/// We want to break up the splittable load+store pairs as much as
/// possible. This is important to do as a preprocessing step, as once we
/// start rewriting the accesses to partitions of the alloca we lose the
/// necessary information to correctly split apart paired loads and stores
/// which both point into this alloca. The case to consider is something like
/// the following:
///
///   %a = alloca [12 x i8]
///   %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
///   %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
///   %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
///   %iptr1 = bitcast i8* %gep1 to i64*
///   %iptr2 = bitcast i8* %gep2 to i64*
///   %fptr1 = bitcast i8* %gep1 to float*
///   %fptr2 = bitcast i8* %gep2 to float*
///   %fptr3 = bitcast i8* %gep3 to float*
///   store float 0.0, float* %fptr1
///   store float 1.0, float* %fptr2
///   %v = load i64* %iptr1
///   store i64 %v, i64* %iptr2
///   %f1 = load float* %fptr2
///   %f2 = load float* %fptr3
///
/// Here we want to form 3 partitions of the alloca, each 4 bytes large, and
/// promote everything so we recover the 2 SSA values that should have been
/// there all along.
///
/// \returns true if any changes are made.
bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
  LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n");

  // Track the loads and stores which are candidates for pre-splitting here, in
  // the order they first appear during the partition scan. These give stable
  // iteration order and a basis for tracking which loads and stores we
  // actually split.
  SmallVector<LoadInst *, 4> Loads;
  SmallVector<StoreInst *, 4> Stores;

  // We need to accumulate the splits required of each load or store where we
  // can find them via a direct lookup. This is important to cross-check loads
  // and stores against each other. We also track the slice so that we can kill
  // all the slices that end up split.
  struct SplitOffsets {
    Slice *S;
    std::vector<uint64_t> Splits;
  };
  SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap;

  // Track loads out of this alloca which cannot, for any reason, be pre-split.
  // This is important as we also cannot pre-split stores of those loads!
  // FIXME: This is all pretty gross. It means that we can be more aggressive
  // in pre-splitting when the load feeding the store happens to come from
  // a separate alloca. Put another way, the effectiveness of SROA would be
  // decreased by a frontend which just concatenated all of its local allocas
  // into one big flat alloca. But defeating such patterns is exactly the job
  // SROA is tasked with! Sadly, to not have this discrepancy we would have to
  // change store pre-splitting to actually force pre-splitting of the load
  // that feeds it *and all stores*. That makes pre-splitting much harder, but
  // maybe it would make it more principled?
  SmallPtrSet<LoadInst *, 8> UnsplittableLoads;

  LLVM_DEBUG(dbgs() << "  Searching for candidate loads and stores\n");
  for (auto &P : AS.partitions()) {
    for (Slice &S : P) {
      Instruction *I = cast<Instruction>(S.getUse()->getUser());
      if (!S.isSplittable() || S.endOffset() <= P.endOffset()) {
        // If this is a load we have to track that it can't participate in any
        // pre-splitting. If this is a store of a load we have to track that
        // that load also can't participate in any pre-splitting.
        if (auto *LI = dyn_cast<LoadInst>(I))
          UnsplittableLoads.insert(LI);
        else if (auto *SI = dyn_cast<StoreInst>(I))
          if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand()))
            UnsplittableLoads.insert(LI);
        continue;
      }
      assert(P.endOffset() > S.beginOffset() &&
             "Empty or backwards partition!");

      // Determine if this is a pre-splittable slice.
      if (auto *LI = dyn_cast<LoadInst>(I)) {
        assert(!LI->isVolatile() && "Cannot split volatile loads!");

        // The load must be used exclusively to store into other pointers for
        // us to be able to arbitrarily pre-split it. The stores must also be
        // simple to avoid changing semantics.
        auto IsLoadSimplyStored = [](LoadInst *LI) {
          for (User *LU : LI->users()) {
            auto *SI = dyn_cast<StoreInst>(LU);
            if (!SI || !SI->isSimple())
              return false;
          }
          return true;
        };
        if (!IsLoadSimplyStored(LI)) {
          UnsplittableLoads.insert(LI);
          continue;
        }

        Loads.push_back(LI);
      } else if (auto *SI = dyn_cast<StoreInst>(I)) {
        if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex()))
          // Skip stores *of* pointers. FIXME: This shouldn't even be possible!
          continue;
        auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand());
        if (!StoredLoad || !StoredLoad->isSimple())
          continue;
        assert(!SI->isVolatile() && "Cannot split volatile stores!");

        Stores.push_back(SI);
      } else {
        // Other uses cannot be pre-split.
        continue;
      }

      // Record the initial split.
      LLVM_DEBUG(dbgs() << "    Candidate: " << *I << "\n");
      auto &Offsets = SplitOffsetsMap[I];
      assert(Offsets.Splits.empty() &&
             "Should not have splits the first time we see an instruction!");
      Offsets.S = &S;
      Offsets.Splits.push_back(P.endOffset() - S.beginOffset());
    }

    // Now scan the already split slices, and add a split for any of them which
    // we're going to pre-split.
    for (Slice *S : P.splitSliceTails()) {
      auto SplitOffsetsMapI =
          SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser()));
      if (SplitOffsetsMapI == SplitOffsetsMap.end())
        continue;
      auto &Offsets = SplitOffsetsMapI->second;

      assert(Offsets.S == S && "Found a mismatched slice!");
      assert(!Offsets.Splits.empty() &&
             "Cannot have an empty set of splits on the second partition!");
      assert(Offsets.Splits.back() ==
                 P.beginOffset() - Offsets.S->beginOffset() &&
             "Previous split does not end where this one begins!");

      // Record each split. The last partition's end isn't needed as the size
      // of the slice dictates that.
      if (S->endOffset() > P.endOffset())
        Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset());
    }
  }
  // We may have split loads where some of their stores are split stores. For
  // such loads and stores, we can only pre-split them if their splits exactly
  // match relative to their starting offset. We have to verify this prior to
  // any rewriting.
  Stores.erase(
      llvm::remove_if(Stores,
                      [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) {
                        // Lookup the load we are storing in our map of split
                        // offsets.
                        auto *LI = cast<LoadInst>(SI->getValueOperand());
                        // If it was completely unsplittable, then we're done,
                        // and this store can't be pre-split.
                        if (UnsplittableLoads.count(LI))
                          return true;

                        auto LoadOffsetsI = SplitOffsetsMap.find(LI);
                        if (LoadOffsetsI == SplitOffsetsMap.end())
                          return false; // Unrelated loads are definitely safe.
                        auto &LoadOffsets = LoadOffsetsI->second;

                        // Now lookup the store's offsets.
                        auto &StoreOffsets = SplitOffsetsMap[SI];

                        // If the relative offsets of each split in the load
                        // and store match exactly, then we can split them and
                        // we don't need to remove them here.
                        if (LoadOffsets.Splits == StoreOffsets.Splits)
                          return false;

                        LLVM_DEBUG(
                            dbgs()
                            << "    Mismatched splits for load and store:\n"
                            << "      " << *LI << "\n"
                            << "      " << *SI << "\n");

                        // We've found a store and load that we need to split
                        // with mismatched relative splits. Just give up on
                        // them and remove both instructions from our list of
                        // candidates.
                        UnsplittableLoads.insert(LI);
                        return true;
                      }),
      Stores.end());
  // Now we have to go *back* through all the stores, because a later store may
  // have caused an earlier store's load to become unsplittable and if it is
  // unsplittable for the later store, then we can't rely on it being split in
  // the earlier store either.
  Stores.erase(llvm::remove_if(Stores,
                               [&UnsplittableLoads](StoreInst *SI) {
                                 auto *LI =
                                     cast<LoadInst>(SI->getValueOperand());
                                 return UnsplittableLoads.count(LI);
                               }),
               Stores.end());
  // Once we've established all the loads that can't be split for some reason,
  // filter any that made it into our list out.
  Loads.erase(llvm::remove_if(Loads,
                              [&UnsplittableLoads](LoadInst *LI) {
                                return UnsplittableLoads.count(LI);
                              }),
              Loads.end());

  // If no loads or stores are left, there is no pre-splitting to be done for
  // this alloca.
  if (Loads.empty() && Stores.empty())
    return false;
  // From here on, we can't fail and will be building new accesses, so rig up
  // an IR builder.
  IRBuilderTy IRB(&AI);

  // Collect the new slices which we will merge into the alloca slices.
  SmallVector<Slice, 4> NewSlices;

  // Track any allocas we end up splitting loads and stores for so we iterate
  // on them.
  SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas;

  // At this point, we have collected all of the loads and stores we can
  // pre-split, and the specific splits needed for them. We actually do the
  // splitting in a specific order in order to handle when one of the loads is
  // the value operand of one of the stores.
  //
  // First, we rewrite all of the split loads, and just accumulate each split
  // load in a parallel structure. We also build the slices for them and append
  // them to the alloca slices.
  SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap;
  std::vector<LoadInst *> SplitLoads;
  const DataLayout &DL = AI.getModule()->getDataLayout();
  for (LoadInst *LI : Loads) {
    SplitLoads.clear();

    IntegerType *Ty = cast<IntegerType>(LI->getType());
    uint64_t LoadSize = Ty->getBitWidth() / 8;
    assert(LoadSize > 0 && "Cannot have a zero-sized integer load!");

    auto &Offsets = SplitOffsetsMap[LI];
    assert(LoadSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&
           "Slice size should always match load size exactly!");
    uint64_t BaseOffset = Offsets.S->beginOffset();
    assert(BaseOffset + LoadSize > BaseOffset &&
           "Cannot represent alloca access size using 64-bit integers!");

    Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand());
    IRB.SetInsertPoint(LI);

    LLVM_DEBUG(dbgs() << "  Splitting load: " << *LI << "\n");

    uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
    int Idx = 0, Size = Offsets.Splits.size();
    for (;;) {
      auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
      auto AS = LI->getPointerAddressSpace();
      auto *PartPtrTy = PartTy->getPointerTo(AS);
      LoadInst *PLoad = IRB.CreateAlignedLoad(
          PartTy,
          getAdjustedPtr(IRB, DL, BasePtr,
                         APInt(DL.getIndexSizeInBits(AS), PartOffset),
                         PartPtrTy, BasePtr->getName() + "."),
          getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
          LI->getName());
      PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
                                LLVMContext::MD_access_group});

      // Append this load onto the list of split loads so we can find it later
      // to rewrite the stores.
      SplitLoads.push_back(PLoad);

      // Now build a new slice for the alloca.
      NewSlices.push_back(
          Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
                &PLoad->getOperandUse(PLoad->getPointerOperandIndex()),
                /*IsSplittable*/ false));
      LLVM_DEBUG(dbgs() << "    new slice [" << NewSlices.back().beginOffset()
                        << ", " << NewSlices.back().endOffset()
                        << "): " << *PLoad << "\n");

      // See if we've handled all the splits.
      if (Idx >= Size)
        break;

      // Setup the next partition.
      PartOffset = Offsets.Splits[Idx];
      ++Idx;
      PartSize = (Idx < Size ? Offsets.Splits[Idx] : LoadSize) - PartOffset;
    }

    // Now that we have the split loads, do the slow walk over all uses of the
    // load and rewrite them as split stores, or save the split loads to use
    // below if the store is going to be split there anyways.
    bool DeferredStores = false;
    for (User *LU : LI->users()) {
      StoreInst *SI = cast<StoreInst>(LU);
      if (!Stores.empty() && SplitOffsetsMap.count(SI)) {
        DeferredStores = true;
        LLVM_DEBUG(dbgs() << "    Deferred splitting of store: " << *SI
                          << "\n");
        continue;
      }

      Value *StoreBasePtr = SI->getPointerOperand();
      IRB.SetInsertPoint(SI);

      LLVM_DEBUG(dbgs() << "    Splitting store of load: " << *SI << "\n");

      for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) {
        LoadInst *PLoad = SplitLoads[Idx];
        uint64_t PartOffset = Idx == 0 ? 0 : Offsets.Splits[Idx - 1];
        auto *PartPtrTy =
            PLoad->getType()->getPointerTo(SI->getPointerAddressSpace());

        auto AS = SI->getPointerAddressSpace();
        StoreInst *PStore = IRB.CreateAlignedStore(
            PLoad,
            getAdjustedPtr(IRB, DL, StoreBasePtr,
                           APInt(DL.getIndexSizeInBits(AS), PartOffset),
                           PartPtrTy, StoreBasePtr->getName() + "."),
            getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
        PStore->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
                                   LLVMContext::MD_access_group});
        LLVM_DEBUG(dbgs() << "      +" << PartOffset << ":" << *PStore << "\n");
      }

      // We want to immediately iterate on any allocas impacted by splitting
      // this store, and we have to track any promotable alloca (indicated by
      // a direct store) as needing to be resplit because it is no longer
      // promotable.
      if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) {
        ResplitPromotableAllocas.insert(OtherAI);
        Worklist.insert(OtherAI);
      } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
                     StoreBasePtr->stripInBoundsOffsets())) {
        Worklist.insert(OtherAI);
      }

      // Mark the original store as dead.
      DeadInsts.insert(SI);
    }

    // Save the split loads if there are deferred stores among the users.
    if (DeferredStores)
      SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads)));

    // Mark the original load as dead and kill the original slice.
    DeadInsts.insert(LI);
    Offsets.S->kill();
  }
  // Second, we rewrite all of the split stores. At this point, we know that
  // all loads from this alloca have been split already. For stores of such
  // loads, we can simply look up the pre-existing split loads. For stores of
  // other loads, we split those loads first and then write split stores of
  // them.
  for (StoreInst *SI : Stores) {
    auto *LI = cast<LoadInst>(SI->getValueOperand());
    IntegerType *Ty = cast<IntegerType>(LI->getType());
    uint64_t StoreSize = Ty->getBitWidth() / 8;
    assert(StoreSize > 0 && "Cannot have a zero-sized integer store!");

    auto &Offsets = SplitOffsetsMap[SI];
    assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&
           "Slice size should always match load size exactly!");
    uint64_t BaseOffset = Offsets.S->beginOffset();
    assert(BaseOffset + StoreSize > BaseOffset &&
           "Cannot represent alloca access size using 64-bit integers!");

    Value *LoadBasePtr = LI->getPointerOperand();
    Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand());

    LLVM_DEBUG(dbgs() << "  Splitting store: " << *SI << "\n");

    // Check whether we have an already split load.
    auto SplitLoadsMapI = SplitLoadsMap.find(LI);
    std::vector<LoadInst *> *SplitLoads = nullptr;
    if (SplitLoadsMapI != SplitLoadsMap.end()) {
      SplitLoads = &SplitLoadsMapI->second;
      assert(SplitLoads->size() == Offsets.Splits.size() + 1 &&
             "Too few split loads for the number of splits in the store!");
    } else {
      LLVM_DEBUG(dbgs() << "          of load: " << *LI << "\n");
    }

    uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
    int Idx = 0, Size = Offsets.Splits.size();
    for (;;) {
      auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
      auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace());
      auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace());

      // Either lookup a split load or create one.
      LoadInst *PLoad;
      if (SplitLoads) {
        PLoad = (*SplitLoads)[Idx];
      } else {
        IRB.SetInsertPoint(LI);
        auto AS = LI->getPointerAddressSpace();
        PLoad = IRB.CreateAlignedLoad(
            PartTy,
            getAdjustedPtr(IRB, DL, LoadBasePtr,
                           APInt(DL.getIndexSizeInBits(AS), PartOffset),
                           LoadPartPtrTy, LoadBasePtr->getName() + "."),
            getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
            LI->getName());
      }

      // And store this partition.
      IRB.SetInsertPoint(SI);
      auto AS = SI->getPointerAddressSpace();
      StoreInst *PStore = IRB.CreateAlignedStore(
          PLoad,
          getAdjustedPtr(IRB, DL, StoreBasePtr,
                         APInt(DL.getIndexSizeInBits(AS), PartOffset),
                         StorePartPtrTy, StoreBasePtr->getName() + "."),
          getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);

      // Now build a new slice for the alloca.
      NewSlices.push_back(
          Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
                &PStore->getOperandUse(PStore->getPointerOperandIndex()),
                /*IsSplittable*/ false));
      LLVM_DEBUG(dbgs() << "    new slice [" << NewSlices.back().beginOffset()
                        << ", " << NewSlices.back().endOffset()
                        << "): " << *PStore << "\n");
      if (!SplitLoads)
        LLVM_DEBUG(dbgs() << "      of split load: " << *PLoad << "\n");

      // See if we've finished all the splits.
      if (Idx >= Size)
        break;

      // Setup the next partition.
      PartOffset = Offsets.Splits[Idx];
      ++Idx;
      PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset;
    }

    // We want to immediately iterate on any allocas impacted by splitting
    // this load, which is only relevant if it isn't a load of this alloca and
    // thus we didn't already split the loads above. We also have to keep track
    // of any promotable allocas we split loads on as they can no longer be
    // promoted.
    if (!SplitLoads) {
      if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) {
        assert(OtherAI != &AI && "We can't re-split our own alloca!");
        ResplitPromotableAllocas.insert(OtherAI);
        Worklist.insert(OtherAI);
      } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
                     LoadBasePtr->stripInBoundsOffsets())) {
        assert(OtherAI != &AI && "We can't re-split our own alloca!");
        Worklist.insert(OtherAI);
      }
    }

    // Mark the original store as dead now that we've split it up and kill its
    // slice. Note that we leave the original load in place unless this store
    // was its only use. It may in turn be split up if it is an alloca load
    // for some other alloca, but it may be a normal load. This may introduce
    // redundant loads, but where those can be merged the rest of the optimizer
    // should handle the merging, and this uncovers SSA splits which is more
    // important. In practice, the original loads will almost always be fully
    // split and removed eventually, and the splits will be merged by any
    // trivial CSE, including instcombine.
    if (LI->hasOneUse()) {
      assert(*LI->user_begin() == SI && "Single use isn't this store!");
      DeadInsts.insert(LI);
    }
    DeadInsts.insert(SI);
    Offsets.S->kill();
  }

  // Remove the killed slices that have been pre-split.
  AS.erase(llvm::remove_if(AS, [](const Slice &S) { return S.isDead(); }),
           AS.end());

  // Insert our new slices. This will sort and merge them into the sorted
  // sequence.
  AS.insert(NewSlices);

  LLVM_DEBUG(dbgs() << "  Pre-split slices:\n");
#ifndef NDEBUG
  for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
    LLVM_DEBUG(AS.print(dbgs(), I, "    "));
#endif

  // Finally, don't try to promote any allocas that now require re-splitting.
  // They have already been added to the worklist above.
  PromotableAllocas.erase(
      llvm::remove_if(
          PromotableAllocas,
          [&](AllocaInst *AI) { return ResplitPromotableAllocas.count(AI); }),
      PromotableAllocas.end());

  return true;
}
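// Tying this back to the example in the function comment above: the i64 load
// of %iptr1 and the i64 store to %iptr2 each cover two of the 4-byte
// partitions, so they are pre-split into a pair of i32 loads (offsets 0 and 4)
// and a matching pair of i32 stores (offsets 4 and 8). After that every
// partition is touched only by 4-byte accesses and can be promoted.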
/// Rewrite an alloca partition's users.
///
/// This routine drives both of the rewriting goals of the SROA pass. It tries
/// to rewrite uses of an alloca partition to be conducive for SSA value
/// promotion. If the partition needs a new, more refined alloca, this will
/// build that new alloca, preserving as much type information as possible, and
/// rewrite the uses of the old alloca to point at the new one and have the
/// appropriate new offsets. It also evaluates how successful the rewrite was
/// at enabling promotion and if it was successful queues the alloca to be
/// promoted.
AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
                                   Partition &P) {
  // Try to compute a friendly type for this partition of the alloca. This
  // won't always succeed, in which case we fall back to a legal integer type
  // or an i8 array of an appropriate size.
  Type *SliceTy = nullptr;
  const DataLayout &DL = AI.getModule()->getDataLayout();
  if (Type *CommonUseTy = findCommonType(P.begin(), P.end(), P.endOffset()))
    if (DL.getTypeAllocSize(CommonUseTy) >= P.size())
      SliceTy = CommonUseTy;
  if (!SliceTy)
    if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
                                                 P.beginOffset(), P.size()))
      SliceTy = TypePartitionTy;
  if ((!SliceTy || (SliceTy->isArrayTy() &&
                    SliceTy->getArrayElementType()->isIntegerTy())) &&
      DL.isLegalInteger(P.size() * 8))
    SliceTy = Type::getIntNTy(*C, P.size() * 8);
  if (!SliceTy)
    SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size());
  assert(DL.getTypeAllocSize(SliceTy) >= P.size());

  bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL);

  VectorType *VecTy =
      IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL);
  if (VecTy)
    SliceTy = VecTy;

  // Check for the case where we're going to rewrite to a new alloca of the
  // exact same type as the original, and with the same access offsets. In that
  // case, re-use the existing alloca, but still run through the rewriter to
  // perform phi and select speculation.
  // P.beginOffset() can be non-zero even with the same type in a case with
  // out-of-bounds access (e.g. @PR35657 function in SROA/basictest.ll).
  AllocaInst *NewAI;
  if (SliceTy == AI.getAllocatedType() && P.beginOffset() == 0) {
    NewAI = &AI;
    // FIXME: We should be able to bail at this point with "nothing changed".
    // FIXME: We might want to defer PHI speculation until after here.
    // FIXME: return nullptr;
  } else {
    unsigned Alignment = AI.getAlignment();
    if (!Alignment) {
      // The minimum alignment which users can rely on when the explicit
      // alignment is omitted or zero is that required by the ABI for this
      // type.
      Alignment = DL.getABITypeAlignment(AI.getAllocatedType());
    }
    Alignment = MinAlign(Alignment, P.beginOffset());
    // If we will get at least this much alignment from the type alone, leave
    // the alloca's alignment unconstrained.
    if (Alignment <= DL.getABITypeAlignment(SliceTy))
      Alignment = 0;
    NewAI = new AllocaInst(
        SliceTy, AI.getType()->getAddressSpace(), nullptr, Alignment,
        AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI);
    // Copy the old AI debug location over to the new one.
    NewAI->setDebugLoc(AI.getDebugLoc());
  }

  LLVM_DEBUG(dbgs() << "Rewriting alloca partition "
                    << "[" << P.beginOffset() << "," << P.endOffset()
                    << ") to: " << *NewAI << "\n");

  // Track the high watermark on the worklist as it is only relevant for
  // promoted allocas. We will reset it to this point if the alloca is not in
  // fact scheduled for promotion.
  unsigned PPWOldSize = PostPromotionWorklist.size();
  unsigned NumUses = 0;
  SmallSetVector<PHINode *, 8> PHIUsers;
  SmallSetVector<SelectInst *, 8> SelectUsers;

  AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(),
                               P.endOffset(), IsIntegerPromotable, VecTy,
                               PHIUsers, SelectUsers);
  bool Promotable = true;
  for (Slice *S : P.splitSliceTails()) {
    Promotable &= Rewriter.visit(S);
    ++NumUses;
  }
  for (Slice &S : P) {
    Promotable &= Rewriter.visit(&S);
    ++NumUses;
  }

  NumAllocaPartitionUses += NumUses;
  MaxUsesPerAllocaPartition.updateMax(NumUses);

  // Now that we've processed all the slices in the new partition, check if any
  // PHIs or Selects would block promotion.
  for (PHINode *PHI : PHIUsers)
    if (!isSafePHIToSpeculate(*PHI)) {
      Promotable = false;
      PHIUsers.clear();
      SelectUsers.clear();
      break;
    }

  for (SelectInst *Sel : SelectUsers)
    if (!isSafeSelectToSpeculate(*Sel)) {
      Promotable = false;
      PHIUsers.clear();
      SelectUsers.clear();
      break;
    }

  if (Promotable) {
    if (PHIUsers.empty() && SelectUsers.empty()) {
      // Promote the alloca.
      PromotableAllocas.push_back(NewAI);
    } else {
      // If we have either PHIs or Selects to speculate, add them to those
      // worklists and re-queue the new alloca so that we promote it on the
      // next iteration.
      for (PHINode *PHIUser : PHIUsers)
        SpeculatablePHIs.insert(PHIUser);
      for (SelectInst *SelectUser : SelectUsers)
        SpeculatableSelects.insert(SelectUser);
      Worklist.insert(NewAI);
    }
  } else {
    // Drop any post-promotion work items if promotion didn't happen.
    while (PostPromotionWorklist.size() > PPWOldSize)
      PostPromotionWorklist.pop_back();

    // We couldn't promote and we didn't create a new partition, nothing
    // happened.
    if (NewAI == &AI)
      return nullptr;

    // If we can't promote the alloca, iterate on it to check for new
    // refinements exposed by splitting the current alloca. Don't iterate on an
    // alloca which didn't actually change and didn't get promoted.
    Worklist.insert(NewAI);
  }

  return NewAI;
}
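// As a rough example (hypothetical IR): splitting %x = alloca { i32, i32 }
// whose two fields are only ever accessed separately produces two new allocas
// along the lines of
//   %x.sroa.0 = alloca i32
//   %x.sroa.N = alloca i32
// where the suffix is the index of the partition's first slice, and each new
// alloca is then queued for mem2reg-style promotion if its uses allow it.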
/// Walks the slices of an alloca and forms partitions based on them,
/// rewriting each of their uses.
bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
  if (AS.begin() == AS.end())
    return false;

  unsigned NumPartitions = 0;
  bool Changed = false;
  const DataLayout &DL = AI.getModule()->getDataLayout();

  // First try to pre-split loads and stores.
  Changed |= presplitLoadsAndStores(AI, AS);

  // Now that we have identified any pre-splitting opportunities,
  // mark loads and stores unsplittable except for the following case.
  // We leave a slice splittable if all other slices are disjoint or fully
  // included in the slice, such as whole-alloca loads and stores.
  // If we fail to split these during pre-splitting, we want to force them
  // to be rewritten into a partition.
  bool IsSorted = true;

  uint64_t AllocaSize = DL.getTypeAllocSize(AI.getAllocatedType());
  const uint64_t MaxBitVectorSize = 1024;
  if (AllocaSize <= MaxBitVectorSize) {
    // If a byte boundary is included in any load or store, a slice starting or
    // ending at the boundary is not splittable.
    SmallBitVector SplittableOffset(AllocaSize + 1, true);
    for (Slice &S : AS)
      for (unsigned O = S.beginOffset() + 1;
           O < S.endOffset() && O < AllocaSize; O++)
        SplittableOffset.reset(O);

    for (Slice &S : AS) {
      if (!S.isSplittable())
        continue;

      if ((S.beginOffset() > AllocaSize || SplittableOffset[S.beginOffset()]) &&
          (S.endOffset() > AllocaSize || SplittableOffset[S.endOffset()]))
        continue;

      if (isa<LoadInst>(S.getUse()->getUser()) ||
          isa<StoreInst>(S.getUse()->getUser())) {
        S.makeUnsplittable();
        IsSorted = false;
      }
    }
  } else {
    // We only allow whole-alloca splittable loads and stores
    // for a large alloca to avoid creating too large a BitVector.
    for (Slice &S : AS) {
      if (!S.isSplittable())
        continue;

      if (S.beginOffset() == 0 && S.endOffset() >= AllocaSize)
        continue;

      if (isa<LoadInst>(S.getUse()->getUser()) ||
          isa<StoreInst>(S.getUse()->getUser())) {
        S.makeUnsplittable();
        IsSorted = false;
      }
    }
  }
  if (!IsSorted)
    llvm::sort(AS);
  /// Describes the allocas introduced by rewritePartition in order to migrate
  /// the debug info.
  struct Fragment {
    AllocaInst *Alloca;
    uint64_t Offset;
    uint64_t Size;
    Fragment(AllocaInst *AI, uint64_t O, uint64_t S)
        : Alloca(AI), Offset(O), Size(S) {}
  };
  SmallVector<Fragment, 4> Fragments;

  // Rewrite each partition.
  for (auto &P : AS.partitions()) {
    if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) {
      Changed = true;
      if (NewAI != &AI) {
        uint64_t SizeOfByte = 8;
        uint64_t AllocaSize = DL.getTypeSizeInBits(NewAI->getAllocatedType());
        // Don't include any padding.
        uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte);
        Fragments.push_back(Fragment(NewAI, P.beginOffset() * SizeOfByte, Size));
      }
    }
    ++NumPartitions;
  }

  NumAllocaPartitions += NumPartitions;
  MaxPartitionsPerAlloca.updateMax(NumPartitions);

  // Migrate debug information from the old alloca to the new alloca(s)
  // and the individual partitions.
  TinyPtrVector<DbgVariableIntrinsic *> DbgDeclares = FindDbgAddrUses(&AI);
  if (!DbgDeclares.empty()) {
    auto *Var = DbgDeclares.front()->getVariable();
    auto *Expr = DbgDeclares.front()->getExpression();
    auto VarSize = Var->getSizeInBits();
    DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false);
    uint64_t AllocaSize = DL.getTypeSizeInBits(AI.getAllocatedType());
    for (auto Fragment : Fragments) {
      // Create a fragment expression describing the new partition or reuse
      // AI's expression if there is only one partition.
      auto *FragmentExpr = Expr;
      if (Fragment.Size < AllocaSize || Expr->isFragment()) {
        // If this alloca is already a scalar replacement of a larger
        // aggregate, Fragment.Offset describes the offset inside the scalar.
        auto ExprFragment = Expr->getFragmentInfo();
        uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0;
        uint64_t Start = Offset + Fragment.Offset;
        uint64_t Size = Fragment.Size;
        if (ExprFragment) {
          uint64_t AbsEnd =
              ExprFragment->OffsetInBits + ExprFragment->SizeInBits;
          if (Start >= AbsEnd)
            // No need to describe a SROAed padding.
            continue;
          Size = std::min(Size, AbsEnd - Start);
        }
        // The new, smaller fragment is stenciled out from the old fragment.
        if (auto OrigFragment = FragmentExpr->getFragmentInfo()) {
          assert(Start >= OrigFragment->OffsetInBits &&
                 "new fragment is outside of original fragment");
          Start -= OrigFragment->OffsetInBits;
        }

        // The alloca may be larger than the variable.
        if (VarSize) {
          if (Size > *VarSize)
            Size = *VarSize;
          if (Size == 0 || Start + Size > *VarSize)
            continue;
        }

        // Avoid creating a fragment expression that covers the entire
        // variable.
        if (!VarSize || *VarSize != Size) {
          if (auto E =
                  DIExpression::createFragmentExpression(Expr, Start, Size))
            FragmentExpr = *E;
          else
            continue;
        }
      }

      // Remove any existing intrinsics describing the same alloca.
      for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(Fragment.Alloca))
        OldDII->eraseFromParent();

      DIB.insertDeclare(Fragment.Alloca, Var, FragmentExpr,
                        DbgDeclares.front()->getDebugLoc(), &AI);
    }
  }
  return Changed;
}
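// A sketch of the resulting debug info (hypothetical metadata and value
// names): after splitting a 64-bit variable into two 32-bit allocas, the
// original dbg.declare is replaced by fragment-annotated declares such as
//   call void @llvm.dbg.declare(metadata i32* %x.sroa.0, metadata !var,
//          metadata !DIExpression(DW_OP_LLVM_fragment, 0, 32))
//   call void @llvm.dbg.declare(metadata i32* %x.sroa.N, metadata !var,
//          metadata !DIExpression(DW_OP_LLVM_fragment, 32, 32))
// so each new alloca describes only its slice of the source variable.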
/// Clobber a use with undef, deleting the used value if it becomes dead.
void SROA::clobberUse(Use &U) {
  Value *OldV = U;
  // Replace the use with an undef value.
  U = UndefValue::get(OldV->getType());

  // Check for this making an instruction dead. We have to garbage collect
  // all the dead instructions to ensure the uses of any alloca end up being
  // minimal.
  if (Instruction *OldI = dyn_cast<Instruction>(OldV))
    if (isInstructionTriviallyDead(OldI)) {
      DeadInsts.insert(OldI);
    }
}
/// Analyze an alloca for SROA.
///
/// This analyzes the alloca to ensure we can reason about it, builds
/// the slices of the alloca, and then hands it off to be split and
/// rewritten as needed.
bool SROA::runOnAlloca(AllocaInst &AI) {
  LLVM_DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
  ++NumAllocasAnalyzed;

  // Special case dead allocas, as they're trivial.
  if (AI.use_empty()) {
    AI.eraseFromParent();
    return true;
  }
  const DataLayout &DL = AI.getModule()->getDataLayout();

  // Skip alloca forms that this analysis can't handle.
  if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
      DL.getTypeAllocSize(AI.getAllocatedType()) == 0)
    return false;

  bool Changed = false;

  // First, split any FCA loads and stores touching this alloca to promote
  // better splitting and promotion opportunities.
  AggLoadStoreRewriter AggRewriter(DL);
  Changed |= AggRewriter.rewrite(AI);

  // Build the slices using a recursive instruction-visiting builder.
  AllocaSlices AS(DL, AI);
  LLVM_DEBUG(AS.print(dbgs()));
  if (AS.isEscaped())
    return Changed;

  // Delete all the dead users of this alloca before splitting and rewriting it.
  for (Instruction *DeadUser : AS.getDeadUsers()) {
    // Free up everything used by this instruction.
    for (Use &DeadOp : DeadUser->operands())
      clobberUse(DeadOp);

    // Now replace the uses of this instruction.
    DeadUser->replaceAllUsesWith(UndefValue::get(DeadUser->getType()));

    // And mark it for deletion.
    DeadInsts.insert(DeadUser);
    Changed = true;
  }
  for (Use *DeadOp : AS.getDeadOperands()) {
    clobberUse(*DeadOp);
    Changed = true;
  }

  // No slices to split. Leave the dead alloca for a later pass to clean up.
  if (AS.begin() == AS.end())
    return Changed;

  Changed |= splitAlloca(AI, AS);

  LLVM_DEBUG(dbgs() << "  Speculating PHIs\n");
  while (!SpeculatablePHIs.empty())
    speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val());

  LLVM_DEBUG(dbgs() << "  Speculating Selects\n");
  while (!SpeculatableSelects.empty())
    speculateSelectInstLoads(*SpeculatableSelects.pop_back_val());

  return Changed;
}
/// Delete the dead instructions accumulated in this run.
///
/// Recursively deletes the dead instructions we've accumulated. This is done
/// at the very end to maximize locality of the recursive delete and to
/// minimize the problems of invalidated instruction pointers as such pointers
/// are used heavily in the intermediate stages of the algorithm.
///
/// We also record the alloca instructions deleted here so that they aren't
/// subsequently handed to mem2reg to promote.
bool SROA::deleteDeadInstructions(
    SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) {
  bool Changed = false;
  while (!DeadInsts.empty()) {
    Instruction *I = DeadInsts.pop_back_val();
    LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");

    // If the instruction is an alloca, find the possible dbg.declare connected
    // to it, and remove it too. We must do this before calling RAUW or we will
    // not be able to find it.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
      DeletedAllocas.insert(AI);
      for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(AI))
        OldDII->eraseFromParent();
    }

    I->replaceAllUsesWith(UndefValue::get(I->getType()));

    for (Use &Operand : I->operands())
      if (Instruction *U = dyn_cast<Instruction>(Operand)) {
        // Zero out the operand and see if it becomes trivially dead.
        Operand = nullptr;
        if (isInstructionTriviallyDead(U))
          DeadInsts.insert(U);
      }

    I->eraseFromParent();
    Changed = true;
  }
  return Changed;
}

/// Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
/// the PromotableAllocas list. If that list is empty, there is nothing to do.
/// This function returns whether any promotion occurred.
bool SROA::promoteAllocas(Function &F) {
  if (PromotableAllocas.empty())
    return false;

  NumPromoted += PromotableAllocas.size();

  LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
  PromoteMemToReg(PromotableAllocas, *DT, AC);
  PromotableAllocas.clear();
  return true;
}
PreservedAnalyses SROA::runImpl(Function &F, DominatorTree &RunDT,
                                AssumptionCache &RunAC) {
  LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
  C = &F.getContext();
  DT = &RunDT;
  AC = &RunAC;

  BasicBlock &EntryBB = F.getEntryBlock();
  for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
       I != E; ++I) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      Worklist.insert(AI);
  }

  bool Changed = false;
  // A set of deleted alloca instruction pointers which should be removed from
  // the list of promotable allocas.
  SmallPtrSet<AllocaInst *, 4> DeletedAllocas;

  do {
    while (!Worklist.empty()) {
      Changed |= runOnAlloca(*Worklist.pop_back_val());
      Changed |= deleteDeadInstructions(DeletedAllocas);

      // Remove the deleted allocas from various lists so that we don't try to
      // continue processing them.
      if (!DeletedAllocas.empty()) {
        auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
        Worklist.remove_if(IsInSet);
        PostPromotionWorklist.remove_if(IsInSet);
        PromotableAllocas.erase(llvm::remove_if(PromotableAllocas, IsInSet),
                                PromotableAllocas.end());
        DeletedAllocas.clear();
      }
    }

    Changed |= promoteAllocas(F);

    Worklist = PostPromotionWorklist;
    PostPromotionWorklist.clear();
  } while (!Worklist.empty());

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  return PA;
}

PreservedAnalyses SROA::run(Function &F, FunctionAnalysisManager &AM) {
  return runImpl(F, AM.getResult<DominatorTreeAnalysis>(F),
                 AM.getResult<AssumptionAnalysis>(F));
}
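// Usage note: with the new pass manager this entry point is reached via
// `opt -passes=sroa`, while the legacy wrapper below is what `opt -sroa`
// constructs through createSROAPass().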
/// A legacy pass for the legacy pass manager that wraps the \c SROA pass.
///
/// This is in the llvm namespace purely to allow it to be a friend of the \c
/// SROA pass.
class llvm::sroa::SROALegacyPass : public FunctionPass {
  /// The SROA implementation.
  SROA Impl;

public:
  static char ID;

  SROALegacyPass() : FunctionPass(ID) {
    initializeSROALegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto PA = Impl.runImpl(
        F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
    return !PA.areAllPreserved();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }

  StringRef getPassName() const override { return "SROA"; }
};

char SROALegacyPass::ID = 0;

FunctionPass *llvm::createSROAPass() { return new SROALegacyPass(); }

INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa",
                      "Scalar Replacement Of Aggregates", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates",
                    false, false)