//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This transformation implements the well known scalar replacement of
/// aggregates transformation. It tries to identify promotable elements of an
/// aggregate alloca, and promote them to registers. It will also try to
/// convert uses of an element (or set of elements) of an alloca into a vector
/// or bitfield-style integer scalar if appropriate.
///
/// It works to do this with minimal slicing of the alloca so that regions
/// which are merely transferred in and out of external memory remain unchanged
/// and are not decomposed to scalar code.
///
/// Because this also performs alloca promotion, it can be thought of as also
/// serving the purpose of SSA formation. The algorithm iterates on the
/// function until all opportunities for promotion have been realized.
///
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/SROA.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFolder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <string>
#include <tuple>
#include <utility>

#ifndef NDEBUG
// We only use this for a debug check.
#include <random>
#endif

using namespace llvm;
using namespace llvm::sroa;

#define DEBUG_TYPE "sroa"

STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
STATISTIC(NumDeleted, "Number of instructions deleted");
STATISTIC(NumVectorized, "Number of vectorized aggregates");

/// Hidden option to enable randomly shuffling the slices to help uncover
/// instability in their order.
static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
                                             cl::init(false), cl::Hidden);

/// Hidden option to experiment with completely strict handling of inbounds
/// GEPs.
static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false),
                                        cl::Hidden);

namespace {

/// A custom IRBuilder inserter which prefixes all names, but only in
/// Assert builds.
class IRBuilderPrefixedInserter : public IRBuilderDefaultInserter {
  std::string Prefix;

  const Twine getNameWithPrefix(const Twine &Name) const {
    return Name.isTriviallyEmpty() ? Name : Prefix + Name;
  }

public:
  void SetNamePrefix(const Twine &P) { Prefix = P.str(); }

protected:
  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const {
    IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB,
                                           InsertPt);
  }
};

/// Provide a type for IRBuilder that drops names in release builds.
using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>;

/// A used slice of an alloca.
///
/// This structure represents a slice of an alloca used by some instruction. It
/// stores both the begin and end offsets of this use, a pointer to the use
/// itself, and a flag indicating whether we can classify the use as splittable
/// or not when forming partitions of the alloca.
class Slice {
  /// The beginning offset of the range.
  uint64_t BeginOffset = 0;

  /// The ending offset, not included in the range.
  uint64_t EndOffset = 0;

  /// Storage for both the use of this slice and whether it can be
  /// split.
  PointerIntPair<Use *, 1, bool> UseAndIsSplittable;

public:
  Slice() = default;

  Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
      : BeginOffset(BeginOffset), EndOffset(EndOffset),
        UseAndIsSplittable(U, IsSplittable) {}

  uint64_t beginOffset() const { return BeginOffset; }
  uint64_t endOffset() const { return EndOffset; }

  bool isSplittable() const { return UseAndIsSplittable.getInt(); }
  void makeUnsplittable() { UseAndIsSplittable.setInt(false); }

  Use *getUse() const { return UseAndIsSplittable.getPointer(); }

  bool isDead() const { return getUse() == nullptr; }
  void kill() { UseAndIsSplittable.setPointer(nullptr); }

  /// Support for ordering ranges.
  ///
  /// This provides an ordering over ranges such that start offsets are
  /// always increasing, and within equal start offsets, the end offsets are
  /// decreasing. Thus the spanning range comes first in a cluster with the
  /// same start position.
  bool operator<(const Slice &RHS) const {
    if (beginOffset() < RHS.beginOffset())
      return true;
    if (beginOffset() > RHS.beginOffset())
      return false;
    if (isSplittable() != RHS.isSplittable())
      return !isSplittable();
    if (endOffset() > RHS.endOffset())
      return true;
    return false;
  }

  /// Support comparison with a single offset to allow binary searches.
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
                                              uint64_t RHSOffset) {
    return LHS.beginOffset() < RHSOffset;
  }
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
                                              const Slice &RHS) {
    return LHSOffset < RHS.beginOffset();
  }

  bool operator==(const Slice &RHS) const {
    return isSplittable() == RHS.isSplittable() &&
           beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
  }
  bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
};

} // end anonymous namespace

/// Representation of the alloca slices.
///
/// This class represents the slices of an alloca which are formed by its
/// various uses. If a pointer escapes, we can't fully build a representation
/// for the slices used and we reflect that in this structure. The uses are
/// stored, sorted by increasing beginning offset and with unsplittable slices
/// starting at a particular offset before splittable slices.
class llvm::sroa::AllocaSlices {
public:
  /// Construct the slices of a particular alloca.
  AllocaSlices(const DataLayout &DL, AllocaInst &AI);

  /// Test whether a pointer to the allocation escapes our analysis.
  ///
  /// If this is true, the slices are never fully built and should be
  /// ignored.
  bool isEscaped() const { return PointerEscapingInstr; }

  /// Support for iterating over the slices.
  /// @{
  using iterator = SmallVectorImpl<Slice>::iterator;
  using range = iterator_range<iterator>;

  iterator begin() { return Slices.begin(); }
  iterator end() { return Slices.end(); }

  using const_iterator = SmallVectorImpl<Slice>::const_iterator;
  using const_range = iterator_range<const_iterator>;

  const_iterator begin() const { return Slices.begin(); }
  const_iterator end() const { return Slices.end(); }
  /// @}

  /// Erase a range of slices.
  void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); }

  /// Insert new slices for this alloca.
  ///
  /// This moves the slices into the alloca's slices collection, and re-sorts
  /// everything so that the usual ordering properties of the alloca's slices
  /// hold.
  void insert(ArrayRef<Slice> NewSlices) {
    int OldSize = Slices.size();
    Slices.append(NewSlices.begin(), NewSlices.end());
    auto SliceI = Slices.begin() + OldSize;
    llvm::sort(SliceI, Slices.end());
    std::inplace_merge(Slices.begin(), SliceI, Slices.end());
  }

  // Forward declare the iterator and range accessor for walking the
  // partitions.
  class partition_iterator;
  iterator_range<partition_iterator> partitions();

  /// Access the dead users for this alloca.
  ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }

  /// Access the dead operands referring to this alloca.
  ///
  /// These are operands which cannot actually be used to refer to the
  /// alloca as they are outside its range and the user doesn't correct for
  /// that. These mostly consist of PHI node inputs and the like which we just
  /// need to replace with undef.
  ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
  void printSlice(raw_ostream &OS, const_iterator I,
                  StringRef Indent = "  ") const;
  void printUse(raw_ostream &OS, const_iterator I,
                StringRef Indent = "  ") const;
  void print(raw_ostream &OS) const;
  void dump(const_iterator I) const;
  void dump() const;
#endif

private:
  template <typename DerivedT, typename RetT = void> class BuilderBase;
  class SliceBuilder;

  friend class AllocaSlices::SliceBuilder;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Handle to alloca instruction to simplify method interfaces.
  AllocaInst &AI;
#endif

  /// The instruction responsible for this alloca not having a known set
  /// of slices.
  ///
  /// When an instruction (potentially) escapes the pointer to the alloca, we
  /// store a pointer to that here and abort trying to form slices of the
  /// alloca. This will be null if the alloca slices are analyzed successfully.
  Instruction *PointerEscapingInstr;

  /// The slices of the alloca.
  ///
  /// We store a vector of the slices formed by uses of the alloca here. This
  /// vector is sorted by increasing begin offset, and then the unsplittable
  /// slices before the splittable ones. See the Slice inner class for more
  /// details.
  SmallVector<Slice, 8> Slices;

  /// Instructions which will become dead if we rewrite the alloca.
  ///
  /// Note that these are not separated by slice. This is because we expect an
  /// alloca to be completely rewritten or not rewritten at all. If rewritten,
  /// all these instructions can simply be removed and replaced with undef as
  /// they come from outside of the allocated space.
  SmallVector<Instruction *, 8> DeadUsers;

  /// Operands which will become dead if we rewrite the alloca.
  ///
  /// These are operands that in their particular use can be replaced with
  /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
  /// to PHI nodes and the like. They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere) and nor is the PHI, but we
  /// want to swap this particular input for undef to simplify the use lists of
  /// the alloca.
  SmallVector<Use *, 8> DeadOperands;
};
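
// Illustrative usage sketch for the class above (hypothetical caller, not
// from the original source):
//   AllocaSlices AS(DL, AI);
//   if (AS.isEscaped())
//     return; // A pointer to the alloca escaped; the slices are unusable.
//   for (Slice &S : AS)
//     ; // Visit each use slice in sorted order.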

/// A partition of the slices.
///
/// An ephemeral representation for a range of slices which can be viewed as
/// a partition of the alloca. This range represents a span of the alloca's
/// memory which cannot be split, and provides access to all of the slices
/// overlapping some part of the partition.
///
/// Objects of this type are produced by traversing the alloca's slices, but
/// are only ephemeral and not persistent.
class llvm::sroa::Partition {
private:
  friend class AllocaSlices;
  friend class AllocaSlices::partition_iterator;

  using iterator = AllocaSlices::iterator;

  /// The beginning and ending offsets of the alloca for this
  /// partition.
  uint64_t BeginOffset, EndOffset;

  /// The start and end iterators of this partition.
  iterator SI, SJ;

  /// A collection of split slice tails overlapping the partition.
  SmallVector<Slice *, 4> SplitTails;

  /// Raw constructor builds an empty partition starting and ending at
  /// the given iterator.
  Partition(iterator SI) : SI(SI), SJ(SI) {}

public:
  /// The start offset of this partition.
  ///
  /// All of the contained slices start at or after this offset.
  uint64_t beginOffset() const { return BeginOffset; }

  /// The end offset of this partition.
  ///
  /// All of the contained slices end at or before this offset.
  uint64_t endOffset() const { return EndOffset; }

  /// The size of the partition.
  ///
  /// Note that this can never be zero.
  uint64_t size() const {
    assert(BeginOffset < EndOffset && "Partitions must span some bytes!");
    return EndOffset - BeginOffset;
  }

  /// Test whether this partition contains no slices, and merely spans
  /// a region occupied by split slices.
  bool empty() const { return SI == SJ; }

  /// \name Iterate slices that start within the partition.
  /// These may be splittable or unsplittable. They have a begin offset >= the
  /// partition begin offset.
  /// @{
  // FIXME: We should probably define a "concat_iterator" helper and use that
  // to stitch together pointee_iterators over the split tails and the
  // contiguous iterators of the partition. That would give a much nicer
  // interface here. We could then additionally expose filtered iterators for
  // split, unsplit, and unsplittable splices based on the usage patterns.
  iterator begin() const { return SI; }
  iterator end() const { return SJ; }
  /// @}

  /// Get the sequence of split slice tails.
  ///
  /// These tails are of slices which start before this partition but are
  /// split and overlap into the partition. We accumulate these while forming
  /// the partition.
  ArrayRef<Slice *> splitSliceTails() const { return SplitTails; }
};

/// An iterator over partitions of the alloca's slices.
///
/// This iterator implements the core algorithm for partitioning the alloca's
/// slices. It is a forward iterator as we don't support backtracking for
/// efficiency reasons, and re-use a single storage area to maintain the
/// current set of split slices.
///
/// It is templated on the slice iterator type to use so that it can operate
/// with either const or non-const slice iterators.
class AllocaSlices::partition_iterator
    : public iterator_facade_base<partition_iterator,
                                  std::forward_iterator_tag, Partition> {
  friend class AllocaSlices;

  /// Most of the state for walking the partitions is held in a class
  /// with a nice interface for examining them.
  Partition P;

  /// We need to keep the end of the slices to know when to stop.
  AllocaSlices::iterator SE;

  /// We also need to keep track of the maximum split end offset seen.
  /// FIXME: Do we really?
  uint64_t MaxSplitSliceEndOffset = 0;

  /// Sets the partition to be empty at given iterator, and sets the
  /// end iterator.
  partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE)
      : P(SI), SE(SE) {
    // If not already at the end, advance our state to form the initial
    // partition.
    if (SI != SE)
      advance();
  }

  /// Advance the iterator to the next partition.
  ///
  /// Requires that the iterator not be at the end of the slices.
  void advance() {
    assert((P.SI != SE || !P.SplitTails.empty()) &&
           "Cannot advance past the end of the slices!");

    // Clear out any split uses which have ended.
    if (!P.SplitTails.empty()) {
      if (P.EndOffset >= MaxSplitSliceEndOffset) {
        // If we've finished all splits, this is easy.
        P.SplitTails.clear();
        MaxSplitSliceEndOffset = 0;
      } else {
        // Remove the uses which have ended in the prior partition. This
        // cannot change the max split slice end because we just checked that
        // the prior partition ended prior to that max.
        P.SplitTails.erase(llvm::remove_if(P.SplitTails,
                                           [&](Slice *S) {
                                             return S->endOffset() <=
                                                    P.EndOffset;
                                           }),
                           P.SplitTails.end());
        assert(llvm::any_of(P.SplitTails,
                            [&](Slice *S) {
                              return S->endOffset() == MaxSplitSliceEndOffset;
                            }) &&
               "Could not find the current max split slice offset!");
        assert(llvm::all_of(P.SplitTails,
                            [&](Slice *S) {
                              return S->endOffset() <= MaxSplitSliceEndOffset;
                            }) &&
               "Max split slice end offset is not actually the max!");
      }
    }

    // If P.SI is already at the end, then we've cleared the split tail and
    // now have an end iterator.
    if (P.SI == SE) {
      assert(P.SplitTails.empty() && "Failed to clear the split slices!");
      return;
    }

    // If we had a non-empty partition previously, set up the state for
    // subsequent partitions.
    if (P.SI != P.SJ) {
      // Accumulate all the splittable slices which started in the old
      // partition into the split list.
      for (Slice &S : P)
        if (S.isSplittable() && S.endOffset() > P.EndOffset) {
          P.SplitTails.push_back(&S);
          MaxSplitSliceEndOffset =
              std::max(S.endOffset(), MaxSplitSliceEndOffset);
        }

      // Start from the end of the previous partition.
      P.SI = P.SJ;

      // If P.SI is now at the end, we at most have a tail of split slices.
      if (P.SI == SE) {
        P.BeginOffset = P.EndOffset;
        P.EndOffset = MaxSplitSliceEndOffset;
        return;
      }

      // If we have split slices and the next slice is after a gap and is
      // not splittable immediately form an empty partition for the split
      // slices up until the next slice begins.
      if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset &&
          !P.SI->isSplittable()) {
        P.BeginOffset = P.EndOffset;
        P.EndOffset = P.SI->beginOffset();
        return;
      }
    }

    // OK, we need to consume new slices. Set the end offset based on the
    // current slice, and step SJ past it. The beginning offset of the
    // partition is the beginning offset of the next slice unless we have
    // pre-existing split slices that are continuing, in which case we begin
    // at the prior end offset.
    P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset;
    P.EndOffset = P.SI->endOffset();
    ++P.SJ;

    // There are two strategies to form a partition based on whether the
    // partition starts with an unsplittable slice or a splittable slice.
    if (!P.SI->isSplittable()) {
      // When we're forming an unsplittable region, it must always start at
      // the first slice and will extend through its end.
      assert(P.BeginOffset == P.SI->beginOffset());

      // Form a partition including all of the overlapping slices with this
      // unsplittable slice.
      while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
        if (!P.SJ->isSplittable())
          P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
        ++P.SJ;
      }

      // We have a partition across a set of overlapping unsplittable
      // slices.
      return;
    }

    // If we're starting with a splittable slice, then we need to form
    // a synthetic partition spanning it and any other overlapping splittable
    // slices.
    assert(P.SI->isSplittable() && "Forming a splittable partition!");

    // Collect all of the overlapping splittable slices.
    while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset &&
           P.SJ->isSplittable()) {
      P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
      ++P.SJ;
    }

    // Back up P.EndOffset if we ended the span early when encountering an
    // unsplittable slice. This synthesizes the early end offset of
    // a partition spanning only splittable slices.
    if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
      assert(!P.SJ->isSplittable());
      P.EndOffset = P.SJ->beginOffset();
    }
  }

public:
  bool operator==(const partition_iterator &RHS) const {
    assert(SE == RHS.SE &&
           "End iterators don't match between compared partition iterators!");

    // The observed positions of partitions is marked by the P.SI iterator and
    // the emptiness of the split slices. The latter is only relevant when
    // P.SI == SE, as the end iterator will additionally have an empty split
    // slices list, but the prior may have the same P.SI and a tail of split
    // slices.
    if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) {
      assert(P.SJ == RHS.P.SJ &&
             "Same set of slices formed two different sized partitions!");
      assert(P.SplitTails.size() == RHS.P.SplitTails.size() &&
             "Same slice position with differently sized non-empty split "
             "slice tails!");
      return true;
    }
    return false;
  }

  partition_iterator &operator++() {
    advance();
    return *this;
  }

  Partition &operator*() { return P; }
};

/// A forward range over the partitions of the alloca's slices.
///
/// This accesses an iterator range over the partitions of the alloca's
/// slices. It computes these partitions on the fly based on the overlapping
/// offsets of the slices and the ability to split them. It will visit "empty"
/// partitions to cover regions of the alloca only accessed via split
/// slices.
iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() {
  return make_range(partition_iterator(begin(), end()),
                    partition_iterator(end(), end()));
}
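
// Illustrative example of the partition walk (not from the original source):
// given sorted slices [0,8) unsplittable and [0,16) splittable, the first
// partition is [0,8) containing both slices; the walk then emits an "empty"
// partition [8,16) whose splitSliceTails() carries the tail of the [0,16)
// slice.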

static Value *foldSelectInst(SelectInst &SI) {
  // If the condition being selected on is a constant or the same value is
  // being selected between, fold the select. Yes, this does (rarely) happen
  // in real code.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
    return SI.getOperand(1 + CI->isZero());
  if (SI.getOperand(1) == SI.getOperand(2))
    return SI.getOperand(1);

  return nullptr;
}

/// A helper that folds a PHI node or a select.
static Value *foldPHINodeOrSelectInst(Instruction &I) {
  if (PHINode *PN = dyn_cast<PHINode>(&I)) {
    // If PN merges together the same value, return that value.
    return PN->hasConstantValue();
  }
  return foldSelectInst(cast<SelectInst>(I));
}

/// Builder for the alloca slices.
///
/// This class builds a set of alloca slices by recursively visiting the uses
/// of an alloca and making a slice for each load and store at each offset.
class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
  friend class PtrUseVisitor<SliceBuilder>;
  friend class InstVisitor<SliceBuilder>;

  using Base = PtrUseVisitor<SliceBuilder>;

  const uint64_t AllocSize;
  AllocaSlices &AS;

  SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
  SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;

  /// Set to de-duplicate dead instructions found in the use walk.
  SmallPtrSet<Instruction *, 4> VisitedDeadInsts;

public:
  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS)
      : PtrUseVisitor<SliceBuilder>(DL),
        AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), AS(AS) {}

private:
  void markAsDead(Instruction &I) {
    if (VisitedDeadInsts.insert(&I).second)
      AS.DeadUsers.push_back(&I);
  }

  void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
                 bool IsSplittable = false) {
    // Completely skip uses which have a zero size or start either before or
    // past the end of the allocation.
    if (Size == 0 || Offset.uge(AllocSize)) {
      LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @"
                        << Offset
                        << " which has zero size or starts outside of the "
                        << AllocSize << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << I << "\n");
      return markAsDead(I);
    }

    uint64_t BeginOffset = Offset.getZExtValue();
    uint64_t EndOffset = BeginOffset + Size;

    // Clamp the end offset to the end of the allocation. Note that this is
    // formulated to handle even the case where "BeginOffset + Size" overflows.
    // This may appear superficially to be something we could ignore entirely,
    // but that is not so! There may be widened loads or PHI-node uses where
    // some instructions are dead but not others. We can't completely ignore
    // them, and so have to record at least the information here.
    assert(AllocSize >= BeginOffset); // Established above.
    if (Size > AllocSize - BeginOffset) {
      LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @"
                        << Offset << " to remain within the " << AllocSize
                        << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << I << "\n");
      EndOffset = AllocSize;
    }

    AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
  }

  void visitBitCastInst(BitCastInst &BC) {
    if (BC.use_empty())
      return markAsDead(BC);

    return Base::visitBitCastInst(BC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    if (GEPI.use_empty())
      return markAsDead(GEPI);

    if (SROAStrictInbounds && GEPI.isInBounds()) {
      // FIXME: This is a manually un-factored variant of the basic code inside
      // of GEPs with checking of the inbounds invariant specified in the
      // langref in a very strict sense. If we ever want to enable
      // SROAStrictInbounds, this code should be factored cleanly into
      // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
      // by writing out the code here where we have the underlying allocation
      // size readily available.
      APInt GEPOffset = Offset;
      const DataLayout &DL = GEPI.getModule()->getDataLayout();
      for (gep_type_iterator GTI = gep_type_begin(GEPI),
                             GTE = gep_type_end(GEPI);
           GTI != GTE; ++GTI) {
        ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
        if (!OpC)
          break;

        // Handle a struct index, which adds its field offset to the pointer.
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          unsigned ElementIdx = OpC->getZExtValue();
          const StructLayout *SL = DL.getStructLayout(STy);
          GEPOffset +=
              APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
        } else {
          // For array or vector indices, scale the index by the size of the
          // type.
          APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
          GEPOffset += Index * APInt(Offset.getBitWidth(),
                                     DL.getTypeAllocSize(GTI.getIndexedType()));
        }

        // If this index has computed an intermediate pointer which is not
        // inbounds, then the result of the GEP is a poison value and we can
        // delete it and all uses.
        if (GEPOffset.ugt(AllocSize))
          return markAsDead(GEPI);
      }
    }

    return Base::visitGetElementPtrInst(GEPI);
  }

  void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
                         uint64_t Size, bool IsVolatile) {
    // We allow splitting of non-volatile loads and stores where the type is an
    // integer type. These may be used to implement 'memcpy' or other "transfer
    // of bits" patterns.
    bool IsSplittable = Ty->isIntegerTy() && !IsVolatile;

    insertUse(I, Offset, Size, IsSplittable);
  }

  void visitLoadInst(LoadInst &LI) {
    assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
           "All simple FCA loads should have been pre-split");

    if (!IsOffsetKnown)
      return PI.setAborted(&LI);

    const DataLayout &DL = LI.getModule()->getDataLayout();
    uint64_t Size = DL.getTypeStoreSize(LI.getType());
    return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
  }

  void visitStoreInst(StoreInst &SI) {
    Value *ValOp = SI.getValueOperand();
    if (ValOp == *U)
      return PI.setEscapedAndAborted(&SI);
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    const DataLayout &DL = SI.getModule()->getDataLayout();
    uint64_t Size = DL.getTypeStoreSize(ValOp->getType());

    // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so simply
    // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse. We also try to handle cases which might run the
    // risk of overflow.
    // FIXME: We should instead consider the pointer to have escaped if this
    // function is being instrumented for addressing bugs or race conditions.
    if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
      LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @"
                        << Offset << " which extends past the end of the "
                        << AllocSize << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << SI << "\n");
      return markAsDead(SI);
    }

    assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
           "All simple FCA stores should have been pre-split");
    handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
  }

  void visitMemSetInst(MemSetInst &II) {
    assert(II.getRawDest() == *U && "Pointer use is not the destination?");
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if ((Length && Length->getValue() == 0) ||
        (IsOffsetKnown && Offset.uge(AllocSize)))
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    insertUse(II, Offset, Length ? Length->getLimitedValue()
                                 : AllocSize - Offset.getLimitedValue(),
              (bool)Length);
  }

  void visitMemTransferInst(MemTransferInst &II) {
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if (Length && Length->getValue() == 0)
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    // Because we can visit these intrinsics twice, also check to see if the
    // first time marked this instruction as dead. If so, skip it.
    if (VisitedDeadInsts.count(&II))
      return;

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    // This side of the transfer is completely out-of-bounds, and so we can
    // nuke the entire transfer. However, we also need to nuke the other side
    // if already added to our partitions.
    // FIXME: Yet another place we really should bypass this when
    // instrumenting for ASan.
    if (Offset.uge(AllocSize)) {
      SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
          MemTransferSliceMap.find(&II);
      if (MTPI != MemTransferSliceMap.end())
        AS.Slices[MTPI->second].kill();
      return markAsDead(II);
    }

    uint64_t RawOffset = Offset.getLimitedValue();
    uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset;

    // Check for the special case where the same exact value is used for both
    // source and dest.
    if (*U == II.getRawDest() && *U == II.getRawSource()) {
      // For non-volatile transfers this is a no-op.
      if (!II.isVolatile())
        return markAsDead(II);

      return insertUse(II, Offset, Size, /*IsSplittable=*/false);
    }

    // If we have seen both source and destination for a mem transfer, then
    // they both point to the same alloca.
    bool Inserted;
    SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
    std::tie(MTPI, Inserted) =
        MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size()));
    unsigned PrevIdx = MTPI->second;
    if (!Inserted) {
      Slice &PrevP = AS.Slices[PrevIdx];

      // Check if the begin offsets match and this is a non-volatile transfer.
      // In that case, we can completely elide the transfer.
      if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
        PrevP.kill();
        return markAsDead(II);
      }

      // Otherwise we have an offset transfer within the same alloca. We can't
      // split those.
      PrevP.makeUnsplittable();
    }

    // Insert the use now that we've fixed up the splittable nature.
    insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);

    // Check that we ended up with a valid index in the map.
    assert(AS.Slices[PrevIdx].getUse()->getUser() == &II &&
           "Map index doesn't point back to a slice with this user.");
  }

  // Disable SRoA for any intrinsics except for lifetime invariants.
  // FIXME: What about debug intrinsics? This matches old behavior, but
  // doesn't make sense.
  void visitIntrinsicInst(IntrinsicInst &II) {
    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    if (II.isLifetimeStartOrEnd()) {
      ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
      uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
                               Length->getLimitedValue());
      insertUse(II, Offset, Size, true);
      return;
    }

    Base::visitIntrinsicInst(II);
  }
*hasUnsafePHIOrSelectUse(Instruction
*Root
, uint64_t &Size
) {
920 // We consider any PHI or select that results in a direct load or store of
921 // the same offset to be a viable use for slicing purposes. These uses
922 // are considered unsplittable and the size is the maximum loaded or stored
924 SmallPtrSet
<Instruction
*, 4> Visited
;
925 SmallVector
<std::pair
<Instruction
*, Instruction
*>, 4> Uses
;
926 Visited
.insert(Root
);
927 Uses
.push_back(std::make_pair(cast
<Instruction
>(*U
), Root
));
928 const DataLayout
&DL
= Root
->getModule()->getDataLayout();
929 // If there are no loads or stores, the access is dead. We mark that as
930 // a size zero access.
933 Instruction
*I
, *UsedI
;
934 std::tie(UsedI
, I
) = Uses
.pop_back_val();
936 if (LoadInst
*LI
= dyn_cast
<LoadInst
>(I
)) {
937 Size
= std::max(Size
, DL
.getTypeStoreSize(LI
->getType()));
940 if (StoreInst
*SI
= dyn_cast
<StoreInst
>(I
)) {
941 Value
*Op
= SI
->getOperand(0);
944 Size
= std::max(Size
, DL
.getTypeStoreSize(Op
->getType()));
948 if (GetElementPtrInst
*GEP
= dyn_cast
<GetElementPtrInst
>(I
)) {
949 if (!GEP
->hasAllZeroIndices())
951 } else if (!isa
<BitCastInst
>(I
) && !isa
<PHINode
>(I
) &&
952 !isa
<SelectInst
>(I
)) {
956 for (User
*U
: I
->users())
957 if (Visited
.insert(cast
<Instruction
>(U
)).second
)
958 Uses
.push_back(std::make_pair(I
, cast
<Instruction
>(U
)));
959 } while (!Uses
.empty());

  void visitPHINodeOrSelectInst(Instruction &I) {
    assert(isa<PHINode>(I) || isa<SelectInst>(I));
    if (I.use_empty())
      return markAsDead(I);

    // TODO: We could use SimplifyInstruction here to fold PHINodes and
    // SelectInsts. However, doing so requires to change the current
    // dead-operand-tracking mechanism. For instance, suppose neither loading
    // from %U nor %other traps. Then "load (select undef, %U, %other)" does not
    // trap either. However, if we simply replace %U with undef using the
    // current dead-operand-tracking mechanism, "load (select undef, undef,
    // %other)" may trap because the select may return the first operand
    // "undef".
    if (Value *Result = foldPHINodeOrSelectInst(I)) {
      if (Result == *U)
        // If the result of the constant fold will be the pointer, recurse
        // through the PHI/select as if we had RAUW'ed it.
        enqueueUsers(I);
      else
        // Otherwise the operand to the PHI/select is dead, and we can replace
        // it with undef.
        AS.DeadOperands.push_back(U);

      return;
    }

    if (!IsOffsetKnown)
      return PI.setAborted(&I);

    // See if we already have computed info on this node.
    uint64_t &Size = PHIOrSelectSizes[&I];
    if (!Size) {
      // This is a new PHI/Select, check for an unsafe use of it.
      if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size))
        return PI.setAborted(UnsafeI);
    }

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with undef.
    // FIXME: This should instead be escaped in the event we're instrumenting
    // for address sanitization.
    if (Offset.uge(AllocSize)) {
      AS.DeadOperands.push_back(U);
      return;
    }

    insertUse(I, Offset, Size);
  }

  void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); }

  void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); }

  /// Disable SROA entirely if there are unhandled users of the alloca.
  void visitInstruction(Instruction &I) { PI.setAborted(&I); }
};

AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
    :
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
      AI(AI),
#endif
      PointerEscapingInstr(nullptr) {
  SliceBuilder PB(DL, AI, *this);
  SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
  if (PtrI.isEscaped() || PtrI.isAborted()) {
    // FIXME: We should sink the escape vs. abort info into the caller nicely,
    // possibly by just storing the PtrInfo in the AllocaSlices.
    PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
                                                  : PtrI.getAbortingInst();
    assert(PointerEscapingInstr && "Did not track a bad instruction");
    return;
  }

  Slices.erase(
      llvm::remove_if(Slices, [](const Slice &S) { return S.isDead(); }),
      Slices.end());

#ifndef NDEBUG
  if (SROARandomShuffleSlices) {
    std::mt19937 MT(static_cast<unsigned>(
        std::chrono::system_clock::now().time_since_epoch().count()));
    std::shuffle(Slices.begin(), Slices.end(), MT);
  }
#endif

  // Sort the uses. This arranges for the offsets to be in ascending order,
  // and the sizes to be in descending order.
  llvm::sort(Slices);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

void AllocaSlices::print(raw_ostream &OS, const_iterator I,
                         StringRef Indent) const {
  printSlice(OS, I, Indent);
  OS << "\n";
  printUse(OS, I, Indent);
}

void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
                              StringRef Indent) const {
  OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
     << " slice #" << (I - begin())
     << (I->isSplittable() ? " (splittable)" : "");
}

void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
                            StringRef Indent) const {
  OS << Indent << "  used by: " << *I->getUse()->getUser() << "\n";
}

void AllocaSlices::print(raw_ostream &OS) const {
  if (PointerEscapingInstr) {
    OS << "Can't analyze slices for alloca: " << AI << "\n"
       << "  A pointer to this alloca escaped by:\n"
       << "  " << *PointerEscapingInstr << "\n";
    return;
  }

  OS << "Slices of alloca: " << AI << "\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I)
    print(OS, I);
}

LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
  print(dbgs(), I);
}
LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }

#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
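
// Illustrative output sketch from the printers above (approximate, not from
// the original source):
//   Slices of alloca:   %a = alloca i64
//     [0,8) slice #0 (splittable)
//       used by:   %v = load i64, i64* %a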

/// Walk the range of a partitioning looking for a common type to cover this
/// sequence of slices.
static Type *findCommonType(AllocaSlices::const_iterator B,
                            AllocaSlices::const_iterator E,
                            uint64_t EndOffset) {
  Type *Ty = nullptr;
  bool TyIsCommon = true;
  IntegerType *ITy = nullptr;

  // Note that we need to look at *every* alloca slice's Use to ensure we
  // always get consistent results regardless of the order of slices.
  for (AllocaSlices::const_iterator I = B; I != E; ++I) {
    Use *U = I->getUse();
    if (isa<IntrinsicInst>(*U->getUser()))
      continue;
    if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
      continue;

    Type *UserTy = nullptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
      UserTy = LI->getType();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
      UserTy = SI->getValueOperand()->getType();
    }

    if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
      // If the type is larger than the partition, skip it. We only encounter
      // this for split integer operations where we want to use the type of the
      // entity causing the split. Also skip if the type is not a byte width
      // multiple.
      if (UserITy->getBitWidth() % 8 != 0 ||
          UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
        continue;

      // Track the largest bitwidth integer type used in this way in case there
      // is no common type.
      if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
        ITy = UserITy;
    }

    // To avoid depending on the order of slices, Ty and TyIsCommon must not
    // depend on types skipped above.
    if (!UserTy || (Ty && Ty != UserTy))
      TyIsCommon = false; // Give up on anything but an iN type.
    else
      Ty = UserTy;
  }

  return TyIsCommon ? Ty : ITy;
}

/// PHI instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers in the pred blocks and then PHI the
/// results, allowing the load of the alloca to be promoted.
///   %P2 = phi [i32* %Alloca, i32* %Other]
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   ...
///   %V2 = load i32* %Other
///   ...
///   %V = phi [i32 %V1, i32 %V2]
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
///
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Util/Local.h
static bool isSafePHIToSpeculate(PHINode &PN) {
  // For now, we can only do this promotion if the load is in the same block
  // as the PHI, and if there are no stores between the phi and load.
  // TODO: Allow recursive phi users.
  // TODO: Allow stores.
  BasicBlock *BB = PN.getParent();
  unsigned MaxAlign = 0;
  bool HaveLoad = false;
  for (User *U : PN.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // For now we only allow loads in the same block as the PHI. This is
    // a common case that happens when instcombine merges two loads through
    // a PHI.
    if (LI->getParent() != BB)
      return false;

    // Ensure that there are no instructions between the PHI and the load that
    // could store.
    for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
      if (BBI->mayWriteToMemory())
        return false;

    MaxAlign = std::max(MaxAlign, LI->getAlignment());
    HaveLoad = true;
  }

  if (!HaveLoad)
    return false;

  const DataLayout &DL = PN.getModule()->getDataLayout();

  // We can only transform this if it is safe to push the loads into the
  // predecessor blocks. The only thing to watch out for is that we can't put
  // a possibly trapping load in the predecessor if it is a critical edge.
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    Instruction *TI = PN.getIncomingBlock(Idx)->getTerminator();
    Value *InVal = PN.getIncomingValue(Idx);

    // If the value is produced by the terminator of the predecessor (an
    // invoke) or it has side-effects, there is no valid place to put a load
    // in the predecessor.
    if (TI == InVal || TI->mayHaveSideEffects())
      return false;

    // If the predecessor has a single successor, then the edge isn't
    // critical.
    if (TI->getNumSuccessors() == 1)
      continue;

    // If this pointer is always safe to load, or if we can prove that there
    // is already a load in the block, then we can move the load to the pred
    // block.
    if (isSafeToLoadUnconditionally(InVal, MaxAlign, DL, TI))
      continue;

    return false;
  }

  return true;
}
&PN
) {
1232 LLVM_DEBUG(dbgs() << " original: " << PN
<< "\n");
1234 LoadInst
*SomeLoad
= cast
<LoadInst
>(PN
.user_back());
1235 Type
*LoadTy
= SomeLoad
->getType();
1236 IRBuilderTy
PHIBuilder(&PN
);
1237 PHINode
*NewPN
= PHIBuilder
.CreatePHI(LoadTy
, PN
.getNumIncomingValues(),
1238 PN
.getName() + ".sroa.speculated");
1240 // Get the AA tags and alignment to use from one of the loads. It doesn't
1241 // matter which one we get and if any differ.
1243 SomeLoad
->getAAMetadata(AATags
);
1244 unsigned Align
= SomeLoad
->getAlignment();
1246 // Rewrite all loads of the PN to use the new PHI.
1247 while (!PN
.use_empty()) {
1248 LoadInst
*LI
= cast
<LoadInst
>(PN
.user_back());
1249 LI
->replaceAllUsesWith(NewPN
);
1250 LI
->eraseFromParent();
1253 // Inject loads into all of the pred blocks.
1254 DenseMap
<BasicBlock
*, Value
*> InjectedLoads
;
1255 for (unsigned Idx
= 0, Num
= PN
.getNumIncomingValues(); Idx
!= Num
; ++Idx
) {
1256 BasicBlock
*Pred
= PN
.getIncomingBlock(Idx
);
1257 Value
*InVal
= PN
.getIncomingValue(Idx
);
1259 // A PHI node is allowed to have multiple (duplicated) entries for the same
1260 // basic block, as long as the value is the same. So if we already injected
1261 // a load in the predecessor, then we should reuse the same load for all
1262 // duplicated entries.
1263 if (Value
* V
= InjectedLoads
.lookup(Pred
)) {
1264 NewPN
->addIncoming(V
, Pred
);
1268 Instruction
*TI
= Pred
->getTerminator();
1269 IRBuilderTy
PredBuilder(TI
);
1271 LoadInst
*Load
= PredBuilder
.CreateLoad(
1273 (PN
.getName() + ".sroa.speculate.load." + Pred
->getName()));
1274 ++NumLoadsSpeculated
;
1275 Load
->setAlignment(Align
);
1277 Load
->setAAMetadata(AATags
);
1278 NewPN
->addIncoming(Load
, Pred
);
1279 InjectedLoads
[Pred
] = Load
;
1282 LLVM_DEBUG(dbgs() << " speculated to: " << *NewPN
<< "\n");
1283 PN
.eraseFromParent();

/// Select instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers and then select between the result,
/// allowing the load of the alloca to be promoted.
///   %P2 = select i1 %cond, i32* %Alloca, i32* %Other
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   %V2 = load i32* %Other
///   %V = select i1 %cond, i32 %V1, i32 %V2
///
/// We can do this to a select if its only uses are loads and if the operand
/// to the select can be loaded unconditionally.
static bool isSafeSelectToSpeculate(SelectInst &SI) {
  Value *TValue = SI.getTrueValue();
  Value *FValue = SI.getFalseValue();
  const DataLayout &DL = SI.getModule()->getDataLayout();

  for (User *U : SI.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // Both operands to the select need to be dereferenceable, either
    // absolutely (e.g. allocas) or at this point because we can see other
    // accesses to it.
    if (!isSafeToLoadUnconditionally(TValue, LI->getAlignment(), DL, LI))
      return false;
    if (!isSafeToLoadUnconditionally(FValue, LI->getAlignment(), DL, LI))
      return false;
  }

  return true;
}
&SI
) {
1322 LLVM_DEBUG(dbgs() << " original: " << SI
<< "\n");
1324 IRBuilderTy
IRB(&SI
);
1325 Value
*TV
= SI
.getTrueValue();
1326 Value
*FV
= SI
.getFalseValue();
1327 // Replace the loads of the select with a select of two loads.
1328 while (!SI
.use_empty()) {
1329 LoadInst
*LI
= cast
<LoadInst
>(SI
.user_back());
1330 assert(LI
->isSimple() && "We only speculate simple loads");
1332 IRB
.SetInsertPoint(LI
);
1333 LoadInst
*TL
= IRB
.CreateLoad(LI
->getType(), TV
,
1334 LI
->getName() + ".sroa.speculate.load.true");
1335 LoadInst
*FL
= IRB
.CreateLoad(LI
->getType(), FV
,
1336 LI
->getName() + ".sroa.speculate.load.false");
1337 NumLoadsSpeculated
+= 2;
1339 // Transfer alignment and AA info if present.
1340 TL
->setAlignment(LI
->getAlignment());
1341 FL
->setAlignment(LI
->getAlignment());
1344 LI
->getAAMetadata(Tags
);
1346 TL
->setAAMetadata(Tags
);
1347 FL
->setAAMetadata(Tags
);
1350 Value
*V
= IRB
.CreateSelect(SI
.getCondition(), TL
, FL
,
1351 LI
->getName() + ".sroa.speculated");
1353 LLVM_DEBUG(dbgs() << " speculated to: " << *V
<< "\n");
1354 LI
->replaceAllUsesWith(V
);
1355 LI
->eraseFromParent();
1357 SI
.eraseFromParent();

/// Build a GEP out of a base pointer and indices.
///
/// This will return the BasePtr if that is valid, or build a new GEP
/// instruction using the IRBuilder if GEP-ing is needed.
static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
                       SmallVectorImpl<Value *> &Indices, Twine NamePrefix) {
  if (Indices.empty())
    return BasePtr;

  // A single zero index is a no-op, so check for this and avoid building a GEP
  // in that case.
  if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
    return BasePtr;

  return IRB.CreateInBoundsGEP(BasePtr->getType()->getPointerElementType(),
                               BasePtr, Indices, NamePrefix + "sroa_idx");
}

/// Get a natural GEP off of the BasePtr walking through Ty toward
/// TargetTy without changing the offset of the pointer.
///
/// This routine assumes we've already established a properly offset GEP with
/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
/// zero-indices down through type layers until we find one the same as
/// TargetTy. If we can't find one with the same type, we at least try to use
/// one with the same size. If none of that works, we just produce the GEP as
/// indicated by Indices to have the correct offset.
static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
                                    Value *BasePtr, Type *Ty, Type *TargetTy,
                                    SmallVectorImpl<Value *> &Indices,
                                    Twine NamePrefix) {
  if (Ty == TargetTy)
    return buildGEP(IRB, BasePtr, Indices, NamePrefix);

  // Offset size to use for the indices.
  unsigned OffsetSize = DL.getIndexTypeSizeInBits(BasePtr->getType());

  // See if we can descend into a struct and locate a field with the correct
  // type.
  unsigned NumLayers = 0;
  Type *ElementTy = Ty;
  do {
    if (ElementTy->isPointerTy())
      break;

    if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
      ElementTy = ArrayTy->getElementType();
      Indices.push_back(IRB.getIntN(OffsetSize, 0));
    } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
      ElementTy = VectorTy->getElementType();
      Indices.push_back(IRB.getInt32(0));
    } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
      if (STy->element_begin() == STy->element_end())
        break; // Nothing left to descend into.
      ElementTy = *STy->element_begin();
      Indices.push_back(IRB.getInt32(0));
    } else {
      break;
    }
    ++NumLayers;
  } while (ElementTy != TargetTy);
  if (ElementTy != TargetTy)
    Indices.erase(Indices.end() - NumLayers, Indices.end());

  return buildGEP(IRB, BasePtr, Indices, NamePrefix);
}

/// Recursively compute indices for a natural GEP.
///
/// This is the recursive step for getNaturalGEPWithOffset that walks down the
/// element types adding appropriate indices for the GEP.
static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
                                       Value *Ptr, Type *Ty, APInt &Offset,
                                       Type *TargetTy,
                                       SmallVectorImpl<Value *> &Indices,
                                       Twine NamePrefix) {
  if (Offset == 0)
    return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices,
                                 NamePrefix);

  // We can't recurse through pointer types.
  if (Ty->isPointerTy())
    return nullptr;

  // We try to analyze GEPs over vectors here, but note that these GEPs are
  // extremely poorly defined currently. The long-term goal is to remove GEPing
  // over a vector from the IR completely.
  if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
    unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
    if (ElementSizeInBits % 8 != 0) {
      // GEPs over non-multiple of 8 size vector elements are invalid.
      return nullptr;
    }
    APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
    APInt NumSkippedElements = Offset.sdiv(ElementSize);
    if (NumSkippedElements.ugt(VecTy->getNumElements()))
      return nullptr;
    Offset -= NumSkippedElements * ElementSize;
    Indices.push_back(IRB.getInt(NumSkippedElements));
    return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
                                    Offset, TargetTy, Indices, NamePrefix);
  }

  if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
    Type *ElementTy = ArrTy->getElementType();
    APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
    APInt NumSkippedElements = Offset.sdiv(ElementSize);
    if (NumSkippedElements.ugt(ArrTy->getNumElements()))
      return nullptr;

    Offset -= NumSkippedElements * ElementSize;
    Indices.push_back(IRB.getInt(NumSkippedElements));
    return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                    Indices, NamePrefix);
  }

  StructType *STy = dyn_cast<StructType>(Ty);
  if (!STy)
    return nullptr;

  const StructLayout *SL = DL.getStructLayout(STy);
  uint64_t StructOffset = Offset.getZExtValue();
  if (StructOffset >= SL->getSizeInBytes())
    return nullptr;
  unsigned Index = SL->getElementContainingOffset(StructOffset);
  Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
  Type *ElementTy = STy->getElementType(Index);
  if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
    return nullptr; // The offset points into alignment padding.

  Indices.push_back(IRB.getInt32(Index));
  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                  Indices, NamePrefix);
}

/// Get a natural GEP from a base pointer to a particular offset and
/// resulting in a particular type.
///
/// The goal is to produce a "natural" looking GEP that works with the existing
/// composite types to arrive at the appropriate offset and element type for
/// a pointer. TargetTy is the element type the returned GEP should point-to if
/// possible. We recurse by decreasing Offset, adding the appropriate index to
/// Indices, and setting Ty to the result subtype.
///
/// If no natural GEP can be constructed, this function returns null.
static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
                                      Value *Ptr, APInt Offset, Type *TargetTy,
                                      SmallVectorImpl<Value *> &Indices,
                                      Twine NamePrefix) {
  PointerType *Ty = cast<PointerType>(Ptr->getType());

  // Don't consider any GEPs through an i8* as natural unless the TargetTy is
  // an i8.
  if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
    return nullptr;

  Type *ElementTy = Ty->getElementType();
  if (!ElementTy->isSized())
    return nullptr; // We can't GEP through an unsized element.
  APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
  if (ElementSize == 0)
    return nullptr; // Zero-length arrays can't help us build a natural GEP.
  APInt NumSkippedElements = Offset.sdiv(ElementSize);

  Offset -= NumSkippedElements * ElementSize;
  Indices.push_back(IRB.getInt(NumSkippedElements));
  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                  Indices, NamePrefix);
}
/// Compute an adjusted pointer from Ptr by Offset bytes where the
/// resulting pointer has PointerTy.
///
/// This tries very hard to compute a "natural" GEP which arrives at the offset
/// and produces the pointer type desired. Where it cannot, it will try to use
/// the natural GEP to arrive at the offset and bitcast to the type. Where that
/// fails, it will try to use an existing i8* and GEP to the byte offset and
/// bitcast to the type.
///
/// The strategy for finding the more natural GEPs is to peel off layers of the
/// pointer, walking back through bit casts and GEPs, searching for a base
/// pointer from which we can compute a natural GEP with the desired
/// properties. The algorithm tries to fold as many constant indices into
/// a single GEP as possible, thus making each GEP more independent of the
/// surrounding code.
static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
                             APInt Offset, Type *PointerTy, Twine NamePrefix) {
  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(Ptr);
  SmallVector<Value *, 4> Indices;

  // We may end up computing an offset pointer that has the wrong type. If we
  // never are able to compute one directly that has the correct type, we'll
  // fall back to it, so keep it and the base it was computed from around here.
  Value *OffsetPtr = nullptr;
  Value *OffsetBasePtr;

  // Remember any i8 pointer we come across to re-use if we need to do a raw
  // byte offset.
  Value *Int8Ptr = nullptr;
  APInt Int8PtrOffset(Offset.getBitWidth(), 0);

  Type *TargetTy = PointerTy->getPointerElementType();

  do {
    // First fold any existing GEPs into the offset.
    while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
      APInt GEPOffset(Offset.getBitWidth(), 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset))
        break;
      Offset += GEPOffset;
      Ptr = GEP->getPointerOperand();
      if (!Visited.insert(Ptr).second)
        break;
    }

    // See if we can perform a natural GEP here.
    Indices.clear();
    if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
                                           Indices, NamePrefix)) {
      // If we have a new natural pointer at the offset, clear out any old
      // offset pointer we computed. Unless it is the base pointer or
      // a non-instruction, we built a GEP we don't need. Zap it.
      if (OffsetPtr && OffsetPtr != OffsetBasePtr)
        if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) {
          assert(I->use_empty() && "Built a GEP with uses somehow!");
          I->eraseFromParent();
        }
      OffsetPtr = P;
      OffsetBasePtr = Ptr;
      // If we also found a pointer of the right type, we're done.
      if (P->getType() == PointerTy)
        break;
    }

    // Stash this pointer if we've found an i8*.
    if (Ptr->getType()->isIntegerTy(8)) {
      Int8Ptr = Ptr;
      Int8PtrOffset = Offset;
    }

    // Peel off a layer of the pointer and update the offset appropriately.
    if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
      Ptr = cast<Operator>(Ptr)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
      if (GA->isInterposable())
        break;
      Ptr = GA->getAliasee();
    } else {
      break;
    }
    assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(Ptr).second);

  if (!OffsetPtr) {
    if (!Int8Ptr) {
      Int8Ptr = IRB.CreateBitCast(
          Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
          NamePrefix + "sroa_raw_cast");
      Int8PtrOffset = Offset;
    }

    OffsetPtr = Int8PtrOffset == 0
                    ? Int8Ptr
                    : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr,
                                            IRB.getInt(Int8PtrOffset),
                                            NamePrefix + "sroa_raw_idx");
  }
  Ptr = OffsetPtr;

  // On the off chance we were targeting i8*, guard the bitcast here.
  if (Ptr->getType() != PointerTy)
    Ptr = IRB.CreateBitCast(Ptr, PointerTy, NamePrefix + "sroa_cast");

  return Ptr;
}
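// Illustrative example (editorial commentary, not part of the upstream
// logic): when no natural GEP exists, e.g. byte offset 3 into
// `%a = alloca i64`, the fallback path above emits
//   %x = bitcast i64* %a to i8*                    ; "...sroa_raw_cast"
//   %y = getelementptr inbounds i8, i8* %x, i64 3  ; "...sroa_raw_idx"
// followed by a bitcast of %y to the requested PointerTy. The names %x and
// %y are hypothetical and shown only for illustration.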
/// Compute the adjusted alignment for a load or store from an offset.
static unsigned getAdjustedAlignment(Instruction *I, uint64_t Offset,
                                     const DataLayout &DL) {
  unsigned Alignment;
  Type *Ty;
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlignment();
    Ty = LI->getType();
  } else if (auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlignment();
    Ty = SI->getValueOperand()->getType();
  } else {
    llvm_unreachable("Only loads and stores are allowed!");
  }

  if (!Alignment)
    Alignment = DL.getABITypeAlignment(Ty);

  return MinAlign(Alignment, Offset);
}
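// Worked example (editorial commentary, not part of the upstream logic): a
// load with alignment 8 accessed at byte offset 4 yields MinAlign(8, 4) == 4,
// the largest power of two that divides both the base alignment and the
// offset.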
/// Test whether we can convert a value from the old to the new type.
///
/// This predicate should be used to guard calls to convertValue in order to
/// ensure that we only try to convert viable values. The strategy is that we
/// will peel off single element struct and array wrappings to get to an
/// underlying value, and convert that value.
static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
  if (OldTy == NewTy)
    return true;

  // For integer types, we can't handle any bit-width differences. This would
  // break both vector conversions with extension and introduce endianness
  // issues when used in conjunction with loads and stores.
  if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) {
    assert(cast<IntegerType>(OldTy)->getBitWidth() !=
               cast<IntegerType>(NewTy)->getBitWidth() &&
           "We can't have the same bitwidth for different int types");
    return false;
  }

  if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
    return false;
  if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
    return false;

  // We can convert pointers to integers and vice-versa. Same for vectors
  // of pointers and integers.
  OldTy = OldTy->getScalarType();
  NewTy = NewTy->getScalarType();
  if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
    if (NewTy->isPointerTy() && OldTy->isPointerTy()) {
      return cast<PointerType>(NewTy)->getPointerAddressSpace() ==
             cast<PointerType>(OldTy)->getPointerAddressSpace();
    }

    // We can convert integers to integral pointers, but not to non-integral
    // pointers.
    if (OldTy->isIntegerTy())
      return !DL.isNonIntegralPointerType(NewTy);

    // We can convert integral pointers to integers, but non-integral pointers
    // need to remain pointers.
    if (!DL.isNonIntegralPointerType(OldTy))
      return NewTy->isIntegerTy();

    return false;
  }

  return true;
}
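// Illustrative examples (editorial commentary, not part of the upstream
// logic): under a typical 64-bit DataLayout this predicate accepts
// i64 <-> double, i64 <-> i8* (for integral address spaces), and
// <2 x i32> <-> i64, while rejecting i32 -> i64 (bit-width mismatch) and
// { i32, i32 } -> i64 (an aggregate is not a single value type).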
/// Generic routine to convert an SSA value to a value of a different
/// type.
///
/// This will try various different casting techniques, such as bitcasts,
/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
/// two types for viability with this routine.
static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
                           Type *NewTy) {
  Type *OldTy = V->getType();
  assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type");

  if (OldTy == NewTy)
    return V;

  assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
         "Integer types must be the exact same to convert.");

  // See if we need inttoptr for this type pair. A cast involving both scalars
  // and vectors requires an additional bitcast.
  if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
    // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
    if (OldTy->isVectorTy() && !NewTy->isVectorTy())
      return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
                                NewTy);

    // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
    if (!OldTy->isVectorTy() && NewTy->isVectorTy())
      return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
                                NewTy);

    return IRB.CreateIntToPtr(V, NewTy);
  }

  // See if we need ptrtoint for this type pair. A cast involving both scalars
  // and vectors requires an additional bitcast.
  if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
    // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
    if (OldTy->isVectorTy() && !NewTy->isVectorTy())
      return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
                               NewTy);

    // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
    if (!OldTy->isVectorTy() && NewTy->isVectorTy())
      return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
                               NewTy);

    return IRB.CreatePtrToInt(V, NewTy);
  }

  return IRB.CreateBitCast(V, NewTy);
}
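// Illustrative example (editorial commentary, not part of the upstream
// logic): converting <2 x i32> to i8* on a 64-bit target emits the two-step
// sequence described in the comments above:
//   %i = bitcast <2 x i32> %v to i64
//   %p = inttoptr i64 %i to i8*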
/// Test whether the given slice use can be promoted to a vector.
///
/// This function is called to test each entry in a partition which is slated
/// for a single slice.
static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
                                            VectorType *Ty,
                                            uint64_t ElementSize,
                                            const DataLayout &DL) {
  // First validate the slice offsets.
  uint64_t BeginOffset =
      std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
  uint64_t BeginIndex = BeginOffset / ElementSize;
  if (BeginIndex * ElementSize != BeginOffset ||
      BeginIndex >= Ty->getNumElements())
    return false;
  uint64_t EndOffset =
      std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
  uint64_t EndIndex = EndOffset / ElementSize;
  if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
    return false;

  assert(EndIndex > BeginIndex && "Empty vector!");
  uint64_t NumElements = EndIndex - BeginIndex;
  Type *SliceTy = (NumElements == 1)
                      ? Ty->getElementType()
                      : VectorType::get(Ty->getElementType(), NumElements);

  Type *SplitIntTy =
      Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);

  Use *U = S.getUse();

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
    if (MI->isVolatile())
      return false;
    if (!S.isSplittable())
      return false; // Skip any unsplittable intrinsics.
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
    if (!II->isLifetimeStartOrEnd())
      return false;
  } else if (U->get()->getType()->getPointerElementType()->isStructTy()) {
    // Disable vector promotion when there are loads or stores of an FCA.
    return false;
  } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
    if (LI->isVolatile())
      return false;
    Type *LTy = LI->getType();
    if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
      assert(LTy->isIntegerTy());
      LTy = SplitIntTy;
    }
    if (!canConvertValue(DL, SliceTy, LTy))
      return false;
  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
    if (SI->isVolatile())
      return false;
    Type *STy = SI->getValueOperand()->getType();
    if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
      assert(STy->isIntegerTy());
      STy = SplitIntTy;
    }
    if (!canConvertValue(DL, STy, SliceTy))
      return false;
  } else {
    return false;
  }

  return true;
}
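// Illustrative example (editorial commentary, not part of the upstream
// logic): with candidate type <4 x i32> (ElementSize == 4), a slice spanning
// partition bytes [4, 12) maps to BeginIndex == 1 and EndIndex == 3, i.e. a
// <2 x i32> access; a slice spanning bytes [2, 6) is not element-aligned and
// fails the index checks above.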
/// Test whether the given alloca partitioning and range of slices can be
/// promoted to a vector.
///
/// This is a quick test to check whether we can rewrite a particular alloca
/// partition (and its newly formed alloca) into a vector alloca with only
/// whole-vector loads and stores such that it could be promoted to a vector
/// SSA value. We only can ensure this for a limited set of operations, and we
/// don't want to do the rewrites unless we are confident that the result will
/// be promotable, so we have an early test here.
static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
  // Collect the candidate types for vector-based promotion. Also track whether
  // we have different element types.
  SmallVector<VectorType *, 4> CandidateTys;
  Type *CommonEltTy = nullptr;
  bool HaveCommonEltTy = true;
  auto CheckCandidateType = [&](Type *Ty) {
    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      CandidateTys.push_back(VTy);
      if (!CommonEltTy)
        CommonEltTy = VTy->getElementType();
      else if (CommonEltTy != VTy->getElementType())
        HaveCommonEltTy = false;
    }
  };
  // Consider any loads or stores that are the exact size of the slice.
  for (const Slice &S : P)
    if (S.beginOffset() == P.beginOffset() &&
        S.endOffset() == P.endOffset()) {
      if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser()))
        CheckCandidateType(LI->getType());
      else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser()))
        CheckCandidateType(SI->getValueOperand()->getType());
    }

  // If we didn't find a vector type, nothing to do here.
  if (CandidateTys.empty())
    return nullptr;

  // Remove non-integer vector types if we had multiple common element types.
  // FIXME: It'd be nice to replace them with integer vector types, but we can't
  // do that until all the backends are known to produce good code for all
  // integer vector types.
  if (!HaveCommonEltTy) {
    CandidateTys.erase(
        llvm::remove_if(CandidateTys,
                        [](VectorType *VTy) {
                          return !VTy->getElementType()->isIntegerTy();
                        }),
        CandidateTys.end());

    // If there were no integer vector types, give up.
    if (CandidateTys.empty())
      return nullptr;

    // Rank the remaining candidate vector types. This is easy because we know
    // they're all integer vectors. We sort by ascending number of elements.
    auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) {
      (void)DL;
      assert(DL.getTypeSizeInBits(RHSTy) == DL.getTypeSizeInBits(LHSTy) &&
             "Cannot have vector types of different sizes!");
      assert(RHSTy->getElementType()->isIntegerTy() &&
             "All non-integer types eliminated!");
      assert(LHSTy->getElementType()->isIntegerTy() &&
             "All non-integer types eliminated!");
      return RHSTy->getNumElements() < LHSTy->getNumElements();
    };
    llvm::sort(CandidateTys, RankVectorTypes);
    CandidateTys.erase(
        std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes),
        CandidateTys.end());
  } else {
    // The only way to have the same element type in every vector type is to
    // have the same vector type. Check that and remove all but one.
#ifndef NDEBUG
    for (VectorType *VTy : CandidateTys) {
      assert(VTy->getElementType() == CommonEltTy &&
             "Unaccounted for element type!");
      assert(VTy == CandidateTys[0] &&
             "Different vector types with the same element type!");
    }
#endif
    CandidateTys.resize(1);
  }

  // Try each vector type, and return the one which works.
  auto CheckVectorTypeForPromotion = [&](VectorType *VTy) {
    uint64_t ElementSize = DL.getTypeSizeInBits(VTy->getElementType());

    // While the definition of LLVM vectors is bitpacked, we don't support sizes
    // that aren't byte sized.
    if (ElementSize % 8)
      return false;
    assert((DL.getTypeSizeInBits(VTy) % 8) == 0 &&
           "vector size not a multiple of element size?");
    ElementSize /= 8;

    for (const Slice &S : P)
      if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL))
        return false;

    for (const Slice *S : P.splitSliceTails())
      if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL))
        return false;

    return true;
  };
  for (VectorType *VTy : CandidateTys)
    if (CheckVectorTypeForPromotion(VTy))
      return VTy;

  return nullptr;
}
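// Illustrative example (editorial commentary, not part of the upstream
// logic): if a partition is covered by whole-partition accesses typed
// <4 x i32> and <2 x i64>, there is no common element type, so only the
// integer vector candidates survive; they are ranked by ascending element
// count and the first one that admits every slice is returned.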
/// Test whether a slice of an alloca is valid for integer widening.
///
/// This implements the necessary checking for the \c isIntegerWideningViable
/// test below on a single slice of the alloca.
static bool isIntegerWideningViableForSlice(const Slice &S,
                                            uint64_t AllocBeginOffset,
                                            Type *AllocaTy,
                                            const DataLayout &DL,
                                            bool &WholeAllocaOp) {
  uint64_t Size = DL.getTypeStoreSize(AllocaTy);

  uint64_t RelBegin = S.beginOffset() - AllocBeginOffset;
  uint64_t RelEnd = S.endOffset() - AllocBeginOffset;

  // We can't reasonably handle cases where the load or store extends past
  // the end of the alloca's type and into its padding.
  if (RelEnd > Size)
    return false;

  Use *U = S.getUse();

  if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
    if (LI->isVolatile())
      return false;
    // We can't handle loads that extend past the allocated memory.
    if (DL.getTypeStoreSize(LI->getType()) > Size)
      return false;
    // So far, AllocaSliceRewriter does not support widening split slice tails
    // in rewriteIntegerLoad.
    if (S.beginOffset() < AllocBeginOffset)
      return false;
    // Note that we don't count vector loads or stores as whole-alloca
    // operations which enable integer widening because we would prefer to use
    // vector widening instead.
    if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size)
      WholeAllocaOp = true;
    if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
      if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
        return false;
    } else if (RelBegin != 0 || RelEnd != Size ||
               !canConvertValue(DL, AllocaTy, LI->getType())) {
      // Non-integer loads need to be convertible from the alloca type so that
      // they are promotable.
      return false;
    }
  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
    Type *ValueTy = SI->getValueOperand()->getType();
    if (SI->isVolatile())
      return false;
    // We can't handle stores that extend past the allocated memory.
    if (DL.getTypeStoreSize(ValueTy) > Size)
      return false;
    // So far, AllocaSliceRewriter does not support widening split slice tails
    // in rewriteIntegerStore.
    if (S.beginOffset() < AllocBeginOffset)
      return false;
    // Note that we don't count vector loads or stores as whole-alloca
    // operations which enable integer widening because we would prefer to use
    // vector widening instead.
    if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size)
      WholeAllocaOp = true;
    if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
      if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
        return false;
    } else if (RelBegin != 0 || RelEnd != Size ||
               !canConvertValue(DL, ValueTy, AllocaTy)) {
      // Non-integer stores need to be convertible to the alloca type so that
      // they are promotable.
      return false;
    }
  } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
    if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
      return false;
    if (!S.isSplittable())
      return false; // Skip any unsplittable intrinsics.
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
    if (!II->isLifetimeStartOrEnd())
      return false;
  } else {
    return false;
  }

  return true;
}
/// Test whether the given alloca partition's integer operations can be
/// widened to promotable ones.
///
/// This is a quick test to check whether we can rewrite the integer loads and
/// stores to a particular alloca into wider loads and stores and be able to
/// promote the resulting alloca.
static bool isIntegerWideningViable(Partition &P, Type *AllocaTy,
                                    const DataLayout &DL) {
  uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy);
  // Don't create integer types larger than the maximum bitwidth.
  if (SizeInBits > IntegerType::MAX_INT_BITS)
    return false;

  // Don't try to handle allocas with bit-padding.
  if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy))
    return false;

  // We need to ensure that an integer type with the appropriate bitwidth can
  // be converted to the alloca type, whatever that is. We don't want to force
  // the alloca itself to have an integer type if there is a more suitable one.
  Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
  if (!canConvertValue(DL, AllocaTy, IntTy) ||
      !canConvertValue(DL, IntTy, AllocaTy))
    return false;

  // While examining uses, we ensure that the alloca has a covering load or
  // store. We don't want to widen the integer operations only to fail to
  // promote due to some other unsplittable entry (which we may make splittable
  // later). However, if there are only splittable uses, go ahead and assume
  // that we cover the alloca.
  // FIXME: We shouldn't consider split slices that happen to start in the
  // partition here...
  bool WholeAllocaOp =
      P.begin() != P.end() ? false : DL.isLegalInteger(SizeInBits);

  for (const Slice &S : P)
    if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL,
                                         WholeAllocaOp))
      return false;

  for (const Slice *S : P.splitSliceTails())
    if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL,
                                         WholeAllocaOp))
      return false;

  return WholeAllocaOp;
}
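// Illustrative example (editorial commentary, not part of the upstream
// logic): an i64 alloca with one covering i64 store plus i32 loads at byte
// offsets 0 and 4 passes this test: the covering store sets WholeAllocaOp,
// and each narrow access is later rewritten in terms of the wide i64 value
// via extractInteger and insertInteger.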
static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
                             IntegerType *Ty, uint64_t Offset,
                             const Twine &Name) {
  LLVM_DEBUG(dbgs() << "       start: " << *V << "\n");
  IntegerType *IntTy = cast<IntegerType>(V->getType());
  assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
         "Element extends past full value");
  uint64_t ShAmt = 8 * Offset;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
  if (ShAmt) {
    V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
    LLVM_DEBUG(dbgs() << "     shifted: " << *V << "\n");
  }
  assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot extract to a larger integer!");
  if (Ty != IntTy) {
    V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
    LLVM_DEBUG(dbgs() << "     trunced: " << *V << "\n");
  }
  return V;
}
static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
                            Value *V, uint64_t Offset, const Twine &Name) {
  IntegerType *IntTy = cast<IntegerType>(Old->getType());
  IntegerType *Ty = cast<IntegerType>(V->getType());
  assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot insert a larger integer!");
  LLVM_DEBUG(dbgs() << "       start: " << *V << "\n");
  if (Ty != IntTy) {
    V = IRB.CreateZExt(V, IntTy, Name + ".ext");
    LLVM_DEBUG(dbgs() << "    extended: " << *V << "\n");
  }
  assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
         "Element store outside of alloca store");
  uint64_t ShAmt = 8 * Offset;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
  if (ShAmt) {
    V = IRB.CreateShl(V, ShAmt, Name + ".shift");
    LLVM_DEBUG(dbgs() << "     shifted: " << *V << "\n");
  }

  if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
    APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
    Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
    LLVM_DEBUG(dbgs() << "      masked: " << *Old << "\n");
    V = IRB.CreateOr(Old, V, Name + ".insert");
    LLVM_DEBUG(dbgs() << "    inserted: " << *V << "\n");
  }
  return V;
}
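// Worked example (editorial commentary, not part of the upstream logic):
// inserting an i16 at byte Offset == 2 into an i64 uses ShAmt == 16 on
// little-endian targets and ShAmt == 8 * (8 - 2 - 2) == 32 on big-endian
// targets, so the i16 occupies the same bytes of memory either way once the
// i64 is stored.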
static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex,
                            unsigned EndIndex, const Twine &Name) {
  VectorType *VecTy = cast<VectorType>(V->getType());
  unsigned NumElements = EndIndex - BeginIndex;
  assert(NumElements <= VecTy->getNumElements() && "Too many elements!");

  if (NumElements == VecTy->getNumElements())
    return V;

  if (NumElements == 1) {
    V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
                                 Name + ".extract");
    LLVM_DEBUG(dbgs() << "     extract: " << *V << "\n");
    return V;
  }

  SmallVector<Constant *, 8> Mask;
  Mask.reserve(NumElements);
  for (unsigned i = BeginIndex; i != EndIndex; ++i)
    Mask.push_back(IRB.getInt32(i));
  V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
                              ConstantVector::get(Mask), Name + ".extract");
  LLVM_DEBUG(dbgs() << "     shuffle: " << *V << "\n");
  return V;
}
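// Illustrative example (editorial commentary, not part of the upstream
// logic): extracting elements [1, 3) from a <4 x i32> value %v becomes
//   shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 1, i32 2>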
static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
                           unsigned BeginIndex, const Twine &Name) {
  VectorType *VecTy = cast<VectorType>(Old->getType());
  assert(VecTy && "Can only insert a vector into a vector");

  VectorType *Ty = dyn_cast<VectorType>(V->getType());
  if (!Ty) {
    // Single element to insert.
    V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
                                Name + ".insert");
    LLVM_DEBUG(dbgs() << "     insert: " << *V << "\n");
    return V;
  }

  assert(Ty->getNumElements() <= VecTy->getNumElements() &&
         "Too many elements!");
  if (Ty->getNumElements() == VecTy->getNumElements()) {
    assert(V->getType() == VecTy && "Vector type mismatch");
    return V;
  }
  unsigned EndIndex = BeginIndex + Ty->getNumElements();

  // When inserting a smaller vector into the larger to store, we first
  // use a shuffle vector to widen it with undef elements, and then
  // a second shuffle vector to select between the loaded vector and the
  // incoming vector.
  SmallVector<Constant *, 8> Mask;
  Mask.reserve(VecTy->getNumElements());
  for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
    if (i >= BeginIndex && i < EndIndex)
      Mask.push_back(IRB.getInt32(i - BeginIndex));
    else
      Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
  V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
                              ConstantVector::get(Mask), Name + ".expand");
  LLVM_DEBUG(dbgs() << "    shuffle: " << *V << "\n");

  Mask.clear();
  for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
    Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));

  V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend");

  LLVM_DEBUG(dbgs() << "    blend: " << *V << "\n");
  return V;
}
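// Worked example (editorial commentary, not part of the upstream logic):
// inserting a <2 x i32> at BeginIndex == 1 of a <4 x i32> first widens it
// with the expand mask <undef, 0, 1, undef>, then selects with the i1 mask
// <0, 1, 1, 0>, so lanes 1 and 2 come from the new value and lanes 0 and 3
// from the old vector.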
/// Visitor to rewrite instructions using a particular slice of an alloca
/// to use a new alloca.
///
/// Also implements the rewriting to vector-based accesses when the partition
/// passes the isVectorPromotionViable predicate. Most of the rewriting logic
/// lives here.
class llvm::sroa::AllocaSliceRewriter
    : public InstVisitor<AllocaSliceRewriter, bool> {
  // Befriend the base class so it can delegate to private visit methods.
  friend class InstVisitor<AllocaSliceRewriter, bool>;

  using Base = InstVisitor<AllocaSliceRewriter, bool>;

  const DataLayout &DL;
  AllocaSlices &AS;
  SROA &Pass;
  AllocaInst &OldAI, &NewAI;
  const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
  Type *NewAllocaTy;

  // This is a convenience and flag variable that will be null unless the new
  // alloca's integer operations should be widened to this integer type due to
  // passing isIntegerWideningViable above. If it is non-null, the desired
  // integer type will be stored here for easy access during rewriting.
  IntegerType *IntTy;

  // If we are rewriting an alloca partition which can be written as pure
  // vector operations, we stash extra information here. When VecTy is
  // non-null, we have some strict guarantees about the rewritten alloca:
  //   - The new alloca is exactly the size of the vector type here.
  //   - The accesses all either map to the entire vector or to a single
  //     element.
  //   - The set of accessing instructions is only one of those handled above
  //     in isVectorPromotionViable. Generally these are the same access kinds
  //     which are promotable via mem2reg.
  VectorType *VecTy;
  Type *ElementTy;
  uint64_t ElementSize;

  // The original offset of the slice currently being rewritten relative to
  // the original alloca.
  uint64_t BeginOffset = 0;
  uint64_t EndOffset = 0;

  // The new offsets of the slice currently being rewritten relative to the
  // original alloca.
  uint64_t NewBeginOffset, NewEndOffset;

  uint64_t SliceSize;
  bool IsSplittable = false;
  bool IsSplit = false;
  Use *OldUse = nullptr;
  Instruction *OldPtr = nullptr;

  // Track post-rewrite users which are PHI nodes and Selects.
  SmallSetVector<PHINode *, 8> &PHIUsers;
  SmallSetVector<SelectInst *, 8> &SelectUsers;

  // Utility IR builder, whose name prefix is setup for each visited use, and
  // the insertion point is set to point to the user.
  IRBuilderTy IRB;

public:
  AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass,
                      AllocaInst &OldAI, AllocaInst &NewAI,
                      uint64_t NewAllocaBeginOffset,
                      uint64_t NewAllocaEndOffset, bool IsIntegerPromotable,
                      VectorType *PromotableVecTy,
                      SmallSetVector<PHINode *, 8> &PHIUsers,
                      SmallSetVector<SelectInst *, 8> &SelectUsers)
      : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
        NewAllocaBeginOffset(NewAllocaBeginOffset),
        NewAllocaEndOffset(NewAllocaEndOffset),
        NewAllocaTy(NewAI.getAllocatedType()),
        IntTy(IsIntegerPromotable
                  ? Type::getIntNTy(NewAI.getContext(),
                                    DL.getTypeSizeInBits(NewAI.getAllocatedType()))
                  : nullptr),
        VecTy(PromotableVecTy),
        ElementTy(VecTy ? VecTy->getElementType() : nullptr),
        ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
        PHIUsers(PHIUsers), SelectUsers(SelectUsers),
        IRB(NewAI.getContext(), ConstantFolder()) {
    if (VecTy) {
      assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 &&
             "Only multiple-of-8 sized vector elements are viable");
      ++NumVectorized;
    }
    assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy));
  }
  bool visit(AllocaSlices::const_iterator I) {
    bool CanSROA = true;
    BeginOffset = I->beginOffset();
    EndOffset = I->endOffset();
    IsSplittable = I->isSplittable();
    IsSplit =
        BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
    LLVM_DEBUG(dbgs() << "  rewriting " << (IsSplit ? "split " : ""));
    LLVM_DEBUG(AS.printSlice(dbgs(), I, ""));
    LLVM_DEBUG(dbgs() << "\n");

    // Compute the intersecting offset range.
    assert(BeginOffset < NewAllocaEndOffset);
    assert(EndOffset > NewAllocaBeginOffset);
    NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
    NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);

    SliceSize = NewEndOffset - NewBeginOffset;

    OldUse = I->getUse();
    OldPtr = cast<Instruction>(OldUse->get());

    Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
    IRB.SetInsertPoint(OldUserI);
    IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
    IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");

    CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
    if (VecTy || IntTy)
      assert(CanSROA);
    return CanSROA;
  }
private:
  // Make sure the other visit overloads are visible.
  using Base::visit;

  // Every instruction which can end up as a user must have a rewrite rule.
  bool visitInstruction(Instruction &I) {
    LLVM_DEBUG(dbgs() << "    !!!! Cannot rewrite: " << I << "\n");
    llvm_unreachable("No rewrite rule for this instruction!");
  }
  Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
    // Note that the offset computation can use BeginOffset or NewBeginOffset
    // interchangeably for unsplit slices.
    assert(IsSplit || BeginOffset == NewBeginOffset);
    uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;

#ifndef NDEBUG
    StringRef OldName = OldPtr->getName();
    // Skip through the last '.sroa.' component of the name.
    size_t LastSROAPrefix = OldName.rfind(".sroa.");
    if (LastSROAPrefix != StringRef::npos) {
      OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
      // Look for an SROA slice index.
      size_t IndexEnd = OldName.find_first_not_of("0123456789");
      if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
        // Strip the index and look for the offset.
        OldName = OldName.substr(IndexEnd + 1);
        size_t OffsetEnd = OldName.find_first_not_of("0123456789");
        if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
          // Strip the offset.
          OldName = OldName.substr(OffsetEnd + 1);
      }
    }
    // Strip any SROA suffixes as well.
    OldName = OldName.substr(0, OldName.find(".sroa_"));
#endif

    return getAdjustedPtr(IRB, DL, &NewAI,
                          APInt(DL.getIndexTypeSizeInBits(PointerTy), Offset),
                          PointerTy,
#ifndef NDEBUG
                          Twine(OldName) + "."
#else
                          Twine()
#endif
                          );
  }
  /// Compute suitable alignment to access this slice of the *new*
  /// alloca.
  ///
  /// You can optionally pass a type to this routine and if that type's ABI
  /// alignment is itself suitable, this will return zero.
  unsigned getSliceAlign(Type *Ty = nullptr) {
    unsigned NewAIAlign = NewAI.getAlignment();
    if (!NewAIAlign)
      NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
    unsigned Align =
        MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset);
    return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 0 : Align;
  }
  unsigned getIndex(uint64_t Offset) {
    assert(VecTy && "Can only call getIndex when rewriting a vector");
    uint64_t RelOffset = Offset - NewAllocaBeginOffset;
    assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
    uint32_t Index = RelOffset / ElementSize;
    assert(Index * ElementSize == RelOffset);
    return Index;
  }
  void deleteIfTriviallyDead(Value *V) {
    Instruction *I = cast<Instruction>(V);
    if (isInstructionTriviallyDead(I))
      Pass.DeadInsts.insert(I);
  }
  Value *rewriteVectorizedLoadInst() {
    unsigned BeginIndex = getIndex(NewBeginOffset);
    unsigned EndIndex = getIndex(NewEndOffset);
    assert(EndIndex > BeginIndex && "Empty vector!");

    Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                     NewAI.getAlignment(), "load");
    return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
  }
  Value *rewriteIntegerLoad(LoadInst &LI) {
    assert(IntTy && "We cannot insert an integer to the alloca");
    assert(!LI.isVolatile());
    Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                     NewAI.getAlignment(), "load");
    V = convertValue(DL, IRB, V, IntTy);
    assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
    uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
    if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) {
      IntegerType *ExtractTy = Type::getIntNTy(LI.getContext(), SliceSize * 8);
      V = extractInteger(DL, IRB, V, ExtractTy, Offset, "extract");
    }
    // It is possible that the extracted type is not the load type. This
    // happens if there is a load past the end of the alloca, and as
    // a consequence the slice is narrower but still a candidate for integer
    // lowering. To handle this case, we just zero extend the extracted
    // integer.
    assert(cast<IntegerType>(LI.getType())->getBitWidth() >= SliceSize * 8 &&
           "Can only handle an extract for an overly wide load");
    if (cast<IntegerType>(LI.getType())->getBitWidth() > SliceSize * 8)
      V = IRB.CreateZExt(V, LI.getType());
    return V;
  }
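// Illustrative example (editorial commentary, not part of the upstream
// logic): for an alloca widened to i64, an i16 load of bytes [2, 4) is
// rewritten as a load of the whole i64 followed by extractInteger at
// Offset == 2, which shifts and truncates down to the requested i16.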
  bool visitLoadInst(LoadInst &LI) {
    LLVM_DEBUG(dbgs() << "    original: " << LI << "\n");
    Value *OldOp = LI.getOperand(0);
    assert(OldOp == OldPtr);

    AAMDNodes AATags;
    LI.getAAMetadata(AATags);

    unsigned AS = LI.getPointerAddressSpace();

    Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8)
                             : LI.getType();
    const bool IsLoadPastEnd = DL.getTypeStoreSize(TargetTy) > SliceSize;
    bool IsPtrAdjusted = false;
    Value *V;
    if (VecTy) {
      V = rewriteVectorizedLoadInst();
    } else if (IntTy && LI.getType()->isIntegerTy()) {
      V = rewriteIntegerLoad(LI);
    } else if (NewBeginOffset == NewAllocaBeginOffset &&
               NewEndOffset == NewAllocaEndOffset &&
               (canConvertValue(DL, NewAllocaTy, TargetTy) ||
                (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
                 TargetTy->isIntegerTy()))) {
      LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                              NewAI.getAlignment(),
                                              LI.isVolatile(), LI.getName());
      if (AATags)
        NewLI->setAAMetadata(AATags);
      if (LI.isVolatile())
        NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());

      // Any !nonnull metadata or !range metadata on the old load is also valid
      // on the new load. This is even true in some cases even when the loads
      // are different types, for example by mapping !nonnull metadata to
      // !range metadata by modeling the null pointer constant converted to the
      // integer type.
      // FIXME: Add support for range metadata here. Currently the utilities
      // for this don't propagate range metadata in trivial cases from one
      // integer load to another, don't handle non-addrspace-0 null pointers
      // correctly, and don't have any support for mapping ranges as the
      // integer type becomes wider or narrower.
      if (MDNode *N = LI.getMetadata(LLVMContext::MD_nonnull))
        copyNonnullMetadata(LI, N, *NewLI);

      // Try to preserve nonnull metadata
      V = NewLI;

      // If this is an integer load past the end of the slice (which means the
      // bytes outside the slice are undef or this load is dead) just forcibly
      // fix the integer size with correct handling of endianness.
      if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
        if (auto *TITy = dyn_cast<IntegerType>(TargetTy))
          if (AITy->getBitWidth() < TITy->getBitWidth()) {
            V = IRB.CreateZExt(V, TITy, "load.ext");
            if (DL.isBigEndian())
              V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(),
                                "endian_shift");
          }
    } else {
      Type *LTy = TargetTy->getPointerTo(AS);
      LoadInst *NewLI = IRB.CreateAlignedLoad(
          TargetTy, getNewAllocaSlicePtr(IRB, LTy), getSliceAlign(TargetTy),
          LI.isVolatile(), LI.getName());
      if (AATags)
        NewLI->setAAMetadata(AATags);
      if (LI.isVolatile())
        NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());

      V = NewLI;
      IsPtrAdjusted = true;
    }
    V = convertValue(DL, IRB, V, TargetTy);

    if (IsSplit) {
      assert(!LI.isVolatile());
      assert(LI.getType()->isIntegerTy() &&
             "Only integer type loads and stores are split");
      assert(SliceSize < DL.getTypeStoreSize(LI.getType()) &&
             "Split load isn't smaller than original load");
      assert(LI.getType()->getIntegerBitWidth() ==
                 DL.getTypeStoreSizeInBits(LI.getType()) &&
             "Non-byte-multiple bit width");
      // Move the insertion point just past the load so that we can refer to it.
      IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI)));
      // Create a placeholder value with the same type as LI to use as the
      // basis for the new value. This allows us to replace the uses of LI with
      // the computed value, and then replace the placeholder with LI, leaving
      // LI only used for this computation.
      Value *Placeholder = new LoadInst(
          LI.getType(), UndefValue::get(LI.getType()->getPointerTo(AS)));
      V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset,
                        "insert");
      LI.replaceAllUsesWith(V);
      Placeholder->replaceAllUsesWith(&LI);
      Placeholder->deleteValue();
    } else {
      LI.replaceAllUsesWith(V);
    }

    Pass.DeadInsts.insert(&LI);
    deleteIfTriviallyDead(OldOp);
    LLVM_DEBUG(dbgs() << "          to: " << *V << "\n");
    return !LI.isVolatile() && !IsPtrAdjusted;
  }
  bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
                                  AAMDNodes AATags) {
    if (V->getType() != VecTy) {
      unsigned BeginIndex = getIndex(NewBeginOffset);
      unsigned EndIndex = getIndex(NewEndOffset);
      assert(EndIndex > BeginIndex && "Empty vector!");
      unsigned NumElements = EndIndex - BeginIndex;
      assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
      Type *SliceTy = (NumElements == 1)
                          ? ElementTy
                          : VectorType::get(ElementTy, NumElements);
      if (V->getType() != SliceTy)
        V = convertValue(DL, IRB, V, SliceTy);

      // Mix in the existing elements.
      Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                         NewAI.getAlignment(), "load");
      V = insertVector(IRB, Old, V, BeginIndex, "vec");
    }
    StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
    if (AATags)
      Store->setAAMetadata(AATags);
    Pass.DeadInsts.insert(&SI);

    LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
    return true;
  }
  bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) {
    assert(IntTy && "We cannot extract an integer from the alloca");
    assert(!SI.isVolatile());
    if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
      Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                         NewAI.getAlignment(), "oldload");
      Old = convertValue(DL, IRB, Old, IntTy);
      assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
      uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
      V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert");
    }
    V = convertValue(DL, IRB, V, NewAllocaTy);
    StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
    Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
                             LLVMContext::MD_access_group});
    if (AATags)
      Store->setAAMetadata(AATags);
    Pass.DeadInsts.insert(&SI);
    LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
    return true;
  }
  bool visitStoreInst(StoreInst &SI) {
    LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");
    Value *OldOp = SI.getOperand(1);
    assert(OldOp == OldPtr);

    AAMDNodes AATags;
    SI.getAAMetadata(AATags);

    Value *V = SI.getValueOperand();

    // Strip all inbounds GEPs and pointer casts to try to dig out any root
    // alloca that should be re-examined after promoting this alloca.
    if (V->getType()->isPointerTy())
      if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
        Pass.PostPromotionWorklist.insert(AI);

    if (SliceSize < DL.getTypeStoreSize(V->getType())) {
      assert(!SI.isVolatile());
      assert(V->getType()->isIntegerTy() &&
             "Only integer type loads and stores are split");
      assert(V->getType()->getIntegerBitWidth() ==
                 DL.getTypeStoreSizeInBits(V->getType()) &&
             "Non-byte-multiple bit width");
      IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8);
      V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset,
                         "extract");
    }

    if (VecTy)
      return rewriteVectorizedStoreInst(V, SI, OldOp, AATags);
    if (IntTy && V->getType()->isIntegerTy())
      return rewriteIntegerStore(V, SI, AATags);

    const bool IsStorePastEnd = DL.getTypeStoreSize(V->getType()) > SliceSize;
    StoreInst *NewSI;
    if (NewBeginOffset == NewAllocaBeginOffset &&
        NewEndOffset == NewAllocaEndOffset &&
        (canConvertValue(DL, V->getType(), NewAllocaTy) ||
         (IsStorePastEnd && NewAllocaTy->isIntegerTy() &&
          V->getType()->isIntegerTy()))) {
      // If this is an integer store past the end of slice (and thus the bytes
      // past that point are irrelevant or this is unreachable), truncate the
      // value prior to storing.
      if (auto *VITy = dyn_cast<IntegerType>(V->getType()))
        if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
          if (VITy->getBitWidth() > AITy->getBitWidth()) {
            if (DL.isBigEndian())
              V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(),
                                 "endian_shift");
            V = IRB.CreateTrunc(V, AITy, "load.trunc");
          }

      V = convertValue(DL, IRB, V, NewAllocaTy);
      NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
                                     SI.isVolatile());
    } else {
      unsigned AS = SI.getPointerAddressSpace();
      Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS));
      NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
                                     SI.isVolatile());
    }
    NewSI->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
                             LLVMContext::MD_access_group});
    if (AATags)
      NewSI->setAAMetadata(AATags);
    if (SI.isVolatile())
      NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
    Pass.DeadInsts.insert(&SI);
    deleteIfTriviallyDead(OldOp);

    LLVM_DEBUG(dbgs() << "          to: " << *NewSI << "\n");
    return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile();
  }
  /// Compute an integer value from splatting an i8 across the given
  /// number of bytes.
  ///
  /// Note that this routine assumes an i8 is a byte. If that isn't true, don't
  /// call this routine.
  /// FIXME: Heed the advice above.
  ///
  /// \param V The i8 value to splat.
  /// \param Size The number of bytes in the output (assuming i8 is one byte)
  Value *getIntegerSplat(Value *V, unsigned Size) {
    assert(Size > 0 && "Expected a positive number of bytes.");
    IntegerType *VTy = cast<IntegerType>(V->getType());
    assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte");
    if (Size == 1)
      return V;

    Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8);
    V = IRB.CreateMul(
        IRB.CreateZExt(V, SplatIntTy, "zext"),
        ConstantExpr::getUDiv(
            Constant::getAllOnesValue(SplatIntTy),
            ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()),
                                  SplatIntTy)),
        "isplat");
    return V;
  }
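// Worked example (editorial commentary, not part of the upstream logic): for
// Size == 4 the constant divisor above evaluates to
// 0xFFFFFFFF / 0xFF == 0x01010101, so splatting the byte 0xAB computes
// 0xAB * 0x01010101 == 0xABABABAB.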
  /// Compute a vector splat for a given element value.
  Value *getVectorSplat(Value *V, unsigned NumElements) {
    V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
    LLVM_DEBUG(dbgs() << "       splat: " << *V << "\n");
    return V;
  }
  bool visitMemSetInst(MemSetInst &II) {
    LLVM_DEBUG(dbgs() << "    original: " << II << "\n");
    assert(II.getRawDest() == OldPtr);

    AAMDNodes AATags;
    II.getAAMetadata(AATags);

    // If the memset has a variable size, it cannot be split, just adjust the
    // pointer to the new alloca.
    if (!isa<Constant>(II.getLength())) {
      assert(!IsSplit);
      assert(NewBeginOffset == BeginOffset);
      II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType()));
      II.setDestAlignment(getSliceAlign());

      deleteIfTriviallyDead(OldPtr);
      return false;
    }

    // Record this instruction for deletion.
    Pass.DeadInsts.insert(&II);

    Type *AllocaTy = NewAI.getAllocatedType();
    Type *ScalarTy = AllocaTy->getScalarType();

    // If this doesn't map cleanly onto the alloca type, and that type isn't
    // a single value type, just emit a memset.
    if (!VecTy && !IntTy &&
        (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset ||
         SliceSize != DL.getTypeStoreSize(AllocaTy) ||
         !AllocaTy->isSingleValueType() ||
         !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) ||
         DL.getTypeSizeInBits(ScalarTy) % 8 != 0)) {
      Type *SizeTy = II.getLength()->getType();
      Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
      CallInst *New = IRB.CreateMemSet(
          getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size,
          getSliceAlign(), II.isVolatile());
      if (AATags)
        New->setAAMetadata(AATags);
      LLVM_DEBUG(dbgs() << "          to: " << *New << "\n");
      return false;
    }

    // If we can represent this as a simple value, we have to build the actual
    // value to store, which requires expanding the byte present in memset to
    // a sensible representation for the alloca type. This is essentially
    // splatting the byte to a sufficiently wide integer, splatting it across
    // any desired vector width, and bitcasting to the final type.
    Value *V;

    if (VecTy) {
      // If this is a memset of a vectorized alloca, insert it.
      assert(ElementTy == ScalarTy);

      unsigned BeginIndex = getIndex(NewBeginOffset);
      unsigned EndIndex = getIndex(NewEndOffset);
      assert(EndIndex > BeginIndex && "Empty vector!");
      unsigned NumElements = EndIndex - BeginIndex;
      assert(NumElements <= VecTy->getNumElements() && "Too many elements!");

      Value *Splat =
          getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
      Splat = convertValue(DL, IRB, Splat, ElementTy);
      if (NumElements > 1)
        Splat = getVectorSplat(Splat, NumElements);

      Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                         NewAI.getAlignment(), "oldload");
      V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
    } else if (IntTy) {
      // If this is a memset on an alloca where we can widen stores, insert the
      // set integer.
      assert(!II.isVolatile());

      uint64_t Size = NewEndOffset - NewBeginOffset;
      V = getIntegerSplat(II.getValue(), Size);

      if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
                    EndOffset != NewAllocaEndOffset)) {
        Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                           NewAI.getAlignment(), "oldload");
        Old = convertValue(DL, IRB, Old, IntTy);
        uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
        V = insertInteger(DL, IRB, Old, V, Offset, "insert");
      } else {
        assert(V->getType() == IntTy &&
               "Wrong type for an alloca wide integer!");
      }
      V = convertValue(DL, IRB, V, AllocaTy);
    } else {
      // Established these invariants above.
      assert(NewBeginOffset == NewAllocaBeginOffset);
      assert(NewEndOffset == NewAllocaEndOffset);

      V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
      if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
        V = getVectorSplat(V, AllocaVecTy->getNumElements());

      V = convertValue(DL, IRB, V, AllocaTy);
    }

    StoreInst *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
                                            II.isVolatile());
    if (AATags)
      New->setAAMetadata(AATags);
    LLVM_DEBUG(dbgs() << "          to: " << *New << "\n");
    return !II.isVolatile();
  }
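// Illustrative example (editorial commentary, not part of the upstream
// logic): a memset of the byte 0xFF over bytes [4, 8) of an i64-widened
// alloca takes the IntTy path above: it splats the byte to an i32, loads the
// old i64, merges the splat at offset 4 via insertInteger, and stores the
// combined i64 back.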
  bool visitMemTransferInst(MemTransferInst &II) {
    // Rewriting of memory transfer instructions can be a bit tricky. We break
    // them into two categories: split intrinsics and unsplit intrinsics.

    LLVM_DEBUG(dbgs() << "    original: " << II << "\n");

    AAMDNodes AATags;
    II.getAAMetadata(AATags);

    bool IsDest = &II.getRawDestUse() == OldUse;
    assert((IsDest && II.getRawDest() == OldPtr) ||
           (!IsDest && II.getRawSource() == OldPtr));

    unsigned SliceAlign = getSliceAlign();

    // For unsplit intrinsics, we simply modify the source and destination
    // pointers in place. This isn't just an optimization, it is a matter of
    // correctness. With unsplit intrinsics we may be dealing with transfers
    // within a single alloca before SROA ran, or with transfers that have
    // a variable length. We may also be dealing with memmove instead of
    // memcpy, and so simply updating the pointers is necessary for us to
    // update both source and dest of a single call.
    if (!IsSplittable) {
      Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
      if (IsDest) {
        II.setDest(AdjustedPtr);
        II.setDestAlignment(SliceAlign);
      } else {
        II.setSource(AdjustedPtr);
        II.setSourceAlignment(SliceAlign);
      }

      LLVM_DEBUG(dbgs() << "          to: " << II << "\n");
      deleteIfTriviallyDead(OldPtr);
      return false;
    }
    // For split transfer intrinsics we have an incredibly useful assurance:
    // the source and destination do not reside within the same alloca, and at
    // least one of them does not escape. This means that we can replace
    // memmove with memcpy, and we don't need to worry about all manner of
    // downsides to splitting and transforming the operations.

    // If this doesn't map cleanly onto the alloca type, and that type isn't
    // a single value type, just emit a memcpy.
    bool EmitMemCpy =
        !VecTy && !IntTy &&
        (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset ||
         SliceSize != DL.getTypeStoreSize(NewAI.getAllocatedType()) ||
         !NewAI.getAllocatedType()->isSingleValueType());

    // If we're just going to emit a memcpy, the alloca hasn't changed, and the
    // size hasn't been shrunk based on analysis of the viable range, this is
    // a no-op.
    if (EmitMemCpy && &OldAI == &NewAI) {
      // Ensure the start lines up.
      assert(NewBeginOffset == BeginOffset);

      // Rewrite the size as needed.
      if (NewEndOffset != EndOffset)
        II.setLength(ConstantInt::get(II.getLength()->getType(),
                                      NewEndOffset - NewBeginOffset));
      return false;
    }
    // Record this instruction for deletion.
    Pass.DeadInsts.insert(&II);

    // Strip all inbounds GEPs and pointer casts to try to dig out any root
    // alloca that should be re-examined after rewriting this instruction.
    Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
    if (AllocaInst *AI =
            dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) {
      assert(AI != &OldAI && AI != &NewAI &&
             "Splittable transfers cannot reach the same alloca on both ends.");
      Pass.Worklist.insert(AI);
    }

    Type *OtherPtrTy = OtherPtr->getType();
    unsigned OtherAS = OtherPtrTy->getPointerAddressSpace();

    // Compute the relative offset for the other pointer within the transfer.
    unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS);
    APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset);
    unsigned OtherAlign =
        IsDest ? II.getSourceAlignment() : II.getDestAlignment();
    OtherAlign = MinAlign(OtherAlign ? OtherAlign : 1,
                          OtherOffset.zextOrTrunc(64).getZExtValue());

    if (EmitMemCpy) {
      // Compute the other pointer, folding as much as possible to produce
      // a single, simple GEP in most cases.
      OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
                                OtherPtr->getName() + ".");

      Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
      Type *SizeTy = II.getLength()->getType();
      Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);

      Value *DestPtr, *SrcPtr;
      unsigned DestAlign, SrcAlign;
      // Note: IsDest is true iff we're copying into the new alloca slice
      if (IsDest) {
        DestPtr = OurPtr;
        DestAlign = SliceAlign;
        SrcPtr = OtherPtr;
        SrcAlign = OtherAlign;
      } else {
        DestPtr = OtherPtr;
        DestAlign = OtherAlign;
        SrcPtr = OurPtr;
        SrcAlign = SliceAlign;
      }
      CallInst *New = IRB.CreateMemCpy(DestPtr, DestAlign, SrcPtr, SrcAlign,
                                       Size, II.isVolatile());
      if (AATags)
        New->setAAMetadata(AATags);
      LLVM_DEBUG(dbgs() << "          to: " << *New << "\n");
      return false;
    }

    bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
                         NewEndOffset == NewAllocaEndOffset;
    uint64_t Size = NewEndOffset - NewBeginOffset;
    unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0;
    unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
    unsigned NumElements = EndIndex - BeginIndex;
    IntegerType *SubIntTy =
        IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr;

    // Reset the other pointer type to match the register type we're going to
    // use, but using the address space of the original other pointer.
    Type *OtherTy;
    if (VecTy && !IsWholeAlloca) {
      if (NumElements == 1)
        OtherTy = VecTy->getElementType();
      else
        OtherTy = VectorType::get(VecTy->getElementType(), NumElements);
    } else if (IntTy && !IsWholeAlloca) {
      OtherTy = SubIntTy;
    } else {
      OtherTy = NewAllocaTy;
    }
    OtherPtrTy = OtherTy->getPointerTo(OtherAS);

    Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
                                   OtherPtr->getName() + ".");
    unsigned SrcAlign = OtherAlign;
    Value *DstPtr = &NewAI;
    unsigned DstAlign = SliceAlign;
    if (!IsDest) {
      std::swap(SrcPtr, DstPtr);
      std::swap(SrcAlign, DstAlign);
    }

    Value *Src;
    if (VecTy && !IsWholeAlloca && !IsDest) {
      Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                  NewAI.getAlignment(), "load");
      Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
    } else if (IntTy && !IsWholeAlloca && !IsDest) {
      Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                  NewAI.getAlignment(), "load");
      Src = convertValue(DL, IRB, Src, IntTy);
      uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
      Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
    } else {
      LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign,
                                             II.isVolatile(), "copyload");
      if (AATags)
        Load->setAAMetadata(AATags);
      Src = Load;
    }

    if (VecTy && !IsWholeAlloca && IsDest) {
      Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                         NewAI.getAlignment(), "oldload");
      Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
    } else if (IntTy && !IsWholeAlloca && IsDest) {
      Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
                                         NewAI.getAlignment(), "oldload");
      Old = convertValue(DL, IRB, Old, IntTy);
      uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
      Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
      Src = convertValue(DL, IRB, Src, NewAllocaTy);
    }

    StoreInst *Store = cast<StoreInst>(
        IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
    if (AATags)
      Store->setAAMetadata(AATags);
    LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
    return !II.isVolatile();
  }
  bool visitIntrinsicInst(IntrinsicInst &II) {
    assert(II.isLifetimeStartOrEnd());
    LLVM_DEBUG(dbgs() << "    original: " << II << "\n");
    assert(II.getArgOperand(1) == OldPtr);

    // Record this instruction for deletion.
    Pass.DeadInsts.insert(&II);

    // Lifetime intrinsics are only promotable if they cover the whole alloca.
    // Therefore, we drop lifetime intrinsics which don't cover the whole
    // alloca.
    // (In theory, intrinsics which partially cover an alloca could be
    // promoted, but PromoteMemToReg doesn't handle that case.)
    // FIXME: Check whether the alloca is promotable before dropping the
    // lifetime intrinsics?
    if (NewBeginOffset != NewAllocaBeginOffset ||
        NewEndOffset != NewAllocaEndOffset)
      return true;

    ConstantInt *Size =
        ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
                         NewEndOffset - NewBeginOffset);
    // Lifetime intrinsics always expect an i8* so directly get such a pointer
    // for the new alloca slice.
    Type *PointerTy =
        IRB.getInt8PtrTy(OldPtr->getType()->getPointerAddressSpace());
    Value *Ptr = getNewAllocaSlicePtr(IRB, PointerTy);
    Value *New;
    if (II.getIntrinsicID() == Intrinsic::lifetime_start)
      New = IRB.CreateLifetimeStart(Ptr, Size);
    else
      New = IRB.CreateLifetimeEnd(Ptr, Size);

    (void)New;
    LLVM_DEBUG(dbgs() << "          to: " << *New << "\n");

    return true;
  }
  void fixLoadStoreAlign(Instruction &Root) {
    // This algorithm implements the same visitor loop as
    // hasUnsafePHIOrSelectUse, and fixes the alignment of each load
    // and store found.
    SmallPtrSet<Instruction *, 4> Visited;
    SmallVector<Instruction *, 4> Uses;
    Visited.insert(&Root);
    Uses.push_back(&Root);
    do {
      Instruction *I = Uses.pop_back_val();

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        unsigned LoadAlign = LI->getAlignment();
        if (!LoadAlign)
          LoadAlign = DL.getABITypeAlignment(LI->getType());
        LI->setAlignment(std::min(LoadAlign, getSliceAlign()));
        continue;
      }
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        unsigned StoreAlign = SI->getAlignment();
        if (!StoreAlign) {
          Value *Op = SI->getOperand(0);
          StoreAlign = DL.getABITypeAlignment(Op->getType());
        }
        SI->setAlignment(std::min(StoreAlign, getSliceAlign()));
        continue;
      }

      assert(isa<BitCastInst>(I) || isa<PHINode>(I) ||
             isa<SelectInst>(I) || isa<GetElementPtrInst>(I));
      for (User *U : I->users())
        if (Visited.insert(cast<Instruction>(U)).second)
          Uses.push_back(cast<Instruction>(U));
    } while (!Uses.empty());
  }
  bool visitPHINode(PHINode &PN) {
    LLVM_DEBUG(dbgs() << "    original: " << PN << "\n");
    assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable");
    assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable");

    // We would like to compute a new pointer in only one place, but have it be
    // as local as possible to the PHI. To do that, we re-use the location of
    // the old pointer, which necessarily must be in the right position to
    // dominate the PHI.
    IRBuilderTy PtrBuilder(IRB);
    if (isa<PHINode>(OldPtr))
      PtrBuilder.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt());
    else
      PtrBuilder.SetInsertPoint(OldPtr);
    PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc());

    Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType());
    // Replace the operands which were using the old pointer.
    std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);

    LLVM_DEBUG(dbgs() << "          to: " << PN << "\n");
    deleteIfTriviallyDead(OldPtr);

    // Fix the alignment of any loads or stores using this PHI node.
    fixLoadStoreAlign(PN);

    // PHIs can't be promoted on their own, but often can be speculated. We
    // check the speculation outside of the rewriter so that we see the
    // fully-rewritten alloca.
    PHIUsers.insert(&PN);
    return true;
  }

  bool visitSelectInst(SelectInst &SI) {
    LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");
    assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
           "Pointer isn't an operand!");
    assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
    assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");

    Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
    // Replace the operands which were using the old pointer.
    if (SI.getOperand(1) == OldPtr)
      SI.setOperand(1, NewPtr);
    if (SI.getOperand(2) == OldPtr)
      SI.setOperand(2, NewPtr);

    LLVM_DEBUG(dbgs() << "          to: " << SI << "\n");
    deleteIfTriviallyDead(OldPtr);

    // Fix the alignment of any loads or stores using this select.
    fixLoadStoreAlign(SI);

    // Selects can't be promoted on their own, but often can be speculated. We
    // check the speculation outside of the rewriter so that we see the
    // fully-rewritten alloca.
    SelectUsers.insert(&SI);
    return true;
  }
};

/// Visitor to rewrite aggregate loads and stores as scalar.
///
/// This pass aggressively rewrites all aggregate loads and stores on
/// a particular pointer (or any pointer derived from it which we can identify)
/// with scalar loads and stores.
class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
  // Befriend the base class so it can delegate to private visit methods.
  friend class InstVisitor<AggLoadStoreRewriter, bool>;

  /// Queue of pointer uses to analyze and potentially rewrite.
  SmallVector<Use *, 8> Queue;

  /// Set to prevent us from cycling with phi nodes and loops.
  SmallPtrSet<User *, 8> Visited;

  /// The current pointer use being rewritten. This is used to dig up the used
  /// value (as opposed to the user).
  Use *U;

  /// Used to calculate offsets, and hence alignment, of subobjects.
  const DataLayout &DL;

public:
  AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {}

  /// Rewrite loads and stores through a pointer and all pointers derived from
  /// it.
  bool rewrite(Instruction &I) {
    LLVM_DEBUG(dbgs() << "  Rewriting FCA loads and stores...\n");
    enqueueUsers(I);
    bool Changed = false;
    while (!Queue.empty()) {
      U = Queue.pop_back_val();
      Changed |= visit(cast<Instruction>(U->getUser()));
    }
    return Changed;
  }

private:
  /// Enqueue all the users of the given instruction for further processing.
  /// This uses a set to de-duplicate users.
  void enqueueUsers(Instruction &I) {
    for (Use &U : I.uses())
      if (Visited.insert(U.getUser()).second)
        Queue.push_back(&U);
  }

  // Conservative default is to not rewrite anything.
  bool visitInstruction(Instruction &I) { return false; }

  /// Generic recursive split emission class.
  template <typename Derived> class OpSplitter {
  protected:
    /// The builder used to form new instructions.
    IRBuilderTy IRB;

    /// The indices which to be used with insert- or extractvalue to select the
    /// appropriate value within the aggregate.
    SmallVector<unsigned, 4> Indices;

    /// The indices to a GEP instruction which will move Ptr to the correct slot
    /// within the aggregate.
    SmallVector<Value *, 4> GEPIndices;

    /// The base pointer of the original op, used as a base for GEPing the
    /// split operations.
    Value *Ptr;

    /// The base pointee type being GEPed into.
    Type *BaseTy;

    /// Known alignment of the base pointer.
    unsigned BaseAlign;

    /// To calculate offset of each component so we can correctly deduce
    /// alignments.
    const DataLayout &DL;

    /// Initialize the splitter with an insertion point, Ptr and start with a
    /// single zero GEP index.
    OpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
               unsigned BaseAlign, const DataLayout &DL)
        : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr),
          BaseTy(BaseTy), BaseAlign(BaseAlign), DL(DL) {}

  public:
    /// Generic recursive split emission routine.
    ///
    /// This method recursively splits an aggregate op (load or store) into
    /// scalar or vector ops. It splits recursively until it hits a single value
    /// and emits that single value operation via the template argument.
    ///
    /// The logic of this routine relies on GEPs and insertvalue and
    /// extractvalue all operating with the same fundamental index list, merely
    /// formatted differently (GEPs need actual values).
    ///
    /// \param Ty  The type being split recursively into smaller ops.
    /// \param Agg The aggregate value being built up or stored, depending on
    /// whether this is splitting a load or a store respectively.
    void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
      if (Ty->isSingleValueType()) {
        unsigned Offset = DL.getIndexedOffsetInType(BaseTy, GEPIndices);
        return static_cast<Derived *>(this)->emitFunc(
            Ty, Agg, MinAlign(BaseAlign, Offset), Name);
      }

      if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
        unsigned OldSize = Indices.size();
        (void)OldSize;
        for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
             ++Idx) {
          assert(Indices.size() == OldSize && "Did not return to the old size");
          Indices.push_back(Idx);
          GEPIndices.push_back(IRB.getInt32(Idx));
          emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
          GEPIndices.pop_back();
          Indices.pop_back();
        }
        return;
      }

      if (StructType *STy = dyn_cast<StructType>(Ty)) {
        unsigned OldSize = Indices.size();
        (void)OldSize;
        for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
             ++Idx) {
          assert(Indices.size() == OldSize && "Did not return to the old size");
          Indices.push_back(Idx);
          GEPIndices.push_back(IRB.getInt32(Idx));
          emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
          GEPIndices.pop_back();
          Indices.pop_back();
        }
        return;
      }

      llvm_unreachable("Only arrays and structs are aggregate loadable types");
    }
  };
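
  // For illustration, how the two index lists above line up (a sketch, not
  // taken from the source): when splitting an op on { [2 x i32], float }, the
  // second i32 is named by Indices = {0, 1} (the insert-/extractvalue form)
  // and by GEPIndices = {0, 0, 1} (the GEP form, whose extra leading zero
  // steps through the pointer). Both lists identify the same leaf, merely
  // formatted for their respective instructions.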

  struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
    AAMDNodes AATags;

    LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
                   AAMDNodes AATags, unsigned BaseAlign, const DataLayout &DL)
        : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign,
                                     DL),
          AATags(AATags) {}

    /// Emit a leaf load of a single value. This is called at the leaves of the
    /// recursive emission to actually load values.
    void emitFunc(Type *Ty, Value *&Agg, unsigned Align, const Twine &Name) {
      assert(Ty->isSingleValueType());
      // Load the single value and insert it using the indices.
      Value *GEP =
          IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
      LoadInst *Load = IRB.CreateAlignedLoad(Ty, GEP, Align, Name + ".load");
      if (AATags)
        Load->setAAMetadata(AATags);
      Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
      LLVM_DEBUG(dbgs() << "          to: " << *Load << "\n");
    }
  };

  bool visitLoadInst(LoadInst &LI) {
    assert(LI.getPointerOperand() == *U);
    if (!LI.isSimple() || LI.getType()->isSingleValueType())
      return false;

    // We have an aggregate being loaded, split it apart.
    LLVM_DEBUG(dbgs() << "    original: " << LI << "\n");
    AAMDNodes AATags;
    LI.getAAMetadata(AATags);
    LoadOpSplitter Splitter(&LI, *U, LI.getType(), AATags,
                            getAdjustedAlignment(&LI, 0, DL), DL);
    Value *V = UndefValue::get(LI.getType());
    Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
    LI.replaceAllUsesWith(V);
    LI.eraseFromParent();
    return true;
  }

  struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
    StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
                    AAMDNodes AATags, unsigned BaseAlign, const DataLayout &DL)
        : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign,
                                      DL),
          AATags(AATags) {}
    AAMDNodes AATags;

    /// Emit a leaf store of a single value. This is called at the leaves of the
    /// recursive emission to actually produce stores.
    void emitFunc(Type *Ty, Value *&Agg, unsigned Align, const Twine &Name) {
      assert(Ty->isSingleValueType());
      // Extract the single value and store it using the indices.
      //
      // The gep and extractvalue values are factored out of the CreateStore
      // call to make the output independent of the argument evaluation order.
      Value *ExtractValue =
          IRB.CreateExtractValue(Agg, Indices, Name + ".extract");
      Value *InBoundsGEP =
          IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
      StoreInst *Store =
          IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Align);
      if (AATags)
        Store->setAAMetadata(AATags);
      LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
    }
  };

  bool visitStoreInst(StoreInst &SI) {
    if (!SI.isSimple() || SI.getPointerOperand() != *U)
      return false;
    Value *V = SI.getValueOperand();
    if (V->getType()->isSingleValueType())
      return false;

    // We have an aggregate being stored, split it apart.
    LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");
    AAMDNodes AATags;
    SI.getAAMetadata(AATags);
    StoreOpSplitter Splitter(&SI, *U, V->getType(), AATags,
                             getAdjustedAlignment(&SI, 0, DL), DL);
    Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
    SI.eraseFromParent();
    return true;
  }

  bool visitBitCastInst(BitCastInst &BC) {
    enqueueUsers(BC);
    return false;
  }

  bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    enqueueUsers(GEPI);
    return false;
  }

  bool visitPHINode(PHINode &PN) {
    enqueueUsers(PN);
    return false;
  }

  bool visitSelectInst(SelectInst &SI) {
    enqueueUsers(SI);
    return false;
  }
};

} // end anonymous namespace

/// Strip aggregate type wrapping.
///
/// This removes no-op aggregate types wrapping an underlying type. It will
/// strip as many layers of types as it can without changing either the type
/// size or the allocated size.
static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
  if (Ty->isSingleValueType())
    return Ty;

  uint64_t AllocSize = DL.getTypeAllocSize(Ty);
  uint64_t TypeSize = DL.getTypeSizeInBits(Ty);

  Type *InnerTy;
  if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
    InnerTy = ArrTy->getElementType();
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    unsigned Index = SL->getElementContainingOffset(0);
    InnerTy = STy->getElementType(Index);
  } else {
    return Ty;
  }

  if (AllocSize > DL.getTypeAllocSize(InnerTy) ||
      TypeSize > DL.getTypeSizeInBits(InnerTy))
    return Ty;

  return stripAggregateTypeWrapping(DL, InnerTy);
}

/// Try to find a partition of the aggregate type passed in for a given
/// offset and size.
///
/// This recurses through the aggregate type and tries to compute a subtype
/// based on the offset and size. When the offset and size span a sub-section
/// of an array, it will even compute a new array type for that sub-section,
/// and the same for structs.
///
/// Note that this routine is very strict and tries to find a partition of the
/// type which produces the *exact* right offset and size. It is not forgiving
/// when the size or offset cause either end of type-based partition to be off.
/// Also, this is a best-effort routine. It is reasonable to give up and not
/// return a type if necessary.
static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset,
                              uint64_t Size) {
  if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size)
    return stripAggregateTypeWrapping(DL, Ty);
  if (Offset > DL.getTypeAllocSize(Ty) ||
      (DL.getTypeAllocSize(Ty) - Offset) < Size)
    return nullptr;

  if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
    Type *ElementTy = SeqTy->getElementType();
    uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
    uint64_t NumSkippedElements = Offset / ElementSize;
    if (NumSkippedElements >= SeqTy->getNumElements())
      return nullptr;
    Offset -= NumSkippedElements * ElementSize;

    // First check if we need to recurse.
    if (Offset > 0 || Size < ElementSize) {
      // Bail if the partition ends in a different array element.
      if ((Offset + Size) > ElementSize)
        return nullptr;
      // Recurse through the element type trying to peel off offset bytes.
      return getTypePartition(DL, ElementTy, Offset, Size);
    }
    assert(Offset == 0);

    if (Size == ElementSize)
      return stripAggregateTypeWrapping(DL, ElementTy);
    assert(Size > ElementSize);
    uint64_t NumElements = Size / ElementSize;
    if (NumElements * ElementSize != Size)
      return nullptr;
    return ArrayType::get(ElementTy, NumElements);
  }

  StructType *STy = dyn_cast<StructType>(Ty);
  if (!STy)
    return nullptr;

  const StructLayout *SL = DL.getStructLayout(STy);
  if (Offset >= SL->getSizeInBytes())
    return nullptr;
  uint64_t EndOffset = Offset + Size;
  if (EndOffset > SL->getSizeInBytes())
    return nullptr;

  unsigned Index = SL->getElementContainingOffset(Offset);
  Offset -= SL->getElementOffset(Index);

  Type *ElementTy = STy->getElementType(Index);
  uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
  if (Offset >= ElementSize)
    return nullptr; // The offset points into alignment padding.

  // See if any partition must be contained by the element.
  if (Offset > 0 || Size < ElementSize) {
    if ((Offset + Size) > ElementSize)
      return nullptr;
    return getTypePartition(DL, ElementTy, Offset, Size);
  }
  assert(Offset == 0);

  if (Size == ElementSize)
    return stripAggregateTypeWrapping(DL, ElementTy);

  StructType::element_iterator EI = STy->element_begin() + Index,
                               EE = STy->element_end();
  if (EndOffset < SL->getSizeInBytes()) {
    unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
    if (Index == EndIndex)
      return nullptr; // Within a single element and its padding.

    // Don't try to form "natural" types if the elements don't line up with the
    // expected size.
    // FIXME: We could potentially recurse down through the last element in the
    // sub-struct to find a natural end point.
    if (SL->getElementOffset(EndIndex) != EndOffset)
      return nullptr;

    assert(Index < EndIndex);
    EE = STy->element_begin() + EndIndex;
  }

  // Try to build up a sub-structure.
  StructType *SubTy =
      StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked());
  const StructLayout *SubSL = DL.getStructLayout(SubTy);
  if (Size != SubSL->getSizeInBytes())
    return nullptr; // The sub-struct doesn't have quite the size needed.

  return SubTy;
}
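
// For illustration (sketches under a typical 64-bit DataLayout, not taken
// from a test): for [8 x i32] with Offset = 8 and Size = 8 the routine above
// skips two elements and returns the sub-array [2 x i32]; for
// { i32, i32, i64 } with Offset = 0 and Size = 8 it returns the leading
// sub-struct { i32, i32 }. Asking for Offset = 2, Size = 4 in either type
// returns nullptr, because the range straddles an element boundary.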

/// Pre-split loads and stores to simplify rewriting.
///
/// We want to break up the splittable load+store pairs as much as
/// possible. This is important to do as a preprocessing step, as once we
/// start rewriting the accesses to partitions of the alloca we lose the
/// necessary information to correctly split apart paired loads and stores
/// which both point into this alloca. The case to consider is something like
/// the following:
///
///   %a = alloca [12 x i8]
///   %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
///   %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
///   %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
///   %iptr1 = bitcast i8* %gep1 to i64*
///   %iptr2 = bitcast i8* %gep2 to i64*
///   %fptr1 = bitcast i8* %gep1 to float*
///   %fptr2 = bitcast i8* %gep2 to float*
///   %fptr3 = bitcast i8* %gep3 to float*
///   store float 0.0, float* %fptr1
///   store float 1.0, float* %fptr2
///   %v = load i64* %iptr1
///   store i64 %v, i64* %iptr2
///   %f1 = load float* %fptr2
///   %f2 = load float* %fptr3
///
/// Here we want to form 3 partitions of the alloca, each 4 bytes large, and
/// promote everything so we recover the 2 SSA values that should have been
/// there all along.
///
/// \returns true if any changes are made.
bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
  LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n");

  // Track the loads and stores which are candidates for pre-splitting here, in
  // the order they first appear during the partition scan. These give stable
  // iteration order and a basis for tracking which loads and stores we
  // actually split.
  SmallVector<LoadInst *, 4> Loads;
  SmallVector<StoreInst *, 4> Stores;

  // We need to accumulate the splits required of each load or store where we
  // can find them via a direct lookup. This is important to cross-check loads
  // and stores against each other. We also track the slice so that we can kill
  // all the slices that end up split.
  struct SplitOffsets {
    Slice *S;
    std::vector<uint64_t> Splits;
  };
  SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap;

  // Track loads out of this alloca which cannot, for any reason, be pre-split.
  // This is important as we also cannot pre-split stores of those loads!
  // FIXME: This is all pretty gross. It means that we can be more aggressive
  // in pre-splitting when the load feeding the store happens to come from
  // a separate alloca. Put another way, the effectiveness of SROA would be
  // decreased by a frontend which just concatenated all of its local allocas
  // into one big flat alloca. But defeating such patterns is exactly the job
  // SROA is tasked with! Sadly, to not have this discrepancy we would have
  // to change store pre-splitting to actually force pre-splitting of the load
  // that feeds it *and all stores*. That makes pre-splitting much harder, but
  // maybe it would make it more principled?
  SmallPtrSet<LoadInst *, 8> UnsplittableLoads;

  LLVM_DEBUG(dbgs() << "  Searching for candidate loads and stores\n");
  for (auto &P : AS.partitions()) {
    for (Slice &S : P) {
      Instruction *I = cast<Instruction>(S.getUse()->getUser());
      if (!S.isSplittable() || S.endOffset() <= P.endOffset()) {
        // If this is a load we have to track that it can't participate in any
        // pre-splitting. If this is a store of a load we have to track that
        // that load also can't participate in any pre-splitting.
        if (auto *LI = dyn_cast<LoadInst>(I))
          UnsplittableLoads.insert(LI);
        else if (auto *SI = dyn_cast<StoreInst>(I))
          if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand()))
            UnsplittableLoads.insert(LI);
        continue;
      }
      assert(P.endOffset() > S.beginOffset() &&
             "Empty or backwards partition!");

      // Determine if this is a pre-splittable slice.
      if (auto *LI = dyn_cast<LoadInst>(I)) {
        assert(!LI->isVolatile() && "Cannot split volatile loads!");

        // The load must be used exclusively to store into other pointers for
        // us to be able to arbitrarily pre-split it. The stores must also be
        // simple to avoid changing semantics.
        auto IsLoadSimplyStored = [](LoadInst *LI) {
          for (User *LU : LI->users()) {
            auto *SI = dyn_cast<StoreInst>(LU);
            if (!SI || !SI->isSimple())
              return false;
          }
          return true;
        };
        if (!IsLoadSimplyStored(LI)) {
          UnsplittableLoads.insert(LI);
          continue;
        }

        Loads.push_back(LI);
      } else if (auto *SI = dyn_cast<StoreInst>(I)) {
        if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex()))
          // Skip stores *of* pointers. FIXME: This shouldn't even be possible!
          continue;
        auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand());
        if (!StoredLoad || !StoredLoad->isSimple())
          continue;
        assert(!SI->isVolatile() && "Cannot split volatile stores!");

        Stores.push_back(SI);
      } else {
        // Other uses cannot be pre-split.
        continue;
      }

      // Record the initial split.
      LLVM_DEBUG(dbgs() << "    Candidate: " << *I << "\n");
      auto &Offsets = SplitOffsetsMap[I];
      assert(Offsets.Splits.empty() &&
             "Should not have splits the first time we see an instruction!");
      Offsets.S = &S;
      Offsets.Splits.push_back(P.endOffset() - S.beginOffset());
    }

    // Now scan the already split slices, and add a split for any of them which
    // we're going to pre-split.
    for (Slice *S : P.splitSliceTails()) {
      auto SplitOffsetsMapI =
          SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser()));
      if (SplitOffsetsMapI == SplitOffsetsMap.end())
        continue;
      auto &Offsets = SplitOffsetsMapI->second;

      assert(Offsets.S == S && "Found a mismatched slice!");
      assert(!Offsets.Splits.empty() &&
             "Cannot have an empty set of splits on the second partition!");
      assert(Offsets.Splits.back() ==
                 P.beginOffset() - Offsets.S->beginOffset() &&
             "Previous split does not end where this one begins!");

      // Record each split. The last partition's end isn't needed as the size
      // of the slice dictates that.
      if (S->endOffset() > P.endOffset())
        Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset());
    }
  }

  // We may have split loads where some of their stores are split stores. For
  // such loads and stores, we can only pre-split them if their splits exactly
  // match relative to their starting offset. We have to verify this prior to
  // any rewriting.
  Stores.erase(
      llvm::remove_if(Stores,
                      [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) {
                        // Lookup the load we are storing in our map of split
                        // offsets.
                        auto *LI = cast<LoadInst>(SI->getValueOperand());
                        // If it was completely unsplittable, then we're done,
                        // and this store can't be pre-split.
                        if (UnsplittableLoads.count(LI))
                          return true;

                        auto LoadOffsetsI = SplitOffsetsMap.find(LI);
                        if (LoadOffsetsI == SplitOffsetsMap.end())
                          return false; // Unrelated loads are definitely safe.
                        auto &LoadOffsets = LoadOffsetsI->second;

                        // Now lookup the store's offsets.
                        auto &StoreOffsets = SplitOffsetsMap[SI];

                        // If the relative offsets of each split in the load and
                        // store match exactly, then we can split them and we
                        // don't need to remove them here.
                        if (LoadOffsets.Splits == StoreOffsets.Splits)
                          return false;

                        LLVM_DEBUG(
                            dbgs()
                            << "    Mismatched splits for load and store:\n"
                            << "      " << *LI << "\n"
                            << "      " << *SI << "\n");

                        // We've found a store and load that we need to split
                        // with mismatched relative splits. Just give up on them
                        // and remove both instructions from our list of
                        // candidates.
                        UnsplittableLoads.insert(LI);
                        return true;
                      }),
      Stores.end());
  // Now we have to go *back* through all the stores, because a later store may
  // have caused an earlier store's load to become unsplittable and if it is
  // unsplittable for the later store, then we can't rely on it being split in
  // the earlier store either.
  Stores.erase(llvm::remove_if(Stores,
                               [&UnsplittableLoads](StoreInst *SI) {
                                 auto *LI =
                                     cast<LoadInst>(SI->getValueOperand());
                                 return UnsplittableLoads.count(LI);
                               }),
               Stores.end());
  // Once we've established all the loads that can't be split for some reason,
  // filter any that made it into our list out.
  Loads.erase(llvm::remove_if(Loads,
                              [&UnsplittableLoads](LoadInst *LI) {
                                return UnsplittableLoads.count(LI);
                              }),
              Loads.end());

  // If no loads or stores are left, there is no pre-splitting to be done for
  // this alloca.
  if (Loads.empty() && Stores.empty())
    return false;

  // From here on, we can't fail and will be building new accesses, so rig up
  // an IR builder.
  IRBuilderTy IRB(&AI);

  // Collect the new slices which we will merge into the alloca slices.
  SmallVector<Slice, 4> NewSlices;

  // Track any allocas we end up splitting loads and stores for so we iterate
  // on them.
  SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas;

  // At this point, we have collected all of the loads and stores we can
  // pre-split, and the specific splits needed for them. We actually do the
  // splitting in a specific order to handle when one of the loads is also the
  // value operand of one of the stores.
  //
  // First, we rewrite all of the split loads, and just accumulate each split
  // load in a parallel structure. We also build the slices for them and append
  // them to the alloca slices.
  SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap;
  std::vector<LoadInst *> SplitLoads;
  const DataLayout &DL = AI.getModule()->getDataLayout();
  for (LoadInst *LI : Loads) {
    SplitLoads.clear();

    IntegerType *Ty = cast<IntegerType>(LI->getType());
    uint64_t LoadSize = Ty->getBitWidth() / 8;
    assert(LoadSize > 0 && "Cannot have a zero-sized integer load!");

    auto &Offsets = SplitOffsetsMap[LI];
    assert(LoadSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&
           "Slice size should always match load size exactly!");
    uint64_t BaseOffset = Offsets.S->beginOffset();
    assert(BaseOffset + LoadSize > BaseOffset &&
           "Cannot represent alloca access size using 64-bit integers!");

    Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand());
    IRB.SetInsertPoint(LI);

    LLVM_DEBUG(dbgs() << "  Splitting load: " << *LI << "\n");

    uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
    int Idx = 0, Size = Offsets.Splits.size();
    for (;;) {
      auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
      auto AS = LI->getPointerAddressSpace();
      auto *PartPtrTy = PartTy->getPointerTo(AS);
      LoadInst *PLoad = IRB.CreateAlignedLoad(
          PartTy,
          getAdjustedPtr(IRB, DL, BasePtr,
                         APInt(DL.getIndexSizeInBits(AS), PartOffset),
                         PartPtrTy, BasePtr->getName() + "."),
          getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
          LI->getName());
      PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
                                LLVMContext::MD_access_group});

      // Append this load onto the list of split loads so we can find it later
      // to rewrite the stores.
      SplitLoads.push_back(PLoad);

      // Now build a new slice for the alloca.
      NewSlices.push_back(
          Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
                &PLoad->getOperandUse(PLoad->getPointerOperandIndex()),
                /*IsSplittable*/ false));
      LLVM_DEBUG(dbgs() << "    new slice [" << NewSlices.back().beginOffset()
                        << ", " << NewSlices.back().endOffset()
                        << "): " << *PLoad << "\n");

      // See if we've handled all the splits.
      if (Idx >= Size)
        break;

      // Setup the next partition.
      PartOffset = Offsets.Splits[Idx];
      ++Idx;
      PartSize = (Idx < Size ? Offsets.Splits[Idx] : LoadSize) - PartOffset;
    }

    // Now that we have the split loads, do the slow walk over all uses of the
    // load and rewrite them as split stores, or save the split loads to use
    // below if the store is going to be split there anyways.
    bool DeferredStores = false;
    for (User *LU : LI->users()) {
      StoreInst *SI = cast<StoreInst>(LU);
      if (!Stores.empty() && SplitOffsetsMap.count(SI)) {
        DeferredStores = true;
        LLVM_DEBUG(dbgs() << "    Deferred splitting of store: " << *SI
                          << "\n");
        continue;
      }

      Value *StoreBasePtr = SI->getPointerOperand();
      IRB.SetInsertPoint(SI);

      LLVM_DEBUG(dbgs() << "    Splitting store of load: " << *SI << "\n");

      for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) {
        LoadInst *PLoad = SplitLoads[Idx];
        uint64_t PartOffset = Idx == 0 ? 0 : Offsets.Splits[Idx - 1];
        auto *PartPtrTy =
            PLoad->getType()->getPointerTo(SI->getPointerAddressSpace());

        auto AS = SI->getPointerAddressSpace();
        StoreInst *PStore = IRB.CreateAlignedStore(
            PLoad,
            getAdjustedPtr(IRB, DL, StoreBasePtr,
                           APInt(DL.getIndexSizeInBits(AS), PartOffset),
                           PartPtrTy, StoreBasePtr->getName() + "."),
            getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
        PStore->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
                                   LLVMContext::MD_access_group});
        LLVM_DEBUG(dbgs() << "      +" << PartOffset << ":" << *PStore << "\n");
      }

      // We want to immediately iterate on any allocas impacted by splitting
      // this store, and we have to track any promotable alloca (indicated by
      // a direct store) as needing to be resplit because it is no longer
      // promotable.
      if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) {
        ResplitPromotableAllocas.insert(OtherAI);
        Worklist.insert(OtherAI);
      } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
                     StoreBasePtr->stripInBoundsOffsets())) {
        Worklist.insert(OtherAI);
      }

      // Mark the original store as dead.
      DeadInsts.insert(SI);
    }

    // Save the split loads if there are deferred stores among the users.
    if (DeferredStores)
      SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads)));

    // Mark the original load as dead and kill the original slice.
    DeadInsts.insert(LI);
    Offsets.S->kill();
  }

  // Second, we rewrite all of the split stores. At this point, we know that
  // all loads from this alloca have been split already. For stores of such
  // loads, we can simply look up the pre-existing split loads. For stores of
  // other loads, we split those loads first and then write split stores of
  // them.
  for (StoreInst *SI : Stores) {
    auto *LI = cast<LoadInst>(SI->getValueOperand());
    IntegerType *Ty = cast<IntegerType>(LI->getType());
    uint64_t StoreSize = Ty->getBitWidth() / 8;
    assert(StoreSize > 0 && "Cannot have a zero-sized integer store!");

    auto &Offsets = SplitOffsetsMap[SI];
    assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&
           "Slice size should always match load size exactly!");
    uint64_t BaseOffset = Offsets.S->beginOffset();
    assert(BaseOffset + StoreSize > BaseOffset &&
           "Cannot represent alloca access size using 64-bit integers!");

    Value *LoadBasePtr = LI->getPointerOperand();
    Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand());

    LLVM_DEBUG(dbgs() << "  Splitting store: " << *SI << "\n");

    // Check whether we have an already split load.
    auto SplitLoadsMapI = SplitLoadsMap.find(LI);
    std::vector<LoadInst *> *SplitLoads = nullptr;
    if (SplitLoadsMapI != SplitLoadsMap.end()) {
      SplitLoads = &SplitLoadsMapI->second;
      assert(SplitLoads->size() == Offsets.Splits.size() + 1 &&
             "Too few split loads for the number of splits in the store!");
    } else {
      LLVM_DEBUG(dbgs() << "          of load: " << *LI << "\n");
    }

    uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
    int Idx = 0, Size = Offsets.Splits.size();
    for (;;) {
      auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
      auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace());
      auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace());

      // Either lookup a split load or create one.
      LoadInst *PLoad;
      if (SplitLoads) {
        PLoad = (*SplitLoads)[Idx];
      } else {
        IRB.SetInsertPoint(LI);
        auto AS = LI->getPointerAddressSpace();
        PLoad = IRB.CreateAlignedLoad(
            PartTy,
            getAdjustedPtr(IRB, DL, LoadBasePtr,
                           APInt(DL.getIndexSizeInBits(AS), PartOffset),
                           LoadPartPtrTy, LoadBasePtr->getName() + "."),
            getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
            LI->getName());
      }

      // And store this partition.
      IRB.SetInsertPoint(SI);
      auto AS = SI->getPointerAddressSpace();
      StoreInst *PStore = IRB.CreateAlignedStore(
          PLoad,
          getAdjustedPtr(IRB, DL, StoreBasePtr,
                         APInt(DL.getIndexSizeInBits(AS), PartOffset),
                         StorePartPtrTy, StoreBasePtr->getName() + "."),
          getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);

      // Now build a new slice for the alloca.
      NewSlices.push_back(
          Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
                &PStore->getOperandUse(PStore->getPointerOperandIndex()),
                /*IsSplittable*/ false));
      LLVM_DEBUG(dbgs() << "    new slice [" << NewSlices.back().beginOffset()
                        << ", " << NewSlices.back().endOffset()
                        << "): " << *PStore << "\n");
      if (!SplitLoads)
        LLVM_DEBUG(dbgs() << "      of split load: " << *PLoad << "\n");

      // See if we've finished all the splits.
      if (Idx >= Size)
        break;

      // Setup the next partition.
      PartOffset = Offsets.Splits[Idx];
      ++Idx;
      PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset;
    }

    // We want to immediately iterate on any allocas impacted by splitting
    // this load, which is only relevant if it isn't a load of this alloca and
    // thus we didn't already split the loads above. We also have to keep track
    // of any promotable allocas we split loads on as they can no longer be
    // promoted.
    if (!SplitLoads) {
      if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) {
        assert(OtherAI != &AI && "We can't re-split our own alloca!");
        ResplitPromotableAllocas.insert(OtherAI);
        Worklist.insert(OtherAI);
      } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
                     LoadBasePtr->stripInBoundsOffsets())) {
        assert(OtherAI != &AI && "We can't re-split our own alloca!");
        Worklist.insert(OtherAI);
      }
    }

    // Mark the original store as dead now that we've split it up and kill its
    // slice. Note that we leave the original load in place unless this store
    // was its only use. It may in turn be split up if it is an alloca load
    // for some other alloca, but it may be a normal load. This may introduce
    // redundant loads, but where those can be merged the rest of the optimizer
    // should handle the merging, and this uncovers SSA splits which is more
    // important. In practice, the original loads will almost always be fully
    // split and removed eventually, and the splits will be merged by any
    // trivial CSE, including instcombine.
    if (LI->hasOneUse()) {
      assert(*LI->user_begin() == SI && "Single use isn't this store!");
      DeadInsts.insert(LI);
    }
    DeadInsts.insert(SI);
    Offsets.S->kill();
  }

  // Remove the killed slices that have been pre-split.
  AS.erase(llvm::remove_if(AS, [](const Slice &S) { return S.isDead(); }),
           AS.end());

  // Insert our new slices. This will sort and merge them into the sorted
  // sequence.
  AS.insert(NewSlices);

  LLVM_DEBUG(dbgs() << "  Pre-split slices:\n");
#ifndef NDEBUG
  for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
    LLVM_DEBUG(AS.print(dbgs(), I, "    "));
#endif

  // Finally, don't try to promote any allocas that now require re-splitting.
  // They have already been added to the worklist above.
  PromotableAllocas.erase(
      llvm::remove_if(
          PromotableAllocas,
          [&](AllocaInst *AI) { return ResplitPromotableAllocas.count(AI); }),
      PromotableAllocas.end());

  return true;
}

/// Rewrite an alloca partition's users.
///
/// This routine drives both of the rewriting goals of the SROA pass. It tries
/// to rewrite uses of an alloca partition to be conducive for SSA value
/// promotion. If the partition needs a new, more refined alloca, this will
/// build that new alloca, preserving as much type information as possible, and
/// rewrite the uses of the old alloca to point at the new one and have the
/// appropriate new offsets. It also evaluates how successful the rewrite was
/// at enabling promotion and if it was successful queues the alloca to be
/// promoted.
AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
                                   Partition &P) {
  // Try to compute a friendly type for this partition of the alloca. This
  // won't always succeed, in which case we fall back to a legal integer type
  // or an i8 array of an appropriate size.
  Type *SliceTy = nullptr;
  const DataLayout &DL = AI.getModule()->getDataLayout();
  if (Type *CommonUseTy = findCommonType(P.begin(), P.end(), P.endOffset()))
    if (DL.getTypeAllocSize(CommonUseTy) >= P.size())
      SliceTy = CommonUseTy;
  if (!SliceTy)
    if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
                                                 P.beginOffset(), P.size()))
      SliceTy = TypePartitionTy;
  if ((!SliceTy || (SliceTy->isArrayTy() &&
                    SliceTy->getArrayElementType()->isIntegerTy())) &&
      DL.isLegalInteger(P.size() * 8))
    SliceTy = Type::getIntNTy(*C, P.size() * 8);
  if (!SliceTy)
    SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size());
  assert(DL.getTypeAllocSize(SliceTy) >= P.size());

  bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL);

  VectorType *VecTy =
      IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL);
  if (VecTy)
    SliceTy = VecTy;

  // Check for the case where we're going to rewrite to a new alloca of the
  // exact same type as the original, and with the same access offsets. In that
  // case, re-use the existing alloca, but still run through the rewriter to
  // perform phi and select speculation.
  // P.beginOffset() can be non-zero even with the same type in a case with
  // out-of-bounds access (e.g. @PR35657 function in SROA/basictest.ll).
  AllocaInst *NewAI;
  if (SliceTy == AI.getAllocatedType() && P.beginOffset() == 0) {
    NewAI = &AI;
    // FIXME: We should be able to bail at this point with "nothing changed".
    // FIXME: We might want to defer PHI speculation until after here.
    // FIXME: return nullptr;
  } else {
    unsigned Alignment = AI.getAlignment();
    if (!Alignment) {
      // The minimum alignment which users can rely on when the explicit
      // alignment is omitted or zero is that required by the ABI for this
      // type.
      Alignment = DL.getABITypeAlignment(AI.getAllocatedType());
    }
    Alignment = MinAlign(Alignment, P.beginOffset());
    // If we will get at least this much alignment from the type alone, leave
    // the alloca's alignment unconstrained.
    if (Alignment <= DL.getABITypeAlignment(SliceTy))
      Alignment = 0;
    NewAI = new AllocaInst(
        SliceTy, AI.getType()->getAddressSpace(), nullptr, Alignment,
        AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI);
    // Copy the old AI debug location over to the new one.
    NewAI->setDebugLoc(AI.getDebugLoc());
    ++NumNewAllocas;
  }

  LLVM_DEBUG(dbgs() << "Rewriting alloca partition "
                    << "[" << P.beginOffset() << "," << P.endOffset()
                    << ") to: " << *NewAI << "\n");

  // Track the high watermark on the worklist as it is only relevant for
  // promoted allocas. We will reset it to this point if the alloca is not in
  // fact scheduled for promotion.
  unsigned PPWOldSize = PostPromotionWorklist.size();
  unsigned NumUses = 0;
  SmallSetVector<PHINode *, 8> PHIUsers;
  SmallSetVector<SelectInst *, 8> SelectUsers;

  AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(),
                               P.endOffset(), IsIntegerPromotable, VecTy,
                               PHIUsers, SelectUsers);
  bool Promotable = true;
  for (Slice *S : P.splitSliceTails()) {
    Promotable &= Rewriter.visit(S);
    ++NumUses;
  }
  for (Slice &S : P) {
    Promotable &= Rewriter.visit(&S);
    ++NumUses;
  }

  NumAllocaPartitionUses += NumUses;
  MaxUsesPerAllocaPartition.updateMax(NumUses);

  // Now that we've processed all the slices in the new partition, check if any
  // PHIs or Selects would block promotion.
  for (PHINode *PHI : PHIUsers)
    if (!isSafePHIToSpeculate(*PHI)) {
      Promotable = false;
      PHIUsers.clear();
      SelectUsers.clear();
      break;
    }

  for (SelectInst *Sel : SelectUsers)
    if (!isSafeSelectToSpeculate(*Sel)) {
      Promotable = false;
      PHIUsers.clear();
      SelectUsers.clear();
      break;
    }

  if (Promotable) {
    if (PHIUsers.empty() && SelectUsers.empty()) {
      // Promote the alloca.
      PromotableAllocas.push_back(NewAI);
    } else {
      // If we have either PHIs or Selects to speculate, add them to those
      // worklists and re-queue the new alloca so that we promote it on the
      // next iteration.
      for (PHINode *PHIUser : PHIUsers)
        SpeculatablePHIs.insert(PHIUser);
      for (SelectInst *SelectUser : SelectUsers)
        SpeculatableSelects.insert(SelectUser);
      Worklist.insert(NewAI);
    }
  } else {
    // Drop any post-promotion work items if promotion didn't happen.
    while (PostPromotionWorklist.size() > PPWOldSize)
      PostPromotionWorklist.pop_back();

    // We couldn't promote and we didn't create a new partition, nothing
    // happened.
    if (NewAI == &AI)
      return nullptr;

    // If we can't promote the alloca, iterate on it to check for new
    // refinements exposed by splitting the current alloca. Don't iterate on an
    // alloca which didn't actually change and didn't get promoted.
    Worklist.insert(NewAI);
  }

  return NewAI;
}

/// Walks the slices of an alloca and form partitions based on them,
/// rewriting each of their uses.
bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
  if (AS.begin() == AS.end())
    return false;

  unsigned NumPartitions = 0;
  bool Changed = false;
  const DataLayout &DL = AI.getModule()->getDataLayout();

  // First try to pre-split loads and stores.
  Changed |= presplitLoadsAndStores(AI, AS);

  // Now that we have identified any pre-splitting opportunities,
  // mark loads and stores unsplittable except for the following case.
  // We leave a slice splittable if all other slices are disjoint or fully
  // included in the slice, such as whole-alloca loads and stores.
  // If we fail to split these during pre-splitting, we want to force them
  // to be rewritten into a partition.
  bool IsSorted = true;

  uint64_t AllocaSize = DL.getTypeAllocSize(AI.getAllocatedType());
  const uint64_t MaxBitVectorSize = 1024;
  if (AllocaSize <= MaxBitVectorSize) {
    // If a byte boundary is included in any load or store, a slice starting or
    // ending at the boundary is not splittable.
    SmallBitVector SplittableOffset(AllocaSize + 1, true);
    for (Slice &S : AS)
      for (unsigned O = S.beginOffset() + 1;
           O < S.endOffset() && O < AllocaSize; O++)
        SplittableOffset.reset(O);

    for (Slice &S : AS) {
      if (!S.isSplittable())
        continue;

      if ((S.beginOffset() > AllocaSize || SplittableOffset[S.beginOffset()]) &&
          (S.endOffset() > AllocaSize || SplittableOffset[S.endOffset()]))
        continue;

      if (isa<LoadInst>(S.getUse()->getUser()) ||
          isa<StoreInst>(S.getUse()->getUser())) {
        S.makeUnsplittable();
        IsSorted = false;
      }
    }
  } else {
    // We only allow whole-alloca splittable loads and stores
    // for a large alloca to avoid creating too large BitVector.
    for (Slice &S : AS) {
      if (!S.isSplittable())
        continue;

      if (S.beginOffset() == 0 && S.endOffset() >= AllocaSize)
        continue;

      if (isa<LoadInst>(S.getUse()->getUser()) ||
          isa<StoreInst>(S.getUse()->getUser())) {
        S.makeUnsplittable();
        IsSorted = false;
      }
    }
  }

  if (!IsSorted)
    llvm::sort(AS);

  /// Describes the allocas introduced by rewritePartition in order to migrate
  /// the debug info.
  struct Fragment {
    AllocaInst *Alloca;
    uint64_t Offset;
    uint64_t Size;
    Fragment(AllocaInst *AI, uint64_t O, uint64_t S)
        : Alloca(AI), Offset(O), Size(S) {}
  };
  SmallVector<Fragment, 4> Fragments;

  // Rewrite each partition.
  for (auto &P : AS.partitions()) {
    if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) {
      Changed = true;
      if (NewAI != &AI) {
        uint64_t SizeOfByte = 8;
        uint64_t AllocaSize = DL.getTypeSizeInBits(NewAI->getAllocatedType());
        // Don't include any padding.
        uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte);
        Fragments.push_back(Fragment(NewAI, P.beginOffset() * SizeOfByte, Size));
      }
    }
    ++NumPartitions;
  }

  NumAllocaPartitions += NumPartitions;
  MaxPartitionsPerAlloca.updateMax(NumPartitions);

  // Migrate debug information from the old alloca to the new alloca(s)
  // and the individual partitions.
  TinyPtrVector<DbgVariableIntrinsic *> DbgDeclares = FindDbgAddrUses(&AI);
  if (!DbgDeclares.empty()) {
    auto *Var = DbgDeclares.front()->getVariable();
    auto *Expr = DbgDeclares.front()->getExpression();
    auto VarSize = Var->getSizeInBits();
    DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false);
    uint64_t AllocaSize = DL.getTypeSizeInBits(AI.getAllocatedType());
    for (auto Fragment : Fragments) {
      // Create a fragment expression describing the new partition or reuse AI's
      // expression if there is only one partition.
      auto *FragmentExpr = Expr;
      if (Fragment.Size < AllocaSize || Expr->isFragment()) {
        // If this alloca is already a scalar replacement of a larger aggregate,
        // Fragment.Offset describes the offset inside the scalar.
        auto ExprFragment = Expr->getFragmentInfo();
        uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0;
        uint64_t Start = Offset + Fragment.Offset;
        uint64_t Size = Fragment.Size;
        if (ExprFragment) {
          uint64_t AbsEnd =
              ExprFragment->OffsetInBits + ExprFragment->SizeInBits;
          if (Start >= AbsEnd)
            // No need to describe a SROAed padding.
            continue;
          Size = std::min(Size, AbsEnd - Start);
        }
        // The new, smaller fragment is stenciled out from the old fragment.
        if (auto OrigFragment = FragmentExpr->getFragmentInfo()) {
          assert(Start >= OrigFragment->OffsetInBits &&
                 "new fragment is outside of original fragment");
          Start -= OrigFragment->OffsetInBits;
        }

        // The alloca may be larger than the variable.
        if (VarSize) {
          if (Size > *VarSize)
            Size = *VarSize;
          if (Size == 0 || Start + Size > *VarSize)
            continue;
        }

        // Avoid creating a fragment expression that covers the entire variable.
        if (!VarSize || *VarSize != Size) {
          if (auto E =
                  DIExpression::createFragmentExpression(Expr, Start, Size))
            FragmentExpr = *E;
          else
            continue;
        }
      }

      // Remove any existing intrinsics describing the same alloca.
      for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(Fragment.Alloca))
        OldDII->eraseFromParent();

      DIB.insertDeclare(Fragment.Alloca, Var, FragmentExpr,
                        DbgDeclares.front()->getDebugLoc(), &AI);
    }
  }
  return Changed;
}

/// Clobber a use with undef, deleting the used value if it becomes dead.
void SROA::clobberUse(Use &U) {
  Value *OldV = U;
  // Replace the use with an undef value.
  U = UndefValue::get(OldV->getType());

  // Check for this making an instruction dead. We have to garbage collect
  // all the dead instructions to ensure the uses of any alloca end up being
  // minimal as well.
  if (Instruction *OldI = dyn_cast<Instruction>(OldV))
    if (isInstructionTriviallyDead(OldI)) {
      DeadInsts.insert(OldI);
    }
}
4366 /// This analyzes the alloca to ensure we can reason about it, builds
4367 /// the slices of the alloca, and then hands it off to be split and
4368 /// rewritten as needed.
4369 bool SROA::runOnAlloca(AllocaInst
&AI
) {
4370 LLVM_DEBUG(dbgs() << "SROA alloca: " << AI
<< "\n");
4371 ++NumAllocasAnalyzed
;
4373 // Special case dead allocas, as they're trivial.
4374 if (AI
.use_empty()) {
4375 AI
.eraseFromParent();
4378 const DataLayout
&DL
= AI
.getModule()->getDataLayout();
4380 // Skip alloca forms that this analysis can't handle.
4381 if (AI
.isArrayAllocation() || !AI
.getAllocatedType()->isSized() ||
4382 DL
.getTypeAllocSize(AI
.getAllocatedType()) == 0)
4385 bool Changed
= false;
4387 // First, split any FCA loads and stores touching this alloca to promote
4388 // better splitting and promotion opportunities.
4389 AggLoadStoreRewriter
AggRewriter(DL
);
4390 Changed
|= AggRewriter
.rewrite(AI
);
4392 // Build the slices using a recursive instruction-visiting builder.
4393 AllocaSlices
AS(DL
, AI
);
4394 LLVM_DEBUG(AS
.print(dbgs()));
4398 // Delete all the dead users of this alloca before splitting and rewriting it.
4399 for (Instruction
*DeadUser
: AS
.getDeadUsers()) {
4400 // Free up everything used by this instruction.
4401 for (Use
&DeadOp
: DeadUser
->operands())
4404 // Now replace the uses of this instruction.
4405 DeadUser
->replaceAllUsesWith(UndefValue::get(DeadUser
->getType()));
4407 // And mark it for deletion.
4408 DeadInsts
.insert(DeadUser
);
4411 for (Use
*DeadOp
: AS
.getDeadOperands()) {
4412 clobberUse(*DeadOp
);
4416 // No slices to split. Leave the dead alloca for a later pass to clean up.
4417 if (AS
.begin() == AS
.end())
4420 Changed
|= splitAlloca(AI
, AS
);
4422 LLVM_DEBUG(dbgs() << " Speculating PHIs\n");
4423 while (!SpeculatablePHIs
.empty())
4424 speculatePHINodeLoads(*SpeculatablePHIs
.pop_back_val());
4426 LLVM_DEBUG(dbgs() << " Speculating Selects\n");
4427 while (!SpeculatableSelects
.empty())
4428 speculateSelectInstLoads(*SpeculatableSelects
.pop_back_val());

/// Delete the dead instructions accumulated in this run.
///
/// Recursively deletes the dead instructions we've accumulated. This is done
/// at the very end to maximize locality of the recursive delete and to
/// minimize the problems of invalidated instruction pointers as such pointers
/// are used heavily in the intermediate stages of the algorithm.
///
/// We also record the alloca instructions deleted here so that they aren't
/// subsequently handed to mem2reg to promote.
bool SROA::deleteDeadInstructions(
    SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) {
  bool Changed = false;
  while (!DeadInsts.empty()) {
    Instruction *I = DeadInsts.pop_back_val();
    LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");

    // If the instruction is an alloca, find the possible dbg.declare connected
    // to it, and remove it too. We must do this before calling RAUW or we will
    // not be able to find it.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
      DeletedAllocas.insert(AI);
      for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(AI))
        OldDII->eraseFromParent();
    }

    I->replaceAllUsesWith(UndefValue::get(I->getType()));

    for (Use &Operand : I->operands())
      if (Instruction *U = dyn_cast<Instruction>(Operand)) {
        // Zero out the operand and see if it becomes trivially dead.
        Operand = nullptr;
        if (isInstructionTriviallyDead(U))
          DeadInsts.insert(U);
      }

    ++NumDeleted;
    I->eraseFromParent();
    Changed = true;
  }
  return Changed;
}

/// Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
/// the PromotableAllocas list. If that list is empty, there is nothing to do.
/// This function returns whether any promotion occurred.
bool SROA::promoteAllocas(Function &F) {
  if (PromotableAllocas.empty())
    return false;

  NumPromoted += PromotableAllocas.size();

  LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
  PromoteMemToReg(PromotableAllocas, *DT, AC);
  PromotableAllocas.clear();
  return true;
}

PreservedAnalyses SROA::runImpl(Function &F, DominatorTree &RunDT,
                                AssumptionCache &RunAC) {
  LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
  C = &F.getContext();
  DT = &RunDT;
  AC = &RunAC;

  BasicBlock &EntryBB = F.getEntryBlock();
  for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
       I != E; ++I) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      Worklist.insert(AI);
  }

  bool Changed = false;
  // A set of deleted alloca instruction pointers which should be removed from
  // the list of promotable allocas.
  SmallPtrSet<AllocaInst *, 4> DeletedAllocas;

  do {
    while (!Worklist.empty()) {
      Changed |= runOnAlloca(*Worklist.pop_back_val());
      Changed |= deleteDeadInstructions(DeletedAllocas);

      // Remove the deleted allocas from various lists so that we don't try to
      // continue processing them.
      if (!DeletedAllocas.empty()) {
        auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
        Worklist.remove_if(IsInSet);
        PostPromotionWorklist.remove_if(IsInSet);
        PromotableAllocas.erase(llvm::remove_if(PromotableAllocas, IsInSet),
                                PromotableAllocas.end());
        DeletedAllocas.clear();
      }
    }

    Changed |= promoteAllocas(F);

    Worklist = PostPromotionWorklist;
    PostPromotionWorklist.clear();
  } while (!Worklist.empty());

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  return PA;
}

PreservedAnalyses SROA::run(Function &F, FunctionAnalysisManager &AM) {
  return runImpl(F, AM.getResult<DominatorTreeAnalysis>(F),
                 AM.getResult<AssumptionAnalysis>(F));
}

/// A legacy pass for the legacy pass manager that wraps the \c SROA pass.
///
/// This is in the llvm namespace purely to allow it to be a friend of the \c
/// SROA pass.
class llvm::sroa::SROALegacyPass : public FunctionPass {
  /// The SROA implementation.
  SROA Impl;

public:
  static char ID;

  SROALegacyPass() : FunctionPass(ID) {
    initializeSROALegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto PA = Impl.runImpl(
        F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
    return !PA.areAllPreserved();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }

  StringRef getPassName() const override { return "SROA"; }
};

char SROALegacyPass::ID = 0;

FunctionPass *llvm::createSROAPass() { return new SROALegacyPass(); }

INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa",
                      "Scalar Replacement Of Aggregates", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates",
                    false, false)