//===- LoopDistribute.cpp - Loop Distribution Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Loop Distribution Pass. Its main focus is to
// distribute loops that cannot be vectorized due to dependence cycles. It
// tries to isolate the offending dependences into a new loop allowing
// vectorization of the remaining parts.
//
// For dependence analysis, the pass uses the LoopVectorizer's
// LoopAccessAnalysis. Because this analysis presumes no change in the order of
// memory operations, special care is taken to preserve the lexical order of
// these operations.
//
// Similarly to the Vectorizer, the pass also supports loop versioning to
// run-time disambiguate potentially overlapping arrays.
//
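// As an illustrative sketch (hypothetical example, not taken from the pass
// itself), a loop such as
//
//   for (i = 0; i < n; i++) {
//     A[i + 1] = A[i] * B[i];   // dependence cycle, not vectorizable
//     D[i] = E[i] * C[i];       // independent, vectorizable
//   }
//
// may be distributed into two loops, one carrying the cyclic dependence and
// one containing only the vectorizable statements:
//
//   for (i = 0; i < n; i++)
//     A[i + 1] = A[i] * B[i];
//   for (i = 0; i < n; i++)
//     D[i] = E[i] * C[i];
//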
//===----------------------------------------------------------------------===//
24 #include "llvm/Transforms/Scalar/LoopDistribute.h"
25 #include "llvm/ADT/DenseMap.h"
26 #include "llvm/ADT/DepthFirstIterator.h"
27 #include "llvm/ADT/EquivalenceClasses.h"
28 #include "llvm/ADT/Optional.h"
29 #include "llvm/ADT/STLExtras.h"
30 #include "llvm/ADT/SmallPtrSet.h"
31 #include "llvm/ADT/SmallVector.h"
32 #include "llvm/ADT/Statistic.h"
33 #include "llvm/ADT/StringRef.h"
34 #include "llvm/ADT/Twine.h"
35 #include "llvm/ADT/iterator_range.h"
36 #include "llvm/Analysis/AliasAnalysis.h"
37 #include "llvm/Analysis/AssumptionCache.h"
38 #include "llvm/Analysis/GlobalsModRef.h"
39 #include "llvm/Analysis/LoopAccessAnalysis.h"
40 #include "llvm/Analysis/LoopAnalysisManager.h"
41 #include "llvm/Analysis/LoopInfo.h"
42 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
43 #include "llvm/Analysis/ScalarEvolution.h"
44 #include "llvm/Analysis/TargetLibraryInfo.h"
45 #include "llvm/Analysis/TargetTransformInfo.h"
46 #include "llvm/IR/BasicBlock.h"
47 #include "llvm/IR/Constants.h"
48 #include "llvm/IR/DiagnosticInfo.h"
49 #include "llvm/IR/Dominators.h"
50 #include "llvm/IR/Function.h"
51 #include "llvm/IR/InstrTypes.h"
52 #include "llvm/IR/Instruction.h"
53 #include "llvm/IR/Instructions.h"
54 #include "llvm/IR/LLVMContext.h"
55 #include "llvm/IR/Metadata.h"
56 #include "llvm/IR/PassManager.h"
57 #include "llvm/IR/Value.h"
58 #include "llvm/Pass.h"
59 #include "llvm/Support/Casting.h"
60 #include "llvm/Support/CommandLine.h"
61 #include "llvm/Support/Debug.h"
62 #include "llvm/Support/raw_ostream.h"
63 #include "llvm/Transforms/Scalar.h"
64 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
65 #include "llvm/Transforms/Utils/Cloning.h"
66 #include "llvm/Transforms/Utils/LoopUtils.h"
67 #include "llvm/Transforms/Utils/LoopVersioning.h"
68 #include "llvm/Transforms/Utils/ValueMapper.h"
#define LDIST_NAME "loop-distribute"
#define DEBUG_TYPE LDIST_NAME
/// Metadata attribute names
static const char *const LLVMLoopDistributeFollowupAll =
    "llvm.loop.distribute.followup_all";
static const char *const LLVMLoopDistributeFollowupCoincident =
    "llvm.loop.distribute.followup_coincident";
static const char *const LLVMLoopDistributeFollowupSequential =
    "llvm.loop.distribute.followup_sequential";
static const char *const LLVMLoopDistributeFollowupFallback =
    "llvm.loop.distribute.followup_fallback";
93 LDistVerify("loop-distribute-verify", cl::Hidden
,
94 cl::desc("Turn on DominatorTree and LoopInfo verification "
95 "after Loop Distribution"),
98 static cl::opt
<bool> DistributeNonIfConvertible(
99 "loop-distribute-non-if-convertible", cl::Hidden
,
100 cl::desc("Whether to distribute into a loop that may not be "
101 "if-convertible by the loop vectorizer"),
104 static cl::opt
<unsigned> DistributeSCEVCheckThreshold(
105 "loop-distribute-scev-check-threshold", cl::init(8), cl::Hidden
,
106 cl::desc("The maximum number of SCEV checks allowed for Loop "
109 static cl::opt
<unsigned> PragmaDistributeSCEVCheckThreshold(
110 "loop-distribute-scev-check-threshold-with-pragma", cl::init(128),
113 "The maximum number of SCEV checks allowed for Loop "
114 "Distribution for loop marked with #pragma loop distribute(enable)"));
116 static cl::opt
<bool> EnableLoopDistribute(
117 "enable-loop-distribute", cl::Hidden
,
118 cl::desc("Enable the new, experimental LoopDistribution Pass"),
STATISTIC(NumLoopsDistributed, "Number of loops distributed");

namespace {
/// Maintains the set of instructions of the loop for a partition before
/// cloning. After cloning, it hosts the new loop.
class InstPartition {
  using InstructionSet = SmallPtrSet<Instruction *, 8>;

public:
  InstPartition(Instruction *I, Loop *L, bool DepCycle = false)
      : DepCycle(DepCycle), OrigLoop(L) {
    Set.insert(I);
  }
  /// Returns whether this partition contains a dependence cycle.
  bool hasDepCycle() const { return DepCycle; }

  /// Adds an instruction to this partition.
  void add(Instruction *I) { Set.insert(I); }

  /// Collection accessors.
  InstructionSet::iterator begin() { return Set.begin(); }
  InstructionSet::iterator end() { return Set.end(); }
  InstructionSet::const_iterator begin() const { return Set.begin(); }
  InstructionSet::const_iterator end() const { return Set.end(); }
  bool empty() const { return Set.empty(); }
  /// Moves this partition into \p Other. This partition becomes empty
  /// after this.
  void moveTo(InstPartition &Other) {
    Other.Set.insert(Set.begin(), Set.end());
    Set.clear();
    Other.DepCycle |= DepCycle;
  }
  /// Populates the partition with a transitive closure of all the
  /// instructions that the seeded instructions depend on.
  void populateUsedSet() {
    // FIXME: We currently don't use control-dependence but simply include all
    // blocks (possibly empty at the end) and let simplifycfg mostly clean this
    // up.
    for (auto *B : OrigLoop->getBlocks())
      Set.insert(B->getTerminator());

    // Follow the use-def chains to form a transitive closure of all the
    // instructions that the originally seeded instructions depend on.
    SmallVector<Instruction *, 8> Worklist(Set.begin(), Set.end());
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      // Insert instructions from the loop that we depend on.
      for (Value *V : I->operand_values()) {
        auto *I = dyn_cast<Instruction>(V);
        if (I && OrigLoop->contains(I->getParent()) && Set.insert(I).second)
          Worklist.push_back(I);
      }
    }
  }
  /// Clones the original loop.
  ///
  /// Updates LoopInfo and DominatorTree using the information that block \p
  /// LoopDomBB dominates the loop.
  Loop *cloneLoopWithPreheader(BasicBlock *InsertBefore, BasicBlock *LoopDomBB,
                               unsigned Index, LoopInfo *LI,
                               DominatorTree *DT) {
    ClonedLoop = ::cloneLoopWithPreheader(InsertBefore, LoopDomBB, OrigLoop,
                                          VMap, Twine(".ldist") + Twine(Index),
                                          LI, DT, ClonedLoopBlocks);
    return ClonedLoop;
  }

  /// The cloned loop. If this partition is mapped to the original loop,
  /// this is null.
  const Loop *getClonedLoop() const { return ClonedLoop; }
  /// Returns the loop where this partition ends up after distribution.
  /// If this partition is mapped to the original loop then use the block from
  /// the loop.
  Loop *getDistributedLoop() const {
    return ClonedLoop ? ClonedLoop : OrigLoop;
  }

  /// The VMap that is populated by cloning and then used in
  /// remapInstructionsInBlocks to remap the cloned instructions.
  ValueToValueMapTy &getVMap() { return VMap; }

  /// Remaps the cloned instructions using VMap.
  void remapInstructions() {
    remapInstructionsInBlocks(ClonedLoopBlocks, VMap);
  }
  /// Based on the set of instructions selected for this partition,
  /// removes the unnecessary ones.
  void removeUnusedInsts() {
    SmallVector<Instruction *, 8> Unused;

    for (auto *Block : OrigLoop->getBlocks())
      for (auto &Inst : *Block)
        if (!Set.count(&Inst)) {
          Instruction *NewInst = &Inst;
          if (!VMap.empty())
            NewInst = cast<Instruction>(VMap[NewInst]);

          assert(!isa<BranchInst>(NewInst) &&
                 "Branches are marked used early on");
          Unused.push_back(NewInst);
        }

    // Delete the instructions backwards, as it has a reduced likelihood of
    // having to update as many def-use and use-def chains.
    for (auto *Inst : reverse(Unused)) {
      if (!Inst->use_empty())
        Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
      Inst->eraseFromParent();
    }
  }

  void print() const {
    if (DepCycle)
      dbgs() << " (cycle)\n";
    for (auto *I : Set)
      // Prefix with the block name.
      dbgs() << "  " << I->getParent()->getName() << ":" << *I << "\n";
  }
  void printBlocks() const {
    for (auto *BB : getDistributedLoop()->getBlocks())
      dbgs() << *BB;
  }

private:
  /// Instructions from OrigLoop selected for this partition.
  InstructionSet Set;

  /// Whether this partition contains a dependence cycle.
  bool DepCycle;

  /// The original loop.
  Loop *OrigLoop;

  /// The cloned loop. If this partition is mapped to the original loop,
  /// this is null.
  Loop *ClonedLoop = nullptr;

  /// The blocks of ClonedLoop including the preheader. If this
  /// partition is mapped to the original loop, this is empty.
  SmallVector<BasicBlock *, 8> ClonedLoopBlocks;

  /// This gets populated once the set of instructions has been
  /// finalized. If this partition is mapped to the original loop, it is not
  /// populated.
  ValueToValueMapTy VMap;
};
/// Holds the set of Partitions. It populates them, merges them and then
/// clones the loops.
class InstPartitionContainer {
  using InstToPartitionIdT = DenseMap<Instruction *, int>;

public:
  InstPartitionContainer(Loop *L, LoopInfo *LI, DominatorTree *DT)
      : L(L), LI(LI), DT(DT) {}
  /// Returns the number of partitions.
  unsigned getSize() const { return PartitionContainer.size(); }

  /// Adds \p Inst into the current partition if that is marked to
  /// contain cycles. Otherwise start a new partition for it.
  void addToCyclicPartition(Instruction *Inst) {
    // If the current partition is non-cyclic, start a new one.
    if (PartitionContainer.empty() || !PartitionContainer.back().hasDepCycle())
      PartitionContainer.emplace_back(Inst, L, /*DepCycle=*/true);
    else
      PartitionContainer.back().add(Inst);
  }

  /// Adds \p Inst into a partition that is not marked to contain
  /// dependence cycles.
  ///
  // Initially we isolate memory instructions into as many partitions as
  // possible, then later we may merge them back together.
  void addToNewNonCyclicPartition(Instruction *Inst) {
    PartitionContainer.emplace_back(Inst, L);
  }
  /// Merges adjacent non-cyclic partitions.
  ///
  /// The idea is that we currently only want to isolate the non-vectorizable
  /// partition. We could later allow more distribution among these partitions.
  void mergeAdjacentNonCyclic() {
    mergeAdjacentPartitionsIf(
        [](const InstPartition *P) { return !P->hasDepCycle(); });
  }

  /// If a partition contains only conditional stores, we won't vectorize
  /// it. Try to merge it with a previous cyclic partition.
  void mergeNonIfConvertible() {
    mergeAdjacentPartitionsIf([&](const InstPartition *Partition) {
      if (Partition->hasDepCycle())
        return true;

      // Now, check if all stores are conditional in this partition.
      bool seenStore = false;

      for (auto *Inst : *Partition)
        if (isa<StoreInst>(Inst)) {
          seenStore = true;
          if (!LoopAccessInfo::blockNeedsPredication(Inst->getParent(), L, DT))
            return false;
        }
      return seenStore;
    });
  }

  /// Merges the partitions according to various heuristics.
  void mergeBeforePopulating() {
    mergeAdjacentNonCyclic();
    if (!DistributeNonIfConvertible)
      mergeNonIfConvertible();
  }
  /// Merges partitions in order to ensure that no loads are duplicated.
  ///
  /// We can't duplicate loads because that could potentially reorder them.
  /// LoopAccessAnalysis provides dependency information with the context that
  /// the order of memory operation is preserved.
  ///
  /// Return if any partitions were merged.
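  ///
  /// Illustrative sketch (hypothetical example): if partitions P0 and P2 both
  /// contain the same load and P1 sits between them in program order, then P1
  /// and P2 are merged into P0; otherwise the load would either have to be
  /// duplicated or memory operations would be reordered across P1.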
  bool mergeToAvoidDuplicatedLoads() {
    using LoadToPartitionT = DenseMap<Instruction *, InstPartition *>;
    using ToBeMergedT = EquivalenceClasses<InstPartition *>;

    LoadToPartitionT LoadToPartition;
    ToBeMergedT ToBeMerged;

    // Step through the partitions and create equivalence between partitions
    // that contain the same load. Also put partitions in between them in the
    // same equivalence class to avoid reordering of memory operations.
    for (PartitionContainerT::iterator I = PartitionContainer.begin(),
                                       E = PartitionContainer.end();
         I != E; ++I) {
      auto *PartI = &*I;

      // If a load occurs in two partitions PartI and PartJ, merge all
      // partitions (PartI, PartJ] into PartI.
      for (Instruction *Inst : *PartI)
        if (isa<LoadInst>(Inst)) {
          bool NewElt;
          LoadToPartitionT::iterator LoadToPart;

          std::tie(LoadToPart, NewElt) =
              LoadToPartition.insert(std::make_pair(Inst, PartI));
          if (!NewElt) {
            LLVM_DEBUG(dbgs()
                       << "Merging partitions due to this load in multiple "
                       << "partitions: " << PartI << ", " << LoadToPart->second
                       << "\n" << *Inst << "\n");

            auto PartJ = I;
            do {
              --PartJ;
              ToBeMerged.unionSets(PartI, &*PartJ);
            } while (&*PartJ != LoadToPart->second);
          }
        }
    }
    if (ToBeMerged.empty())
      return false;

    // Merge the member of an equivalence class into its class leader. This
    // makes the members empty.
    for (ToBeMergedT::iterator I = ToBeMerged.begin(), E = ToBeMerged.end();
         I != E; ++I) {
      if (!I->isLeader())
        continue;

      auto PartI = I->getData();
      for (auto PartJ : make_range(std::next(ToBeMerged.member_begin(I)),
                                   ToBeMerged.member_end())) {
        PartJ->moveTo(*PartI);
      }
    }

    // Remove the empty partitions.
    PartitionContainer.remove_if(
        [](const InstPartition &P) { return P.empty(); });

    return true;
  }
  /// Sets up the mapping between instructions to partitions. If the
  /// instruction is duplicated across multiple partitions, set the entry to -1.
  void setupPartitionIdOnInstructions() {
    int PartitionID = 0;
    for (const auto &Partition : PartitionContainer) {
      for (Instruction *Inst : Partition) {
        bool NewElt;
        InstToPartitionIdT::iterator Iter;

        std::tie(Iter, NewElt) =
            InstToPartitionId.insert(std::make_pair(Inst, PartitionID));
        if (!NewElt)
          Iter->second = -1;
      }
      ++PartitionID;
    }
  }
  /// Populates the partitions with everything that the seeding
  /// instructions require.
  void populateUsedSet() {
    for (auto &P : PartitionContainer)
      P.populateUsedSet();
  }
  /// This performs the main chunk of the work of cloning the loops for
  /// the partitions.
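  ///
  /// Sketch of the resulting layout (assumed here, for two partitions P0 and
  /// P1): Pred -> P0 preheader -> P0 (cloned loop) -> original preheader ->
  /// P1 (the original loop) -> original exit block.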
  void cloneLoops() {
    BasicBlock *OrigPH = L->getLoopPreheader();
    // At this point the predecessor of the preheader is either the memcheck
    // block or the top part of the original preheader.
    BasicBlock *Pred = OrigPH->getSinglePredecessor();
    assert(Pred && "Preheader does not have a single predecessor");
    BasicBlock *ExitBlock = L->getExitBlock();
    assert(ExitBlock && "No single exit block");
    Loop *NewLoop;

    assert(!PartitionContainer.empty() && "at least two partitions expected");
    // We're cloning the preheader along with the loop so we already made sure
    // it was empty.
    assert(&*OrigPH->begin() == OrigPH->getTerminator() &&
           "preheader not empty");

    // Preserve the original loop ID for use after the transformation.
    MDNode *OrigLoopID = L->getLoopID();

    // Create a loop for each partition except the last. Clone the original
    // loop before PH along with adding a preheader for the cloned loop. Then
    // update PH to point to the newly added preheader.
    BasicBlock *TopPH = OrigPH;
    unsigned Index = getSize() - 1;
    for (auto I = std::next(PartitionContainer.rbegin()),
              E = PartitionContainer.rend();
         I != E; ++I, --Index, TopPH = NewLoop->getLoopPreheader()) {
      auto *Part = &*I;

      NewLoop = Part->cloneLoopWithPreheader(TopPH, Pred, Index, LI, DT);

      Part->getVMap()[ExitBlock] = TopPH;
      Part->remapInstructions();
      setNewLoopID(OrigLoopID, Part);
    }
    Pred->getTerminator()->replaceUsesOfWith(OrigPH, TopPH);

    // Also set a new loop ID for the last loop.
    setNewLoopID(OrigLoopID, &PartitionContainer.back());

    // Now go in forward order and update the immediate dominator for the
    // preheaders with the exiting block of the previous loop. Dominance
    // within the loop is updated in cloneLoopWithPreheader.
    for (auto Curr = PartitionContainer.cbegin(),
              Next = std::next(PartitionContainer.cbegin()),
              E = PartitionContainer.cend();
         Next != E; ++Curr, ++Next)
      DT->changeImmediateDominator(
          Next->getDistributedLoop()->getLoopPreheader(),
          Curr->getDistributedLoop()->getExitingBlock());
  }
  /// Removes the dead instructions from the cloned loops.
  void removeUnusedInsts() {
    for (auto &Partition : PartitionContainer)
      Partition.removeUnusedInsts();
  }
  /// For each memory pointer, it computes the partitionId the pointer is
  /// used in.
  ///
  /// This returns an array of int where the I-th entry corresponds to I-th
  /// entry in LAI.getRuntimePointerCheck(). If the pointer is used in multiple
  /// partitions its entry is set to -1.
  SmallVector<int, 8>
  computePartitionSetForPointers(const LoopAccessInfo &LAI) {
    const RuntimePointerChecking *RtPtrCheck = LAI.getRuntimePointerChecking();

    unsigned N = RtPtrCheck->Pointers.size();
    SmallVector<int, 8> PtrToPartitions(N);
    for (unsigned I = 0; I < N; ++I) {
      Value *Ptr = RtPtrCheck->Pointers[I].PointerValue;
      auto Instructions =
          LAI.getInstructionsForAccess(Ptr, RtPtrCheck->Pointers[I].IsWritePtr);

      int &Partition = PtrToPartitions[I];
      // First set it to uninitialized.
      Partition = -2;
      for (Instruction *Inst : Instructions) {
        // Note that this could be -1 if Inst is duplicated across multiple
        // partitions.
        int ThisPartition = this->InstToPartitionId[Inst];
        if (Partition == -2)
          Partition = ThisPartition;
        // -1 means belonging to multiple partitions.
        else if (Partition == -1)
          break;
        else if (Partition != (int)ThisPartition)
          Partition = -1;
      }
      assert(Partition != -2 && "Pointer not belonging to any partition");
    }

    return PtrToPartitions;
  }
  void print(raw_ostream &OS) const {
    unsigned Index = 0;
    for (const auto &P : PartitionContainer) {
      OS << "Partition " << Index++ << " (" << &P << "):\n";
      P.print();
    }
  }

  void dump() const { print(dbgs()); }

  friend raw_ostream &operator<<(raw_ostream &OS,
                                 const InstPartitionContainer &Partitions) {
    Partitions.print(OS);
    return OS;
  }

  void printBlocks() const {
    unsigned Index = 0;
    for (const auto &P : PartitionContainer) {
      dbgs() << "\nPartition " << Index++ << " (" << &P << "):\n";
      P.printBlocks();
    }
  }
private:
  using PartitionContainerT = std::list<InstPartition>;

  /// List of partitions.
  PartitionContainerT PartitionContainer;

  /// Mapping from Instruction to partition Id. If the instruction
  /// belongs to multiple partitions the entry contains -1.
  InstToPartitionIdT InstToPartitionId;

  Loop *L;
  LoopInfo *LI;
  DominatorTree *DT;
  /// The control structure to merge adjacent partitions if both satisfy
  /// the \p Predicate.
  template <class UnaryPredicate>
  void mergeAdjacentPartitionsIf(UnaryPredicate Predicate) {
    InstPartition *PrevMatch = nullptr;
    for (auto I = PartitionContainer.begin(); I != PartitionContainer.end();) {
      auto DoesMatch = Predicate(&*I);
      if (PrevMatch == nullptr && DoesMatch) {
        PrevMatch = &*I;
        ++I;
      } else if (PrevMatch != nullptr && DoesMatch) {
        I->moveTo(*PrevMatch);
        I = PartitionContainer.erase(I);
      } else {
        PrevMatch = nullptr;
        ++I;
      }
    }
  }
  /// Assign new LoopIDs for the partition's cloned loop.
  void setNewLoopID(MDNode *OrigLoopID, InstPartition *Part) {
    Optional<MDNode *> PartitionID = makeFollowupLoopID(
        OrigLoopID,
        {LLVMLoopDistributeFollowupAll,
         Part->hasDepCycle() ? LLVMLoopDistributeFollowupSequential
                             : LLVMLoopDistributeFollowupCoincident});
    if (PartitionID.hasValue()) {
      Loop *NewLoop = Part->getDistributedLoop();
      NewLoop->setLoopID(PartitionID.getValue());
    }
  }
};
/// For each memory instruction, this class maintains the difference of the
/// number of unsafe dependences that start out from this instruction minus
/// those that end here.
///
/// By traversing the memory instructions in program order and accumulating this
/// number, we know whether any unsafe dependence crosses over a program point.
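/// (For instance, a single unsafe dependence spanning accesses A ... B adds +1
/// at the lexically earlier access A and -1 at the later access B, so the
/// running sum is positive exactly at the program points the dependence
/// spans.)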
class MemoryInstructionDependences {
  using Dependence = MemoryDepChecker::Dependence;

public:
  struct Entry {
    Instruction *Inst;
    unsigned NumUnsafeDependencesStartOrEnd = 0;

    Entry(Instruction *Inst) : Inst(Inst) {}
  };

  using AccessesType = SmallVector<Entry, 8>;

  AccessesType::const_iterator begin() const { return Accesses.begin(); }
  AccessesType::const_iterator end() const { return Accesses.end(); }

  MemoryInstructionDependences(
      const SmallVectorImpl<Instruction *> &Instructions,
      const SmallVectorImpl<Dependence> &Dependences) {
    Accesses.append(Instructions.begin(), Instructions.end());

    LLVM_DEBUG(dbgs() << "Backward dependences:\n");
    for (auto &Dep : Dependences)
      if (Dep.isPossiblyBackward()) {
        // Note that the designations source and destination follow the program
        // order, i.e. source is always first. (The direction is given by the
        // DepType.)
        ++Accesses[Dep.Source].NumUnsafeDependencesStartOrEnd;
        --Accesses[Dep.Destination].NumUnsafeDependencesStartOrEnd;

        LLVM_DEBUG(Dep.print(dbgs(), 2, Instructions));
      }
  }

private:
  AccessesType Accesses;
};
/// The actual class performing the per-loop work.
class LoopDistributeForLoop {
public:
  LoopDistributeForLoop(Loop *L, Function *F, LoopInfo *LI, DominatorTree *DT,
                        ScalarEvolution *SE, OptimizationRemarkEmitter *ORE)
      : L(L), F(F), LI(LI), DT(DT), SE(SE), ORE(ORE) {
    setForced();
  }
  /// Try to distribute an inner-most loop.
  bool processLoop(std::function<const LoopAccessInfo &(Loop &)> &GetLAA) {
    assert(L->empty() && "Only process inner loops.");

    LLVM_DEBUG(dbgs() << "\nLDist: In \""
                      << L->getHeader()->getParent()->getName()
                      << "\" checking " << *L << "\n");

    if (!L->getExitBlock())
      return fail("MultipleExitBlocks", "multiple exit blocks");
    if (!L->isLoopSimplifyForm())
      return fail("NotLoopSimplifyForm",
                  "loop is not in loop-simplify form");

    BasicBlock *PH = L->getLoopPreheader();
    // LAA will check that we only have a single exiting block.
    LAI = &GetLAA(*L);

    // Currently, we only distribute to isolate the part of the loop with
    // dependence cycles to enable partial vectorization.
    if (LAI->canVectorizeMemory())
      return fail("MemOpsCanBeVectorized",
                  "memory operations are safe for vectorization");

    auto *Dependences = LAI->getDepChecker().getDependences();
    if (!Dependences || Dependences->empty())
      return fail("NoUnsafeDeps", "no unsafe dependences to isolate");

    InstPartitionContainer Partitions(L, LI, DT);
    // First, go through each memory operation and assign them to consecutive
    // partitions (the order of partitions follows program order). Put those
    // with unsafe dependences into "cyclic" partition otherwise put each store
    // in its own "non-cyclic" partition (we'll merge these later).
    //
    // Note that a memory operation (e.g. Load2 below) at a program point that
    // has an unsafe dependence (Store3->Load1) spanning over it must be
    // included in the same cyclic partition as the dependent operations. This
    // is to preserve the original program order after distribution. E.g.:
    //
    //                NumUnsafeDependencesStartOrEnd  NumUnsafeDependencesActive
    //  Load1   -.                     1                       0->1
    //  Load2    | /Unsafe/            0                       1
    //  Store3  -'                    -1                       1->0
    //  Load4                          0                       0
    //
    // NumUnsafeDependencesActive > 0 indicates this situation and in this case
    // we just keep assigning to the same cyclic partition until
    // NumUnsafeDependencesActive reaches 0.
    const MemoryDepChecker &DepChecker = LAI->getDepChecker();
    MemoryInstructionDependences MID(DepChecker.getMemoryInstructions(),
                                     *Dependences);

    int NumUnsafeDependencesActive = 0;
    for (auto &InstDep : MID) {
      Instruction *I = InstDep.Inst;
      // We update NumUnsafeDependencesActive post-instruction, catch the
      // start of a dependence directly via NumUnsafeDependencesStartOrEnd.
      if (NumUnsafeDependencesActive ||
          InstDep.NumUnsafeDependencesStartOrEnd > 0)
        Partitions.addToCyclicPartition(I);
      else
        Partitions.addToNewNonCyclicPartition(I);
      NumUnsafeDependencesActive += InstDep.NumUnsafeDependencesStartOrEnd;
      assert(NumUnsafeDependencesActive >= 0 &&
             "Negative number of dependences active");
    }
    // Add partitions for values used outside. These partitions can be out of
    // order from the original program order. This is OK because if the
    // partition uses a load we will merge this partition with the original
    // partition of the load that we set up in the previous loop (see
    // mergeToAvoidDuplicatedLoads).
    auto DefsUsedOutside = findDefsUsedOutsideOfLoop(L);
    for (auto *Inst : DefsUsedOutside)
      Partitions.addToNewNonCyclicPartition(Inst);

    LLVM_DEBUG(dbgs() << "Seeded partitions:\n" << Partitions);
    if (Partitions.getSize() < 2)
      return fail("CantIsolateUnsafeDeps",
                  "cannot isolate unsafe dependencies");

    // Run the merge heuristics: Merge non-cyclic adjacent partitions since we
    // should be able to vectorize these together.
    Partitions.mergeBeforePopulating();
    LLVM_DEBUG(dbgs() << "\nMerged partitions:\n" << Partitions);
    if (Partitions.getSize() < 2)
      return fail("CantIsolateUnsafeDeps",
                  "cannot isolate unsafe dependencies");

    // Now, populate the partitions with non-memory operations.
    Partitions.populateUsedSet();
    LLVM_DEBUG(dbgs() << "\nPopulated partitions:\n" << Partitions);

    // In order to preserve original lexical order for loads, keep them in the
    // partition that we set up in the MemoryInstructionDependences loop.
    if (Partitions.mergeToAvoidDuplicatedLoads()) {
      LLVM_DEBUG(dbgs() << "\nPartitions merged to ensure unique loads:\n"
                        << Partitions);
      if (Partitions.getSize() < 2)
        return fail("CantIsolateUnsafeDeps",
                    "cannot isolate unsafe dependencies");
    }
    // Don't distribute the loop if we need too many SCEV run-time checks.
    const SCEVUnionPredicate &Pred = LAI->getPSE().getUnionPredicate();
    if (Pred.getComplexity() > (IsForced.getValueOr(false)
                                    ? PragmaDistributeSCEVCheckThreshold
                                    : DistributeSCEVCheckThreshold))
      return fail("TooManySCEVRuntimeChecks",
                  "too many SCEV run-time checks needed.\n");

    if (!IsForced.getValueOr(false) && hasDisableAllTransformsHint(L))
      return fail("HeuristicDisabled", "distribution heuristic disabled");

    LLVM_DEBUG(dbgs() << "\nDistributing loop: " << *L << "\n");
    // We're done forming the partitions; set up the reverse mapping from
    // instructions to partitions.
    Partitions.setupPartitionIdOnInstructions();

    // To keep things simple have an empty preheader before we version or clone
    // the loop. (Also split if this has no predecessor, i.e. entry, because we
    // rely on PH having a predecessor.)
    if (!PH->getSinglePredecessor() || &*PH->begin() != PH->getTerminator())
      SplitBlock(PH, PH->getTerminator(), DT, LI);
    // If we need run-time checks, version the loop now.
    auto PtrToPartition = Partitions.computePartitionSetForPointers(*LAI);
    const auto *RtPtrChecking = LAI->getRuntimePointerChecking();
    const auto &AllChecks = RtPtrChecking->getChecks();
    auto Checks = includeOnlyCrossPartitionChecks(AllChecks, PtrToPartition,
                                                  RtPtrChecking);

    if (!Pred.isAlwaysTrue() || !Checks.empty()) {
      MDNode *OrigLoopID = L->getLoopID();

      LLVM_DEBUG(dbgs() << "\nPointers:\n");
      LLVM_DEBUG(LAI->getRuntimePointerChecking()->printChecks(dbgs(), Checks));
      LoopVersioning LVer(*LAI, L, LI, DT, SE, false);
      LVer.setAliasChecks(std::move(Checks));
      LVer.setSCEVChecks(LAI->getPSE().getUnionPredicate());
      LVer.versionLoop(DefsUsedOutside);
      LVer.annotateLoopWithNoAlias();

      // The unversioned loop will not be changed, so we inherit all attributes
      // from the original loop, but remove the loop distribution metadata to
      // avoid distributing it again.
      MDNode *UnversionedLoopID =
          makeFollowupLoopID(OrigLoopID,
                             {LLVMLoopDistributeFollowupAll,
                              LLVMLoopDistributeFollowupFallback},
                             "llvm.loop.distribute.", true)
              .getValue();
      LVer.getNonVersionedLoop()->setLoopID(UnversionedLoopID);
    }
    // Create identical copies of the original loop for each partition and hook
    // them up sequentially.
    Partitions.cloneLoops();

    // Now, we remove the instructions from each loop that don't belong to that
    // partition.
    Partitions.removeUnusedInsts();
    LLVM_DEBUG(dbgs() << "\nAfter removing unused Instrs:\n");
    LLVM_DEBUG(Partitions.printBlocks());

    if (LDistVerify) {
      LI->verify(*DT);
      assert(DT->verify(DominatorTree::VerificationLevel::Fast));
    }

    ++NumLoopsDistributed;
    // Report the success.
    ORE->emit([&]() {
      return OptimizationRemark(LDIST_NAME, "Distribute", L->getStartLoc(),
                                L->getHeader())
             << "distributed loop";
    });
    return true;
  }
  /// Provide diagnostics then \return with false.
  bool fail(StringRef RemarkName, StringRef Message) {
    LLVMContext &Ctx = F->getContext();
    bool Forced = isForced().getValueOr(false);

    LLVM_DEBUG(dbgs() << "Skipping; " << Message << "\n");

    // With Rpass-missed report that distribution failed.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LDIST_NAME, "NotDistributed",
                                      L->getStartLoc(), L->getHeader())
             << "loop not distributed: use -Rpass-analysis=loop-distribute for "
                "more info";
    });

    // With Rpass-analysis report why. This is on by default if distribution
    // was requested explicitly.
    ORE->emit(OptimizationRemarkAnalysis(
                  Forced ? OptimizationRemarkAnalysis::AlwaysPrint : LDIST_NAME,
                  RemarkName, L->getStartLoc(), L->getHeader())
              << "loop not distributed: " << Message);

    // Also issue a warning if distribution was requested explicitly but it
    // failed.
    if (Forced)
      Ctx.diagnose(DiagnosticInfoOptimizationFailure(
          *F, L->getStartLoc(), "loop not distributed: failed "
                                "explicitly specified loop distribution"));

    return false;
  }
  /// Return if distribution forced to be enabled/disabled for the loop.
  ///
  /// If the optional has a value, it indicates whether distribution was forced
  /// to be enabled (true) or disabled (false). If the optional has no value
  /// distribution was not forced either way.
  const Optional<bool> &isForced() const { return IsForced; }

private:
  /// Filter out checks between pointers from the same partition.
  ///
  /// \p PtrToPartition contains the partition number for pointers. Partition
  /// number -1 means that the pointer is used in multiple partitions. In this
  /// case we can't safely omit the check.
  SmallVector<RuntimePointerChecking::PointerCheck, 4>
  includeOnlyCrossPartitionChecks(
      const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &AllChecks,
      const SmallVectorImpl<int> &PtrToPartition,
      const RuntimePointerChecking *RtPtrChecking) {
    SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks;

    copy_if(AllChecks, std::back_inserter(Checks),
            [&](const RuntimePointerChecking::PointerCheck &Check) {
              for (unsigned PtrIdx1 : Check.first->Members)
                for (unsigned PtrIdx2 : Check.second->Members)
                  // Only include this check if there is a pair of pointers
                  // that require checking and the pointers fall into
                  // separate partitions.
                  //
                  // (Note that we already know at this point that the two
                  // pointer groups need checking but it doesn't follow
                  // that each pair of pointers within the two groups need
                  // checking as well.
                  //
                  // In other words we don't want to include a check just
                  // because there is a pair of pointers between the two
                  // pointer groups that require checks and a different
                  // pair whose pointers fall into different partitions.)
                  if (RtPtrChecking->needsChecking(PtrIdx1, PtrIdx2) &&
                      !RuntimePointerChecking::arePointersInSamePartition(
                          PtrToPartition, PtrIdx1, PtrIdx2))
                    return true;

              return false;
            });

    return Checks;
  }
  /// Check whether the loop metadata is forcing distribution to be
  /// enabled/disabled.
  void setForced() {
    Optional<const MDOperand *> Value =
        findStringMetadataForLoop(L, "llvm.loop.distribute.enable");
    if (!Value)
      return;

    const MDOperand *Op = *Value;
    assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
    IsForced = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
  }
  Loop *L;
  Function *F;

  // Analyses used.
  LoopInfo *LI;
  const LoopAccessInfo *LAI = nullptr;
  DominatorTree *DT;
  ScalarEvolution *SE;
  OptimizationRemarkEmitter *ORE;

  /// Indicates whether distribution is forced to be enabled/disabled for
  /// the loop.
  ///
  /// If the optional has a value, it indicates whether distribution was forced
  /// to be enabled (true) or disabled (false). If the optional has no value
  /// distribution was not forced either way.
  Optional<bool> IsForced;
};
} // end anonymous namespace
/// Shared implementation between new and old PMs.
static bool runImpl(Function &F, LoopInfo *LI, DominatorTree *DT,
                    ScalarEvolution *SE, OptimizationRemarkEmitter *ORE,
                    std::function<const LoopAccessInfo &(Loop &)> &GetLAA) {
  // Build up a worklist of inner-loops to vectorize. This is necessary as the
  // act of distributing a loop creates new loops and can invalidate iterators
  // across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop))
      // We only handle inner-most loops.
      if (L->empty())
        Worklist.push_back(L);

  // Now walk the identified inner loops.
  bool Changed = false;
  for (Loop *L : Worklist) {
    LoopDistributeForLoop LDL(L, &F, LI, DT, SE, ORE);

    // If distribution was forced for the specific loop to be
    // enabled/disabled, follow that. Otherwise use the global flag.
    if (LDL.isForced().getValueOr(EnableLoopDistribute))
      Changed |= LDL.processLoop(GetLAA);
  }

  // Process each loop nest in the function.
  return Changed;
}
namespace {

class LoopDistributeLegacy : public FunctionPass {
public:
  static char ID;

  LoopDistributeLegacy() : FunctionPass(ID) {
    // The default is set by the caller.
    initializeLoopDistributeLegacyPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return runImpl(F, LI, DT, SE, ORE, GetLAA);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
  }
};

} // end anonymous namespace
PreservedAnalyses LoopDistributePass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  // We don't directly need these analyses but they're required for loop
  // analyses so provide them below.
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };

  bool Changed = runImpl(F, &LI, &DT, &SE, &ORE, GetLAA);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<GlobalsAA>();
  return PA;
}
char LoopDistributeLegacy::ID;

static const char ldist_name[] = "Loop Distribution";

INITIALIZE_PASS_BEGIN(LoopDistributeLegacy, LDIST_NAME, ldist_name, false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(LoopDistributeLegacy, LDIST_NAME, ldist_name, false, false)

FunctionPass *llvm::createLoopDistributePass() { return new LoopDistributeLegacy(); }