//===------ PPCLoopPreIncPrep.cpp - Loop Pre-Inc. AM Prep. Pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to prepare loops for pre-increment addressing
// modes. Additional PHIs are created for loop induction variables used by
// load/store instructions so that the pre-increment forms can be used.
// Generically, this means transforming loops like this:
//   for (int i = 0; i < n; ++i)
//     array[i] = c;
// to look like this:
//   T *p = array[-1];
//   for (int i = 0; i < n; ++i)
//     *++p = c;
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "ppc-loop-preinc-prep"

#include "PPC.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <cassert>
#include <iterator>
#include <utility>
using namespace llvm;

// By default, we limit this to creating 16 PHIs (which is a little over half
// of the allocatable register set).
static cl::opt<unsigned> MaxVars("ppc-preinc-prep-max-vars",
                                 cl::Hidden, cl::init(16),
  cl::desc("Potential PHI threshold for PPC preinc loop prep"));

STATISTIC(PHINodeAlreadyExists, "PHI node already in pre-increment form");
STATISTIC(UpdFormChainRewritten, "Num of update form chain rewritten");
namespace {

// One memory access within a bucket: the instruction itself and its constant
// offset from the bucket's base SCEV (null for the base element).
struct BucketElement {
  BucketElement(const SCEVConstant *O, Instruction *I) : Offset(O), Instr(I) {}
  BucketElement(Instruction *I) : Offset(nullptr), Instr(I) {}

  const SCEVConstant *Offset;
  Instruction *Instr;
};

// A bucket groups memory accesses whose addresses differ from a common base
// SCEV only by constant offsets.
struct Bucket {
  Bucket(const SCEV *B, Instruction *I) : BaseSCEV(B),
                                          Elements(1, BucketElement(I)) {}

  const SCEV *BaseSCEV;
  SmallVector<BucketElement, 16> Elements;
};

class PPCLoopPreIncPrep : public FunctionPass {
public:
  static char ID; // Pass ID, replacement for typeid

  PPCLoopPreIncPrep() : FunctionPass(ID) {
    initializePPCLoopPreIncPrepPass(*PassRegistry::getPassRegistry());
  }

  PPCLoopPreIncPrep(PPCTargetMachine &TM) : FunctionPass(ID), TM(&TM) {
    initializePPCLoopPreIncPrepPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
  }

  bool runOnFunction(Function &F) override;

private:
  PPCTargetMachine *TM = nullptr;
  const PPCSubtarget *ST;
  DominatorTree *DT;
  LoopInfo *LI;
  ScalarEvolution *SE;
  bool PreserveLCSSA;

  bool runOnLoop(Loop *L);

  /// Check whether the required PHI node already exists in Loop \p L.
  bool alreadyPrepared(Loop *L, Instruction *MemI,
                       const SCEV *BasePtrStartSCEV,
                       const SCEVConstant *BasePtrIncSCEV);

  /// Collect candidates in Loop \p L for which \p isValidCandidate() returns
  /// true.
  SmallVector<Bucket, 16>
  collectCandidates(Loop *L,
                    std::function<bool(const Instruction *, const Value *)>
                        isValidCandidate,
                    unsigned MaxCandidateNum);

  /// Add a candidate to the candidate list \p Buckets.
  void addOneCandidate(Instruction *MemI, const SCEV *LSCEV,
                       SmallVector<Bucket, 16> &Buckets,
                       unsigned MaxCandidateNum);

  /// Prepare all candidates in \p Buckets for update form.
  bool updateFormPrep(Loop *L, SmallVector<Bucket, 16> &Buckets);

  /// Prepare one chain \p BucketChain: find the best base element and
  /// update all other elements in \p BucketChain accordingly.
  bool prepareBaseForUpdateFormChain(Bucket &BucketChain);

  /// Rewrite the load/store instructions in \p BucketChain according to the
  /// preparation.
  bool rewriteLoadStores(Loop *L, Bucket &BucketChain,
                         SmallSet<BasicBlock *, 16> &BBChanged);
};

} // end anonymous namespace
char PPCLoopPreIncPrep::ID = 0;
static const char *name = "Prepare loop for pre-inc. addressing modes";
INITIALIZE_PASS_BEGIN(PPCLoopPreIncPrep, DEBUG_TYPE, name, false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(PPCLoopPreIncPrep, DEBUG_TYPE, name, false, false)

static const std::string PHINodeNameSuffix = ".phi";
static const std::string CastNodeNameSuffix = ".cast";
static const std::string GEPNodeIncNameSuffix = ".inc";
static const std::string GEPNodeOffNameSuffix = ".off";

FunctionPass *llvm::createPPCLoopPreIncPrepPass(PPCTargetMachine &TM) {
  return new PPCLoopPreIncPrep(TM);
}
static bool IsPtrInBounds(Value *BasePtr) {
  Value *StrippedBasePtr = BasePtr;
  while (BitCastInst *BC = dyn_cast<BitCastInst>(StrippedBasePtr))
    StrippedBasePtr = BC->getOperand(0);
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(StrippedBasePtr))
    return GEP->isInBounds();

  return false;
}

static std::string getInstrName(const Value *I, const std::string Suffix) {
  assert(I && "Invalid parameter!");
  if (I->hasName())
    return (I->getName() + Suffix).str();
  else
    return "";
}
static Value *GetPointerOperand(Value *MemI) {
  if (LoadInst *LMemI = dyn_cast<LoadInst>(MemI)) {
    return LMemI->getPointerOperand();
  } else if (StoreInst *SMemI = dyn_cast<StoreInst>(MemI)) {
    return SMemI->getPointerOperand();
  } else if (IntrinsicInst *IMemI = dyn_cast<IntrinsicInst>(MemI)) {
    if (IMemI->getIntrinsicID() == Intrinsic::prefetch)
      return IMemI->getArgOperand(0);
  }

  return nullptr;
}
bool PPCLoopPreIncPrep::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : nullptr;
  PreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
  ST = TM ? TM->getSubtargetImpl(F) : nullptr;

  bool MadeChange = false;

  for (auto I = LI->begin(), IE = LI->end(); I != IE; ++I)
    for (auto L = df_begin(*I), LE = df_end(*I); L != LE; ++L)
      MadeChange |= runOnLoop(*L);

  return MadeChange;
}
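
// Put \p MemI into an existing bucket if its address SCEV differs from that
// bucket's base by a constant; otherwise start a new bucket, as long as the
// \p MaxCandidateNum limit has not been reached.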
void PPCLoopPreIncPrep::addOneCandidate(Instruction *MemI, const SCEV *LSCEV,
                                        SmallVector<Bucket, 16> &Buckets,
                                        unsigned MaxCandidateNum) {
  assert((MemI && GetPointerOperand(MemI)) &&
         "Candidate should be a memory instruction.");
  assert(LSCEV && "Invalid SCEV for Ptr value.");

  bool FoundBucket = false;
  for (auto &B : Buckets) {
    const SCEV *Diff = SE->getMinusSCEV(LSCEV, B.BaseSCEV);
    if (const auto *CDiff = dyn_cast<SCEVConstant>(Diff)) {
      B.Elements.push_back(BucketElement(CDiff, MemI));
      FoundBucket = true;
      break;
    }
  }

  if (!FoundBucket) {
    if (Buckets.size() == MaxCandidateNum)
      return;
    Buckets.push_back(Bucket(LSCEV, MemI));
  }
}
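
// Walk the blocks of \p L and collect the loads, stores and prefetches whose
// address evolves with the loop, bucketing together accesses whose addresses
// differ only by a constant. For example, accesses to p, p + 4 and p + 8 in
// the same loop all have address SCEVs of the form {p,+,step} plus a constant,
// so they end up in one bucket.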
SmallVector<Bucket, 16> PPCLoopPreIncPrep::collectCandidates(
    Loop *L,
    std::function<bool(const Instruction *, const Value *)> isValidCandidate,
    unsigned MaxCandidateNum) {
  SmallVector<Bucket, 16> Buckets;
  for (const auto &BB : L->blocks())
    for (auto &J : *BB) {
      Value *PtrValue;
      Instruction *MemI;

      if (LoadInst *LMemI = dyn_cast<LoadInst>(&J)) {
        MemI = LMemI;
        PtrValue = LMemI->getPointerOperand();
      } else if (StoreInst *SMemI = dyn_cast<StoreInst>(&J)) {
        MemI = SMemI;
        PtrValue = SMemI->getPointerOperand();
      } else if (IntrinsicInst *IMemI = dyn_cast<IntrinsicInst>(&J)) {
        if (IMemI->getIntrinsicID() == Intrinsic::prefetch) {
          MemI = IMemI;
          PtrValue = IMemI->getArgOperand(0);
        } else continue;
      } else continue;

      unsigned PtrAddrSpace = PtrValue->getType()->getPointerAddressSpace();
      if (PtrAddrSpace)
        continue;

      if (L->isLoopInvariant(PtrValue))
        continue;

      const SCEV *LSCEV = SE->getSCEVAtScope(PtrValue, L);
      const SCEVAddRecExpr *LARSCEV = dyn_cast<SCEVAddRecExpr>(LSCEV);
      if (!LARSCEV || LARSCEV->getLoop() != L)
        continue;

      if (isValidCandidate(&J, PtrValue))
        addOneCandidate(MemI, LSCEV, Buckets, MaxCandidateNum);
    }
  return Buckets;
}
// TODO: implement a more clever base choosing policy.
// Currently we always choose an existing load/store offset. This may lead to
// suboptimal code sequences. For example, for one DS chain with offsets
// {-32769, 2003, 2007, 2011}, we choose -32769 as the base offset, and the
// remaining displacements for the loads/stores are {0, 34772, 34776, 34780}.
// Though each offset is now a multiple of 4, it cannot be represented as a
// signed 16-bit immediate.
bool PPCLoopPreIncPrep::prepareBaseForUpdateFormChain(Bucket &BucketChain) {
  // We have a choice now of which instruction's memory operand we use as the
  // base for the generated PHI. Always picking the first instruction in each
  // bucket does not work well, specifically because that instruction might
  // be a prefetch (and there are no pre-increment dcbt variants). Otherwise,
  // the choice is somewhat arbitrary, because the backend will happily
  // generate direct offsets from both the pre-incremented and
  // post-incremented pointer values. Thus, we'll pick the first non-prefetch
  // instruction in each bucket, and adjust the recurrence and other offsets
  // accordingly.
  for (int j = 0, je = BucketChain.Elements.size(); j != je; ++j) {
    if (auto *II = dyn_cast<IntrinsicInst>(BucketChain.Elements[j].Instr))
      if (II->getIntrinsicID() == Intrinsic::prefetch)
        continue;

    // If we'd otherwise pick the first element anyway, there's nothing to do.
    if (j == 0)
      break;

    // If our chosen element has no offset from the base pointer, there's
    // nothing to do.
    if (!BucketChain.Elements[j].Offset ||
        BucketChain.Elements[j].Offset->isZero())
      break;

    const SCEV *Offset = BucketChain.Elements[j].Offset;
    BucketChain.BaseSCEV = SE->getAddExpr(BucketChain.BaseSCEV, Offset);
    for (auto &E : BucketChain.Elements) {
      if (E.Offset)
        E.Offset = cast<SCEVConstant>(SE->getMinusSCEV(E.Offset, Offset));
      else
        E.Offset = cast<SCEVConstant>(SE->getNegativeSCEV(Offset));
    }

    std::swap(BucketChain.Elements[j], BucketChain.Elements[0]);
    break;
  }
  return true;
}
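
// Rewrite one prepared bucket: materialize the value (start - step) in the
// loop predecessor, create an i8* PHI for it in the loop header, and add a
// GEP that advances the PHI by the step on every iteration. The remaining
// accesses in the bucket are rewritten as constant-offset GEPs from that
// incremented pointer. A rough sketch of the resulting IR (names are
// illustrative only):
//
//   preheader:
//     %start = getelementptr i8, i8* %base, i64 -step
//   header:
//     %ptr.phi = phi i8* [ %start, %preheader ], [ %ptr.inc, %backedge ]
//     %ptr.inc = getelementptr i8, i8* %ptr.phi, i64 step
//     ;; loads/stores now use %ptr.inc (plus small constant offsets),
//     ;; bitcast back to their original pointer types where needed.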
bool PPCLoopPreIncPrep::rewriteLoadStores(
    Loop *L, Bucket &BucketChain, SmallSet<BasicBlock *, 16> &BBChanged) {
  bool MadeChange = false;
  const SCEVAddRecExpr *BasePtrSCEV =
      cast<SCEVAddRecExpr>(BucketChain.BaseSCEV);
  if (!BasePtrSCEV->isAffine())
    return MadeChange;

  LLVM_DEBUG(dbgs() << "PIP: Transforming: " << *BasePtrSCEV << "\n");

  assert(BasePtrSCEV->getLoop() == L && "AddRec for the wrong loop?");

  // The instruction corresponding to the Bucket's BaseSCEV must be the first
  // in the vector of elements.
  Instruction *MemI = BucketChain.Elements.begin()->Instr;
  Value *BasePtr = GetPointerOperand(MemI);
  assert(BasePtr && "No pointer operand");

  Type *I8Ty = Type::getInt8Ty(MemI->getParent()->getContext());
  Type *I8PtrTy = Type::getInt8PtrTy(MemI->getParent()->getContext(),
      BasePtr->getType()->getPointerAddressSpace());

  const SCEV *BasePtrStartSCEV = BasePtrSCEV->getStart();
  if (!SE->isLoopInvariant(BasePtrStartSCEV, L))
    return MadeChange;

  const SCEVConstant *BasePtrIncSCEV =
      dyn_cast<SCEVConstant>(BasePtrSCEV->getStepRecurrence(*SE));
  if (!BasePtrIncSCEV)
    return MadeChange;
  BasePtrStartSCEV = SE->getMinusSCEV(BasePtrStartSCEV, BasePtrIncSCEV);
  if (!isSafeToExpand(BasePtrStartSCEV, *SE))
    return MadeChange;

  if (alreadyPrepared(L, MemI, BasePtrStartSCEV, BasePtrIncSCEV))
    return MadeChange;

  LLVM_DEBUG(dbgs() << "PIP: New start is: " << *BasePtrStartSCEV << "\n");

  BasicBlock *Header = L->getHeader();
  unsigned HeaderLoopPredCount = pred_size(Header);
  BasicBlock *LoopPredecessor = L->getLoopPredecessor();

  PHINode *NewPHI =
      PHINode::Create(I8PtrTy, HeaderLoopPredCount,
                      getInstrName(MemI, PHINodeNameSuffix),
                      Header->getFirstNonPHI());

  SCEVExpander SCEVE(*SE, Header->getModule()->getDataLayout(), "pistart");
  Value *BasePtrStart = SCEVE.expandCodeFor(BasePtrStartSCEV, I8PtrTy,
                                            LoopPredecessor->getTerminator());

  // Note that LoopPredecessor might occur in the predecessor list multiple
  // times, and we need to add it the right number of times.
  for (const auto &PI : predecessors(Header)) {
    if (PI != LoopPredecessor)
      continue;

    NewPHI->addIncoming(BasePtrStart, LoopPredecessor);
  }

  Instruction *InsPoint = &*Header->getFirstInsertionPt();
  GetElementPtrInst *PtrInc = GetElementPtrInst::Create(
      I8Ty, NewPHI, BasePtrIncSCEV->getValue(),
      getInstrName(MemI, GEPNodeIncNameSuffix), InsPoint);
  PtrInc->setIsInBounds(IsPtrInBounds(BasePtr));
  for (const auto &PI : predecessors(Header)) {
    if (PI == LoopPredecessor)
      continue;

    NewPHI->addIncoming(PtrInc, PI);
  }

  Instruction *NewBasePtr;
  if (PtrInc->getType() != BasePtr->getType())
    NewBasePtr = new BitCastInst(PtrInc, BasePtr->getType(),
        getInstrName(PtrInc, CastNodeNameSuffix), InsPoint);
  else
    NewBasePtr = PtrInc;

  if (Instruction *IDel = dyn_cast<Instruction>(BasePtr))
    BBChanged.insert(IDel->getParent());
  BasePtr->replaceAllUsesWith(NewBasePtr);
  RecursivelyDeleteTriviallyDeadInstructions(BasePtr);

  // Keep track of the replacement pointer values we've inserted so that we
  // don't generate more pointer values than necessary.
  SmallPtrSet<Value *, 16> NewPtrs;
  NewPtrs.insert(NewBasePtr);

  for (auto I = std::next(BucketChain.Elements.begin()),
       IE = BucketChain.Elements.end(); I != IE; ++I) {
    Value *Ptr = GetPointerOperand(I->Instr);
    assert(Ptr && "No pointer operand");
    if (NewPtrs.count(Ptr))
      continue;

    Instruction *RealNewPtr;
    if (!I->Offset || I->Offset->getValue()->isZero()) {
      RealNewPtr = NewBasePtr;
    } else {
      Instruction *PtrIP = dyn_cast<Instruction>(Ptr);
      if (PtrIP && isa<Instruction>(NewBasePtr) &&
          cast<Instruction>(NewBasePtr)->getParent() == PtrIP->getParent())
        PtrIP = nullptr;
      else if (PtrIP && isa<PHINode>(PtrIP))
        PtrIP = &*PtrIP->getParent()->getFirstInsertionPt();
      else if (!PtrIP)
        PtrIP = I->Instr;

      GetElementPtrInst *NewPtr = GetElementPtrInst::Create(
          I8Ty, PtrInc, I->Offset->getValue(),
          getInstrName(I->Instr, GEPNodeOffNameSuffix), PtrIP);
      if (!PtrIP)
        NewPtr->insertAfter(cast<Instruction>(PtrInc));
      NewPtr->setIsInBounds(IsPtrInBounds(Ptr));
      RealNewPtr = NewPtr;
    }

    if (Instruction *IDel = dyn_cast<Instruction>(Ptr))
      BBChanged.insert(IDel->getParent());

    Instruction *ReplNewPtr;
    if (Ptr->getType() != RealNewPtr->getType()) {
      ReplNewPtr = new BitCastInst(RealNewPtr, Ptr->getType(),
          getInstrName(Ptr, CastNodeNameSuffix));
      ReplNewPtr->insertAfter(RealNewPtr);
    } else
      ReplNewPtr = RealNewPtr;

    Ptr->replaceAllUsesWith(ReplNewPtr);
    RecursivelyDeleteTriviallyDeadInstructions(Ptr);

    NewPtrs.insert(RealNewPtr);
  }

  MadeChange = true;

  UpdFormChainRewritten++;

  return MadeChange;
}
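
// "Update form" here refers to the PPC load/store-with-update instructions
// (e.g. lbzu/lhzu/lwzu/ldu and their store counterparts), which write the
// newly computed effective address back into the base register. Rewriting the
// bucket's base address as a PHI plus a per-iteration increment lets ISel
// select these forms.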
bool PPCLoopPreIncPrep::updateFormPrep(Loop *L,
                                       SmallVector<Bucket, 16> &Buckets) {
  bool MadeChange = false;
  if (Buckets.empty())
    return MadeChange;
  SmallSet<BasicBlock *, 16> BBChanged;
  for (auto &Bucket : Buckets)
    // The base address of each bucket is transformed into a PHI and the other
    // elements are rewritten based on the new base.
    if (prepareBaseForUpdateFormChain(Bucket))
      MadeChange |= rewriteLoadStores(L, Bucket, BBChanged);
  if (MadeChange)
    for (auto &BB : L->blocks())
      if (BBChanged.count(BB))
        DeleteDeadPHIs(BB);
  return MadeChange;
}
// In order to prepare for the pre-increment form, a PHI is added.
// This function checks whether that PHI already exists and returns true if it
// finds an existing PHI with the same start and increment as the one we
// wanted to create.
bool PPCLoopPreIncPrep::alreadyPrepared(Loop *L, Instruction *MemI,
                                        const SCEV *BasePtrStartSCEV,
                                        const SCEVConstant *BasePtrIncSCEV) {
  BasicBlock *BB = MemI->getParent();
  if (!BB)
    return false;

  BasicBlock *PredBB = L->getLoopPredecessor();
  BasicBlock *LatchBB = L->getLoopLatch();

  if (!PredBB || !LatchBB)
    return false;

  // Run through the PHIs and see if we have some that look like a preparation.
  iterator_range<BasicBlock::phi_iterator> PHIIter = BB->phis();
  for (auto &CurrentPHI : PHIIter) {
    PHINode *CurrentPHINode = dyn_cast<PHINode>(&CurrentPHI);
    if (!CurrentPHINode)
      continue;

    if (!SE->isSCEVable(CurrentPHINode->getType()))
      continue;

    const SCEV *PHISCEV = SE->getSCEVAtScope(CurrentPHINode, L);

    const SCEVAddRecExpr *PHIBasePtrSCEV = dyn_cast<SCEVAddRecExpr>(PHISCEV);
    if (!PHIBasePtrSCEV)
      continue;

    const SCEVConstant *PHIBasePtrIncSCEV =
        dyn_cast<SCEVConstant>(PHIBasePtrSCEV->getStepRecurrence(*SE));
    if (!PHIBasePtrIncSCEV)
      continue;

    if (CurrentPHINode->getNumIncomingValues() == 2) {
      if ((CurrentPHINode->getIncomingBlock(0) == LatchBB &&
           CurrentPHINode->getIncomingBlock(1) == PredBB) ||
          (CurrentPHINode->getIncomingBlock(1) == LatchBB &&
           CurrentPHINode->getIncomingBlock(0) == PredBB)) {
        if (PHIBasePtrSCEV->getStart() == BasePtrStartSCEV &&
            PHIBasePtrIncSCEV == BasePtrIncSCEV) {
          // The existing PHI (CurrentPHINode) has the same start and increment
          // as the PHI that we wanted to create.
          ++PHINodeAlreadyExists;
          return true;
        }
      }
    }
  }
  return false;
}
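
// Prepare one loop: make sure it has a usable predecessor block, collect the
// update-form candidates, and rewrite them.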
bool PPCLoopPreIncPrep::runOnLoop(Loop *L) {
  bool MadeChange = false;

  // Only prep. the innermost loop.
  if (!L->empty())
    return MadeChange;

  LLVM_DEBUG(dbgs() << "PIP: Examining: " << *L << "\n");

  BasicBlock *LoopPredecessor = L->getLoopPredecessor();
  // If there is no loop predecessor, or the loop predecessor's terminator
  // returns a value (which might contribute to determining the loop's
  // iteration space), insert a new preheader for the loop.
  if (!LoopPredecessor ||
      !LoopPredecessor->getTerminator()->getType()->isVoidTy()) {
    LoopPredecessor = InsertPreheaderForLoop(L, DT, LI, nullptr, PreserveLCSSA);
    if (LoopPredecessor)
      MadeChange = true;
  }
  if (!LoopPredecessor) {
    LLVM_DEBUG(dbgs() << "PIP fails since no predecessor for current loop.\n");
    return MadeChange;
  }

  // Check whether a load/store is a candidate for update form. This lambda is
  // passed to collectCandidates, which collects the kinds of candidates that
  // the lambda accepts.
  auto isUpdateFormCandidate = [&](const Instruction *I,
                                   const Value *PtrValue) {
    assert((PtrValue && I) && "Invalid parameter!");
    // There are no update forms for Altivec vector load/stores.
    if (ST && ST->hasAltivec() &&
        PtrValue->getType()->getPointerElementType()->isVectorTy())
      return false;
    // See getPreIndexedAddressParts: the displacement for LDU/STDU has to be
    // a multiple of 4 (DS-form). For i64 loads/stores whose displacement fits
    // in a 16-bit signed field but is not a multiple of 4 (e.g. a step of 6),
    // the preparation is useless and may even break an addressing mode that
    // was already well-formed, so skip such candidates.
    if (PtrValue->getType()->getPointerElementType()->isIntegerTy(64)) {
      const SCEV *LSCEV = SE->getSCEVAtScope(const_cast<Value *>(PtrValue), L);
      const SCEVAddRecExpr *LARSCEV = dyn_cast<SCEVAddRecExpr>(LSCEV);
      if (!LARSCEV || LARSCEV->getLoop() != L)
        return false;
      if (const SCEVConstant *StepConst =
              dyn_cast<SCEVConstant>(LARSCEV->getStepRecurrence(*SE))) {
        const APInt &ConstInt = StepConst->getValue()->getValue();
        if (ConstInt.isSignedIntN(16) && ConstInt.srem(4) != 0)
          return false;
      }
    }
    return true;
  };

  // Collect buckets of comparable addresses used by loads, stores and prefetch
  // intrinsics for update form.
  SmallVector<Bucket, 16> UpdateFormBuckets =
      collectCandidates(L, isUpdateFormCandidate, MaxVars);

  // Prepare for update form.
  if (!UpdateFormBuckets.empty())
    MadeChange |= updateFormPrep(L, UpdateFormBuckets);

  return MadeChange;
}