[PowerPC] Materialize more constants with CR-field set in late peephole
llvm-core.git / lib/Target/PowerPC/PPCCTRLoops.cpp
//===-- PPCCTRLoops.cpp - Identify and generate CTR loops -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass identifies loops where we can generate the PPC branch instructions
// that decrement and test the count register (CTR) (bdnz and friends).
//
// The pattern that defines the induction variable can change depending on
// prior optimizations. For example, the IndVarSimplify phase run by 'opt'
// normalizes induction variables, and the Loop Strength Reduction pass
// run by 'llc' may also make changes to the induction variable.
//
// Criteria for CTR loops:
//  - Countable loops (w/ ind. var for a trip count)
//  - Try inner-most loops first
//  - No nested CTR loops.
//  - No function calls in loops.
//
//===----------------------------------------------------------------------===//
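
// As an illustration (not part of this file's logic), a simple countable loop
// such as:
//
//   void f(int *A, int N) {
//     for (int I = 0; I < N; ++I)
//       A[I] += 1;
//   }
//
// can be lowered to use the count register, roughly (a hypothetical schedule):
//
//   mtctr 4          # move the trip count (here in r4) into CTR
// loop:
//   ...loop body...
//   bdnz loop        # decrement CTR, branch back while it is nonzero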

#include "PPC.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "PPCTargetTransformInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/PassSupport.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

#ifndef NDEBUG
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#endif

using namespace llvm;

#define DEBUG_TYPE "ctrloops"

#ifndef NDEBUG
static cl::opt<int> CTRLoopLimit("ppc-max-ctrloop", cl::Hidden, cl::init(-1));
#endif

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));
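
// For example, a hypothetical invocation that raises the cutoff:
//   llc -min-ctr-loop-threshold=8 foo.ll
// would leave loops with a known trip count below 8 as ordinary
// compare-and-branch loops.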

STATISTIC(NumCTRLoops, "Number of loops converted to CTR loops");

namespace llvm {
  void initializePPCCTRLoopsPass(PassRegistry&);
#ifndef NDEBUG
  void initializePPCCTRLoopsVerifyPass(PassRegistry&);
#endif
}

namespace {
  struct PPCCTRLoops : public FunctionPass {
#ifndef NDEBUG
    static int Counter;
#endif

  public:
    static char ID;

    PPCCTRLoops() : FunctionPass(ID) {
      initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<LoopInfoWrapperPass>();
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addRequired<ScalarEvolutionWrapperPass>();
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<TargetTransformInfoWrapperPass>();
    }

  private:
    bool mightUseCTR(BasicBlock *BB);
    bool convertToCTRLoop(Loop *L);

  private:
    const PPCTargetMachine *TM;
    const PPCSubtarget *STI;
    const PPCTargetLowering *TLI;
    const DataLayout *DL;
    const TargetLibraryInfo *LibInfo;
    const TargetTransformInfo *TTI;
    LoopInfo *LI;
    ScalarEvolution *SE;
    DominatorTree *DT;
    bool PreserveLCSSA;
    TargetSchedModel SchedModel;
  };

  char PPCCTRLoops::ID = 0;
#ifndef NDEBUG
  int PPCCTRLoops::Counter = 0;
#endif

#ifndef NDEBUG
  struct PPCCTRLoopsVerify : public MachineFunctionPass {
  public:
    static char ID;

    PPCCTRLoopsVerify() : MachineFunctionPass(ID) {
      initializePPCCTRLoopsVerifyPass(*PassRegistry::getPassRegistry());
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

  private:
    MachineDominatorTree *MDT;
  };

  char PPCCTRLoopsVerify::ID = 0;
#endif // NDEBUG
} // end anonymous namespace

INITIALIZE_PASS_BEGIN(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
                    false, false)

FunctionPass *llvm::createPPCCTRLoops() { return new PPCCTRLoops(); }

#ifndef NDEBUG
INITIALIZE_PASS_BEGIN(PPCCTRLoopsVerify, "ppc-ctr-loops-verify",
                      "PowerPC CTR Loops Verify", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PPCCTRLoopsVerify, "ppc-ctr-loops-verify",
                    "PowerPC CTR Loops Verify", false, false)

FunctionPass *llvm::createPPCCTRLoopsVerify() {
  return new PPCCTRLoopsVerify();
}
#endif // NDEBUG

bool PPCCTRLoops::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  TM = &TPC->getTM<PPCTargetMachine>();
  STI = TM->getSubtargetImpl(F);
  TLI = STI->getTargetLowering();

  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  DL = &F.getParent()->getDataLayout();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  LibInfo = TLIP ? &TLIP->getTLI() : nullptr;
  PreserveLCSSA = mustPreserveAnalysisID(LCSSAID);

  bool MadeChange = false;

  for (LoopInfo::iterator I = LI->begin(), E = LI->end();
       I != E; ++I) {
    Loop *L = *I;
    if (!L->getParentLoop())
      MadeChange |= convertToCTRLoop(L);
  }

  return MadeChange;
}

static bool isLargeIntegerTy(bool Is32Bit, Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
    return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

  return false;
}

// Determining the address of a TLS variable results in a function call in
// certain TLS models.
static bool memAddrUsesCTR(const PPCTargetMachine &TM, const Value *MemAddr) {
  const auto *GV = dyn_cast<GlobalValue>(MemAddr);
  if (!GV) {
    // Recurse to check for constants that refer to TLS global variables.
    if (const auto *CV = dyn_cast<Constant>(MemAddr))
      for (const auto &CO : CV->operands())
        if (memAddrUsesCTR(TM, CO))
          return true;

    return false;
  }

  if (!GV->isThreadLocal())
    return false;
  TLSModel::Model Model = TM.getTLSModel(GV);
  return Model == TLSModel::GeneralDynamic || Model == TLSModel::LocalDynamic;
}
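
// For illustration (an assumed PPC64 ELF sequence, not emitted here): a
// general-dynamic access to a TLS variable 'x' expands to something like
//   addis r3, r2, x@got@tlsgd@ha
//   addi  r3, r3, x@got@tlsgd@l
//   bl    __tls_get_addr(x@tlsgd)
// i.e., it contains a real function call, which is why such addresses are
// treated as potentially using the CTR.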

// Loop through the inline asm constraints and look for something that clobbers
// ctr.
static bool asmClobbersCTR(InlineAsm *IA) {
  InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
  for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
    InlineAsm::ConstraintInfo &C = CIV[i];
    if (C.Type != InlineAsm::isInput)
      for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
        if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
          return true;
  }
  return false;
}
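
// As a sketch of what this catches (hypothetical source-level asm): a
// statement such as
//   asm volatile("mtctr %0; bctrl" : : "r"(Fn) : "ctr", "lr");
// reaches the IR with a "~{ctr}" clobber, whose parsed constraint code is
// "{ctr}" with a non-input type, so asmClobbersCTR() returns true for it.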

bool PPCCTRLoops::mightUseCTR(BasicBlock *BB) {
  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to ppc_is_decremented_ctr_nonzero, or ppc_mtctr
          // we're definitely using CTR.
          case Intrinsic::ppc_is_decremented_ctr_nonzero:
          case Intrinsic::ppc_mtctr:
            return true;

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
                       !defined(setjmp_undefined_for_msvc)
#  pragma push_macro("setjmp")
#  undef setjmp
#  define setjmp_undefined_for_msvc
#endif

          case Intrinsic::setjmp:

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
 // let's return it to _setjmp state
#  pragma pop_macro("setjmp")
#  undef setjmp_undefined_for_msvc
#endif

          case Intrinsic::longjmp:

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
          case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
          case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
          case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
          case Intrinsic::rint: Opcode = ISD::FRINT; break;
          case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round: Opcode = ISD::FROUND; break;
          case Intrinsic::minnum: Opcode = ISD::FMINNUM; break;
          case Intrinsic::maxnum: Opcode = ISD::FMAXNUM; break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO; break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO; break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(*DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_f128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM->isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM->isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM->isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM->isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
    if (J->getOpcode() == Instruction::FRem)
      return true;
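    // (For instance, '%r = frem double %a, %b' in the IR becomes a call to
    // the C library's fmod, so such loops cannot use the counter register.)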

    if (STI->useSoftFloat()) {
      switch(J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(*TM, Operand))
        return true;
  }

  return false;
}

bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
  bool MadeChange = false;

  // Do not convert small loops with short trip counts into CTR loops.
  unsigned ConstTripCount = SE->getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    auto AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
        *L->getHeader()->getParent());
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *TTI, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }
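
  // As a rough worked example (illustrative numbers only): with an issue
  // width of 4, a loop with a small constant trip count is skipped unless it
  // contains more than 6 * 4 = 24 instructions, since only then does the
  // mtctr latency pay for itself.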

  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
    MadeChange |= convertToCTRLoop(*I);
    LLVM_DEBUG(dbgs() << "Nested loop converted\n");
  }

  // If a nested loop has been converted, then we can't convert this loop.
  if (MadeChange)
    return MadeChange;

  // Bail out if the loop has irreducible control flow.
  LoopBlocksRPO RPOT(L);
  RPOT.perform(LI);
  if (containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI))
    return false;

#ifndef NDEBUG
  // Stop trying after reaching the limit (if any).
  int Limit = CTRLoopLimit;
  if (Limit >= 0) {
    if (Counter >= CTRLoopLimit)
      return false;
    Counter++;
  }
#endif

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(*I))
      return MadeChange;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !BI->extractProfMetadata(TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if (( TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return MadeChange;
    }
  }
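
  // For reference (an illustrative IR snippet, not produced by this pass),
  // the profile weights consulted above come from metadata such as:
  //   br i1 %exit.cond, label %exit, label %loop.body, !prof !0
  //   !0 = !{!"branch_weights", i32 90, i32 10}
  // Here the exit edge is taken far more often than the backedge, so the
  // transformation would be skipped.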

  BasicBlock *CountedExitBlock = nullptr;
  const SCEV *ExitCount = nullptr;
  BranchInst *CountedExitBranch = nullptr;
  for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(),
       IE = ExitingBlocks.end(); I != IE; ++I) {
    const SCEV *EC = SE->getExitCount(L, *I);
    LLVM_DEBUG(dbgs() << "Exit Count for " << *L << " from block "
                      << (*I)->getName() << ": " << *EC << "\n");
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE->isLoopInvariant(EC, L))
      continue;

    if (SE->getTypeSizeInBits(EC->getType()) > (TM->isPPC64() ? 64 : 32))
      continue;

    // If this exiting block is contained in a nested loop, it is not eligible
    // for insertion of the branch-and-decrement since the inner loop would
    // end up messing up the value in the CTR.
    if (LI->getLoopFor(*I) != L)
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // exiting block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
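    // As an illustrative CFG: in a loop 'header -> (A or B) -> latch ->
    // header' where only one of A/B runs each iteration, an exit branch in A
    // does not dominate the latch, so it is not executed on every iteration
    // and cannot carry the decrement-and-branch.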
    bool NotAlways = false;
    for (pred_iterator PI = pred_begin(L->getHeader()),
         PIE = pred_end(L->getHeader()); PI != PIE; ++PI) {
      if (!L->contains(*PI))
        continue;

      if (!DT->dominates(*I, *PI)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this block ends with a conditional branch.
    Instruction *TI = (*I)->getTerminator();
    if (!TI)
      continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional())
        continue;

      CountedExitBranch = BI;
    } else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    CountedExitBlock = *I;
    ExitCount = EC;
    break;
  }

  if (!CountedExitBlock)
    return MadeChange;

  BasicBlock *Preheader = L->getLoopPreheader();

  // If we don't have a preheader, then insert one. If we already have a
  // preheader, then we can use it (except if the preheader contains a use of
  // the CTR register because some such uses might be reordered by the
  // selection DAG after the mtctr instruction).
  if (!Preheader || mightUseCTR(Preheader))
    Preheader = InsertPreheaderForLoop(L, DT, LI, PreserveLCSSA);
  if (!Preheader)
    return MadeChange;

  LLVM_DEBUG(dbgs() << "Preheader for exit count: " << Preheader->getName()
                    << "\n");

  // Insert the count into the preheader and replace the condition used by the
  // selected branch.
  MadeChange = true;

  SCEVExpander SCEVE(*SE, *DL, "loopcnt");
  LLVMContext &C = SE->getContext();
  Type *CountType = TM->isPPC64() ? Type::getInt64Ty(C) : Type::getInt32Ty(C);
  if (!ExitCount->getType()->isPointerTy() &&
      ExitCount->getType() != CountType)
    ExitCount = SE->getZeroExtendExpr(ExitCount, CountType);
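  // The CTR holds the trip count, which is one more than the exit count:
  // e.g., an exit count of 9 means the body runs 10 times, so mtctr gets 10.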
  ExitCount = SE->getAddExpr(ExitCount, SE->getOne(CountType));
  Value *ECValue =
      SCEVE.expandCodeFor(ExitCount, CountType, Preheader->getTerminator());

  IRBuilder<> CountBuilder(Preheader->getTerminator());
  Module *M = Preheader->getParent()->getParent();
  Value *MTCTRFunc = Intrinsic::getDeclaration(M, Intrinsic::ppc_mtctr,
                                               CountType);
  CountBuilder.CreateCall(MTCTRFunc, ECValue);

  IRBuilder<> CondBuilder(CountedExitBranch);
  Value *DecFunc =
      Intrinsic::getDeclaration(M, Intrinsic::ppc_is_decremented_ctr_nonzero);
  Value *NewCond = CondBuilder.CreateCall(DecFunc, {});
  Value *OldCond = CountedExitBranch->getCondition();
  CountedExitBranch->setCondition(NewCond);

  // The false branch must exit the loop.
  if (!L->contains(CountedExitBranch->getSuccessor(0)))
    CountedExitBranch->swapSuccessors();
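
  // After this rewrite the loop looks roughly like (an illustrative sketch
  // of the 64-bit case):
  //   preheader:
  //     call void @llvm.ppc.mtctr.i64(i64 %count)
  //     br label %header
  //   ...
  //   exiting:
  //     %dec = call i1 @llvm.ppc.is.decremented.ctr.nonzero()
  //     br i1 %dec, label %header, label %exit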

  // The old condition may be dead now, and may have even created a dead PHI
  // (the original induction variable).
  RecursivelyDeleteTriviallyDeadInstructions(OldCond);
  // Run through the basic blocks of the loop and see if any of them have dead
  // PHIs that can be removed.
  for (auto I : L->blocks())
    DeleteDeadPHIs(I);

  ++NumCTRLoops;
  return MadeChange;
}

#ifndef NDEBUG
static bool clobbersCTR(const MachineInstr &MI) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg()) {
      if (MO.isDef() && (MO.getReg() == PPC::CTR || MO.getReg() == PPC::CTR8))
        return true;
    } else if (MO.isRegMask()) {
      if (MO.clobbersPhysReg(PPC::CTR) || MO.clobbersPhysReg(PPC::CTR8))
        return true;
    }
  }

  return false;
}

static bool verifyCTRBranch(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator I) {
  MachineBasicBlock::iterator BI = I;
  SmallSet<MachineBasicBlock *, 16> Visited;
  SmallVector<MachineBasicBlock *, 8> Preds;
  bool CheckPreds;

  if (I == MBB->begin()) {
    Visited.insert(MBB);
    goto queue_preds;
  } else
    --I;

check_block:
  Visited.insert(MBB);
  if (I == MBB->end())
    goto queue_preds;

  CheckPreds = true;
  for (MachineBasicBlock::iterator IE = MBB->begin();; --I) {
    unsigned Opc = I->getOpcode();
    if (Opc == PPC::MTCTRloop || Opc == PPC::MTCTR8loop) {
      CheckPreds = false;
      break;
    }

    if (I != BI && clobbersCTR(*I)) {
      LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " (" << MBB->getFullName()
                        << ") instruction " << *I
                        << " clobbers CTR, invalidating "
                        << printMBBReference(*BI->getParent()) << " ("
                        << BI->getParent()->getFullName() << ") instruction "
                        << *BI << "\n");
      return false;
    }

    if (I == IE)
      break;
  }

  if (!CheckPreds && Preds.empty())
    return true;

  if (CheckPreds) {
queue_preds:
    if (MachineFunction::iterator(MBB) == MBB->getParent()->begin()) {
      LLVM_DEBUG(dbgs() << "Unable to find a MTCTR instruction for "
                        << printMBBReference(*BI->getParent()) << " ("
                        << BI->getParent()->getFullName() << ") instruction "
                        << *BI << "\n");
      return false;
    }

    for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
         PIE = MBB->pred_end(); PI != PIE; ++PI)
      Preds.push_back(*PI);
  }

  do {
    MBB = Preds.pop_back_val();
    if (!Visited.count(MBB)) {
      I = MBB->getLastNonDebugInstr();
      goto check_block;
    }
  } while (!Preds.empty());

  return true;
}

bool PPCCTRLoopsVerify::runOnMachineFunction(MachineFunction &MF) {
  MDT = &getAnalysis<MachineDominatorTree>();

  // Verify that all bdnz/bdz instructions are dominated by a loop mtctr before
  // any other instructions that might clobber the ctr register.
  for (MachineFunction::iterator I = MF.begin(), IE = MF.end();
       I != IE; ++I) {
    MachineBasicBlock *MBB = &*I;
    if (!MDT->isReachableFromEntry(MBB))
      continue;

    for (MachineBasicBlock::iterator MII = MBB->getFirstTerminator(),
         MIIE = MBB->end(); MII != MIIE; ++MII) {
      unsigned Opc = MII->getOpcode();
      if (Opc == PPC::BDNZ8 || Opc == PPC::BDNZ ||
          Opc == PPC::BDZ8  || Opc == PPC::BDZ)
        if (!verifyCTRBranch(MBB, MII))
          llvm_unreachable("Invalid PPC CTR loop!");
    }
  }

  return false;
}
#endif // NDEBUG