//===- ObjCARCContract.cpp - ObjC ARC Optimization ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// This file defines late ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for
/// objects in Objective C.
///
/// This specific file mainly deals with ``contracting'' multiple lower level
/// operations into singular higher level operations through pattern matching.
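///
/// For illustration, one such contraction (the value names here are purely
/// illustrative, not taken from any particular test case): the pair
///
///   %y = call i8* @llvm.objc.retain(i8* %x)
///   %z = call i8* @llvm.objc.autorelease(i8* %y)
///
/// is fused into the single runtime call
///
///   %y = call i8* @llvm.objc.retainAutorelease(i8* %x)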
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//

// TODO: ObjCARCContract could insert PHI nodes when uses aren't
// dominated by single calls.
#include "ARCRuntimeEntryPoints.h"
#include "DependencyAnalysis.h"
#include "ObjCARC.h"
#include "ProvenanceAnalysis.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/ObjCARC.h"

using namespace llvm;
using namespace llvm::objcarc;

#define DEBUG_TYPE "objc-arc-contract"

STATISTIC(NumPeeps, "Number of calls peephole-optimized");
STATISTIC(NumStoreStrongs, "Number of objc_storeStrong calls formed");

//===----------------------------------------------------------------------===//
//                                Declarations
//===----------------------------------------------------------------------===//

namespace {
/// Late ARC optimizations
///
/// These change the IR in a way that makes it difficult to be analyzed by
/// ObjCARCOpt, so it's run late.
class ObjCARCContract {
  bool Changed;
  bool CFGChanged;
  AAResults *AA;
  DominatorTree *DT;
  ProvenanceAnalysis PA;
  ARCRuntimeEntryPoints EP;
  BundledRetainClaimRVs *BundledInsts = nullptr;

  /// The inline asm string to insert between calls and RetainRV calls to make
  /// the optimization work on targets which need it.
  const MDString *RVInstMarker;

  /// The set of inserted objc_storeStrong calls. If at the end of walking the
  /// function we have found no alloca instructions, these calls can be marked
  /// "tail".
  SmallPtrSet<CallInst *, 8> StoreStrongCalls;

  /// Returns true if we eliminated Inst.
  bool tryToPeepholeInstruction(
      Function &F, Instruction *Inst, inst_iterator &Iter,
      bool &TailOkForStoreStrong,
      const DenseMap<BasicBlock *, ColorVector> &BlockColors);

  bool optimizeRetainCall(Function &F, Instruction *Retain);

  bool contractAutorelease(Function &F, Instruction *Autorelease,
                           ARCInstKind Class);

  void tryToContractReleaseIntoStoreStrong(
      Instruction *Release, inst_iterator &Iter,
      const DenseMap<BasicBlock *, ColorVector> &BlockColors);

public:
  bool init(Module &M);
  bool run(Function &F, AAResults *AA, DominatorTree *DT);
  bool hasCFGChanged() const { return CFGChanged; }
};

class ObjCARCContractLegacyPass : public FunctionPass {
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;

public:
  static char ID;
  ObjCARCContractLegacyPass() : FunctionPass(ID) {
    initializeObjCARCContractLegacyPassPass(*PassRegistry::getPassRegistry());
  }
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
//                                Implementation
//===----------------------------------------------------------------------===//

/// Turn objc_retain into objc_retainAutoreleasedReturnValue if the operand is a
/// return value. We do this late so we do not disrupt the dataflow analysis in
/// ObjCARCOpt.
bool ObjCARCContract::optimizeRetainCall(Function &F, Instruction *Retain) {
  const auto *Call = dyn_cast<CallBase>(GetArgRCIdentityRoot(Retain));
  if (!Call)
    return false;
  if (Call->getParent() != Retain->getParent())
    return false;

  // Check that the call is next to the retain.
  BasicBlock::const_iterator I = ++Call->getIterator();
  while (IsNoopInstruction(&*I))
    ++I;
  if (&*I != Retain)
    return false;

  // Turn it to an objc_retainAutoreleasedReturnValue.
  Changed = true;
  ++NumPeeps;

  LLVM_DEBUG(
      dbgs() << "Transforming objc_retain => "
                "objc_retainAutoreleasedReturnValue since the operand is a "
                "return value.\nOld: "
             << *Retain << "\n");

  // We do not have to worry about tail calls/does not throw since
  // retain/retainRV have the same properties.
  Function *Decl = EP.get(ARCRuntimeEntryPointKind::RetainRV);
  cast<CallInst>(Retain)->setCalledFunction(Decl);

  LLVM_DEBUG(dbgs() << "New: " << *Retain << "\n");
  return true;
}
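
// For intuition, the rewrite above takes (names illustrative):
//
//   %call = call i8* @foo()
//   %0 = call i8* @llvm.objc.retain(i8* %call)
//
// and produces
//
//   %call = call i8* @foo()
//   %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call)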

/// Merge an autorelease with a retain into a fused call.
bool ObjCARCContract::contractAutorelease(Function &F, Instruction *Autorelease,
                                          ARCInstKind Class) {
  const Value *Arg = GetArgRCIdentityRoot(Autorelease);

  // Check that there are no instructions between the retain and the autorelease
  // (such as an autorelease_pop) which may change the count.
  DependenceKind DK = Class == ARCInstKind::AutoreleaseRV
                          ? RetainAutoreleaseRVDep
                          : RetainAutoreleaseDep;
  auto *Retain = dyn_cast_or_null<CallInst>(
      findSingleDependency(DK, Arg, Autorelease->getParent(), Autorelease, PA));

  if (!Retain || GetBasicARCInstKind(Retain) != ARCInstKind::Retain ||
      GetArgRCIdentityRoot(Retain) != Arg)
    return false;

  Changed = true;
  ++NumPeeps;

  LLVM_DEBUG(dbgs() << "    Fusing retain/autorelease!\n"
                       "        Autorelease: " << *Autorelease << "\n"
                       "        Retain: " << *Retain << "\n");

  Function *Decl = EP.get(Class == ARCInstKind::AutoreleaseRV
                              ? ARCRuntimeEntryPointKind::RetainAutoreleaseRV
                              : ARCRuntimeEntryPointKind::RetainAutorelease);
  Retain->setCalledFunction(Decl);

  LLVM_DEBUG(dbgs() << "        New RetainAutorelease: " << *Retain << "\n");

  EraseInstruction(Autorelease);
  return true;
}

static StoreInst *findSafeStoreForStoreStrongContraction(LoadInst *Load,
                                                         Instruction *Release,
                                                         ProvenanceAnalysis &PA,
                                                         AAResults *AA) {
  StoreInst *Store = nullptr;
  bool SawRelease = false;

  // Get the location associated with Load.
  MemoryLocation Loc = MemoryLocation::get(Load);
  auto *LocPtr = Loc.Ptr->stripPointerCasts();

  // Walk down to find the store and the release, which may be in either order.
  for (auto I = std::next(BasicBlock::iterator(Load)),
            E = Load->getParent()->end();
       I != E; ++I) {
    // If we found the store we were looking for and saw the release,
    // break. There is no more work to be done.
    if (Store && SawRelease)
      break;

    // Now we know that we have not seen either the store or the release. If I
    // is the release, mark that we saw the release and continue.
    Instruction *Inst = &*I;
    if (Inst == Release) {
      SawRelease = true;
      continue;
    }

    // Otherwise, we check if Inst is a "good" store. Grab the instruction class
    // of Inst.
    ARCInstKind Class = GetBasicARCInstKind(Inst);

    // If we have seen the store, but not the release...
    if (Store) {
      // We need to make sure that it is safe to move the release from its
      // current position to the store. This implies proving that any
      // instruction in between Store and the Release conservatively can not use
      // the RCIdentityRoot of Release. If we can prove that Inst can be
      // ignored, so far so good; continue.
      if (!CanUse(Inst, Load, PA, Class)) {
        continue;
      }

      // Otherwise, be conservative and return nullptr.
      return nullptr;
    }

    // Ok, now we know we have not seen a store yet.

    // If Inst is a retain, we don't care about it as it doesn't prevent moving
    // the load to the store.
    //
    // TODO: This is one area where the optimization could be made more
    // aggressive.
    if (IsRetain(Class))
      continue;

    // See if Inst can write to our load location; if it can not, just ignore
    // the instruction.
    if (!isModSet(AA->getModRefInfo(Inst, Loc)))
      continue;

    Store = dyn_cast<StoreInst>(Inst);

    // If Inst can, then check if Inst is a simple store. If Inst is not a
    // store, or is a store that is not simple, then something we do not
    // understand is writing to this memory, implying we can not move the load
    // over the write to any subsequent store that we may find.
    if (!Store || !Store->isSimple())
      return nullptr;

    // Then make sure that the pointer we are storing to is Ptr. If so, we
    // found our Store!
    if (Store->getPointerOperand()->stripPointerCasts() == LocPtr)
      continue;

    // Otherwise, we have an unknown store to some other ptr that clobbers
    // Loc.Ptr. Bail!
    return nullptr;
  }

  // If we did not find the store or did not see the release, fail.
  if (!Store || !SawRelease)
    return nullptr;

  // We succeeded!
  return Store;
}
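
// For intuition, the shape being matched above (illustrative IR; the store and
// the release may also appear in the opposite order):
//
//   %old = load i8*, i8** %ptr
//   ...                                        ; nothing that clobbers %ptr
//   store i8* %new, i8** %ptr                  ; the Store returned
//   call void @llvm.objc.release(i8* %old)     ; Release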

static Instruction *
findRetainForStoreStrongContraction(Value *New, StoreInst *Store,
                                    Instruction *Release,
                                    ProvenanceAnalysis &PA) {
  // Walk up from the Store to find the retain.
  BasicBlock::iterator I = Store->getIterator();
  BasicBlock::iterator Begin = Store->getParent()->begin();
  while (I != Begin && GetBasicARCInstKind(&*I) != ARCInstKind::Retain) {
    Instruction *Inst = &*I;

    // It is only safe to move the retain to the store if we can prove
    // conservatively that nothing besides the release can decrement reference
    // counts in between the retain and the store.
    if (CanDecrementRefCount(Inst, New, PA) && Inst != Release)
      return nullptr;
    --I;
  }
  Instruction *Retain = &*I;
  if (GetBasicARCInstKind(Retain) != ARCInstKind::Retain)
    return nullptr;
  if (GetArgRCIdentityRoot(Retain) != New)
    return nullptr;
  return Retain;
}
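
// For intuition, this walks up from the store looking for (illustrative IR):
//
//   call i8* @llvm.objc.retain(i8* %new)   ; the Retain returned
//   ...                                    ; nothing that can decrement
//   ...                                    ; reference counts except Release
//   store i8* %new, i8** %ptr              ; Store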

/// Attempt to merge an objc_release with a store, load, and objc_retain to form
/// an objc_storeStrong. An objc_storeStrong:
///
///   objc_storeStrong(i8** %old_ptr, i8* new_value)
///
/// is equivalent to the following IR sequence:
///
///   ; Load old value.
///   %old_value = load i8** %old_ptr               (1)
///
///   ; Increment the new value and then release the old value. This must occur
///   ; in order in case old_value releases new_value in its destructor causing
///   ; us to potentially have a dangling ptr.
///   tail call i8* @objc_retain(i8* %new_value)    (2)
///   tail call void @objc_release(i8* %old_value)  (3)
///
///   ; Store the new_value into old_ptr
///   store i8* %new_value, i8** %old_ptr           (4)
///
/// The safety of this optimization is based around the following
/// considerations:
///
///  1. We are forming the store strong at the store. Thus to perform this
///     optimization it must be safe to move the retain, load, and release to
///     the store.
///  2. We need to make sure that any re-orderings of (1), (2), (3), (4) are
///     safe.
void ObjCARCContract::tryToContractReleaseIntoStoreStrong(
    Instruction *Release, inst_iterator &Iter,
    const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
  // See if we are releasing something that we just loaded.
  auto *Load = dyn_cast<LoadInst>(GetArgRCIdentityRoot(Release));
  if (!Load || !Load->isSimple())
    return;

  // For now, require everything to be in one basic block.
  BasicBlock *BB = Release->getParent();
  if (Load->getParent() != BB)
    return;

  // First scan down the BB from Load, looking for a store to the same pointer
  // that Load loads from.
  StoreInst *Store =
      findSafeStoreForStoreStrongContraction(Load, Release, PA, AA);
  // If we fail, bail.
  if (!Store)
    return;

  // Then find what new_value's RCIdentity Root is.
  Value *New = GetRCIdentityRoot(Store->getValueOperand());

  // Then walk up the BB and look for a retain on New without any intervening
  // instructions which conservatively might decrement ref counts.
  Instruction *Retain =
      findRetainForStoreStrongContraction(New, Store, Release, PA);

  // If we fail, bail.
  if (!Retain)
    return;

  Changed = true;
  ++NumStoreStrongs;

  LLVM_DEBUG(
      llvm::dbgs() << "    Contracting retain, release into objc_storeStrong.\n"
                   << "        Old:\n"
                   << "            Store:   " << *Store << "\n"
                   << "            Release: " << *Release << "\n"
                   << "            Retain:  " << *Retain << "\n"
                   << "            Load:    " << *Load << "\n");

  LLVMContext &C = Release->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);

  Value *Args[] = { Load->getPointerOperand(), New };
  if (Args[0]->getType() != I8XX)
    Args[0] = new BitCastInst(Args[0], I8XX, "", Store);
  if (Args[1]->getType() != I8X)
    Args[1] = new BitCastInst(Args[1], I8X, "", Store);
  Function *Decl = EP.get(ARCRuntimeEntryPointKind::StoreStrong);
  CallInst *StoreStrong =
      objcarc::createCallInstWithColors(Decl, Args, "", Store, BlockColors);
  StoreStrong->setDoesNotThrow();
  StoreStrong->setDebugLoc(Store->getDebugLoc());

  // We can't set the tail flag yet, because we haven't yet determined
  // whether there are any escaping allocas. Remember this call, so that
  // we can set the tail flag once we know it's safe.
  StoreStrongCalls.insert(StoreStrong);

  LLVM_DEBUG(llvm::dbgs() << "        New Store Strong: " << *StoreStrong
                          << "\n");

  if (&*Iter == Retain) ++Iter;
  if (&*Iter == Store) ++Iter;
  Store->eraseFromParent();
  Release->eraseFromParent();
  EraseInstruction(Retain);
  if (Load->use_empty())
    Load->eraseFromParent();
}
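
// Net effect of a successful contraction (illustrative): the matched
// load/retain/release/store quartet from the comment above collapses into
//
//   call void @llvm.objc.storeStrong(i8** %old_ptr, i8* %new_value)
//
// with the load erased too, if it had no other uses.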

bool ObjCARCContract::tryToPeepholeInstruction(
    Function &F, Instruction *Inst, inst_iterator &Iter,
    bool &TailOkForStoreStrongs,
    const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
  // Only these library routines return their argument. In particular,
  // objc_retainBlock does not necessarily return its argument.
  ARCInstKind Class = GetBasicARCInstKind(Inst);
  switch (Class) {
  case ARCInstKind::FusedRetainAutorelease:
  case ARCInstKind::FusedRetainAutoreleaseRV:
    return false;
  case ARCInstKind::Autorelease:
  case ARCInstKind::AutoreleaseRV:
    return contractAutorelease(F, Inst, Class);
  case ARCInstKind::Retain:
    // Attempt to convert retains to retainrvs if they are next to function
    // calls.
    if (!optimizeRetainCall(F, Inst))
      return false;
    // If we succeed in our optimization, fall through.
    [[fallthrough]];
  case ARCInstKind::RetainRV:
  case ARCInstKind::UnsafeClaimRV: {
    // Return true if this is a bundled retainRV/claimRV call, which is always
    // redundant with the attachedcall in the bundle, and is going to be erased
    // at the end of this pass. This avoids undoing objc-arc-expand and
    // replacing uses of the retainRV/claimRV call's argument with its result.
    if (BundledInsts->contains(Inst))
      return true;

    // If this isn't a bundled call, and the target doesn't need a special
    // inline-asm marker, we're done: return now, and undo objc-arc-expand.
    if (!RVInstMarker)
      return false;
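
    // Note: RVInstMarker comes from the module flag
    // "clang.arc.retainAutoreleasedReturnValueMarker". It names a
    // target-specific no-op instruction that the runtime scans for between
    // the call and the retainRV (e.g. "mov fp, fp" on AArch64; the exact
    // string is whatever the frontend recorded, so treat this example as
    // illustrative only).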

    // The target needs a special inline-asm marker. Insert it.

    BasicBlock::iterator BBI = Inst->getIterator();
    BasicBlock *InstParent = Inst->getParent();

    // Step up to see if the call immediately precedes the RV call.
    // If it's an invoke, we have to cross a block boundary. And we have
    // to carefully dodge no-op instructions.
    do {
      if (BBI == InstParent->begin()) {
        BasicBlock *Pred = InstParent->getSinglePredecessor();
        if (!Pred)
          goto decline_rv_optimization;
        BBI = Pred->getTerminator()->getIterator();
        break;
      }
      --BBI;
    } while (IsNoopInstruction(&*BBI));

    if (GetRCIdentityRoot(&*BBI) == GetArgRCIdentityRoot(Inst)) {
      LLVM_DEBUG(dbgs() << "Adding inline asm marker for the return value "
                           "optimization.\n");
      Changed = true;
      InlineAsm *IA =
          InlineAsm::get(FunctionType::get(Type::getVoidTy(Inst->getContext()),
                                           /*isVarArg=*/false),
                         RVInstMarker->getString(),
                         /*Constraints=*/"", /*hasSideEffects=*/true);

      objcarc::createCallInstWithColors(IA, std::nullopt, "", Inst,
                                        BlockColors);
    }
  decline_rv_optimization:
    return false;
  }
  case ARCInstKind::InitWeak: {
    // objc_initWeak(p, null) => *p = null
    CallInst *CI = cast<CallInst>(Inst);
    if (IsNullOrUndef(CI->getArgOperand(1))) {
      Value *Null = ConstantPointerNull::get(cast<PointerType>(CI->getType()));
      Changed = true;
      new StoreInst(Null, CI->getArgOperand(0), CI);

      LLVM_DEBUG(dbgs() << "OBJCARCContract: Old = " << *CI << "\n"
                        << "                 New = " << *Null << "\n");

      CI->replaceAllUsesWith(Null);
      CI->eraseFromParent();
    }
    return true;
  }
  case ARCInstKind::Release:
    // Try to form an objc store strong from our release. If we fail, there is
    // nothing further to do below, so continue.
    tryToContractReleaseIntoStoreStrong(Inst, Iter, BlockColors);
    return true;
  case ARCInstKind::User:
    // Be conservative if the function has any alloca instructions.
    // Technically we only care about escaping alloca instructions,
    // but this is sufficient to handle some interesting cases.
    if (isa<AllocaInst>(Inst))
      TailOkForStoreStrongs = false;
    return true;
  case ARCInstKind::IntrinsicUser:
    // Remove calls to @llvm.objc.clang.arc.use(...).
    Changed = true;
    Inst->eraseFromParent();
    return true;
  default:
    if (auto *CI = dyn_cast<CallInst>(Inst))
      if (CI->getIntrinsicID() == Intrinsic::objc_clang_arc_noop_use) {
        // Remove calls to @llvm.objc.clang.arc.noop.use(...).
        Changed = true;
        CI->eraseFromParent();
      }
    return true;
  }
}

//===----------------------------------------------------------------------===//
//                              Top Level Driver
//===----------------------------------------------------------------------===//

bool ObjCARCContract::init(Module &M) {
  EP.init(&M);

  // Initialize RVInstMarker.
  RVInstMarker = getRVInstMarker(M);

  return false;
}

bool ObjCARCContract::run(Function &F, AAResults *A, DominatorTree *D) {
  if (!EnableARCOpts)
    return false;

  Changed = CFGChanged = false;
  AA = A;
  DT = D;
  PA.setAA(A);
  BundledRetainClaimRVs BRV(/*ContractPass=*/true);
  BundledInsts = &BRV;

  std::pair<bool, bool> R = BundledInsts->insertAfterInvokes(F, DT);
  Changed |= R.first;
  CFGChanged |= R.second;

  DenseMap<BasicBlock *, ColorVector> BlockColors;
  if (F.hasPersonalityFn() &&
      isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    BlockColors = colorEHFunclets(F);

  LLVM_DEBUG(llvm::dbgs() << "**** ObjCARC Contract ****\n");

  // Track whether it's ok to mark objc_storeStrong calls with the "tail"
  // keyword. Be conservative if the function has variadic arguments.
  // It seems that functions which "return twice" are also unsafe for the
  // "tail" argument, because they are setjmp, which could need to
  // return to an earlier stack state.
  bool TailOkForStoreStrongs =
      !F.isVarArg() && !F.callsFunctionThatReturnsTwice();

  // For ObjC library calls which return their argument, replace uses of the
  // argument with uses of the call return value, if it dominates the use. This
  // reduces register pressure.
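  //
  // For example (illustrative), since objc_retain returns its argument,
  //
  //   %0 = call i8* @llvm.objc.retain(i8* %x)
  //   call void @use(i8* %x)
  //
  // can be rewritten so the second call uses %0 instead of %x, provided %0
  // dominates the use:
  //
  //   %0 = call i8* @llvm.objc.retain(i8* %x)
  //   call void @use(i8* %0)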
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E;) {
    Instruction *Inst = &*I++;

    LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n");

    if (auto *CI = dyn_cast<CallInst>(Inst))
      if (objcarc::hasAttachedCallOpBundle(CI)) {
        BundledInsts->insertRVCallWithColors(&*I, CI, BlockColors);
        --I;
        Changed = true;
      }

    // First try to peephole Inst. If there is nothing further we can do in
    // terms of undoing objc-arc-expand, process the next inst.
    if (tryToPeepholeInstruction(F, Inst, I, TailOkForStoreStrongs,
                                 BlockColors))
      continue;

    // Otherwise, try to undo objc-arc-expand.

    // Don't use GetArgRCIdentityRoot because we don't want to look through bitcasts
    // and such; to do the replacement, the argument must have type i8*.

    // Function for replacing uses of Arg dominated by Inst.
    auto ReplaceArgUses = [Inst, this](Value *Arg) {
      // If we're compiling bugpointed code, don't get in trouble.
      if (!isa<Instruction>(Arg) && !isa<Argument>(Arg))
        return;

      // Look through the uses of the pointer.
      for (Value::use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
           UI != UE;) {
        // Increment UI now, because we may unlink its element.
        Use &U = *UI++;
        unsigned OperandNo = U.getOperandNo();

        // If the call's return value dominates a use of the call's argument
        // value, rewrite the use to use the return value. We check for
        // reachability here because an unreachable call is considered to
        // trivially dominate itself, which would lead us to rewriting its
        // argument in terms of its return value, which would lead to
        // infinite loops in GetArgRCIdentityRoot.
        if (!DT->isReachableFromEntry(U) || !DT->dominates(Inst, U))
          continue;

        Changed = true;
        Instruction *Replacement = Inst;
        Type *UseTy = U.get()->getType();
        if (PHINode *PHI = dyn_cast<PHINode>(U.getUser())) {
          // For PHI nodes, insert the bitcast in the predecessor block.
          unsigned ValNo = PHINode::getIncomingValueNumForOperand(OperandNo);
          BasicBlock *IncomingBB = PHI->getIncomingBlock(ValNo);
          if (Replacement->getType() != UseTy) {
            // A catchswitch is both a pad and a terminator, meaning a basic
            // block with a catchswitch has no insertion point. Keep going up
            // the dominator tree until we find a non-catchswitch.
            BasicBlock *InsertBB = IncomingBB;
            while (isa<CatchSwitchInst>(InsertBB->getFirstNonPHI())) {
              InsertBB = DT->getNode(InsertBB)->getIDom()->getBlock();
            }

            assert(DT->dominates(Inst, &InsertBB->back()) &&
                   "Invalid insertion point for bitcast");
            Replacement =
                new BitCastInst(Replacement, UseTy, "", &InsertBB->back());
          }

          // While we're here, rewrite all edges for this PHI, rather
          // than just one use at a time, to minimize the number of
          // bitcasts we emit.
          for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i)
            if (PHI->getIncomingBlock(i) == IncomingBB) {
              // Keep the UI iterator valid.
              if (UI != UE &&
                  &PHI->getOperandUse(
                      PHINode::getOperandNumForIncomingValue(i)) == &*UI)
                ++UI;
              PHI->setIncomingValue(i, Replacement);
            }
        } else {
          if (Replacement->getType() != UseTy)
            Replacement = new BitCastInst(Replacement, UseTy, "",
                                          cast<Instruction>(U.getUser()));
          U.set(Replacement);
        }
      }
    };
*Arg
= cast
<CallInst
>(Inst
)->getArgOperand(0);
660 Value
*OrigArg
= Arg
;
662 // TODO: Change this to a do-while.
666 // If Arg is a no-op casted pointer, strip one level of casts and iterate.
667 if (const BitCastInst
*BI
= dyn_cast
<BitCastInst
>(Arg
))
668 Arg
= BI
->getOperand(0);
669 else if (isa
<GEPOperator
>(Arg
) &&
670 cast
<GEPOperator
>(Arg
)->hasAllZeroIndices())
671 Arg
= cast
<GEPOperator
>(Arg
)->getPointerOperand();
672 else if (isa
<GlobalAlias
>(Arg
) &&
673 !cast
<GlobalAlias
>(Arg
)->isInterposable())
674 Arg
= cast
<GlobalAlias
>(Arg
)->getAliasee();
676 // If Arg is a PHI node, get PHIs that are equivalent to it and replace
678 if (PHINode
*PN
= dyn_cast
<PHINode
>(Arg
)) {
679 SmallVector
<Value
*, 1> PHIList
;
680 getEquivalentPHIs(*PN
, PHIList
);
681 for (Value
*PHI
: PHIList
)
688 // Replace bitcast users of Arg that are dominated by Inst.
689 SmallVector
<BitCastInst
*, 2> BitCastUsers
;
691 // Add all bitcast users of the function argument first.
692 for (User
*U
: OrigArg
->users())
693 if (auto *BC
= dyn_cast
<BitCastInst
>(U
))
694 BitCastUsers
.push_back(BC
);
696 // Replace the bitcasts with the call return. Iterate until list is empty.
697 while (!BitCastUsers
.empty()) {
698 auto *BC
= BitCastUsers
.pop_back_val();
699 for (User
*U
: BC
->users())
700 if (auto *B
= dyn_cast
<BitCastInst
>(U
))
701 BitCastUsers
.push_back(B
);
707 // If this function has no escaping allocas or suspicious vararg usage,
708 // objc_storeStrong calls can be marked with the "tail" keyword.
709 if (TailOkForStoreStrongs
)
710 for (CallInst
*CI
: StoreStrongCalls
)
712 StoreStrongCalls
.clear();

//===----------------------------------------------------------------------===//
//                             Misc Pass Manager
//===----------------------------------------------------------------------===//

char ObjCARCContractLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCContractLegacyPass, "objc-arc-contract",
                      "ObjC ARC contraction", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ObjCARCContractLegacyPass, "objc-arc-contract",
                    "ObjC ARC contraction", false, false)

void ObjCARCContractLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
}

Pass *llvm::createObjCARCContractPass() {
  return new ObjCARCContractLegacyPass();
}

bool ObjCARCContractLegacyPass::runOnFunction(Function &F) {
  ObjCARCContract OCARCC;
  OCARCC.init(*F.getParent());
  auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  return OCARCC.run(F, AA, DT);
}

PreservedAnalyses ObjCARCContractPass::run(Function &F,
                                           FunctionAnalysisManager &AM) {
  ObjCARCContract OCAC;
  OCAC.init(*F.getParent());

  bool Changed = OCAC.run(F, &AM.getResult<AAManager>(F),
                          &AM.getResult<DominatorTreeAnalysis>(F));
  bool CFGChanged = OCAC.hasCFGChanged();
  if (Changed) {
    PreservedAnalyses PA;
    if (!CFGChanged)
      PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}