//===- ImplicitNullChecks.cpp - Fold null checks into memory accesses -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass turns explicit null checks of the form
//
//   test %r10, %r10
//   je throw_npe
//   movl (%r10), %esi
//
// to
//
//   faulting_load_op("movl (%r10), %esi", throw_npe)
//
// With the help of a runtime that understands the .fault_maps section,
// faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
// a fault.
// Store and LoadStore are also supported.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/FaultMaps.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
64 static cl::opt
<int> PageSize("imp-null-check-page-size",
65 cl::desc("The page size of the target in bytes"),
66 cl::init(4096), cl::Hidden
);
68 static cl::opt
<unsigned> MaxInstsToConsider(
69 "imp-null-max-insts-to-consider",
70 cl::desc("The max number of instructions to consider hoisting loads over "
71 "(the algorithm is quadratic over this number)"),
72 cl::Hidden
, cl::init(8));
74 #define DEBUG_TYPE "implicit-null-checks"
76 STATISTIC(NumImplicitNullChecks
,
77 "Number of explicit null checks made implicit");
81 class ImplicitNullChecks
: public MachineFunctionPass
{
82 /// Return true if \c computeDependence can process \p MI.
83 static bool canHandle(const MachineInstr
*MI
);
85 /// Helper function for \c computeDependence. Return true if \p A
86 /// and \p B do not have any dependences between them, and can be
87 /// re-ordered without changing program semantics.
88 bool canReorder(const MachineInstr
*A
, const MachineInstr
*B
);
90 /// A data type for representing the result computed by \c
91 /// computeDependence. States whether it is okay to reorder the
92 /// instruction passed to \c computeDependence with at most one
94 struct DependenceResult
{
95 /// Can we actually re-order \p MI with \p Insts (see \c
96 /// computeDependence).
99 /// If non-None, then an instruction in \p Insts that also must be
101 Optional
<ArrayRef
<MachineInstr
*>::iterator
> PotentialDependence
;
103 /*implicit*/ DependenceResult(
105 Optional
<ArrayRef
<MachineInstr
*>::iterator
> PotentialDependence
)
106 : CanReorder(CanReorder
), PotentialDependence(PotentialDependence
) {
107 assert((!PotentialDependence
|| CanReorder
) &&
108 "!CanReorder && PotentialDependence.hasValue() not allowed!");
112 /// Compute a result for the following question: can \p MI be
113 /// re-ordered from after \p Insts to before it.
115 /// \c canHandle should return true for all instructions in \p
117 DependenceResult
computeDependence(const MachineInstr
*MI
,
118 ArrayRef
<MachineInstr
*> Block
);
120 /// Represents one null check that can be made implicit.
122 // The memory operation the null check can be folded into.
123 MachineInstr
*MemOperation
;
125 // The instruction actually doing the null check (Ptr != 0).
126 MachineInstr
*CheckOperation
;
128 // The block the check resides in.
129 MachineBasicBlock
*CheckBlock
;
131 // The block branched to if the pointer is non-null.
132 MachineBasicBlock
*NotNullSucc
;
134 // The block branched to if the pointer is null.
135 MachineBasicBlock
*NullSucc
;
137 // If this is non-null, then MemOperation has a dependency on this
138 // instruction; and it needs to be hoisted to execute before MemOperation.
139 MachineInstr
*OnlyDependency
;
142 explicit NullCheck(MachineInstr
*memOperation
, MachineInstr
*checkOperation
,
143 MachineBasicBlock
*checkBlock
,
144 MachineBasicBlock
*notNullSucc
,
145 MachineBasicBlock
*nullSucc
,
146 MachineInstr
*onlyDependency
)
147 : MemOperation(memOperation
), CheckOperation(checkOperation
),
148 CheckBlock(checkBlock
), NotNullSucc(notNullSucc
), NullSucc(nullSucc
),
149 OnlyDependency(onlyDependency
) {}
151 MachineInstr
*getMemOperation() const { return MemOperation
; }
153 MachineInstr
*getCheckOperation() const { return CheckOperation
; }
155 MachineBasicBlock
*getCheckBlock() const { return CheckBlock
; }
157 MachineBasicBlock
*getNotNullSucc() const { return NotNullSucc
; }
159 MachineBasicBlock
*getNullSucc() const { return NullSucc
; }
161 MachineInstr
*getOnlyDependency() const { return OnlyDependency
; }
164 const TargetInstrInfo
*TII
= nullptr;
165 const TargetRegisterInfo
*TRI
= nullptr;
166 AliasAnalysis
*AA
= nullptr;
167 MachineFrameInfo
*MFI
= nullptr;
169 bool analyzeBlockForNullChecks(MachineBasicBlock
&MBB
,
170 SmallVectorImpl
<NullCheck
> &NullCheckList
);
171 MachineInstr
*insertFaultingInstr(MachineInstr
*MI
, MachineBasicBlock
*MBB
,
172 MachineBasicBlock
*HandlerMBB
);
173 void rewriteNullChecks(ArrayRef
<NullCheck
> NullCheckList
);
178 AR_WillAliasEverything
181 /// Returns AR_NoAlias if \p MI memory operation does not alias with
182 /// \p PrevMI, AR_MayAlias if they may alias and AR_WillAliasEverything if
183 /// they may alias and any further memory operation may alias with \p PrevMI.
184 AliasResult
areMemoryOpsAliased(MachineInstr
&MI
, MachineInstr
*PrevMI
);
186 enum SuitabilityResult
{
192 /// Return SR_Suitable if \p MI a memory operation that can be used to
193 /// implicitly null check the value in \p PointerReg, SR_Unsuitable if
194 /// \p MI cannot be used to null check and SR_Impossible if there is
195 /// no sense to continue lookup due to any other instruction will not be able
196 /// to be used. \p PrevInsts is the set of instruction seen since
197 /// the explicit null check on \p PointerReg.
198 SuitabilityResult
isSuitableMemoryOp(MachineInstr
&MI
, unsigned PointerReg
,
199 ArrayRef
<MachineInstr
*> PrevInsts
);
201 /// Return true if \p FaultingMI can be hoisted from after the
202 /// instructions in \p InstsSeenSoFar to before them. Set \p Dependence to a
203 /// non-null value if we also need to (and legally can) hoist a depedency.
204 bool canHoistInst(MachineInstr
*FaultingMI
, unsigned PointerReg
,
205 ArrayRef
<MachineInstr
*> InstsSeenSoFar
,
206 MachineBasicBlock
*NullSucc
, MachineInstr
*&Dependence
);
211 ImplicitNullChecks() : MachineFunctionPass(ID
) {
212 initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry());
215 bool runOnMachineFunction(MachineFunction
&MF
) override
;
217 void getAnalysisUsage(AnalysisUsage
&AU
) const override
{
218 AU
.addRequired
<AAResultsWrapperPass
>();
219 MachineFunctionPass::getAnalysisUsage(AU
);
222 MachineFunctionProperties
getRequiredProperties() const override
{
223 return MachineFunctionProperties().set(
224 MachineFunctionProperties::Property::NoVRegs
);
228 } // end anonymous namespace
230 bool ImplicitNullChecks::canHandle(const MachineInstr
*MI
) {
231 if (MI
->isCall() || MI
->hasUnmodeledSideEffects())
233 auto IsRegMask
= [](const MachineOperand
&MO
) { return MO
.isRegMask(); };
236 assert(!llvm::any_of(MI
->operands(), IsRegMask
) &&
237 "Calls were filtered out above!");
239 auto IsUnordered
= [](MachineMemOperand
*MMO
) { return MMO
->isUnordered(); };
240 return llvm::all_of(MI
->memoperands(), IsUnordered
);
243 ImplicitNullChecks::DependenceResult
244 ImplicitNullChecks::computeDependence(const MachineInstr
*MI
,
245 ArrayRef
<MachineInstr
*> Block
) {
246 assert(llvm::all_of(Block
, canHandle
) && "Check this first!");
247 assert(!is_contained(Block
, MI
) && "Block must be exclusive of MI!");
249 Optional
<ArrayRef
<MachineInstr
*>::iterator
> Dep
;
251 for (auto I
= Block
.begin(), E
= Block
.end(); I
!= E
; ++I
) {
252 if (canReorder(*I
, MI
))
256 // Found one possible dependency, keep track of it.
259 // We found two dependencies, so bail out.
260 return {false, None
};
267 bool ImplicitNullChecks::canReorder(const MachineInstr
*A
,
268 const MachineInstr
*B
) {
269 assert(canHandle(A
) && canHandle(B
) && "Precondition!");
271 // canHandle makes sure that we _can_ correctly analyze the dependencies
272 // between A and B here -- for instance, we should not be dealing with heap
273 // load-store dependencies here.
275 for (auto MOA
: A
->operands()) {
276 if (!(MOA
.isReg() && MOA
.getReg()))
279 unsigned RegA
= MOA
.getReg();
280 for (auto MOB
: B
->operands()) {
281 if (!(MOB
.isReg() && MOB
.getReg()))
284 unsigned RegB
= MOB
.getReg();
286 if (TRI
->regsOverlap(RegA
, RegB
) && (MOA
.isDef() || MOB
.isDef()))
294 bool ImplicitNullChecks::runOnMachineFunction(MachineFunction
&MF
) {
295 TII
= MF
.getSubtarget().getInstrInfo();
296 TRI
= MF
.getRegInfo().getTargetRegisterInfo();
297 MFI
= &MF
.getFrameInfo();
298 AA
= &getAnalysis
<AAResultsWrapperPass
>().getAAResults();
300 SmallVector
<NullCheck
, 16> NullCheckList
;
303 analyzeBlockForNullChecks(MBB
, NullCheckList
);
305 if (!NullCheckList
.empty())
306 rewriteNullChecks(NullCheckList
);
308 return !NullCheckList
.empty();
311 // Return true if any register aliasing \p Reg is live-in into \p MBB.
312 static bool AnyAliasLiveIn(const TargetRegisterInfo
*TRI
,
313 MachineBasicBlock
*MBB
, unsigned Reg
) {
314 for (MCRegAliasIterator
AR(Reg
, TRI
, /*IncludeSelf*/ true); AR
.isValid();
316 if (MBB
->isLiveIn(*AR
))
321 ImplicitNullChecks::AliasResult
322 ImplicitNullChecks::areMemoryOpsAliased(MachineInstr
&MI
,
323 MachineInstr
*PrevMI
) {
324 // If it is not memory access, skip the check.
325 if (!(PrevMI
->mayStore() || PrevMI
->mayLoad()))
327 // Load-Load may alias
328 if (!(MI
.mayStore() || PrevMI
->mayStore()))
330 // We lost info, conservatively alias. If it was store then no sense to
331 // continue because we won't be able to check against it further.
332 if (MI
.memoperands_empty())
333 return MI
.mayStore() ? AR_WillAliasEverything
: AR_MayAlias
;
334 if (PrevMI
->memoperands_empty())
335 return PrevMI
->mayStore() ? AR_WillAliasEverything
: AR_MayAlias
;
337 for (MachineMemOperand
*MMO1
: MI
.memoperands()) {
338 // MMO1 should have a value due it comes from operation we'd like to use
339 // as implicit null check.
340 assert(MMO1
->getValue() && "MMO1 should have a Value!");
341 for (MachineMemOperand
*MMO2
: PrevMI
->memoperands()) {
342 if (const PseudoSourceValue
*PSV
= MMO2
->getPseudoValue()) {
343 if (PSV
->mayAlias(MFI
))
347 llvm::AliasResult AAResult
= AA
->alias(
348 MemoryLocation(MMO1
->getValue(), MemoryLocation::UnknownSize
,
350 MemoryLocation(MMO2
->getValue(), MemoryLocation::UnknownSize
,
352 if (AAResult
!= NoAlias
)
359 ImplicitNullChecks::SuitabilityResult
360 ImplicitNullChecks::isSuitableMemoryOp(MachineInstr
&MI
, unsigned PointerReg
,
361 ArrayRef
<MachineInstr
*> PrevInsts
) {
365 if (!TII
->getMemOpBaseRegImmOfs(MI
, BaseReg
, Offset
, TRI
) ||
366 BaseReg
!= PointerReg
)
367 return SR_Unsuitable
;
369 // We want the mem access to be issued at a sane offset from PointerReg,
370 // so that if PointerReg is null then the access reliably page faults.
371 if (!((MI
.mayLoad() || MI
.mayStore()) && !MI
.isPredicable() &&
372 -PageSize
< Offset
&& Offset
< PageSize
))
373 return SR_Unsuitable
;
375 // Finally, check whether the current memory access aliases with previous one.
376 for (auto *PrevMI
: PrevInsts
) {
377 AliasResult AR
= areMemoryOpsAliased(MI
, PrevMI
);
378 if (AR
== AR_WillAliasEverything
)
379 return SR_Impossible
;
380 if (AR
== AR_MayAlias
)
381 return SR_Unsuitable
;
386 bool ImplicitNullChecks::canHoistInst(MachineInstr
*FaultingMI
,
388 ArrayRef
<MachineInstr
*> InstsSeenSoFar
,
389 MachineBasicBlock
*NullSucc
,
390 MachineInstr
*&Dependence
) {
391 auto DepResult
= computeDependence(FaultingMI
, InstsSeenSoFar
);
392 if (!DepResult
.CanReorder
)
395 if (!DepResult
.PotentialDependence
) {
396 Dependence
= nullptr;
400 auto DependenceItr
= *DepResult
.PotentialDependence
;
401 auto *DependenceMI
= *DependenceItr
;
403 // We don't want to reason about speculating loads. Note -- at this point
404 // we should have already filtered out all of the other non-speculatable
405 // things, like calls and stores.
406 // We also do not want to hoist stores because it might change the memory
407 // while the FaultingMI may result in faulting.
408 assert(canHandle(DependenceMI
) && "Should never have reached here!");
409 if (DependenceMI
->mayLoadOrStore())
412 for (auto &DependenceMO
: DependenceMI
->operands()) {
413 if (!(DependenceMO
.isReg() && DependenceMO
.getReg()))
416 // Make sure that we won't clobber any live ins to the sibling block by
417 // hoisting Dependency. For instance, we can't hoist INST to before the
418 // null check (even if it safe, and does not violate any dependencies in
419 // the non_null_block) if %rdx is live in to _null_block.
427 // This restriction does not apply to the faulting load inst because in
428 // case the pointer loaded from is in the null page, the load will not
429 // semantically execute, and affect machine state. That is, if the load
430 // was loading into %rax and it faults, the value of %rax should stay the
431 // same as it would have been had the load not have executed and we'd have
432 // branched to NullSucc directly.
433 if (AnyAliasLiveIn(TRI
, NullSucc
, DependenceMO
.getReg()))
436 // The Dependency can't be re-defining the base register -- then we won't
437 // get the memory operation on the address we want. This is already
438 // checked in \c IsSuitableMemoryOp.
439 assert(!(DependenceMO
.isDef() &&
440 TRI
->regsOverlap(DependenceMO
.getReg(), PointerReg
)) &&
441 "Should have been checked before!");
445 computeDependence(DependenceMI
, {InstsSeenSoFar
.begin(), DependenceItr
});
447 if (!DepDepResult
.CanReorder
|| DepDepResult
.PotentialDependence
)
450 Dependence
= DependenceMI
;
454 /// Analyze MBB to check if its terminating branch can be turned into an
455 /// implicit null check. If yes, append a description of the said null check to
456 /// NullCheckList and return true, else return false.
457 bool ImplicitNullChecks::analyzeBlockForNullChecks(
458 MachineBasicBlock
&MBB
, SmallVectorImpl
<NullCheck
> &NullCheckList
) {
459 using MachineBranchPredicate
= TargetInstrInfo::MachineBranchPredicate
;
461 MDNode
*BranchMD
= nullptr;
462 if (auto *BB
= MBB
.getBasicBlock())
463 BranchMD
= BB
->getTerminator()->getMetadata(LLVMContext::MD_make_implicit
);
468 MachineBranchPredicate MBP
;
470 if (TII
->analyzeBranchPredicate(MBB
, MBP
, true))
473 // Is the predicate comparing an integer to zero?
474 if (!(MBP
.LHS
.isReg() && MBP
.RHS
.isImm() && MBP
.RHS
.getImm() == 0 &&
475 (MBP
.Predicate
== MachineBranchPredicate::PRED_NE
||
476 MBP
.Predicate
== MachineBranchPredicate::PRED_EQ
)))
479 // If we cannot erase the test instruction itself, then making the null check
480 // implicit does not buy us much.
481 if (!MBP
.SingleUseCondition
)
484 MachineBasicBlock
*NotNullSucc
, *NullSucc
;
486 if (MBP
.Predicate
== MachineBranchPredicate::PRED_NE
) {
487 NotNullSucc
= MBP
.TrueDest
;
488 NullSucc
= MBP
.FalseDest
;
490 NotNullSucc
= MBP
.FalseDest
;
491 NullSucc
= MBP
.TrueDest
;
494 // We handle the simplest case for now. We can potentially do better by using
495 // the machine dominator tree.
496 if (NotNullSucc
->pred_size() != 1)
499 // To prevent the invalid transformation of the following code:
512 // faulting_load_op("movl (%rax), %r10", throw_npe)
515 // we must ensure that there are no instructions between the 'test' and
516 // conditional jump that modify %rax.
517 const unsigned PointerReg
= MBP
.LHS
.getReg();
519 assert(MBP
.ConditionDef
->getParent() == &MBB
&& "Should be in basic block");
521 for (auto I
= MBB
.rbegin(); MBP
.ConditionDef
!= &*I
; ++I
)
522 if (I
->modifiesRegister(PointerReg
, TRI
))
525 // Starting with a code fragment like:
531 // callq throw_NullPointerException
537 // Def = Load (%rax + <offset>)
541 // we want to end up with
543 // Def = FaultingLoad (%rax + <offset>), LblNull
544 // jmp LblNotNull ;; explicit or fallthrough
552 // callq throw_NullPointerException
555 // To see why this is legal, consider the two possibilities:
557 // 1. %rax is null: since we constrain <offset> to be less than PageSize, the
558 // load instruction dereferences the null page, causing a segmentation
561 // 2. %rax is not null: in this case we know that the load cannot fault, as
562 // otherwise the load would've faulted in the original program too and the
563 // original program would've been undefined.
565 // This reasoning cannot be extended to justify hoisting through arbitrary
566 // control flow. For instance, in the example below (in pseudo-C)
568 // if (ptr == null) { throw_npe(); unreachable; }
569 // if (some_cond) { return 42; }
570 // v = ptr->field; // LD
573 // we cannot (without code duplication) use the load marked "LD" to null check
574 // ptr -- clause (2) above does not apply in this case. In the above program
575 // the safety of ptr->field can be dependent on some_cond; and, for instance,
576 // ptr could be some non-null invalid reference that never gets loaded from
577 // because some_cond is always true.
579 SmallVector
<MachineInstr
*, 8> InstsSeenSoFar
;
581 for (auto &MI
: *NotNullSucc
) {
582 if (!canHandle(&MI
) || InstsSeenSoFar
.size() >= MaxInstsToConsider
)
585 MachineInstr
*Dependence
;
586 SuitabilityResult SR
= isSuitableMemoryOp(MI
, PointerReg
, InstsSeenSoFar
);
587 if (SR
== SR_Impossible
)
589 if (SR
== SR_Suitable
&&
590 canHoistInst(&MI
, PointerReg
, InstsSeenSoFar
, NullSucc
, Dependence
)) {
591 NullCheckList
.emplace_back(&MI
, MBP
.ConditionDef
, &MBB
, NotNullSucc
,
592 NullSucc
, Dependence
);
596 // If MI re-defines the PointerReg then we cannot move further.
597 if (llvm::any_of(MI
.operands(), [&](MachineOperand
&MO
) {
598 return MO
.isReg() && MO
.getReg() && MO
.isDef() &&
599 TRI
->regsOverlap(MO
.getReg(), PointerReg
);
602 InstsSeenSoFar
.push_back(&MI
);
608 /// Wrap a machine instruction, MI, into a FAULTING machine instruction.
609 /// The FAULTING instruction does the same load/store as MI
610 /// (defining the same register), and branches to HandlerMBB if the mem access
611 /// faults. The FAULTING instruction is inserted at the end of MBB.
612 MachineInstr
*ImplicitNullChecks::insertFaultingInstr(
613 MachineInstr
*MI
, MachineBasicBlock
*MBB
, MachineBasicBlock
*HandlerMBB
) {
614 const unsigned NoRegister
= 0; // Guaranteed to be the NoRegister value for
618 unsigned NumDefs
= MI
->getDesc().getNumDefs();
619 assert(NumDefs
<= 1 && "other cases unhandled!");
621 unsigned DefReg
= NoRegister
;
623 DefReg
= MI
->getOperand(0).getReg();
624 assert(NumDefs
== 1 && "expected exactly one def!");
627 FaultMaps::FaultKind FK
;
630 MI
->mayStore() ? FaultMaps::FaultingLoadStore
: FaultMaps::FaultingLoad
;
632 FK
= FaultMaps::FaultingStore
;
634 auto MIB
= BuildMI(MBB
, DL
, TII
->get(TargetOpcode::FAULTING_OP
), DefReg
)
637 .addImm(MI
->getOpcode());
639 for (auto &MO
: MI
->uses()) {
641 MachineOperand NewMO
= MO
;
643 NewMO
.setIsKill(false);
645 assert(MO
.isDef() && "Expected def or use");
646 NewMO
.setIsDead(false);
654 MIB
.setMemRefs(MI
->memoperands());
659 /// Rewrite the null checks in NullCheckList into implicit null checks.
660 void ImplicitNullChecks::rewriteNullChecks(
661 ArrayRef
<ImplicitNullChecks::NullCheck
> NullCheckList
) {
664 for (auto &NC
: NullCheckList
) {
665 // Remove the conditional branch dependent on the null check.
666 unsigned BranchesRemoved
= TII
->removeBranch(*NC
.getCheckBlock());
667 (void)BranchesRemoved
;
668 assert(BranchesRemoved
> 0 && "expected at least one branch!");
670 if (auto *DepMI
= NC
.getOnlyDependency()) {
671 DepMI
->removeFromParent();
672 NC
.getCheckBlock()->insert(NC
.getCheckBlock()->end(), DepMI
);
675 // Insert a faulting instruction where the conditional branch was
676 // originally. We check earlier ensures that this bit of code motion
677 // is legal. We do not touch the successors list for any basic block
678 // since we haven't changed control flow, we've just made it implicit.
679 MachineInstr
*FaultingInstr
= insertFaultingInstr(
680 NC
.getMemOperation(), NC
.getCheckBlock(), NC
.getNullSucc());
681 // Now the values defined by MemOperation, if any, are live-in of
682 // the block of MemOperation.
683 // The original operation may define implicit-defs alongside
685 MachineBasicBlock
*MBB
= NC
.getMemOperation()->getParent();
686 for (const MachineOperand
&MO
: FaultingInstr
->operands()) {
687 if (!MO
.isReg() || !MO
.isDef())
689 unsigned Reg
= MO
.getReg();
690 if (!Reg
|| MBB
->isLiveIn(Reg
))
695 if (auto *DepMI
= NC
.getOnlyDependency()) {
696 for (auto &MO
: DepMI
->operands()) {
697 if (!MO
.isReg() || !MO
.getReg() || !MO
.isDef())
699 if (!NC
.getNotNullSucc()->isLiveIn(MO
.getReg()))
700 NC
.getNotNullSucc()->addLiveIn(MO
.getReg());
704 NC
.getMemOperation()->eraseFromParent();
705 NC
.getCheckOperation()->eraseFromParent();
707 // Insert an *unconditional* branch to not-null successor.
708 TII
->insertBranch(*NC
.getCheckBlock(), NC
.getNotNullSucc(), nullptr,
711 NumImplicitNullChecks
++;
715 char ImplicitNullChecks::ID
= 0;
717 char &llvm::ImplicitNullChecksID
= ImplicitNullChecks::ID
;
719 INITIALIZE_PASS_BEGIN(ImplicitNullChecks
, DEBUG_TYPE
,
720 "Implicit null checks", false, false)
721 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass
)
722 INITIALIZE_PASS_END(ImplicitNullChecks
, DEBUG_TYPE
,
723 "Implicit null checks", false, false)