//===- SCCP.cpp - Sparse Conditional Constant Propagation ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements sparse conditional constant propagation and merging:
//
// Specifically, this:
//   * Assumes values are constant unless proven otherwise
//   * Assumes BasicBlocks are dead unless proven otherwise
//   * Proves values to be constant, and replaces them with constants
//   * Proves conditional branches to be unconditional
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/SCCP.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueLattice.h"
#include "llvm/Analysis/ValueLatticeUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/PredicateInfo.h"
#include <cassert>
#include <utility>
#include <vector>

using namespace llvm;
#define DEBUG_TYPE "sccp"

STATISTIC(NumInstRemoved, "Number of instructions removed");
STATISTIC(NumDeadBlocks, "Number of basic blocks unreachable");

STATISTIC(IPNumInstRemoved, "Number of instructions removed by IPSCCP");
STATISTIC(IPNumArgsElimed, "Number of arguments constant propagated by IPSCCP");
STATISTIC(IPNumGlobalConst, "Number of globals found to be constant by IPSCCP");
namespace {

/// LatticeVal class - This class represents the different lattice values that
/// an LLVM value may occupy.  It is a simple class with value semantics.
class LatticeVal {
  enum LatticeValueTy {
    /// unknown - This LLVM Value has no known value yet.
    unknown,

    /// constant - This LLVM Value has a specific constant value.
    constant,

    /// forcedconstant - This LLVM Value was thought to be undef until
    /// ResolvedUndefsIn.  This is treated just like 'constant', but if merged
    /// with another (different) constant, it goes to overdefined, instead of
    /// asserting.
    forcedconstant,

    /// overdefined - This instruction is not known to be constant, and we know
    /// it has a value.
    overdefined
  };

  /// Val: This stores the current lattice value along with the Constant* for
  /// the constant if this is a 'constant' or 'forcedconstant' value.
  PointerIntPair<Constant *, 2, LatticeValueTy> Val;

  LatticeValueTy getLatticeValue() const {
    return Val.getInt();
  }

public:
  LatticeVal() : Val(nullptr, unknown) {}

  bool isUnknown() const { return getLatticeValue() == unknown; }

  bool isConstant() const {
    return getLatticeValue() == constant || getLatticeValue() == forcedconstant;
  }

  bool isOverdefined() const { return getLatticeValue() == overdefined; }

  Constant *getConstant() const {
    assert(isConstant() && "Cannot get the constant of a non-constant!");
    return Val.getPointer();
  }

  /// markOverdefined - Return true if this is a change in status.
  bool markOverdefined() {
    if (isOverdefined())
      return false;

    Val.setInt(overdefined);
    return true;
  }

  /// markConstant - Return true if this is a change in status.
  bool markConstant(Constant *V) {
    if (getLatticeValue() == constant) { // Constant but not forcedconstant.
      assert(getConstant() == V && "Marking constant with different value");
      return false;
    }

    if (isUnknown()) {
      Val.setInt(constant);
      assert(V && "Marking constant with NULL");
      Val.setPointer(V);
    } else {
      assert(getLatticeValue() == forcedconstant &&
             "Cannot move from overdefined to constant!");
      // Stay at forcedconstant if the constant is the same.
      if (V == getConstant()) return false;

      // Otherwise, we go to overdefined.  Assumptions made based on the
      // forced value are possibly wrong.  Assuming this is another constant
      // could expose a contradiction.
      Val.setInt(overdefined);
    }
    return true;
  }

  /// getConstantInt - If this is a constant with a ConstantInt value, return
  /// it, otherwise return null.
  ConstantInt *getConstantInt() const {
    if (isConstant())
      return dyn_cast<ConstantInt>(getConstant());
    return nullptr;
  }

  /// getBlockAddress - If this is a constant with a BlockAddress value, return
  /// it, otherwise return null.
  BlockAddress *getBlockAddress() const {
    if (isConstant())
      return dyn_cast<BlockAddress>(getConstant());
    return nullptr;
  }

  void markForcedConstant(Constant *V) {
    assert(isUnknown() && "Can't force a defined value!");
    Val.setInt(forcedconstant);
    Val.setPointer(V);
  }

  ValueLatticeElement toValueLattice() const {
    if (isOverdefined())
      return ValueLatticeElement::getOverdefined();
    if (isConstant())
      return ValueLatticeElement::get(getConstant());
    return ValueLatticeElement();
  }
};
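
// Illustrative only (not part of the original pass): a sketch of typical
// LatticeVal transitions as the solver learns facts.  C7 and C9 stand for two
// distinct hypothetical ConstantInt values.
//
//   LatticeVal LV;              // starts 'unknown'
//   LV.markConstant(C7);        // unknown -> constant(7), returns true
//   LV.markConstant(C7);        // same constant, no change, returns false
//   LV.markOverdefined();       // constant -> overdefined, returns true
//
// A forcedconstant only arises from ResolvedUndefsIn and degrades to
// overdefined on a conflicting merge instead of asserting:
//
//   LatticeVal FV;
//   FV.markForcedConstant(C7);  // unknown -> forcedconstant(7)
//   FV.markConstant(C9);        // different constant -> overdefined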
//===----------------------------------------------------------------------===//
//
/// SCCPSolver - This class is a general purpose solver for Sparse Conditional
/// Constant Propagation.
///
class SCCPSolver : public InstVisitor<SCCPSolver> {
  const DataLayout &DL;
  const TargetLibraryInfo *TLI;
  SmallPtrSet<BasicBlock *, 8> BBExecutable; // The BBs that are executable.
  DenseMap<Value *, LatticeVal> ValueState;  // The state each value is in.
  // The state each parameter is in.
  DenseMap<Value *, ValueLatticeElement> ParamState;

  /// StructValueState - This maintains ValueState for values that have
  /// StructType, for example for formal arguments, calls, insertelement, etc.
  DenseMap<std::pair<Value *, unsigned>, LatticeVal> StructValueState;

  /// GlobalValue - If we are tracking any values for the contents of a global
  /// variable, we keep a mapping from the constant accessor to the element of
  /// the global, to the currently known value.  If the value becomes
  /// overdefined, its entry is simply removed from this map.
  DenseMap<GlobalVariable *, LatticeVal> TrackedGlobals;

  /// TrackedRetVals - If we are tracking arguments into and the return
  /// value out of a function, it will have an entry in this map, indicating
  /// what the known return value for the function is.
  MapVector<Function *, LatticeVal> TrackedRetVals;

  /// TrackedMultipleRetVals - Same as TrackedRetVals, but used for functions
  /// that return multiple values.
  MapVector<std::pair<Function *, unsigned>, LatticeVal> TrackedMultipleRetVals;

  /// MRVFunctionsTracked - Each function in TrackedMultipleRetVals is
  /// represented here for efficient lookup.
  SmallPtrSet<Function *, 16> MRVFunctionsTracked;

  /// MustTailFunctions - Each function here is a callee of non-removable
  /// musttail call site.
  SmallPtrSet<Function *, 16> MustTailCallees;

  /// TrackingIncomingArguments - This is the set of functions for whose
  /// arguments we make optimistic assumptions about and try to prove as
  /// constants.
  SmallPtrSet<Function *, 16> TrackingIncomingArguments;

  /// The reason for two worklists is that overdefined is the lowest state
  /// on the lattice, and moving things to overdefined as fast as possible
  /// makes SCCP converge much faster.
  ///
  /// By having a separate worklist, we accomplish this because everything
  /// possibly overdefined will become overdefined at the soonest possible
  /// point.
  SmallVector<Value *, 64> OverdefinedInstWorkList;
  SmallVector<Value *, 64> InstWorkList;

  // The BasicBlock work list
  SmallVector<BasicBlock *, 64> BBWorkList;

  /// KnownFeasibleEdges - Entries in this set are edges which have already had
  /// PHI nodes retriggered.
  using Edge = std::pair<BasicBlock *, BasicBlock *>;
  DenseSet<Edge> KnownFeasibleEdges;

  DenseMap<Function *, AnalysisResultsForFn> AnalysisResults;
  DenseMap<Value *, SmallPtrSet<User *, 2>> AdditionalUsers;

public:
  void addAnalysis(Function &F, AnalysisResultsForFn A) {
    AnalysisResults.insert({&F, std::move(A)});
  }

  const PredicateBase *getPredicateInfoFor(Instruction *I) {
    auto A = AnalysisResults.find(I->getParent()->getParent());
    if (A == AnalysisResults.end())
      return nullptr;
    return A->second.PredInfo->getPredicateInfoFor(I);
  }

  DomTreeUpdater getDTU(Function &F) {
    auto A = AnalysisResults.find(&F);
    assert(A != AnalysisResults.end() && "Need analysis results for function.");
    return {A->second.DT, A->second.PDT, DomTreeUpdater::UpdateStrategy::Lazy};
  }

  SCCPSolver(const DataLayout &DL, const TargetLibraryInfo *tli)
      : DL(DL), TLI(tli) {}
  /// MarkBlockExecutable - This method can be used by clients to mark all of
  /// the blocks that are known to be intrinsically live in the processed unit.
  ///
  /// This returns true if the block was not considered live before.
  bool MarkBlockExecutable(BasicBlock *BB) {
    if (!BBExecutable.insert(BB).second)
      return false;
    LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n');
    BBWorkList.push_back(BB); // Add the block to the work list!
    return true;
  }

  /// TrackValueOfGlobalVariable - Clients can use this method to
  /// inform the SCCPSolver that it should track loads and stores to the
  /// specified global variable if it can.  This is only legal to call if
  /// performing Interprocedural SCCP.
  void TrackValueOfGlobalVariable(GlobalVariable *GV) {
    // We only track the contents of scalar globals.
    if (GV->getValueType()->isSingleValueType()) {
      LatticeVal &IV = TrackedGlobals[GV];
      if (!isa<UndefValue>(GV->getInitializer()))
        IV.markConstant(GV->getInitializer());
    }
  }

  /// AddTrackedFunction - If the SCCP solver is supposed to track calls into
  /// and out of the specified function (which cannot have its address taken),
  /// this method must be called.
  void AddTrackedFunction(Function *F) {
    // Add an entry, F -> undef.
    if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
      MRVFunctionsTracked.insert(F);
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        TrackedMultipleRetVals.insert(std::make_pair(std::make_pair(F, i),
                                                     LatticeVal()));
    } else
      TrackedRetVals.insert(std::make_pair(F, LatticeVal()));
  }

  /// AddMustTailCallee - Record that the SCCP solver found this function to be
  /// called from a non-removable musttail call site.
  void AddMustTailCallee(Function *F) {
    MustTailCallees.insert(F);
  }

  /// Returns true if the given function is called from a non-removable
  /// musttail call site.
  bool isMustTailCallee(Function *F) {
    return MustTailCallees.count(F);
  }

  void AddArgumentTrackedFunction(Function *F) {
    TrackingIncomingArguments.insert(F);
  }

  /// Returns true if the given function is in the solver's set of
  /// argument-tracked functions.
  bool isArgumentTrackedFunction(Function *F) {
    return TrackingIncomingArguments.count(F);
  }

  /// Solve - Solve for constants and executable blocks.
  void Solve();

  /// ResolvedUndefsIn - While solving the dataflow for a function, we assume
  /// that branches on undef values cannot reach any of their successors.
  /// However, this is not a safe assumption.  After we solve dataflow, this
  /// method should be used to handle this.  If this returns true, the solver
  /// should be rerun.
  bool ResolvedUndefsIn(Function &F);
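
  // Illustrative only (not part of the original interface documentation): a
  // minimal sketch of how a client such as the SCCP pass is expected to drive
  // the solver for a single function F; the names here are assumed.
  //
  //   SCCPSolver Solver(DL, TLI);
  //   Solver.MarkBlockExecutable(&F.front());
  //   for (Argument &AI : F.args())
  //     Solver.markOverdefined(&AI);        // no interprocedural facts
  //   bool ResolvedUndefs = true;
  //   while (ResolvedUndefs) {
  //     Solver.Solve();
  //     ResolvedUndefs = Solver.ResolvedUndefsIn(F);
  //   }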
  bool isBlockExecutable(BasicBlock *BB) const {
    return BBExecutable.count(BB);
  }

  // isEdgeFeasible - Return true if the control flow edge from the 'From'
  // basic block to the 'To' basic block is currently feasible.
  bool isEdgeFeasible(BasicBlock *From, BasicBlock *To);

  std::vector<LatticeVal> getStructLatticeValueFor(Value *V) const {
    std::vector<LatticeVal> StructValues;
    auto *STy = dyn_cast<StructType>(V->getType());
    assert(STy && "getStructLatticeValueFor() can be called only on structs");
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto I = StructValueState.find(std::make_pair(V, i));
      assert(I != StructValueState.end() && "Value not in valuemap!");
      StructValues.push_back(I->second);
    }
    return StructValues;
  }

  const LatticeVal &getLatticeValueFor(Value *V) const {
    assert(!V->getType()->isStructTy() &&
           "Should use getStructLatticeValueFor");
    DenseMap<Value *, LatticeVal>::const_iterator I = ValueState.find(V);
    assert(I != ValueState.end() &&
           "V not found in ValueState nor ParamState map!");
    return I->second;
  }

  /// getTrackedRetVals - Get the inferred return value map.
  const MapVector<Function *, LatticeVal> &getTrackedRetVals() {
    return TrackedRetVals;
  }

  /// getTrackedGlobals - Get and return the set of inferred initializers for
  /// global variables.
  const DenseMap<GlobalVariable *, LatticeVal> &getTrackedGlobals() {
    return TrackedGlobals;
  }

  /// getMRVFunctionsTracked - Get the set of functions which return multiple
  /// values tracked by the pass.
  const SmallPtrSet<Function *, 16> getMRVFunctionsTracked() {
    return MRVFunctionsTracked;
  }

  /// getMustTailCallees - Get the set of functions which are called
  /// from non-removable musttail call sites.
  const SmallPtrSet<Function *, 16> getMustTailCallees() {
    return MustTailCallees;
  }
  /// markOverdefined - Mark the specified value overdefined.  This
  /// works with both scalars and structs.
  void markOverdefined(Value *V) {
    if (auto *STy = dyn_cast<StructType>(V->getType()))
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        markOverdefined(getStructValueState(V, i), V);
    else
      markOverdefined(ValueState[V], V);
  }

  // isStructLatticeConstant - Return true if all the lattice values
  // corresponding to elements of the structure are not overdefined,
  // false otherwise.
  bool isStructLatticeConstant(Function *F, StructType *STy) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      const auto &It = TrackedMultipleRetVals.find(std::make_pair(F, i));
      assert(It != TrackedMultipleRetVals.end());
      LatticeVal LV = It->second;
      if (LV.isOverdefined())
        return false;
    }
    return true;
  }

private:
  // pushToWorkList - Helper for markConstant/markForcedConstant/markOverdefined
  void pushToWorkList(LatticeVal &IV, Value *V) {
    if (IV.isOverdefined())
      return OverdefinedInstWorkList.push_back(V);
    InstWorkList.push_back(V);
  }

  // markConstant - Make a value be marked as "constant".  If the value
  // is not already a constant, add it to the instruction work list so that
  // the users of the instruction are updated later.
  bool markConstant(LatticeVal &IV, Value *V, Constant *C) {
    if (!IV.markConstant(C)) return false;
    LLVM_DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n');
    pushToWorkList(IV, V);
    return true;
  }

  bool markConstant(Value *V, Constant *C) {
    assert(!V->getType()->isStructTy() && "structs should use mergeInValue");
    return markConstant(ValueState[V], V, C);
  }

  void markForcedConstant(Value *V, Constant *C) {
    assert(!V->getType()->isStructTy() && "structs should use mergeInValue");
    LatticeVal &IV = ValueState[V];
    IV.markForcedConstant(C);
    LLVM_DEBUG(dbgs() << "markForcedConstant: " << *C << ": " << *V << '\n');
    pushToWorkList(IV, V);
  }

  // markOverdefined - Make a value be marked as "overdefined".  If the
  // value is not already overdefined, add it to the overdefined instruction
  // work list so that the users of the instruction are updated later.
  bool markOverdefined(LatticeVal &IV, Value *V) {
    if (!IV.markOverdefined()) return false;

    LLVM_DEBUG(dbgs() << "markOverdefined: ";
               if (auto *F = dyn_cast<Function>(V)) dbgs()
                   << "Function '" << F->getName() << "'\n";
               else dbgs() << *V << '\n');
    // Only instructions go on the work list
    pushToWorkList(IV, V);
    return true;
  }

  bool mergeInValue(LatticeVal &IV, Value *V, LatticeVal MergeWithV) {
    if (IV.isOverdefined() || MergeWithV.isUnknown())
      return false; // Noop.
    if (MergeWithV.isOverdefined())
      return markOverdefined(IV, V);
    if (IV.isUnknown())
      return markConstant(IV, V, MergeWithV.getConstant());
    if (IV.getConstant() != MergeWithV.getConstant())
      return markOverdefined(IV, V);
    return false;
  }

  bool mergeInValue(Value *V, LatticeVal MergeWithV) {
    assert(!V->getType()->isStructTy() &&
           "non-structs should use markConstant");
    return mergeInValue(ValueState[V], V, MergeWithV);
  }
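
  // Illustrative summary (not part of the original file) of the merge rules
  // above, written as "current (+) incoming -> result":
  //
  //   overdefined (+) anything             -> overdefined  (reports no change)
  //   anything    (+) unknown              -> unchanged    (reports no change)
  //   unknown     (+) constant C           -> constant C
  //   constant C  (+) constant C           -> constant C   (reports no change)
  //   constant C1 (+) constant C2, C1!=C2  -> overdefined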
  /// getValueState - Return the LatticeVal object that corresponds to the
  /// value.  This function handles the case when the value hasn't been seen
  /// yet by properly seeding constants etc.
  LatticeVal &getValueState(Value *V) {
    assert(!V->getType()->isStructTy() && "Should use getStructValueState");

    std::pair<DenseMap<Value *, LatticeVal>::iterator, bool> I =
        ValueState.insert(std::make_pair(V, LatticeVal()));
    LatticeVal &LV = I.first->second;

    if (!I.second)
      return LV; // Common case, already in the map.

    if (auto *C = dyn_cast<Constant>(V)) {
      // Undef values remain unknown.
      if (!isa<UndefValue>(V))
        LV.markConstant(C); // Constants are constant
    }

    // All others are underdefined by default.
    return LV;
  }

  ValueLatticeElement &getParamState(Value *V) {
    assert(!V->getType()->isStructTy() && "Should use getStructValueState");

    std::pair<DenseMap<Value *, ValueLatticeElement>::iterator, bool>
        PI = ParamState.insert(std::make_pair(V, ValueLatticeElement()));
    ValueLatticeElement &LV = PI.first->second;
    if (PI.second)
      LV = getValueState(V).toValueLattice();

    return LV;
  }

  /// getStructValueState - Return the LatticeVal object that corresponds to
  /// the value/field pair.  This function handles the case when the value
  /// hasn't been seen yet by properly seeding constants etc.
  LatticeVal &getStructValueState(Value *V, unsigned i) {
    assert(V->getType()->isStructTy() && "Should use getValueState");
    assert(i < cast<StructType>(V->getType())->getNumElements() &&
           "Invalid element #");

    std::pair<DenseMap<std::pair<Value *, unsigned>, LatticeVal>::iterator,
              bool> I = StructValueState.insert(
                  std::make_pair(std::make_pair(V, i), LatticeVal()));
    LatticeVal &LV = I.first->second;

    if (!I.second)
      return LV; // Common case, already in the map.

    if (auto *C = dyn_cast<Constant>(V)) {
      Constant *Elt = C->getAggregateElement(i);

      if (!Elt)
        LV.markOverdefined();    // Unknown sort of constant.
      else if (isa<UndefValue>(Elt))
        ; // Undef values remain unknown.
      else
        LV.markConstant(Elt);    // Constants are constant.
    }

    // All others are underdefined by default.
    return LV;
  }
  /// markEdgeExecutable - Mark a basic block as executable, adding it to the
  /// BB work list if it is not already executable.
  bool markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) {
    if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
      return false; // This edge is already known to be executable!

    if (!MarkBlockExecutable(Dest)) {
      // If the destination is already executable, we just made an *edge*
      // feasible that wasn't before.  Revisit the PHI nodes in the block
      // because they have potentially new operands.
      LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
                        << " -> " << Dest->getName() << '\n');

      for (PHINode &PN : Dest->phis())
        visitPHINode(PN);
    }
    return true;
  }
  // getFeasibleSuccessors - Return a vector of booleans to indicate which
  // successors are reachable from a given terminator instruction.
  void getFeasibleSuccessors(Instruction &TI, SmallVectorImpl<bool> &Succs);

  // OperandChangedState - This method is invoked on all of the users of an
  // instruction that was just changed state somehow.  Based on this
  // information, we need to update the specified user of this instruction.
  void OperandChangedState(Instruction *I) {
    if (BBExecutable.count(I->getParent())) // Inst is executable?
      visit(*I);
  }

  // Add U as additional user of V.
  void addAdditionalUser(Value *V, User *U) {
    auto Iter = AdditionalUsers.insert({V, {}});
    Iter.first->second.insert(U);
  }

  // Mark I's users as changed, including AdditionalUsers.
  void markUsersAsChanged(Value *I) {
    for (User *U : I->users())
      if (auto *UI = dyn_cast<Instruction>(U))
        OperandChangedState(UI);

    auto Iter = AdditionalUsers.find(I);
    if (Iter != AdditionalUsers.end()) {
      for (User *U : Iter->second)
        if (auto *UI = dyn_cast<Instruction>(U))
          OperandChangedState(UI);
    }
  }

  friend class InstVisitor<SCCPSolver>;

  // visit implementations - Something changed in this instruction.  Either an
  // operand made a transition, or the instruction is newly executable.  Change
  // the value type of I to reflect these changes if appropriate.
  void visitPHINode(PHINode &I);

  void visitReturnInst(ReturnInst &I);
  void visitTerminator(Instruction &TI);

  void visitCastInst(CastInst &I);
  void visitSelectInst(SelectInst &I);
  void visitUnaryOperator(Instruction &I);
  void visitBinaryOperator(Instruction &I);
  void visitCmpInst(CmpInst &I);
  void visitExtractValueInst(ExtractValueInst &EVI);
  void visitInsertValueInst(InsertValueInst &IVI);

  void visitCatchSwitchInst(CatchSwitchInst &CPI) {
    markOverdefined(&CPI);
    visitTerminator(CPI);
  }

  // Instructions that cannot be folded away.

  void visitStoreInst(StoreInst &I);
  void visitLoadInst(LoadInst &I);
  void visitGetElementPtrInst(GetElementPtrInst &I);

  void visitCallInst(CallInst &I) {
    visitCallSite(&I);
  }

  void visitInvokeInst(InvokeInst &II) {
    visitCallSite(&II);
    visitTerminator(II);
  }

  void visitCallBrInst(CallBrInst &CBI) {
    visitCallSite(&CBI);
    visitTerminator(CBI);
  }

  void visitCallSite(CallSite CS);
  void visitResumeInst(ResumeInst &I) { /*returns void*/ }
  void visitUnreachableInst(UnreachableInst &I) { /*returns void*/ }
  void visitFenceInst(FenceInst &I) { /*returns void*/ }

  void visitInstruction(Instruction &I) {
    // All the instructions we don't do any special handling for just
    // go to overdefined.
    LLVM_DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n');
    markOverdefined(&I);
  }
};

} // end anonymous namespace
// getFeasibleSuccessors - Return a vector of booleans to indicate which
// successors are reachable from a given terminator instruction.
void SCCPSolver::getFeasibleSuccessors(Instruction &TI,
                                       SmallVectorImpl<bool> &Succs) {
  Succs.resize(TI.getNumSuccessors());
  if (auto *BI = dyn_cast<BranchInst>(&TI)) {
    if (BI->isUnconditional()) {
      Succs[0] = true;
      return;
    }

    LatticeVal BCValue = getValueState(BI->getCondition());
    ConstantInt *CI = BCValue.getConstantInt();
    if (!CI) {
      // Overdefined condition variables, and branches on unfoldable constant
      // conditions, mean the branch could go either way.
      if (!BCValue.isUnknown())
        Succs[0] = Succs[1] = true;
      return;
    }

    // Constant condition variables mean the branch can only go a single way.
    Succs[CI->isZero()] = true;
    return;
  }

  // Unwinding instructions' successors are always executable.
  if (TI.isExceptionalTerminator()) {
    Succs.assign(TI.getNumSuccessors(), true);
    return;
  }

  if (auto *SI = dyn_cast<SwitchInst>(&TI)) {
    if (!SI->getNumCases()) {
      Succs[0] = true;
      return;
    }
    LatticeVal SCValue = getValueState(SI->getCondition());
    ConstantInt *CI = SCValue.getConstantInt();

    if (!CI) { // Overdefined or unknown condition?
      // All destinations are executable!
      if (!SCValue.isUnknown())
        Succs.assign(TI.getNumSuccessors(), true);
      return;
    }

    Succs[SI->findCaseValue(CI)->getSuccessorIndex()] = true;
    return;
  }

  // If this is an indirect branch and its address is a blockaddress, we mark
  // the target as executable.
  if (auto *IBR = dyn_cast<IndirectBrInst>(&TI)) {
    // Casts are folded by visitCastInst.
    LatticeVal IBRValue = getValueState(IBR->getAddress());
    BlockAddress *Addr = IBRValue.getBlockAddress();
    if (!Addr) { // Overdefined or unknown condition?
      // All destinations are executable!
      if (!IBRValue.isUnknown())
        Succs.assign(TI.getNumSuccessors(), true);
      return;
    }

    BasicBlock *T = Addr->getBasicBlock();
    assert(Addr->getFunction() == T->getParent() &&
           "Block address of a different function?");
    for (unsigned i = 0; i < IBR->getNumSuccessors(); ++i) {
      // This is the target.
      if (IBR->getDestination(i) == T) {
        Succs[i] = true;
        return;
      }
    }

    // If we didn't find our destination in the IBR successor list, then we
    // have undefined behavior.  It's ok to assume no successor is executable.
    return;
  }

  // In case of callbr, we pessimistically assume that all successors are
  // feasible.
  if (isa<CallBrInst>(&TI)) {
    Succs.assign(TI.getNumSuccessors(), true);
    return;
  }

  LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n');
  llvm_unreachable("SCCP: Don't know how to handle this terminator!");
}
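
// Illustrative only (not part of the original file): how the mapping above
// plays out for a couple of terminators.
//
//   br i1 true, label %then, label %else
//     ; only %then is feasible (Succs[CI->isZero()] marks index 0)
//   br i1 %overdefined, label %then, label %else
//     ; both successors are feasible
//   switch i32 7, label %default [ i32 7, label %case7 ]
//     ; with a constant condition only %case7 is feasible; with an
//     ; overdefined condition every successor, including %default, is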
// isEdgeFeasible - Return true if the control flow edge from the 'From' basic
// block to the 'To' basic block is currently feasible.
bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
  // Check if we've called markEdgeExecutable on the edge yet. (We could
  // be more aggressive and try to consider edges which haven't been marked
  // yet, but there isn't any need.)
  return KnownFeasibleEdges.count(Edge(From, To));
}
// visit Implementations - Something changed in this instruction, either an
// operand made a transition, or the instruction is newly executable.  Change
// the value type of I to reflect these changes if appropriate.  This method
// makes sure to do the following actions:
//
// 1. If a phi node merges two constants in, and has conflicting value coming
//    from different branches, or if the PHI node merges in an overdefined
//    value, then the PHI node becomes overdefined.
// 2. If a phi node merges only constants in, and they all agree on value, the
//    PHI node becomes a constant value equal to that.
// 3. If V <- x (op) y && isConstant(x) && isConstant(y) V = Constant
// 4. If V <- x (op) y && (isOverdefined(x) || isOverdefined(y)) V = Overdefined
// 5. If V <- MEM or V <- CALL or V <- (unknown) then V = Overdefined
// 6. If a conditional branch has a value that is constant, make the selected
//    destination executable
// 7. If a conditional branch has a value that is overdefined, make all
//    successors executable.
void SCCPSolver::visitPHINode(PHINode &PN) {
  // If this PN returns a struct, just mark the result overdefined.
  // TODO: We could do a lot better than this if code actually uses this.
  if (PN.getType()->isStructTy())
    return (void)markOverdefined(&PN);

  if (getValueState(&PN).isOverdefined())
    return; // Quick exit

  // Super-extra-high-degree PHI nodes are unlikely to ever be marked constant,
  // and slow us down a lot.  Just mark them overdefined.
  if (PN.getNumIncomingValues() > 64)
    return (void)markOverdefined(&PN);

  // Look at all of the executable operands of the PHI node.  If any of them
  // are overdefined, the PHI becomes overdefined as well.  If they are all
  // constant, and they agree with each other, the PHI becomes the identical
  // constant.  If they are constant and don't agree, the PHI is overdefined.
  // If there are no executable operands, the PHI remains unknown.
  Constant *OperandVal = nullptr;
  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
    LatticeVal IV = getValueState(PN.getIncomingValue(i));
    if (IV.isUnknown()) continue; // Doesn't influence PHI node.

    if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent()))
      continue;

    if (IV.isOverdefined()) // PHI node becomes overdefined!
      return (void)markOverdefined(&PN);

    if (!OperandVal) { // Grab the first value.
      OperandVal = IV.getConstant();
      continue;
    }

    // There is already a reachable operand.  If we conflict with it,
    // then the PHI node becomes overdefined.  If we agree with it, we
    // can continue on.

    // Check to see if there are two different constants merging, if so, the
    // PHI node is overdefined.
    if (IV.getConstant() != OperandVal)
      return (void)markOverdefined(&PN);
  }

  // If we exited the loop, this means that the PHI node only has constant
  // arguments that agree with each other (and OperandVal is the constant) or
  // OperandVal is null because there are no defined incoming arguments.  If
  // this is the case, the PHI remains unknown.
  if (OperandVal)
    markConstant(&PN, OperandVal); // Acquire operand value
}
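
// Illustrative only (not part of the original file): with both incoming edges
// feasible,
//
//   %a = phi i32 [ 7, %bb1 ], [ 7, %bb2 ]   ; operands agree -> constant 7
//   %b = phi i32 [ 7, %bb1 ], [ 9, %bb2 ]   ; conflicting constants -> overdefined
//
// and if only the edge from %bb1 is feasible, %b is still treated as the
// constant 7, because values flowing over infeasible edges are ignored above.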
void SCCPSolver::visitReturnInst(ReturnInst &I) {
  if (I.getNumOperands() == 0) return; // ret void

  Function *F = I.getParent()->getParent();
  Value *ResultOp = I.getOperand(0);

  // If we are tracking the return value of this function, merge it in.
  if (!TrackedRetVals.empty() && !ResultOp->getType()->isStructTy()) {
    MapVector<Function *, LatticeVal>::iterator TFRVI =
        TrackedRetVals.find(F);
    if (TFRVI != TrackedRetVals.end()) {
      mergeInValue(TFRVI->second, F, getValueState(ResultOp));
      return;
    }
  }

  // Handle functions that return multiple values.
  if (!TrackedMultipleRetVals.empty()) {
    if (auto *STy = dyn_cast<StructType>(ResultOp->getType()))
      if (MRVFunctionsTracked.count(F))
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
          mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F,
                       getStructValueState(ResultOp, i));
  }
}
void SCCPSolver::visitTerminator(Instruction &TI) {
  SmallVector<bool, 16> SuccFeasible;
  getFeasibleSuccessors(TI, SuccFeasible);

  BasicBlock *BB = TI.getParent();

  // Mark all feasible successors executable.
  for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i)
    if (SuccFeasible[i])
      markEdgeExecutable(BB, TI.getSuccessor(i));
}
void SCCPSolver::visitCastInst(CastInst &I) {
  LatticeVal OpSt = getValueState(I.getOperand(0));
  if (OpSt.isOverdefined()) // Inherit overdefinedness of operand
    markOverdefined(&I);
  else if (OpSt.isConstant()) {
    // Fold the constant as we build.
    Constant *C = ConstantFoldCastOperand(I.getOpcode(), OpSt.getConstant(),
                                          I.getType(), DL);

    if (isa<UndefValue>(C))
      return;
    // Propagate constant value
    markConstant(&I, C);
  }
}
void SCCPSolver::visitExtractValueInst(ExtractValueInst &EVI) {
  // If this returns a struct, mark all elements over defined, we don't track
  // structs in structs.
  if (EVI.getType()->isStructTy())
    return (void)markOverdefined(&EVI);

  // If this is extracting from more than one level of struct, we don't know.
  if (EVI.getNumIndices() != 1)
    return (void)markOverdefined(&EVI);

  Value *AggVal = EVI.getAggregateOperand();
  if (AggVal->getType()->isStructTy()) {
    unsigned i = *EVI.idx_begin();
    LatticeVal EltVal = getStructValueState(AggVal, i);
    mergeInValue(getValueState(&EVI), &EVI, EltVal);
  } else {
    // Otherwise, must be extracting from an array.
    return (void)markOverdefined(&EVI);
  }
}
void SCCPSolver::visitInsertValueInst(InsertValueInst &IVI) {
  auto *STy = dyn_cast<StructType>(IVI.getType());
  if (!STy)
    return (void)markOverdefined(&IVI);

  // If this has more than one index, we can't handle it, drive all results to
  // undef.
  if (IVI.getNumIndices() != 1)
    return (void)markOverdefined(&IVI);

  Value *Aggr = IVI.getAggregateOperand();
  unsigned Idx = *IVI.idx_begin();

  // Compute the result based on what we're inserting.
  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
    // This passes through all values that aren't the inserted element.
    if (i != Idx) {
      LatticeVal EltVal = getStructValueState(Aggr, i);
      mergeInValue(getStructValueState(&IVI, i), &IVI, EltVal);
      continue;
    }

    Value *Val = IVI.getInsertedValueOperand();
    if (Val->getType()->isStructTy())
      // We don't track structs in structs.
      markOverdefined(getStructValueState(&IVI, i), &IVI);
    else {
      LatticeVal InVal = getValueState(Val);
      mergeInValue(getStructValueState(&IVI, i), &IVI, InVal);
    }
  }
}
void SCCPSolver::visitSelectInst(SelectInst &I) {
  // If this select returns a struct, just mark the result overdefined.
  // TODO: We could do a lot better than this if code actually uses this.
  if (I.getType()->isStructTy())
    return (void)markOverdefined(&I);

  LatticeVal CondValue = getValueState(I.getCondition());
  if (CondValue.isUnknown())
    return;

  if (ConstantInt *CondCB = CondValue.getConstantInt()) {
    Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
    mergeInValue(&I, getValueState(OpVal));
    return;
  }

  // Otherwise, the condition is overdefined or a constant we can't evaluate.
  // See if we can produce something better than overdefined based on the T/F
  // value.
  LatticeVal TVal = getValueState(I.getTrueValue());
  LatticeVal FVal = getValueState(I.getFalseValue());

  // select ?, C, C -> C.
  if (TVal.isConstant() && FVal.isConstant() &&
      TVal.getConstant() == FVal.getConstant())
    return (void)markConstant(&I, FVal.getConstant());

  if (TVal.isUnknown()) // select ?, undef, X -> X.
    return (void)mergeInValue(&I, FVal);
  if (FVal.isUnknown()) // select ?, X, undef -> X.
    return (void)mergeInValue(&I, TVal);
  markOverdefined(&I);
}
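
// Illustrative only (not part of the original file):
//
//   %s = select i1 %overdefined, i32 3, i32 3       ; both arms agree -> constant 3
//   %t = select i1 %overdefined, i32 undef, i32 %x  ; undef arm is 'unknown',
//                                                   ; so %t takes %x's value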
// Handle Unary Operators.
void SCCPSolver::visitUnaryOperator(Instruction &I) {
  LatticeVal V0State = getValueState(I.getOperand(0));

  LatticeVal &IV = ValueState[&I];
  if (IV.isOverdefined()) return;

  if (V0State.isConstant()) {
    Constant *C = ConstantExpr::get(I.getOpcode(), V0State.getConstant());

    if (isa<UndefValue>(C))
      return;
    return (void)markConstant(IV, &I, C);
  }

  // If something is undef, wait for it to resolve.
  if (!V0State.isOverdefined())
    return;

  markOverdefined(&I);
}
// Handle Binary Operators.
void SCCPSolver::visitBinaryOperator(Instruction &I) {
  LatticeVal V1State = getValueState(I.getOperand(0));
  LatticeVal V2State = getValueState(I.getOperand(1));

  LatticeVal &IV = ValueState[&I];
  if (IV.isOverdefined()) return;

  if (V1State.isConstant() && V2State.isConstant()) {
    Constant *C = ConstantExpr::get(I.getOpcode(), V1State.getConstant(),
                                    V2State.getConstant());
    if (isa<UndefValue>(C))
      return;
    return (void)markConstant(IV, &I, C);
  }

  // If something is undef, wait for it to resolve.
  if (!V1State.isOverdefined() && !V2State.isOverdefined())
    return;

  // Otherwise, one of our operands is overdefined.  Try to produce something
  // better than overdefined with some tricks.
  // If this is 0 / Y, it doesn't matter that the second operand is
  // overdefined, and we can replace it with zero.
  if (I.getOpcode() == Instruction::UDiv || I.getOpcode() == Instruction::SDiv)
    if (V1State.isConstant() && V1State.getConstant()->isNullValue())
      return (void)markConstant(IV, &I, V1State.getConstant());

  // If this is:
  // -> AND/MUL with 0
  // -> OR with -1
  // it doesn't matter that the other operand is overdefined.
  if (I.getOpcode() == Instruction::And || I.getOpcode() == Instruction::Mul ||
      I.getOpcode() == Instruction::Or) {
    LatticeVal *NonOverdefVal = nullptr;
    if (!V1State.isOverdefined())
      NonOverdefVal = &V1State;
    else if (!V2State.isOverdefined())
      NonOverdefVal = &V2State;

    if (NonOverdefVal) {
      if (NonOverdefVal->isUnknown())
        return;

      if (I.getOpcode() == Instruction::And ||
          I.getOpcode() == Instruction::Mul) {
        // X and 0 = 0
        // X * 0 = 0
        if (NonOverdefVal->getConstant()->isNullValue())
          return (void)markConstant(IV, &I, NonOverdefVal->getConstant());
      } else {
        // X or -1 = -1
        if (ConstantInt *CI = NonOverdefVal->getConstantInt())
          if (CI->isMinusOne())
            return (void)markConstant(IV, &I, NonOverdefVal->getConstant());
      }
    }
  }

  markOverdefined(&I);
}
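
// Illustrative only (not part of the original file): the shortcuts above fire
// even when one operand is overdefined.
//
//   %a = and i32 %overdefined, 0    ; known to be 0
//   %m = mul i32 0, %overdefined    ; known to be 0
//   %o = or  i32 %overdefined, -1   ; known to be -1
//   %d = udiv i32 0, %overdefined   ; known to be 0 (the "0 / Y" case)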
// Handle ICmpInst instruction.
void SCCPSolver::visitCmpInst(CmpInst &I) {
  // Do not cache this lookup, getValueState calls later in the function might
  // invalidate the reference.
  if (ValueState[&I].isOverdefined()) return;

  Value *Op1 = I.getOperand(0);
  Value *Op2 = I.getOperand(1);

  // For parameters, use ParamState which includes constant range info if
  // available.
  auto V1Param = ParamState.find(Op1);
  ValueLatticeElement V1State = (V1Param != ParamState.end())
                                    ? V1Param->second
                                    : getValueState(Op1).toValueLattice();

  auto V2Param = ParamState.find(Op2);
  ValueLatticeElement V2State = V2Param != ParamState.end()
                                    ? V2Param->second
                                    : getValueState(Op2).toValueLattice();

  Constant *C = V1State.getCompare(I.getPredicate(), I.getType(), V2State);
  if (C) {
    if (isa<UndefValue>(C))
      return;
    LatticeVal CV;
    CV.markConstant(C);
    mergeInValue(&I, CV);
    return;
  }

  // If operands are still unknown, wait for it to resolve.
  if (!V1State.isOverdefined() && !V2State.isOverdefined() &&
      !ValueState[&I].isConstant())
    return;

  markOverdefined(&I);
}
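
// Illustrative only (not part of the original file): when interprocedural
// argument tracking has recorded a constant range for a parameter, say %arg
// known to lie in [0, 10), the range-aware ParamState lets getCompare() fold
//
//   %c = icmp ult i32 %arg, 10      ; folds to true
//
// even though the plain LatticeVal for %arg is overdefined.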
// Handle getelementptr instructions.  If all operands are constants then we
// can turn this into a getelementptr ConstantExpr.
void SCCPSolver::visitGetElementPtrInst(GetElementPtrInst &I) {
  if (ValueState[&I].isOverdefined()) return;

  SmallVector<Constant *, 8> Operands;
  Operands.reserve(I.getNumOperands());

  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    LatticeVal State = getValueState(I.getOperand(i));
    if (State.isUnknown())
      return; // Operands are not resolved yet.

    if (State.isOverdefined())
      return (void)markOverdefined(&I);

    assert(State.isConstant() && "Unknown state!");
    Operands.push_back(State.getConstant());
  }

  Constant *Ptr = Operands[0];
  auto Indices = makeArrayRef(Operands.begin() + 1, Operands.end());
  Constant *C =
      ConstantExpr::getGetElementPtr(I.getSourceElementType(), Ptr, Indices);
  if (isa<UndefValue>(C))
    return;
  markConstant(&I, C);
}
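
// Illustrative only (not part of the original file): once the pointer and all
// indices are constant,
//
//   %p = getelementptr [4 x i32], [4 x i32]* @g, i64 0, i64 2
//
// is marked as the constant expression "getelementptr(@g, 0, 2)" instead of
// staying overdefined.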
void SCCPSolver::visitStoreInst(StoreInst &SI) {
  // If this store is of a struct, ignore it.
  if (SI.getOperand(0)->getType()->isStructTy())
    return;

  if (TrackedGlobals.empty() || !isa<GlobalVariable>(SI.getOperand(1)))
    return;

  GlobalVariable *GV = cast<GlobalVariable>(SI.getOperand(1));
  DenseMap<GlobalVariable *, LatticeVal>::iterator I = TrackedGlobals.find(GV);
  if (I == TrackedGlobals.end() || I->second.isOverdefined()) return;

  // Get the value we are storing into the global, then merge it.
  mergeInValue(I->second, GV, getValueState(SI.getOperand(0)));
  if (I->second.isOverdefined())
    TrackedGlobals.erase(I); // No need to keep tracking this!
}
// Handle load instructions.  If the operand is a constant pointer to a
// constant global, we can replace the load with the loaded constant value!
void SCCPSolver::visitLoadInst(LoadInst &I) {
  // If this load is of a struct, just mark the result overdefined.
  if (I.getType()->isStructTy())
    return (void)markOverdefined(&I);

  LatticeVal PtrVal = getValueState(I.getOperand(0));
  if (PtrVal.isUnknown()) return; // The pointer is not resolved yet!

  LatticeVal &IV = ValueState[&I];
  if (IV.isOverdefined()) return;

  if (!PtrVal.isConstant() || I.isVolatile())
    return (void)markOverdefined(IV, &I);

  Constant *Ptr = PtrVal.getConstant();

  // load null is undefined.
  if (isa<ConstantPointerNull>(Ptr)) {
    if (NullPointerIsDefined(I.getFunction(), I.getPointerAddressSpace()))
      return (void)markOverdefined(IV, &I);
    else
      return;
  }

  // Transform load (constant global) into the value loaded.
  if (auto *GV = dyn_cast<GlobalVariable>(Ptr)) {
    if (!TrackedGlobals.empty()) {
      // If we are tracking this global, merge in the known value for it.
      DenseMap<GlobalVariable *, LatticeVal>::iterator It =
          TrackedGlobals.find(GV);
      if (It != TrackedGlobals.end()) {
        mergeInValue(IV, &I, It->second);
        return;
      }
    }
  }

  // Transform load from a constant into a constant if possible.
  if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, I.getType(), DL)) {
    if (isa<UndefValue>(C))
      return;
    return (void)markConstant(IV, &I, C);
  }

  // Otherwise we cannot say for certain what value this load will produce.
  // Bail out.
  markOverdefined(IV, &I);
}
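
// Illustrative only (not part of the original file): given
//
//   @g = internal constant i32 42
//   %v = load i32, i32* @g
//
// ConstantFoldLoadFromConstPtr lets the solver mark %v as the constant 42.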
void SCCPSolver::visitCallSite(CallSite CS) {
  Function *F = CS.getCalledFunction();
  Instruction *I = CS.getInstruction();

  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    if (II->getIntrinsicID() == Intrinsic::ssa_copy) {
      if (ValueState[I].isOverdefined())
        return;

      auto *PI = getPredicateInfoFor(I);
      if (!PI)
        return;

      Value *CopyOf = I->getOperand(0);
      auto *PBranch = dyn_cast<PredicateBranch>(PI);
      if (!PBranch) {
        mergeInValue(ValueState[I], I, getValueState(CopyOf));
        return;
      }

      Value *Cond = PBranch->Condition;

      // Everything below relies on the condition being a comparison.
      auto *Cmp = dyn_cast<CmpInst>(Cond);
      if (!Cmp) {
        mergeInValue(ValueState[I], I, getValueState(CopyOf));
        return;
      }

      Value *CmpOp0 = Cmp->getOperand(0);
      Value *CmpOp1 = Cmp->getOperand(1);
      if (CopyOf != CmpOp0 && CopyOf != CmpOp1) {
        mergeInValue(ValueState[I], I, getValueState(CopyOf));
        return;
      }

      if (CmpOp0 != CopyOf)
        std::swap(CmpOp0, CmpOp1);

      LatticeVal OriginalVal = getValueState(CopyOf);
      LatticeVal EqVal = getValueState(CmpOp1);
      LatticeVal &IV = ValueState[I];
      if (PBranch->TrueEdge && Cmp->getPredicate() == CmpInst::ICMP_EQ) {
        addAdditionalUser(CmpOp1, I);
        if (OriginalVal.isConstant())
          mergeInValue(IV, I, OriginalVal);
        else
          mergeInValue(IV, I, EqVal);
        return;
      }
      if (!PBranch->TrueEdge && Cmp->getPredicate() == CmpInst::ICMP_NE) {
        addAdditionalUser(CmpOp1, I);
        if (OriginalVal.isConstant())
          mergeInValue(IV, I, OriginalVal);
        else
          mergeInValue(IV, I, EqVal);
        return;
      }

      return (void)mergeInValue(IV, I, getValueState(CopyOf));
    }
  }
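
  // Illustrative only (not part of the original file): with PredicateInfo in
  // place, IR like
  //
  //   %c = icmp eq i32 %x, 7
  //   br i1 %c, label %then, label %else
  // then:
  //   %x.0 = call i32 @llvm.ssa.copy(i32 %x)  ; (type mangling of the
  //                                           ;  intrinsic name elided)
  //
  // reaches the code above; on the true edge of the equality compare, %x.0 can
  // take the lattice value of the constant operand 7 unless %x is already
  // known to be a constant.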
  // The common case is that we aren't tracking the callee, either because we
  // are not doing interprocedural analysis or the callee is indirect, or is
  // external.  Handle these cases first.
  if (!F || F->isDeclaration()) {
CallOverdefined:
    // Void return and not tracking callee, just bail.
    if (I->getType()->isVoidTy()) return;

    // Otherwise, if we have a single return value case, and if the function is
    // a declaration, maybe we can constant fold it.
    if (F && F->isDeclaration() && !I->getType()->isStructTy() &&
        canConstantFoldCallTo(cast<CallBase>(CS.getInstruction()), F)) {
      SmallVector<Constant *, 8> Operands;
      for (CallSite::arg_iterator AI = CS.arg_begin(), E = CS.arg_end();
           AI != E; ++AI) {
        if (AI->get()->getType()->isStructTy())
          return markOverdefined(I); // Can't handle struct args.
        LatticeVal State = getValueState(*AI);

        if (State.isUnknown())
          return; // Operands are not resolved yet.
        if (State.isOverdefined())
          return (void)markOverdefined(I);
        assert(State.isConstant() && "Unknown state!");
        Operands.push_back(State.getConstant());
      }

      if (getValueState(I).isOverdefined())
        return;

      // If we can constant fold this, mark the result of the call as a
      // constant.
      if (Constant *C = ConstantFoldCall(cast<CallBase>(CS.getInstruction()), F,
                                         Operands, TLI)) {
        // call -> undef.
        if (isa<UndefValue>(C))
          return;
        return (void)markConstant(I, C);
      }
    }

    // Otherwise, we don't know anything about this call, mark it overdefined.
    return (void)markOverdefined(I);
  }
  // If this is a local function that doesn't have its address taken, mark its
  // entry block executable and merge in the actual arguments to the call into
  // the formal arguments of the function.
  if (!TrackingIncomingArguments.empty() && TrackingIncomingArguments.count(F)){
    MarkBlockExecutable(&F->front());

    // Propagate information from this call site into the callee.
    CallSite::arg_iterator CAI = CS.arg_begin();
    for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
         AI != E; ++AI, ++CAI) {
      // If this argument is byval, and if the function is not readonly, there
      // will be an implicit copy formed of the input aggregate.
      if (AI->hasByValAttr() && !F->onlyReadsMemory()) {
        markOverdefined(&*AI);
        continue;
      }

      if (auto *STy = dyn_cast<StructType>(AI->getType())) {
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          LatticeVal CallArg = getStructValueState(*CAI, i);
          mergeInValue(getStructValueState(&*AI, i), &*AI, CallArg);
        }
      } else {
        // Most other parts of the Solver still only use the simpler value
        // lattice, so we propagate changes for parameters to both lattices.
        LatticeVal ConcreteArgument = getValueState(*CAI);
        bool ParamChanged =
            getParamState(&*AI).mergeIn(ConcreteArgument.toValueLattice(), DL);
        bool ValueChanged = mergeInValue(&*AI, ConcreteArgument);
        // Add the argument to the work list if ParamState changed but
        // ValueState did not (because it is already overdefined there);
        // changes in ParamState have to be taken into account, as it is used
        // when evaluating Cmp instructions.
        if (!ValueChanged && ParamChanged)
          pushToWorkList(ValueState[&*AI], &*AI);
      }
    }
  }
  // If this is a single/zero retval case, see if we're tracking the function.
  if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
    if (!MRVFunctionsTracked.count(F))
      goto CallOverdefined; // Not tracking this callee.

    // If we are tracking this callee, propagate the result of the function
    // into this call site.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
      mergeInValue(getStructValueState(I, i), I,
                   TrackedMultipleRetVals[std::make_pair(F, i)]);
  } else {
    MapVector<Function *, LatticeVal>::iterator TFRVI = TrackedRetVals.find(F);
    if (TFRVI == TrackedRetVals.end())
      goto CallOverdefined; // Not tracking this callee.

    // If so, propagate the return value of the callee into this call result.
    mergeInValue(I, TFRVI->second);
  }
}
void SCCPSolver::Solve() {
  // Process the work lists until they are empty!
  while (!BBWorkList.empty() || !InstWorkList.empty() ||
         !OverdefinedInstWorkList.empty()) {
    // Process the overdefined instruction's work list first, which drives
    // other things to overdefined more quickly.
    while (!OverdefinedInstWorkList.empty()) {
      Value *I = OverdefinedInstWorkList.pop_back_val();

      LLVM_DEBUG(dbgs() << "\nPopped off OI-WL: " << *I << '\n');

      // "I" got into the work list because it either made the transition from
      // bottom to constant, or to overdefined.
      //
      // Anything on this worklist that is overdefined need not be visited
      // since all of its users will have already been marked as overdefined.
      // Update all of the users of this instruction's value.
      //
      markUsersAsChanged(I);
    }

    // Process the instruction work list.
    while (!InstWorkList.empty()) {
      Value *I = InstWorkList.pop_back_val();

      LLVM_DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n');

      // "I" got into the work list because it made the transition from undef
      // to constant.
      //
      // Anything on this worklist that is overdefined need not be visited
      // since all of its users will have already been marked as overdefined.
      // Update all of the users of this instruction's value.
      //
      if (I->getType()->isStructTy() || !getValueState(I).isOverdefined())
        markUsersAsChanged(I);
    }

    // Process the basic block work list.
    while (!BBWorkList.empty()) {
      BasicBlock *BB = BBWorkList.back();
      BBWorkList.pop_back();

      LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n');

      // Notify all instructions in this basic block that they are newly
      // executable.
      for (Instruction &I : *BB)
        visit(&I);
    }
  }
}
/// ResolvedUndefsIn - While solving the dataflow for a function, we assume
/// that branches on undef values cannot reach any of their successors.
/// However, this is not a safe assumption.  After we solve dataflow, this
/// method should be used to handle this.  If this returns true, the solver
/// should be rerun.
///
/// This method handles this by finding an unresolved branch and marking one
/// of the edges from the block as feasible, even though the condition doesn't
/// say it would otherwise be.  This allows SCCP to find the rest of the CFG
/// and only slightly pessimizes the analysis results (by marking one,
/// potentially infeasible, edge feasible).  This cannot usefully modify the
/// constraints on the condition of the branch, as that would impact other
/// users of the value.
///
/// This scan also checks for values that use undefs, whose results are
/// actually defined.  For example, 'zext i8 undef to i32' should produce all
/// zeros conservatively, as "(zext i8 X -> i32) & 0xFF00" must always return
/// zero, even if X isn't defined.
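///
/// Illustrative only (not part of the original comment): if solving leaves
///
///   br i1 undef, label %a, label %b
///
/// unresolved, the code below rewrites the condition to 'false' and marks the
/// edge to %b executable, so the next Solve() run can make progress.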
bool SCCPSolver::ResolvedUndefsIn(Function &F) {
  for (BasicBlock &BB : F) {
    if (!BBExecutable.count(&BB))
      continue;

    for (Instruction &I : BB) {
      // Look for instructions which produce undef values.
      if (I.getType()->isVoidTy()) continue;

      if (auto *STy = dyn_cast<StructType>(I.getType())) {
        // Only a few things that can be structs matter for undef.

        // Tracked calls must never be marked overdefined in ResolvedUndefsIn.
        if (CallSite CS = CallSite(&I))
          if (Function *F = CS.getCalledFunction())
            if (MRVFunctionsTracked.count(F))
              continue;

        // extractvalue and insertvalue don't need to be marked; they are
        // tracked as precisely as their operands.
        if (isa<ExtractValueInst>(I) || isa<InsertValueInst>(I))
          continue;

        // Send the results of everything else to overdefined.  We could be
        // more precise than this but it isn't worth bothering.
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          LatticeVal &LV = getStructValueState(&I, i);
          if (LV.isUnknown())
            markOverdefined(LV, &I);
        }
        continue;
      }

      LatticeVal &LV = getValueState(&I);
      if (!LV.isUnknown())
        continue;

      // There are two reasons a call can have an undef result
      // 1. It could be tracked.
      // 2. It could be constant-foldable.
      // Because of the way we solve return values, tracked calls must
      // never be marked overdefined in ResolvedUndefsIn.
      if (CallSite CS = CallSite(&I)) {
        if (Function *F = CS.getCalledFunction())
          if (TrackedRetVals.count(F))
            continue;

        // If the call is constant-foldable, we mark it overdefined because
        // we do not know what return values are valid.
        markOverdefined(&I);
        return true;
      }

      // extractvalue is safe; check here because the argument is a struct.
      if (isa<ExtractValueInst>(I))
        continue;

      // Compute the operand LatticeVals, for convenience below.
      // Anything taking a struct is conservatively assumed to require
      // overdefined markings.
      if (I.getOperand(0)->getType()->isStructTy()) {
        markOverdefined(&I);
        return true;
      }
      LatticeVal Op0LV = getValueState(I.getOperand(0));
      LatticeVal Op1LV;
      if (I.getNumOperands() == 2) {
        if (I.getOperand(1)->getType()->isStructTy()) {
          markOverdefined(&I);
          return true;
        }

        Op1LV = getValueState(I.getOperand(1));
      }
      // If this is an instruction whose result is defined even if the input is
      // not fully defined, propagate the information.
      Type *ITy = I.getType();
      switch (I.getOpcode()) {
      case Instruction::Add:
      case Instruction::Sub:
      case Instruction::Trunc:
      case Instruction::FPTrunc:
      case Instruction::BitCast:
        break; // Any undef -> undef
      case Instruction::FSub:
      case Instruction::FAdd:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FRem:
        // Floating-point binary operation: be conservative.
        if (Op0LV.isUnknown() && Op1LV.isUnknown())
          markForcedConstant(&I, Constant::getNullValue(ITy));
        else
          markOverdefined(&I);
        return true;
      case Instruction::FNeg:
        break; // fneg undef -> undef
      case Instruction::ZExt:
      case Instruction::SExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::FPExt:
      case Instruction::PtrToInt:
      case Instruction::IntToPtr:
      case Instruction::SIToFP:
      case Instruction::UIToFP:
        // undef -> 0; some outputs are impossible
        markForcedConstant(&I, Constant::getNullValue(ITy));
        return true;
      case Instruction::Mul:
      case Instruction::And:
        // Both operands undef -> undef
        if (Op0LV.isUnknown() && Op1LV.isUnknown())
          break;
        // undef * X -> 0.  X could be zero.
        // undef & X -> 0.  X could be zero.
        markForcedConstant(&I, Constant::getNullValue(ITy));
        return true;
      case Instruction::Or:
        // Both operands undef -> undef
        if (Op0LV.isUnknown() && Op1LV.isUnknown())
          break;
        // undef | X -> -1.  X could be -1.
        markForcedConstant(&I, Constant::getAllOnesValue(ITy));
        return true;
      case Instruction::Xor:
        // undef ^ undef -> 0; this is not strictly necessary, but we try to be
        // nice to people who expect this behavior in simple cases.
        if (Op0LV.isUnknown() && Op1LV.isUnknown()) {
          markForcedConstant(&I, Constant::getNullValue(ITy));
          return true;
        }
        // undef ^ X -> undef
        break;
      case Instruction::SDiv:
      case Instruction::UDiv:
      case Instruction::SRem:
      case Instruction::URem:
        // X / undef -> undef.  No change.
        // X % undef -> undef.  No change.
        if (Op1LV.isUnknown()) break;

        // X / 0 -> undef.  No change.
        // X % 0 -> undef.  No change.
        if (Op1LV.isConstant() && Op1LV.getConstant()->isZeroValue())
          break;

        // undef / X -> 0.  X could be maxint.
        // undef % X -> 0.  X could be 1.
        markForcedConstant(&I, Constant::getNullValue(ITy));
        return true;
      case Instruction::AShr:
        // X >>a undef -> undef.
        if (Op1LV.isUnknown()) break;

        // Shifting by the bitwidth or more is undefined.
        if (Op1LV.isConstant()) {
          if (auto *ShiftAmt = Op1LV.getConstantInt())
            if (ShiftAmt->getLimitedValue() >=
                ShiftAmt->getType()->getScalarSizeInBits())
              break;
        }

        // undef >>a X -> 0
        markForcedConstant(&I, Constant::getNullValue(ITy));
        return true;
      case Instruction::LShr:
      case Instruction::Shl:
        // X << undef -> undef.
        // X >> undef -> undef.
        if (Op1LV.isUnknown()) break;

        // Shifting by the bitwidth or more is undefined.
        if (Op1LV.isConstant()) {
          if (auto *ShiftAmt = Op1LV.getConstantInt())
            if (ShiftAmt->getLimitedValue() >=
                ShiftAmt->getType()->getScalarSizeInBits())
              break;
        }

        // undef << X -> 0
        // undef >> X -> 0
        markForcedConstant(&I, Constant::getNullValue(ITy));
        return true;
      case Instruction::Select:
        Op1LV = getValueState(I.getOperand(1));
        // undef ? X : Y -> X or Y.  There could be commonality between X/Y.
        if (Op0LV.isUnknown()) {
          if (!Op1LV.isConstant()) // Pick the constant one if there is any.
            Op1LV = getValueState(I.getOperand(2));
        } else if (Op1LV.isUnknown()) {
          // c ? undef : undef -> undef.  No change.
          Op1LV = getValueState(I.getOperand(2));
          if (Op1LV.isUnknown())
            break;
          // Otherwise, c ? undef : x -> x.
        } else {
          // Leave Op1LV as Operand(1)'s LatticeValue.
        }

        if (Op1LV.isConstant())
          markForcedConstant(&I, Op1LV.getConstant());
        else
          markOverdefined(&I);
        return true;
      case Instruction::Load:
        // A load here means one of two things: a load of undef from a global,
        // a load from an unknown pointer.  Either way, having it return undef
        // is okay.
        break;
      case Instruction::ICmp:
        // X == undef -> undef.  Other comparisons get more complicated.
        Op0LV = getValueState(I.getOperand(0));
        Op1LV = getValueState(I.getOperand(1));

        if ((Op0LV.isUnknown() || Op1LV.isUnknown()) &&
            cast<ICmpInst>(&I)->isEquality())
          break;
        markOverdefined(&I);
        return true;
      case Instruction::Call:
      case Instruction::Invoke:
      case Instruction::CallBr:
        llvm_unreachable("Call-like instructions should have been handled early");
      default:
        // If we don't know what should happen here, conservatively mark it
        // overdefined.
        markOverdefined(&I);
        return true;
      }
    }
    // Check to see if we have a branch or switch on an undefined value.  If so
    // we force the branch to go one way or the other to make the successor
    // values live.  It doesn't really matter which way we force it.
    Instruction *TI = BB.getTerminator();
    if (auto *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional()) continue;
      if (!getValueState(BI->getCondition()).isUnknown())
        continue;

      // If the input to SCCP is actually a branch on undef, fix the undef to
      // false.
      if (isa<UndefValue>(BI->getCondition())) {
        BI->setCondition(ConstantInt::getFalse(BI->getContext()));
        markEdgeExecutable(&BB, TI->getSuccessor(1));
        return true;
      }

      // Otherwise, it is a branch on a symbolic value which is currently
      // considered to be undef.  Make sure some edge is executable, so a
      // branch on "undef" always flows somewhere.
      // FIXME: Distinguish between dead code and an LLVM "undef" value.
      BasicBlock *DefaultSuccessor = TI->getSuccessor(1);
      if (markEdgeExecutable(&BB, DefaultSuccessor))
        return true;

      continue;
    }
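    // e.g. a literal "br i1 undef, label %a, label %b" is rewritten above to
    // branch on false, and the edge to %b (successor 1) is marked executable.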
    if (auto *IBR = dyn_cast<IndirectBrInst>(TI)) {
      // Indirect branch with no successors? It's ok to assume it branches
      // to no target.
      if (IBR->getNumSuccessors() < 1)
        continue;

      if (!getValueState(IBR->getAddress()).isUnknown())
        continue;

      // If the input to SCCP is actually a branch on undef, fix the undef to
      // the first successor of the indirect branch.
      if (isa<UndefValue>(IBR->getAddress())) {
        IBR->setAddress(BlockAddress::get(IBR->getSuccessor(0)));
        markEdgeExecutable(&BB, IBR->getSuccessor(0));
        return true;
      }

      // Otherwise, it is a branch on a symbolic value which is currently
      // considered to be undef.  Make sure some edge is executable, so a
      // branch on "undef" always flows somewhere.
      // FIXME: IndirectBr on "undef" doesn't actually need to go anywhere:
      // we can assume the branch has undefined behavior instead.
      BasicBlock *DefaultSuccessor = IBR->getSuccessor(0);
      if (markEdgeExecutable(&BB, DefaultSuccessor))
        return true;

      continue;
    }
    if (auto *SI = dyn_cast<SwitchInst>(TI)) {
      if (!SI->getNumCases() || !getValueState(SI->getCondition()).isUnknown())
        continue;

      // If the input to SCCP is actually a switch on undef, fix the undef to
      // the first constant.
      if (isa<UndefValue>(SI->getCondition())) {
        SI->setCondition(SI->case_begin()->getCaseValue());
        markEdgeExecutable(&BB, SI->case_begin()->getCaseSuccessor());
        return true;
      }

      // Otherwise, it is a branch on a symbolic value which is currently
      // considered to be undef.  Make sure some edge is executable, so a
      // branch on "undef" always flows somewhere.
      // FIXME: Distinguish between dead code and an LLVM "undef" value.
      BasicBlock *DefaultSuccessor = SI->case_begin()->getCaseSuccessor();
      if (markEdgeExecutable(&BB, DefaultSuccessor))
        return true;

      continue;
    }
  }

  return false;
}
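// A 'true' return from ResolvedUndefsIn means some value or edge was forced
// above; the drivers below (runSCCP and runIPSCCP) re-run Solver.Solve() and
// call it again until it settles at 'false'.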
static bool tryToReplaceWithConstant(SCCPSolver &Solver, Value *V) {
  Constant *Const = nullptr;
  if (V->getType()->isStructTy()) {
    std::vector<LatticeVal> IVs = Solver.getStructLatticeValueFor(V);
    if (llvm::any_of(IVs,
                     [](const LatticeVal &LV) { return LV.isOverdefined(); }))
      return false;
    std::vector<Constant *> ConstVals;
    auto *ST = dyn_cast<StructType>(V->getType());
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      LatticeVal V = IVs[i];
      ConstVals.push_back(V.isConstant()
                              ? V.getConstant()
                              : UndefValue::get(ST->getElementType(i)));
    }
    Const = ConstantStruct::get(ST, ConstVals);
  } else {
    const LatticeVal &IV = Solver.getLatticeValueFor(V);
    if (IV.isOverdefined())
      return false;

    Const = IV.isConstant() ? IV.getConstant() : UndefValue::get(V->getType());
  }
  assert(Const && "Constant is nullptr here!");

  // Replacing `musttail` instructions with a constant breaks the `musttail`
  // invariant unless the call itself can be removed.
  CallInst *CI = dyn_cast<CallInst>(V);
  if (CI && CI->isMustTailCall() && !CI->isSafeToRemove()) {
    CallSite CS(CI);
    Function *F = CS.getCalledFunction();

    // Don't zap the returns of the callee.
    if (F)
      Solver.AddMustTailCallee(F);

    LLVM_DEBUG(dbgs() << "  Can\'t treat the result of musttail call : " << *CI
                      << " as a constant\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "  Constant: " << *Const << " = " << *V << '\n');

  // Replace all of the uses of the variable with uses of the constant.
  V->replaceAllUsesWith(Const);
  return true;
}
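// For struct-typed values, tryToReplaceWithConstant builds the replacement
// field by field: e.g. if the tracked lattice for a {i32, i32} value is
// {constant 1, unknown}, V is replaced with the aggregate "{ i32 1, i32 undef }".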
// runSCCP() - Run the Sparse Conditional Constant Propagation algorithm,
// and return true if the function was modified.
static bool runSCCP(Function &F, const DataLayout &DL,
                    const TargetLibraryInfo *TLI) {
  LLVM_DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
  SCCPSolver Solver(DL, TLI);

  // Mark the first block of the function as being executable.
  Solver.MarkBlockExecutable(&F.front());

  // Mark all arguments to the function as being overdefined.
  for (Argument &AI : F.args())
    Solver.markOverdefined(&AI);

  // Solve for constants.
  bool ResolvedUndefs = true;
  while (ResolvedUndefs) {
    Solver.Solve();
    LLVM_DEBUG(dbgs() << "RESOLVING UNDEFs\n");
    ResolvedUndefs = Solver.ResolvedUndefsIn(F);
  }

  bool MadeChanges = false;

  // If we decided that there are basic blocks that are dead in this function,
  // delete their contents now.  Note that we cannot actually delete the blocks,
  // as we cannot modify the CFG of the function.
  for (BasicBlock &BB : F) {
    if (!Solver.isBlockExecutable(&BB)) {
      LLVM_DEBUG(dbgs() << "  BasicBlock Dead:" << BB);

      ++NumDeadBlocks;
      NumInstRemoved += removeAllNonTerminatorAndEHPadInstructions(&BB);

      MadeChanges = true;
      continue;
    }

    // Iterate over all of the instructions in a function, replacing them with
    // constants if we have found them to be of constant values.
    for (BasicBlock::iterator BI = BB.begin(), E = BB.end(); BI != E;) {
      Instruction *Inst = &*BI++;
      if (Inst->getType()->isVoidTy() || Inst->isTerminator())
        continue;

      if (tryToReplaceWithConstant(Solver, Inst)) {
        if (isInstructionTriviallyDead(Inst))
          Inst->eraseFromParent();
        // Hey, we just changed something!
        MadeChanges = true;
        ++NumInstRemoved;
      }
    }
  }

  return MadeChanges;
}
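// Illustrative effect of runSCCP on IR (a sketch, not a real test case):
//   %c = icmp eq i32 0, 0
//   br i1 %c, label %live, label %dead
// The solver proves %c true and replaces its uses with the constant; the
// instructions inside the unreachable %dead block are removed, but the block
// itself is kept because this driver never modifies the CFG.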
PreservedAnalyses SCCPPass::run(Function &F, FunctionAnalysisManager &AM) {
  const DataLayout &DL = F.getParent()->getDataLayout();
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  if (!runSCCP(F, DL, &TLI))
    return PreservedAnalyses::all();

  auto PA = PreservedAnalyses();
  PA.preserve<GlobalsAA>();
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

namespace {
//===--------------------------------------------------------------------===//
//
/// SCCP Class - This class uses the SCCPSolver to implement a per-function
/// Sparse Conditional Constant Propagator.
///
class SCCPLegacyPass : public FunctionPass {
public:
  // Pass identification, replacement for typeid
  static char ID;

  SCCPLegacyPass() : FunctionPass(ID) {
    initializeSCCPLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }

  // runOnFunction - Run the Sparse Conditional Constant Propagation
  // algorithm, and return true if the function was modified.
  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    const DataLayout &DL = F.getParent()->getDataLayout();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    return runSCCP(F, DL, TLI);
  }
};

} // end anonymous namespace
char SCCPLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(SCCPLegacyPass, "sccp",
                      "Sparse Conditional Constant Propagation", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(SCCPLegacyPass, "sccp",
                    "Sparse Conditional Constant Propagation", false, false)

// createSCCPPass - This is the public interface to this file.
FunctionPass *llvm::createSCCPPass() { return new SCCPLegacyPass(); }
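// Usage sketch (assuming the standard registrations above and in
// PassRegistry.def): the function-level pass can be exercised directly, e.g.
//   opt -S -sccp input.ll          ; legacy pass manager
//   opt -S -passes=sccp input.ll   ; new pass manager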
static void findReturnsToZap(Function &F,
                             SmallVector<ReturnInst *, 8> &ReturnsToZap,
                             SCCPSolver &Solver) {
  // We can only do this if we know that nothing else can call the function.
  if (!Solver.isArgumentTrackedFunction(&F))
    return;

  // There is a non-removable musttail call site of this function. Zapping
  // returns is not allowed.
  if (Solver.isMustTailCallee(&F)) {
    LLVM_DEBUG(dbgs() << "Can't zap returns of the function : " << F.getName()
                      << " due to present musttail call of it\n");
    return;
  }

  assert(
      all_of(F.users(),
             [&Solver](User *U) {
               if (isa<Instruction>(U) &&
                   !Solver.isBlockExecutable(cast<Instruction>(U)->getParent()))
                 return true;
               // Non-callsite uses are not impacted by zapping. Also, constant
               // uses (like blockaddresses) could stick around without being
               // used in the underlying IR, meaning we do not have lattice
               // values for them.
               if (!CallSite(U))
                 return true;
               if (U->getType()->isStructTy()) {
                 return all_of(
                     Solver.getStructLatticeValueFor(U),
                     [](const LatticeVal &LV) { return !LV.isOverdefined(); });
               }
               return !Solver.getLatticeValueFor(U).isOverdefined();
             }) &&
      "We can only zap functions where all live users have a concrete value");

  for (BasicBlock &BB : F) {
    if (CallInst *CI = BB.getTerminatingMustTailCall()) {
      LLVM_DEBUG(dbgs() << "Can't zap return of the block due to present "
                        << "musttail call : " << *CI << "\n");
      (void)CI;
      continue;
    }

    if (auto *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
      if (!isa<UndefValue>(RI->getOperand(0)))
        ReturnsToZap.push_back(RI);
  }
}
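// Returns collected here are not rewritten immediately: runIPSCCP below turns
// each queued "ret <ty> %v" into "ret <ty> undef" once every candidate
// function has been identified.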
// Update the condition for terminators that are branching on indeterminate
// values, forcing them to use a specific edge.
static void forceIndeterminateEdge(Instruction *I, SCCPSolver &Solver) {
  BasicBlock *Dest = nullptr;
  Constant *C = nullptr;
  if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
    if (!isa<ConstantInt>(SI->getCondition())) {
      // Indeterminate switch; use first case value.
      Dest = SI->case_begin()->getCaseSuccessor();
      C = SI->case_begin()->getCaseValue();
    }
  } else if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    if (!isa<ConstantInt>(BI->getCondition())) {
      // Indeterminate branch; use false.
      Dest = BI->getSuccessor(1);
      C = ConstantInt::getFalse(BI->getContext());
    }
  } else if (IndirectBrInst *IBR = dyn_cast<IndirectBrInst>(I)) {
    if (!isa<BlockAddress>(IBR->getAddress()->stripPointerCasts())) {
      // Indeterminate indirectbr; use successor 0.
      Dest = IBR->getSuccessor(0);
      C = BlockAddress::get(IBR->getSuccessor(0));
    }
  } else {
    llvm_unreachable("Unexpected terminator instruction");
  }
  if (C) {
    assert(Solver.isEdgeFeasible(I->getParent(), Dest) &&
           "Didn't find feasible edge?");
    (void)Dest;

    I->setOperand(0, C);
  }
}
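// e.g. a predecessor of a dead block ending in "br i1 %sym, label %a, label %b",
// where %sym never resolved beyond undef, has its condition set to false here
// so that ConstantFoldTerminator (called below) folds the block onto the %b
// edge that ResolvedUndefsIn already marked executable.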
bool llvm::runIPSCCP(
    Module &M, const DataLayout &DL, const TargetLibraryInfo *TLI,
    function_ref<AnalysisResultsForFn(Function &)> getAnalysis) {
  SCCPSolver Solver(DL, TLI);

  // Loop over all functions, marking arguments to those with their addresses
  // taken or that are external as overdefined.
  for (Function &F : M) {
    if (F.isDeclaration())
      continue;

    Solver.addAnalysis(F, getAnalysis(F));

    // Determine if we can track the function's return values. If so, add the
    // function to the solver's set of return-tracked functions.
    if (canTrackReturnsInterprocedurally(&F))
      Solver.AddTrackedFunction(&F);

    // Determine if we can track the function's arguments. If so, add the
    // function to the solver's set of argument-tracked functions.
    if (canTrackArgumentsInterprocedurally(&F)) {
      Solver.AddArgumentTrackedFunction(&F);
      continue;
    }

    // Assume the function is called.
    Solver.MarkBlockExecutable(&F.front());

    // Assume nothing about the incoming arguments.
    for (Argument &AI : F.args())
      Solver.markOverdefined(&AI);
  }
  // Determine if we can track any of the module's global variables. If so, add
  // the global variables we can track to the solver's set of tracked global
  // variables.
  for (GlobalVariable &G : M.globals()) {
    G.removeDeadConstantUsers();
    if (canTrackGlobalVariableInterprocedurally(&G))
      Solver.TrackValueOfGlobalVariable(&G);
  }
  // Solve for constants.
  bool ResolvedUndefs = true;
  Solver.Solve();
  while (ResolvedUndefs) {
    LLVM_DEBUG(dbgs() << "RESOLVING UNDEFS\n");
    ResolvedUndefs = false;
    for (Function &F : M)
      if (Solver.ResolvedUndefsIn(F)) {
        // We run Solve() after we resolved an undef in a function, because
        // we might deduce a fact that eliminates an undef in another function.
        Solver.Solve();
        ResolvedUndefs = true;
      }
  }

  bool MadeChanges = false;
  // Iterate over all of the instructions in the module, replacing them with
  // constants if we have found them to be of constant values.
  for (Function &F : M) {
    if (F.isDeclaration())
      continue;

    SmallVector<BasicBlock *, 512> BlocksToErase;

    if (Solver.isBlockExecutable(&F.front()))
      for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end(); AI != E;
           ++AI)
        if (!AI->use_empty() && tryToReplaceWithConstant(Solver, &*AI)) {
          ++IPNumArgsElimed;
          continue;
        }

    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
      if (!Solver.isBlockExecutable(&*BB)) {
        LLVM_DEBUG(dbgs() << "  BasicBlock Dead:" << *BB);
        ++NumDeadBlocks;

        MadeChanges = true;

        if (&*BB != &F.front())
          BlocksToErase.push_back(&*BB);
        continue;
      }

      for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E;) {
        Instruction *Inst = &*BI++;
        if (Inst->getType()->isVoidTy())
          continue;
        if (tryToReplaceWithConstant(Solver, Inst)) {
          if (Inst->isSafeToRemove())
            Inst->eraseFromParent();
          // Hey, we just changed something!
          MadeChanges = true;
          ++IPNumInstRemoved;
        }
      }
    }
    DomTreeUpdater DTU = Solver.getDTU(F);
    // Change dead blocks to unreachable. We do it after replacing constants
    // in all executable blocks, because changeToUnreachable may remove PHI
    // nodes in executable blocks we found values for. The function's entry
    // block is not part of BlocksToErase, so we have to handle it separately.
    for (BasicBlock *BB : BlocksToErase) {
      NumInstRemoved +=
          changeToUnreachable(BB->getFirstNonPHI(), /*UseLLVMTrap=*/false,
                              /*PreserveLCSSA=*/false, &DTU);
    }
    if (!Solver.isBlockExecutable(&F.front()))
      NumInstRemoved += changeToUnreachable(F.front().getFirstNonPHI(),
                                            /*UseLLVMTrap=*/false,
                                            /*PreserveLCSSA=*/false, &DTU);
    // Now that all instructions in the function are constant folded,
    // use ConstantFoldTerminator to get rid of in-edges, record DT updates and
    // delete dead BBs.
    for (BasicBlock *DeadBB : BlocksToErase) {
      // If there are any PHI nodes in this successor, drop entries for BB now.
      for (Value::user_iterator UI = DeadBB->user_begin(),
                                UE = DeadBB->user_end();
           UI != UE;) {
        // Grab the user and then increment the iterator early, as the user
        // will be deleted. Step past all adjacent uses from the same user.
        auto *I = dyn_cast<Instruction>(*UI);
        do { ++UI; } while (UI != UE && *UI == I);

        // Ignore blockaddress users; BasicBlock's dtor will handle them.
        if (!I)
          continue;

        // If we have forced an edge for an indeterminate value, then force the
        // terminator to fold to that edge.
        forceIndeterminateEdge(I, Solver);
        BasicBlock *InstBB = I->getParent();
        bool Folded = ConstantFoldTerminator(InstBB,
                                             /*DeleteDeadConditions=*/false,
                                             /*TLI=*/nullptr, &DTU);
        assert(Folded &&
               "Expect TermInst on constantint or blockaddress to be folded");
        (void)Folded;
        // If we folded the terminator to an unconditional branch to another
        // dead block, replace it with Unreachable, to avoid trying to fold that
        // branch again.
        BranchInst *BI = cast<BranchInst>(InstBB->getTerminator());
        if (BI && BI->isUnconditional() &&
            !Solver.isBlockExecutable(BI->getSuccessor(0))) {
          InstBB->getTerminator()->eraseFromParent();
          new UnreachableInst(InstBB->getContext(), InstBB);
        }
      }
      // Mark dead BB for deletion.
      DTU.deleteBB(DeadBB);
    }
    for (BasicBlock &BB : F) {
      for (BasicBlock::iterator BI = BB.begin(), E = BB.end(); BI != E;) {
        Instruction *Inst = &*BI++;
        if (Solver.getPredicateInfoFor(Inst)) {
          if (auto *II = dyn_cast<IntrinsicInst>(Inst)) {
            if (II->getIntrinsicID() == Intrinsic::ssa_copy) {
              Value *Op = II->getOperand(0);
              Inst->replaceAllUsesWith(Op);
              Inst->eraseFromParent();
            }
          }
        }
      }
    }
  }
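  // The ssa_copy cleanup just above undoes PredicateInfo's bookkeeping: those
  // intrinsics exist only so the solver can attach edge information to values,
  // and are replaced with their plain operand once solving is finished.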
  // If we inferred constant or undef return values for a function, we replaced
  // all call uses with the inferred value.  This means we don't need to bother
  // actually returning anything from the function.  Replace all return
  // instructions with return undef.
  //
  // Do this in two stages: first identify the functions we should process, then
  // actually zap their returns.  This is important because we can only do this
  // if the address of the function isn't taken.  In cases where a return is the
  // last use of a function, the order of processing functions would affect
  // whether other functions are optimizable.
  SmallVector<ReturnInst *, 8> ReturnsToZap;

  const MapVector<Function *, LatticeVal> &RV = Solver.getTrackedRetVals();
  for (const auto &I : RV) {
    Function *F = I.first;
    if (I.second.isOverdefined() || F->getReturnType()->isVoidTy())
      continue;
    findReturnsToZap(*F, ReturnsToZap, Solver);
  }
  for (const auto &F : Solver.getMRVFunctionsTracked()) {
    assert(F->getReturnType()->isStructTy() &&
           "The return type should be a struct");
    StructType *STy = cast<StructType>(F->getReturnType());
    if (Solver.isStructLatticeConstant(F, STy))
      findReturnsToZap(*F, ReturnsToZap, Solver);
  }

  // Zap all returns which we've identified as safe to zap.
  for (unsigned i = 0, e = ReturnsToZap.size(); i != e; ++i) {
    Function *F = ReturnsToZap[i]->getParent()->getParent();
    ReturnsToZap[i]->setOperand(0, UndefValue::get(F->getReturnType()));
  }
  // If we inferred constant or undef values for global variables, we can
  // delete the global and any stores that remain to it.
  const DenseMap<GlobalVariable *, LatticeVal> &TG = Solver.getTrackedGlobals();
  for (DenseMap<GlobalVariable *, LatticeVal>::const_iterator I = TG.begin(),
                                                              E = TG.end();
       I != E; ++I) {
    GlobalVariable *GV = I->first;
    assert(!I->second.isOverdefined() &&
           "Overdefined values should have been taken out of the map!");
    LLVM_DEBUG(dbgs() << "Found that GV '" << GV->getName()
                      << "' is constant!\n");
    while (!GV->use_empty()) {
      StoreInst *SI = cast<StoreInst>(GV->user_back());
      SI->eraseFromParent();
    }
    M.getGlobalList().erase(GV);
    ++IPNumGlobalConst;
  }

  return MadeChanges;
}