1 //===- SCCPSolver.cpp - SCCP Utility --------------------------- *- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // \file
10 // This file implements the Sparse Conditional Constant Propagation (SCCP)
11 // utility.
13 //===----------------------------------------------------------------------===//
15 #include "llvm/Transforms/Utils/SCCPSolver.h"
16 #include "llvm/Analysis/ConstantFolding.h"
17 #include "llvm/Analysis/InstructionSimplify.h"
18 #include "llvm/Analysis/ValueLattice.h"
19 #include "llvm/Analysis/ValueLatticeUtils.h"
20 #include "llvm/Analysis/ValueTracking.h"
21 #include "llvm/IR/InstVisitor.h"
22 #include "llvm/Support/Casting.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/Support/ErrorHandling.h"
25 #include "llvm/Support/raw_ostream.h"
26 #include "llvm/Transforms/Utils/Local.h"
27 #include <cassert>
28 #include <utility>
29 #include <vector>
31 using namespace llvm;
33 #define DEBUG_TYPE "sccp"
35 // The maximum number of range extensions allowed for operations requiring
36 // widening.
37 static const unsigned MaxNumRangeExtensions = 10;
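// (Note: once a value's range has been extended more than this many times
// during merging, the merge widens it to the full range instead of growing it
// again; see ValueLatticeElement::MergeOptions and setMaxWidenSteps.)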
39 /// Returns MergeOptions with MaxWidenSteps set to MaxNumRangeExtensions.
40 static ValueLatticeElement::MergeOptions getMaxWidenStepsOpts() {
41 return ValueLatticeElement::MergeOptions().setMaxWidenSteps(
42 MaxNumRangeExtensions);
45 static ConstantRange getConstantRange(const ValueLatticeElement &LV, Type *Ty,
46 bool UndefAllowed = true) {
47 assert(Ty->isIntOrIntVectorTy() && "Should be int or int vector");
48 if (LV.isConstantRange(UndefAllowed))
49 return LV.getConstantRange();
50 return ConstantRange::getFull(Ty->getScalarSizeInBits());
53 namespace llvm {
55 bool SCCPSolver::isConstant(const ValueLatticeElement &LV) {
56 return LV.isConstant() ||
57 (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
60 bool SCCPSolver::isOverdefined(const ValueLatticeElement &LV) {
61 return !LV.isUnknownOrUndef() && !SCCPSolver::isConstant(LV);
64 static bool canRemoveInstruction(Instruction *I) {
65 if (wouldInstructionBeTriviallyDead(I))
66 return true;
68 // Some instructions can be handled but are rejected above. Catch
69 // those cases by falling through to here.
70 // TODO: Mark globals as being constant earlier, so
71 // TODO: wouldInstructionBeTriviallyDead() knows that atomic loads
72 // TODO: are safe to remove.
73 return isa<LoadInst>(I);
76 bool SCCPSolver::tryToReplaceWithConstant(Value *V) {
77 Constant *Const = getConstantOrNull(V);
78 if (!Const)
79 return false;
80 // Replacing `musttail` instructions with a constant breaks the `musttail`
81 // invariant unless the call itself can be removed.
82 // Calls with "clang.arc.attachedcall" implicitly use the return value and
83 // those uses cannot be updated with a constant.
84 CallBase *CB = dyn_cast<CallBase>(V);
85 if (CB && ((CB->isMustTailCall() &&
86 !canRemoveInstruction(CB)) ||
87 CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall))) {
88 Function *F = CB->getCalledFunction();
90 // Don't zap returns of the callee
91 if (F)
92 addToMustPreserveReturnsInFunctions(F);
94 LLVM_DEBUG(dbgs() << " Can\'t treat the result of call " << *CB
95 << " as a constant\n");
96 return false;
99 LLVM_DEBUG(dbgs() << " Constant: " << *Const << " = " << *V << '\n');
101 // Replaces all of the uses of a variable with uses of the constant.
102 V->replaceAllUsesWith(Const);
103 return true;
106 /// Try to use \p Inst's value range from \p Solver to infer the nuw/nsw/nneg flags.
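// For illustration: for
//   %r = add i8 %a, %b
// with %a and %b both known to lie in [0, 100], the unsigned sum is at most
// 200 and cannot wrap, so the nuw flag can be added. The nsw flag is checked
// against the signed no-wrap region in the same way, and a zext gains nneg
// when its operand's range is all non-negative.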
107 static bool refineInstruction(SCCPSolver &Solver,
108 const SmallPtrSetImpl<Value *> &InsertedValues,
109 Instruction &Inst) {
110 bool Changed = false;
111 auto GetRange = [&Solver, &InsertedValues](Value *Op) {
112 if (auto *Const = dyn_cast<ConstantInt>(Op))
113 return ConstantRange(Const->getValue());
114 if (isa<Constant>(Op) || InsertedValues.contains(Op)) {
115 unsigned Bitwidth = Op->getType()->getScalarSizeInBits();
116 return ConstantRange::getFull(Bitwidth);
118 return getConstantRange(Solver.getLatticeValueFor(Op), Op->getType(),
119 /*UndefAllowed=*/false);
122 if (isa<OverflowingBinaryOperator>(Inst)) {
123 auto RangeA = GetRange(Inst.getOperand(0));
124 auto RangeB = GetRange(Inst.getOperand(1));
125 if (!Inst.hasNoUnsignedWrap()) {
126 auto NUWRange = ConstantRange::makeGuaranteedNoWrapRegion(
127 Instruction::BinaryOps(Inst.getOpcode()), RangeB,
128 OverflowingBinaryOperator::NoUnsignedWrap);
129 if (NUWRange.contains(RangeA)) {
130 Inst.setHasNoUnsignedWrap();
131 Changed = true;
134 if (!Inst.hasNoSignedWrap()) {
135 auto NSWRange = ConstantRange::makeGuaranteedNoWrapRegion(
136 Instruction::BinaryOps(Inst.getOpcode()), RangeB,
137 OverflowingBinaryOperator::NoSignedWrap);
138 if (NSWRange.contains(RangeA)) {
139 Inst.setHasNoSignedWrap();
140 Changed = true;
143 } else if (isa<ZExtInst>(Inst) && !Inst.hasNonNeg()) {
144 auto Range = GetRange(Inst.getOperand(0));
145 if (Range.isAllNonNegative()) {
146 Inst.setNonNeg();
147 Changed = true;
151 return Changed;
154 /// Try to replace signed instructions with their unsigned equivalent.
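// Illustrative rewrites (assuming the relevant operands are known to be
// non-negative):
//   sext i8 %x to i32    ->  zext nneg i8 %x to i32
//   ashr i32 %x, 3       ->  lshr i32 %x, 3
//   sdiv/srem i32 %x, %y ->  udiv/urem i32 %x, %y   (both operands non-negative)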
155 static bool replaceSignedInst(SCCPSolver &Solver,
156 SmallPtrSetImpl<Value *> &InsertedValues,
157 Instruction &Inst) {
158 // Determine if a signed value is known to be >= 0.
159 auto isNonNegative = [&Solver](Value *V) {
160 // If this value was constant-folded, it may not have a solver entry.
161 // Handle integer constants directly; anything else is not known non-negative.
162 if (auto *C = dyn_cast<Constant>(V)) {
163 auto *CInt = dyn_cast<ConstantInt>(C);
164 return CInt && !CInt->isNegative();
166 const ValueLatticeElement &IV = Solver.getLatticeValueFor(V);
167 return IV.isConstantRange(/*UndefAllowed=*/false) &&
168 IV.getConstantRange().isAllNonNegative();
171 Instruction *NewInst = nullptr;
172 switch (Inst.getOpcode()) {
173 // Note: We do not fold sitofp -> uitofp here because that could be more
174 // expensive in codegen and may not be reversible in the backend.
175 case Instruction::SExt: {
176 // If the source value is not negative, this is a zext.
177 Value *Op0 = Inst.getOperand(0);
178 if (InsertedValues.count(Op0) || !isNonNegative(Op0))
179 return false;
180 NewInst = new ZExtInst(Op0, Inst.getType(), "", &Inst);
181 NewInst->setNonNeg();
182 break;
184 case Instruction::AShr: {
185 // If the shifted value is not negative, this is a logical shift right.
186 Value *Op0 = Inst.getOperand(0);
187 if (InsertedValues.count(Op0) || !isNonNegative(Op0))
188 return false;
189 NewInst = BinaryOperator::CreateLShr(Op0, Inst.getOperand(1), "", &Inst);
190 NewInst->setIsExact(Inst.isExact());
191 break;
193 case Instruction::SDiv:
194 case Instruction::SRem: {
195 // If both operands are not negative, this is the same as udiv/urem.
196 Value *Op0 = Inst.getOperand(0), *Op1 = Inst.getOperand(1);
197 if (InsertedValues.count(Op0) || InsertedValues.count(Op1) ||
198 !isNonNegative(Op0) || !isNonNegative(Op1))
199 return false;
200 auto NewOpcode = Inst.getOpcode() == Instruction::SDiv ? Instruction::UDiv
201 : Instruction::URem;
202 NewInst = BinaryOperator::Create(NewOpcode, Op0, Op1, "", &Inst);
203 if (Inst.getOpcode() == Instruction::SDiv)
204 NewInst->setIsExact(Inst.isExact());
205 break;
207 default:
208 return false;
211 // Wire up the new instruction and update state.
212 assert(NewInst && "Expected replacement instruction");
213 NewInst->takeName(&Inst);
214 InsertedValues.insert(NewInst);
215 Inst.replaceAllUsesWith(NewInst);
216 Solver.removeLatticeValueFor(&Inst);
217 Inst.eraseFromParent();
218 return true;
221 bool SCCPSolver::simplifyInstsInBlock(BasicBlock &BB,
222 SmallPtrSetImpl<Value *> &InsertedValues,
223 Statistic &InstRemovedStat,
224 Statistic &InstReplacedStat) {
225 bool MadeChanges = false;
226 for (Instruction &Inst : make_early_inc_range(BB)) {
227 if (Inst.getType()->isVoidTy())
228 continue;
229 if (tryToReplaceWithConstant(&Inst)) {
230 if (canRemoveInstruction(&Inst))
231 Inst.eraseFromParent();
233 MadeChanges = true;
234 ++InstRemovedStat;
235 } else if (replaceSignedInst(*this, InsertedValues, Inst)) {
236 MadeChanges = true;
237 ++InstReplacedStat;
238 } else if (refineInstruction(*this, InsertedValues, Inst)) {
239 MadeChanges = true;
242 return MadeChanges;
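/// Rewrite the terminator of \p BB so that edges SCCP has proven infeasible are
/// removed: with no feasible successor the terminator is replaced by
/// `unreachable`, with exactly one it becomes an unconditional branch, and for
/// a switch the infeasible cases are dropped and an infeasible default is
/// redirected to a shared unreachable block. \p DTU is kept up to date.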
245 bool SCCPSolver::removeNonFeasibleEdges(BasicBlock *BB, DomTreeUpdater &DTU,
246 BasicBlock *&NewUnreachableBB) const {
247 SmallPtrSet<BasicBlock *, 8> FeasibleSuccessors;
248 bool HasNonFeasibleEdges = false;
249 for (BasicBlock *Succ : successors(BB)) {
250 if (isEdgeFeasible(BB, Succ))
251 FeasibleSuccessors.insert(Succ);
252 else
253 HasNonFeasibleEdges = true;
256 // All edges feasible, nothing to do.
257 if (!HasNonFeasibleEdges)
258 return false;
260 // SCCP can only determine non-feasible edges for br, switch and indirectbr.
261 Instruction *TI = BB->getTerminator();
262 assert((isa<BranchInst>(TI) || isa<SwitchInst>(TI) ||
263 isa<IndirectBrInst>(TI)) &&
264 "Terminator must be a br, switch or indirectbr");
266 if (FeasibleSuccessors.size() == 0) {
267 // Branch on undef/poison, replace with unreachable.
268 SmallPtrSet<BasicBlock *, 8> SeenSuccs;
269 SmallVector<DominatorTree::UpdateType, 8> Updates;
270 for (BasicBlock *Succ : successors(BB)) {
271 Succ->removePredecessor(BB);
272 if (SeenSuccs.insert(Succ).second)
273 Updates.push_back({DominatorTree::Delete, BB, Succ});
275 TI->eraseFromParent();
276 new UnreachableInst(BB->getContext(), BB);
277 DTU.applyUpdatesPermissive(Updates);
278 } else if (FeasibleSuccessors.size() == 1) {
279 // Replace with an unconditional branch to the only feasible successor.
280 BasicBlock *OnlyFeasibleSuccessor = *FeasibleSuccessors.begin();
281 SmallVector<DominatorTree::UpdateType, 8> Updates;
282 bool HaveSeenOnlyFeasibleSuccessor = false;
283 for (BasicBlock *Succ : successors(BB)) {
284 if (Succ == OnlyFeasibleSuccessor && !HaveSeenOnlyFeasibleSuccessor) {
285 // Don't remove the edge to the only feasible successor the first time
286 // we see it. We still need to remove any multi-edges to it, though.
287 HaveSeenOnlyFeasibleSuccessor = true;
288 continue;
291 Succ->removePredecessor(BB);
292 Updates.push_back({DominatorTree::Delete, BB, Succ});
295 BranchInst::Create(OnlyFeasibleSuccessor, BB);
296 TI->eraseFromParent();
297 DTU.applyUpdatesPermissive(Updates);
298 } else if (FeasibleSuccessors.size() > 1) {
299 SwitchInstProfUpdateWrapper SI(*cast<SwitchInst>(TI));
300 SmallVector<DominatorTree::UpdateType, 8> Updates;
302 // If the default destination is infeasible, it will never be taken. Replace
303 // it with a new block containing a single unreachable instruction.
304 BasicBlock *DefaultDest = SI->getDefaultDest();
305 if (!FeasibleSuccessors.contains(DefaultDest)) {
306 if (!NewUnreachableBB) {
307 NewUnreachableBB =
308 BasicBlock::Create(DefaultDest->getContext(), "default.unreachable",
309 DefaultDest->getParent(), DefaultDest);
310 new UnreachableInst(DefaultDest->getContext(), NewUnreachableBB);
313 DefaultDest->removePredecessor(BB);
314 SI->setDefaultDest(NewUnreachableBB);
315 Updates.push_back({DominatorTree::Delete, BB, DefaultDest});
316 Updates.push_back({DominatorTree::Insert, BB, NewUnreachableBB});
319 for (auto CI = SI->case_begin(); CI != SI->case_end();) {
320 if (FeasibleSuccessors.contains(CI->getCaseSuccessor())) {
321 ++CI;
322 continue;
325 BasicBlock *Succ = CI->getCaseSuccessor();
326 Succ->removePredecessor(BB);
327 Updates.push_back({DominatorTree::Delete, BB, Succ});
328 SI.removeCase(CI);
329 // Don't increment CI, as we removed a case.
332 DTU.applyUpdatesPermissive(Updates);
333 } else {
334 llvm_unreachable("Must have at least one feasible successor");
336 return true;
339 /// Helper class for SCCPSolver. This implements the instruction visitor and
340 /// holds all the state.
341 class SCCPInstVisitor : public InstVisitor<SCCPInstVisitor> {
342 const DataLayout &DL;
343 std::function<const TargetLibraryInfo &(Function &)> GetTLI;
344 SmallPtrSet<BasicBlock *, 8> BBExecutable; // The BBs that are executable.
345 DenseMap<Value *, ValueLatticeElement>
346 ValueState; // The state each value is in.
348 /// StructValueState - This maintains ValueState for values that have
349 /// StructType, for example for formal arguments, calls, insertelement, etc.
350 DenseMap<std::pair<Value *, unsigned>, ValueLatticeElement> StructValueState;
352 /// TrackedGlobals - If we are tracking any values for the contents of a global
353 /// variable, we keep a mapping from the global variable to the currently
354 /// known value of its contents. If the value becomes
355 /// overdefined, its entry is simply removed from this map.
356 DenseMap<GlobalVariable *, ValueLatticeElement> TrackedGlobals;
358 /// TrackedRetVals - If we are tracking arguments into and the return
359 /// value out of a function, it will have an entry in this map, indicating
360 /// what the known return value for the function is.
361 MapVector<Function *, ValueLatticeElement> TrackedRetVals;
363 /// TrackedMultipleRetVals - Same as TrackedRetVals, but used for functions
364 /// that return multiple values.
365 MapVector<std::pair<Function *, unsigned>, ValueLatticeElement>
366 TrackedMultipleRetVals;
368 /// The set of values whose lattice has been invalidated.
369 /// Populated by resetLatticeValueFor(), cleared after resolving undefs.
370 DenseSet<Value *> Invalidated;
372 /// MRVFunctionsTracked - Each function in TrackedMultipleRetVals is
373 /// represented here for efficient lookup.
374 SmallPtrSet<Function *, 16> MRVFunctionsTracked;
376 /// The set of functions whose return values must not be modified.
377 SmallPtrSet<Function *, 16> MustPreserveReturnsInFunctions;
379 /// TrackingIncomingArguments - This is the set of functions whose arguments
380 /// we make optimistic assumptions about and try to prove to be
381 /// constants.
382 SmallPtrSet<Function *, 16> TrackingIncomingArguments;
384 /// The reason for two worklists is that overdefined is the lowest state
385 /// on the lattice, and moving things to overdefined as fast as possible
386 /// makes SCCP converge much faster.
388 /// A separate worklist accomplishes this because everything that is
389 /// possibly overdefined becomes overdefined at the earliest possible
390 /// point.
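/// (The solver drains the overdefined worklist before the ordinary instruction
/// worklist for exactly this reason; see solve().)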
391 SmallVector<Value *, 64> OverdefinedInstWorkList;
392 SmallVector<Value *, 64> InstWorkList;
394 // The BasicBlock work list
395 SmallVector<BasicBlock *, 64> BBWorkList;
397 /// KnownFeasibleEdges - Entries in this set are edges which have already had
398 /// PHI nodes retriggered.
399 using Edge = std::pair<BasicBlock *, BasicBlock *>;
400 DenseSet<Edge> KnownFeasibleEdges;
402 DenseMap<Function *, std::unique_ptr<PredicateInfo>> FnPredicateInfo;
404 DenseMap<Value *, SmallPtrSet<User *, 2>> AdditionalUsers;
406 LLVMContext &Ctx;
408 private:
409 ConstantInt *getConstantInt(const ValueLatticeElement &IV, Type *Ty) const {
410 return dyn_cast_or_null<ConstantInt>(getConstant(IV, Ty));
413 // pushToWorkList - Helper for markConstant/markOverdefined
414 void pushToWorkList(ValueLatticeElement &IV, Value *V);
416 // Helper to push \p V to the worklist, after updating it to \p IV. Also
417 // prints a debug message with the updated value.
418 void pushToWorkListMsg(ValueLatticeElement &IV, Value *V);
420 // markConstant - Make a value be marked as "constant". If the value
421 // is not already a constant, add it to the instruction work list so that
422 // the users of the instruction are updated later.
423 bool markConstant(ValueLatticeElement &IV, Value *V, Constant *C,
424 bool MayIncludeUndef = false);
426 bool markConstant(Value *V, Constant *C) {
427 assert(!V->getType()->isStructTy() && "structs should use mergeInValue");
428 return markConstant(ValueState[V], V, C);
431 // markOverdefined - Make a value be marked as "overdefined". If the
432 // value is not already overdefined, add it to the overdefined instruction
433 // work list so that the users of the instruction are updated later.
434 bool markOverdefined(ValueLatticeElement &IV, Value *V);
436 /// Merge \p MergeWithV into \p IV and push \p V to the worklist, if \p IV
437 /// changes.
438 bool mergeInValue(ValueLatticeElement &IV, Value *V,
439 ValueLatticeElement MergeWithV,
440 ValueLatticeElement::MergeOptions Opts = {
441 /*MayIncludeUndef=*/false, /*CheckWiden=*/false});
443 bool mergeInValue(Value *V, ValueLatticeElement MergeWithV,
444 ValueLatticeElement::MergeOptions Opts = {
445 /*MayIncludeUndef=*/false, /*CheckWiden=*/false}) {
446 assert(!V->getType()->isStructTy() &&
447 "non-structs should use markConstant");
448 return mergeInValue(ValueState[V], V, MergeWithV, Opts);
451 /// getValueState - Return the ValueLatticeElement object that corresponds to
452 /// the value. This function handles the case when the value hasn't been seen
453 /// yet by properly seeding constants etc.
454 ValueLatticeElement &getValueState(Value *V) {
455 assert(!V->getType()->isStructTy() && "Should use getStructValueState");
457 auto I = ValueState.insert(std::make_pair(V, ValueLatticeElement()));
458 ValueLatticeElement &LV = I.first->second;
460 if (!I.second)
461 return LV; // Common case, already in the map.
463 if (auto *C = dyn_cast<Constant>(V))
464 LV.markConstant(C); // Constants are constant
466 // All others are unknown by default.
467 return LV;
470 /// getStructValueState - Return the ValueLatticeElement object that
471 /// corresponds to the value/field pair. This function handles the case when
472 /// the value hasn't been seen yet by properly seeding constants etc.
473 ValueLatticeElement &getStructValueState(Value *V, unsigned i) {
474 assert(V->getType()->isStructTy() && "Should use getValueState");
475 assert(i < cast<StructType>(V->getType())->getNumElements() &&
476 "Invalid element #");
478 auto I = StructValueState.insert(
479 std::make_pair(std::make_pair(V, i), ValueLatticeElement()));
480 ValueLatticeElement &LV = I.first->second;
482 if (!I.second)
483 return LV; // Common case, already in the map.
485 if (auto *C = dyn_cast<Constant>(V)) {
486 Constant *Elt = C->getAggregateElement(i);
488 if (!Elt)
489 LV.markOverdefined(); // Unknown sort of constant.
490 else
491 LV.markConstant(Elt); // Constants are constant.
494 // All others are unknown by default.
495 return LV;
498 /// Traverse the def-use chains starting at \p Call, marking \p Call and its
499 /// transitive users as "unknown" on the way.
500 void invalidate(CallBase *Call) {
501 SmallVector<Instruction *, 64> ToInvalidate;
502 ToInvalidate.push_back(Call);
504 while (!ToInvalidate.empty()) {
505 Instruction *Inst = ToInvalidate.pop_back_val();
507 if (!Invalidated.insert(Inst).second)
508 continue;
510 if (!BBExecutable.count(Inst->getParent()))
511 continue;
513 Value *V = nullptr;
514 // For return instructions we need to invalidate the tracked returns map.
515 // Anything else has its lattice in the value map.
516 if (auto *RetInst = dyn_cast<ReturnInst>(Inst)) {
517 Function *F = RetInst->getParent()->getParent();
518 if (auto It = TrackedRetVals.find(F); It != TrackedRetVals.end()) {
519 It->second = ValueLatticeElement();
520 V = F;
521 } else if (MRVFunctionsTracked.count(F)) {
522 auto *STy = cast<StructType>(F->getReturnType());
523 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
524 TrackedMultipleRetVals[{F, I}] = ValueLatticeElement();
525 V = F;
527 } else if (auto *STy = dyn_cast<StructType>(Inst->getType())) {
528 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
529 if (auto It = StructValueState.find({Inst, I});
530 It != StructValueState.end()) {
531 It->second = ValueLatticeElement();
532 V = Inst;
535 } else if (auto It = ValueState.find(Inst); It != ValueState.end()) {
536 It->second = ValueLatticeElement();
537 V = Inst;
540 if (V) {
541 LLVM_DEBUG(dbgs() << "Invalidated lattice for " << *V << "\n");
543 for (User *U : V->users())
544 if (auto *UI = dyn_cast<Instruction>(U))
545 ToInvalidate.push_back(UI);
547 auto It = AdditionalUsers.find(V);
548 if (It != AdditionalUsers.end())
549 for (User *U : It->second)
550 if (auto *UI = dyn_cast<Instruction>(U))
551 ToInvalidate.push_back(UI);
556 /// markEdgeExecutable - Mark the edge from \p Source to \p Dest as feasible,
557 /// marking \p Dest executable and adding it to the BB work list if needed.
558 bool markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest);
560 // getFeasibleSuccessors - Return a vector of booleans to indicate which
561 // successors are reachable from a given terminator instruction.
562 void getFeasibleSuccessors(Instruction &TI, SmallVectorImpl<bool> &Succs);
564 // OperandChangedState - This method is invoked on all of the users of an
565 // instruction whose state has just changed in some way. Based on this
566 // information, we need to update the specified user of this instruction.
567 void operandChangedState(Instruction *I) {
568 if (BBExecutable.count(I->getParent())) // Inst is executable?
569 visit(*I);
572 // Add U as additional user of V.
573 void addAdditionalUser(Value *V, User *U) {
574 auto Iter = AdditionalUsers.insert({V, {}});
575 Iter.first->second.insert(U);
578 // Mark I's users as changed, including AdditionalUsers.
579 void markUsersAsChanged(Value *I) {
580 // Functions include their arguments in the use-list. Changed function
581 // values mean that the result of the function changed. We only need to
582 // update the call sites with the new function result and do not have to
583 // propagate the call arguments.
584 if (isa<Function>(I)) {
585 for (User *U : I->users()) {
586 if (auto *CB = dyn_cast<CallBase>(U))
587 handleCallResult(*CB);
589 } else {
590 for (User *U : I->users())
591 if (auto *UI = dyn_cast<Instruction>(U))
592 operandChangedState(UI);
595 auto Iter = AdditionalUsers.find(I);
596 if (Iter != AdditionalUsers.end()) {
597 // Copy additional users before notifying them of changes, because new
598 // users may be added, potentially invalidating the iterator.
599 SmallVector<Instruction *, 2> ToNotify;
600 for (User *U : Iter->second)
601 if (auto *UI = dyn_cast<Instruction>(U))
602 ToNotify.push_back(UI);
603 for (Instruction *UI : ToNotify)
604 operandChangedState(UI);
607 void handleCallOverdefined(CallBase &CB);
608 void handleCallResult(CallBase &CB);
609 void handleCallArguments(CallBase &CB);
610 void handleExtractOfWithOverflow(ExtractValueInst &EVI,
611 const WithOverflowInst *WO, unsigned Idx);
613 private:
614 friend class InstVisitor<SCCPInstVisitor>;
616 // visit implementations - Something changed in this instruction. Either an
617 // operand made a transition, or the instruction is newly executable. Change
618 // the lattice value of I to reflect these changes if appropriate.
619 void visitPHINode(PHINode &I);
621 // Terminators
623 void visitReturnInst(ReturnInst &I);
624 void visitTerminator(Instruction &TI);
626 void visitCastInst(CastInst &I);
627 void visitSelectInst(SelectInst &I);
628 void visitUnaryOperator(Instruction &I);
629 void visitFreezeInst(FreezeInst &I);
630 void visitBinaryOperator(Instruction &I);
631 void visitCmpInst(CmpInst &I);
632 void visitExtractValueInst(ExtractValueInst &EVI);
633 void visitInsertValueInst(InsertValueInst &IVI);
635 void visitCatchSwitchInst(CatchSwitchInst &CPI) {
636 markOverdefined(&CPI);
637 visitTerminator(CPI);
640 // Instructions that cannot be folded away.
642 void visitStoreInst(StoreInst &I);
643 void visitLoadInst(LoadInst &I);
644 void visitGetElementPtrInst(GetElementPtrInst &I);
646 void visitInvokeInst(InvokeInst &II) {
647 visitCallBase(II);
648 visitTerminator(II);
651 void visitCallBrInst(CallBrInst &CBI) {
652 visitCallBase(CBI);
653 visitTerminator(CBI);
656 void visitCallBase(CallBase &CB);
657 void visitResumeInst(ResumeInst &I) { /*returns void*/ }
659 void visitUnreachableInst(UnreachableInst &I) { /*returns void*/ }
661 void visitFenceInst(FenceInst &I) { /*returns void*/ }
664 void visitInstruction(Instruction &I);
666 public:
667 void addPredicateInfo(Function &F, DominatorTree &DT, AssumptionCache &AC) {
668 FnPredicateInfo.insert({&F, std::make_unique<PredicateInfo>(F, DT, AC)});
671 void visitCallInst(CallInst &I) { visitCallBase(I); }
673 bool markBlockExecutable(BasicBlock *BB);
675 const PredicateBase *getPredicateInfoFor(Instruction *I) {
676 auto It = FnPredicateInfo.find(I->getParent()->getParent());
677 if (It == FnPredicateInfo.end())
678 return nullptr;
679 return It->second->getPredicateInfoFor(I);
682 SCCPInstVisitor(const DataLayout &DL,
683 std::function<const TargetLibraryInfo &(Function &)> GetTLI,
684 LLVMContext &Ctx)
685 : DL(DL), GetTLI(GetTLI), Ctx(Ctx) {}
687 void trackValueOfGlobalVariable(GlobalVariable *GV) {
688 // We only track the contents of scalar globals.
689 if (GV->getValueType()->isSingleValueType()) {
690 ValueLatticeElement &IV = TrackedGlobals[GV];
691 IV.markConstant(GV->getInitializer());
695 void addTrackedFunction(Function *F) {
696 // Add an entry, F -> undef.
697 if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
698 MRVFunctionsTracked.insert(F);
699 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
700 TrackedMultipleRetVals.insert(
701 std::make_pair(std::make_pair(F, i), ValueLatticeElement()));
702 } else if (!F->getReturnType()->isVoidTy())
703 TrackedRetVals.insert(std::make_pair(F, ValueLatticeElement()));
706 void addToMustPreserveReturnsInFunctions(Function *F) {
707 MustPreserveReturnsInFunctions.insert(F);
710 bool mustPreserveReturn(Function *F) {
711 return MustPreserveReturnsInFunctions.count(F);
714 void addArgumentTrackedFunction(Function *F) {
715 TrackingIncomingArguments.insert(F);
718 bool isArgumentTrackedFunction(Function *F) {
719 return TrackingIncomingArguments.count(F);
722 void solve();
724 bool resolvedUndef(Instruction &I);
726 bool resolvedUndefsIn(Function &F);
728 bool isBlockExecutable(BasicBlock *BB) const {
729 return BBExecutable.count(BB);
732 bool isEdgeFeasible(BasicBlock *From, BasicBlock *To) const;
734 std::vector<ValueLatticeElement> getStructLatticeValueFor(Value *V) const {
735 std::vector<ValueLatticeElement> StructValues;
736 auto *STy = dyn_cast<StructType>(V->getType());
737 assert(STy && "getStructLatticeValueFor() can be called only on structs");
738 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
739 auto I = StructValueState.find(std::make_pair(V, i));
740 assert(I != StructValueState.end() && "Value not in valuemap!");
741 StructValues.push_back(I->second);
743 return StructValues;
746 void removeLatticeValueFor(Value *V) { ValueState.erase(V); }
748 /// Invalidate the Lattice Value of \p Call and its users after specializing
749 /// the call. Then recompute it.
750 void resetLatticeValueFor(CallBase *Call) {
751 // Calls to void returning functions do not need invalidation.
752 Function *F = Call->getCalledFunction();
753 (void)F;
754 assert(!F->getReturnType()->isVoidTy() &&
755 (TrackedRetVals.count(F) || MRVFunctionsTracked.count(F)) &&
756 "All non void specializations should be tracked");
757 invalidate(Call);
758 handleCallResult(*Call);
761 const ValueLatticeElement &getLatticeValueFor(Value *V) const {
762 assert(!V->getType()->isStructTy() &&
763 "Should use getStructLatticeValueFor");
764 DenseMap<Value *, ValueLatticeElement>::const_iterator I =
765 ValueState.find(V);
766 assert(I != ValueState.end() &&
767 "V not found in ValueState nor Paramstate map!");
768 return I->second;
771 const MapVector<Function *, ValueLatticeElement> &getTrackedRetVals() {
772 return TrackedRetVals;
775 const DenseMap<GlobalVariable *, ValueLatticeElement> &getTrackedGlobals() {
776 return TrackedGlobals;
779 const SmallPtrSet<Function *, 16> getMRVFunctionsTracked() {
780 return MRVFunctionsTracked;
783 void markOverdefined(Value *V) {
784 if (auto *STy = dyn_cast<StructType>(V->getType()))
785 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
786 markOverdefined(getStructValueState(V, i), V);
787 else
788 markOverdefined(ValueState[V], V);
791 bool isStructLatticeConstant(Function *F, StructType *STy);
793 Constant *getConstant(const ValueLatticeElement &LV, Type *Ty) const;
795 Constant *getConstantOrNull(Value *V) const;
797 SmallPtrSetImpl<Function *> &getArgumentTrackedFunctions() {
798 return TrackingIncomingArguments;
801 void setLatticeValueForSpecializationArguments(Function *F,
802 const SmallVectorImpl<ArgInfo> &Args);
804 void markFunctionUnreachable(Function *F) {
805 for (auto &BB : *F)
806 BBExecutable.erase(&BB);
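/// Alternate between running the solver and resolving undefs in every function
/// of \p M until no further undefs can be resolved.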
809 void solveWhileResolvedUndefsIn(Module &M) {
810 bool ResolvedUndefs = true;
811 while (ResolvedUndefs) {
812 solve();
813 ResolvedUndefs = false;
814 for (Function &F : M)
815 ResolvedUndefs |= resolvedUndefsIn(F);
819 void solveWhileResolvedUndefsIn(SmallVectorImpl<Function *> &WorkList) {
820 bool ResolvedUndefs = true;
821 while (ResolvedUndefs) {
822 solve();
823 ResolvedUndefs = false;
824 for (Function *F : WorkList)
825 ResolvedUndefs |= resolvedUndefsIn(*F);
829 void solveWhileResolvedUndefs() {
830 bool ResolvedUndefs = true;
831 while (ResolvedUndefs) {
832 solve();
833 ResolvedUndefs = false;
834 for (Value *V : Invalidated)
835 if (auto *I = dyn_cast<Instruction>(V))
836 ResolvedUndefs |= resolvedUndef(*I);
838 Invalidated.clear();
842 } // namespace llvm
844 bool SCCPInstVisitor::markBlockExecutable(BasicBlock *BB) {
845 if (!BBExecutable.insert(BB).second)
846 return false;
847 LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n');
848 BBWorkList.push_back(BB); // Add the block to the work list!
849 return true;
852 void SCCPInstVisitor::pushToWorkList(ValueLatticeElement &IV, Value *V) {
853 if (IV.isOverdefined()) {
854 if (OverdefinedInstWorkList.empty() || OverdefinedInstWorkList.back() != V)
855 OverdefinedInstWorkList.push_back(V);
856 return;
858 if (InstWorkList.empty() || InstWorkList.back() != V)
859 InstWorkList.push_back(V);
862 void SCCPInstVisitor::pushToWorkListMsg(ValueLatticeElement &IV, Value *V) {
863 LLVM_DEBUG(dbgs() << "updated " << IV << ": " << *V << '\n');
864 pushToWorkList(IV, V);
867 bool SCCPInstVisitor::markConstant(ValueLatticeElement &IV, Value *V,
868 Constant *C, bool MayIncludeUndef) {
869 if (!IV.markConstant(C, MayIncludeUndef))
870 return false;
871 LLVM_DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n');
872 pushToWorkList(IV, V);
873 return true;
876 bool SCCPInstVisitor::markOverdefined(ValueLatticeElement &IV, Value *V) {
877 if (!IV.markOverdefined())
878 return false;
880 LLVM_DEBUG(dbgs() << "markOverdefined: ";
881 if (auto *F = dyn_cast<Function>(V)) dbgs()
882 << "Function '" << F->getName() << "'\n";
883 else dbgs() << *V << '\n');
884 // Only instructions go on the work list
885 pushToWorkList(IV, V);
886 return true;
889 bool SCCPInstVisitor::isStructLatticeConstant(Function *F, StructType *STy) {
890 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
891 const auto &It = TrackedMultipleRetVals.find(std::make_pair(F, i));
892 assert(It != TrackedMultipleRetVals.end());
893 ValueLatticeElement LV = It->second;
894 if (!SCCPSolver::isConstant(LV))
895 return false;
897 return true;
900 Constant *SCCPInstVisitor::getConstant(const ValueLatticeElement &LV,
901 Type *Ty) const {
902 if (LV.isConstant()) {
903 Constant *C = LV.getConstant();
904 assert(C->getType() == Ty && "Type mismatch");
905 return C;
908 if (LV.isConstantRange()) {
909 const auto &CR = LV.getConstantRange();
910 if (CR.getSingleElement())
911 return ConstantInt::get(Ty, *CR.getSingleElement());
913 return nullptr;
916 Constant *SCCPInstVisitor::getConstantOrNull(Value *V) const {
917 Constant *Const = nullptr;
918 if (V->getType()->isStructTy()) {
919 std::vector<ValueLatticeElement> LVs = getStructLatticeValueFor(V);
920 if (any_of(LVs, SCCPSolver::isOverdefined))
921 return nullptr;
922 std::vector<Constant *> ConstVals;
923 auto *ST = cast<StructType>(V->getType());
924 for (unsigned I = 0, E = ST->getNumElements(); I != E; ++I) {
925 ValueLatticeElement LV = LVs[I];
926 ConstVals.push_back(SCCPSolver::isConstant(LV)
927 ? getConstant(LV, ST->getElementType(I))
928 : UndefValue::get(ST->getElementType(I)));
930 Const = ConstantStruct::get(ST, ConstVals);
931 } else {
932 const ValueLatticeElement &LV = getLatticeValueFor(V);
933 if (SCCPSolver::isOverdefined(LV))
934 return nullptr;
935 Const = SCCPSolver::isConstant(LV) ? getConstant(LV, V->getType())
936 : UndefValue::get(V->getType());
938 assert(Const && "Constant is nullptr here!");
939 return Const;
942 void SCCPInstVisitor::setLatticeValueForSpecializationArguments(Function *F,
943 const SmallVectorImpl<ArgInfo> &Args) {
944 assert(!Args.empty() && "Specialization without arguments");
945 assert(F->arg_size() == Args[0].Formal->getParent()->arg_size() &&
946 "Functions should have the same number of arguments");
948 auto Iter = Args.begin();
949 Function::arg_iterator NewArg = F->arg_begin();
950 Function::arg_iterator OldArg = Args[0].Formal->getParent()->arg_begin();
951 for (auto End = F->arg_end(); NewArg != End; ++NewArg, ++OldArg) {
953 LLVM_DEBUG(dbgs() << "SCCP: Marking argument "
954 << NewArg->getNameOrAsOperand() << "\n");
956 // Mark the argument constants in the new function
957 // or copy the lattice state over from the old function.
958 if (Iter != Args.end() && Iter->Formal == &*OldArg) {
959 if (auto *STy = dyn_cast<StructType>(NewArg->getType())) {
960 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
961 ValueLatticeElement &NewValue = StructValueState[{&*NewArg, I}];
962 NewValue.markConstant(Iter->Actual->getAggregateElement(I));
964 } else {
965 ValueState[&*NewArg].markConstant(Iter->Actual);
967 ++Iter;
968 } else {
969 if (auto *STy = dyn_cast<StructType>(NewArg->getType())) {
970 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
971 ValueLatticeElement &NewValue = StructValueState[{&*NewArg, I}];
972 NewValue = StructValueState[{&*OldArg, I}];
974 } else {
975 ValueLatticeElement &NewValue = ValueState[&*NewArg];
976 NewValue = ValueState[&*OldArg];
982 void SCCPInstVisitor::visitInstruction(Instruction &I) {
983 // All the instructions we don't do any special handling for just
984 // go to overdefined.
985 LLVM_DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n');
986 markOverdefined(&I);
989 bool SCCPInstVisitor::mergeInValue(ValueLatticeElement &IV, Value *V,
990 ValueLatticeElement MergeWithV,
991 ValueLatticeElement::MergeOptions Opts) {
992 if (IV.mergeIn(MergeWithV, Opts)) {
993 pushToWorkList(IV, V);
994 LLVM_DEBUG(dbgs() << "Merged " << MergeWithV << " into " << *V << " : "
995 << IV << "\n");
996 return true;
998 return false;
1001 bool SCCPInstVisitor::markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) {
1002 if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
1003 return false; // This edge is already known to be executable!
1005 if (!markBlockExecutable(Dest)) {
1006 // If the destination is already executable, we just made an *edge*
1007 // feasible that wasn't before. Revisit the PHI nodes in the block
1008 // because they have potentially new operands.
1009 LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
1010 << " -> " << Dest->getName() << '\n');
1012 for (PHINode &PN : Dest->phis())
1013 visitPHINode(PN);
1015 return true;
1018 // getFeasibleSuccessors - Return a vector of booleans to indicate which
1019 // successors are reachable from a given terminator instruction.
1020 void SCCPInstVisitor::getFeasibleSuccessors(Instruction &TI,
1021 SmallVectorImpl<bool> &Succs) {
1022 Succs.resize(TI.getNumSuccessors());
1023 if (auto *BI = dyn_cast<BranchInst>(&TI)) {
1024 if (BI->isUnconditional()) {
1025 Succs[0] = true;
1026 return;
1029 ValueLatticeElement BCValue = getValueState(BI->getCondition());
1030 ConstantInt *CI = getConstantInt(BCValue, BI->getCondition()->getType());
1031 if (!CI) {
1032 // Overdefined condition variables, and branches on unfoldable constant
1033 // conditions, mean the branch could go either way.
1034 if (!BCValue.isUnknownOrUndef())
1035 Succs[0] = Succs[1] = true;
1036 return;
1039 // Constant condition variables mean the branch can only go a single way.
1040 Succs[CI->isZero()] = true;
1041 return;
1044 // We cannot analyze special terminators, so consider all successors
1045 // executable.
1046 if (TI.isSpecialTerminator()) {
1047 Succs.assign(TI.getNumSuccessors(), true);
1048 return;
1051 if (auto *SI = dyn_cast<SwitchInst>(&TI)) {
1052 if (!SI->getNumCases()) {
1053 Succs[0] = true;
1054 return;
1056 const ValueLatticeElement &SCValue = getValueState(SI->getCondition());
1057 if (ConstantInt *CI =
1058 getConstantInt(SCValue, SI->getCondition()->getType())) {
1059 Succs[SI->findCaseValue(CI)->getSuccessorIndex()] = true;
1060 return;
1063 // TODO: Switch on undef is UB. Stop passing false once the rest of LLVM
1064 // is ready.
1065 if (SCValue.isConstantRange(/*UndefAllowed=*/false)) {
1066 const ConstantRange &Range = SCValue.getConstantRange();
1067 unsigned ReachableCaseCount = 0;
1068 for (const auto &Case : SI->cases()) {
1069 const APInt &CaseValue = Case.getCaseValue()->getValue();
1070 if (Range.contains(CaseValue)) {
1071 Succs[Case.getSuccessorIndex()] = true;
1072 ++ReachableCaseCount;
1076 Succs[SI->case_default()->getSuccessorIndex()] =
1077 Range.isSizeLargerThan(ReachableCaseCount);
1078 return;
1081 // Overdefined or unknown condition? All destinations are executable!
1082 if (!SCValue.isUnknownOrUndef())
1083 Succs.assign(TI.getNumSuccessors(), true);
1084 return;
1087 // If this is an indirect branch and its address is a blockaddress, we mark
1088 // the target as executable.
1089 if (auto *IBR = dyn_cast<IndirectBrInst>(&TI)) {
1090 // Casts are folded by visitCastInst.
1091 ValueLatticeElement IBRValue = getValueState(IBR->getAddress());
1092 BlockAddress *Addr = dyn_cast_or_null<BlockAddress>(
1093 getConstant(IBRValue, IBR->getAddress()->getType()));
1094 if (!Addr) { // Overdefined or unknown condition?
1095 // All destinations are executable!
1096 if (!IBRValue.isUnknownOrUndef())
1097 Succs.assign(TI.getNumSuccessors(), true);
1098 return;
1101 BasicBlock *T = Addr->getBasicBlock();
1102 assert(Addr->getFunction() == T->getParent() &&
1103 "Block address of a different function ?");
1104 for (unsigned i = 0; i < IBR->getNumSuccessors(); ++i) {
1105 // This is the target.
1106 if (IBR->getDestination(i) == T) {
1107 Succs[i] = true;
1108 return;
1112 // If we didn't find our destination in the IBR successor list, then we
1113 // have undefined behavior. It's ok to assume no successor is executable.
1114 return;
1117 LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n');
1118 llvm_unreachable("SCCP: Don't know how to handle this terminator!");
1121 // isEdgeFeasible - Return true if the control flow edge from the 'From' basic
1122 // block to the 'To' basic block is currently feasible.
1123 bool SCCPInstVisitor::isEdgeFeasible(BasicBlock *From, BasicBlock *To) const {
1124 // Check if we've called markEdgeExecutable on the edge yet. (We could
1125 // be more aggressive and try to consider edges which haven't been marked
1126 // yet, but there isn't any need.)
1127 return KnownFeasibleEdges.count(Edge(From, To));
1130 // visit Implementations - Something changed in this instruction, either an
1131 // operand made a transition, or the instruction is newly executable. Change
1132 // the lattice value of I to reflect these changes if appropriate. This method
1133 // makes sure to do the following actions:
1135 // 1. If a phi node merges in conflicting constants from different branches, or
1136 // merges in an overdefined value, then the PHI node becomes overdefined (for
1137 // integer PHIs, conflicting constants may instead merge into a constant range).
1138 // 2. If a phi node merges only constants in, and they all agree on a value, the
1139 // PHI node becomes a constant value equal to that.
1140 // 3. If V <- x (op) y && isConstant(x) && isConstant(y) V = Constant
1141 // 4. If V <- x (op) y && (isOverdefined(x) || isOverdefined(y)) V = Overdefined
1142 // 5. If V <- MEM or V <- CALL or V <- (unknown) then V = Overdefined
1143 // 6. If a conditional branch has a value that is constant, make the selected
1144 // destination executable
1145 // 7. If a conditional branch has a value that is overdefined, make all
1146 // successors executable.
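// For illustration: %p = phi i32 [ 1, %a ], [ 1, %b ] becomes the constant 1
// once both edges are feasible, while %p = phi i32 [ 1, %a ], [ 2, %b ] merges
// into the constant range [1, 3) rather than going straight to overdefined.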
1147 void SCCPInstVisitor::visitPHINode(PHINode &PN) {
1148 // If this PN returns a struct, just mark the result overdefined.
1149 // TODO: We could do a lot better than this if code actually uses this.
1150 if (PN.getType()->isStructTy())
1151 return (void)markOverdefined(&PN);
1153 if (getValueState(&PN).isOverdefined())
1154 return; // Quick exit
1156 // Super-extra-high-degree PHI nodes are unlikely to ever be marked constant,
1157 // and slow us down a lot. Just mark them overdefined.
1158 if (PN.getNumIncomingValues() > 64)
1159 return (void)markOverdefined(&PN);
1161 unsigned NumActiveIncoming = 0;
1163 // Look at all of the executable operands of the PHI node. If any of them
1164 // are overdefined, the PHI becomes overdefined as well. If they are all
1165 // constant, and they agree with each other, the PHI becomes the identical
1166 // constant. If they are constant and don't agree, the PHI is a constant
1167 // range. If there are no executable operands, the PHI remains unknown.
1168 ValueLatticeElement PhiState = getValueState(&PN);
1169 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
1170 if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent()))
1171 continue;
1173 ValueLatticeElement IV = getValueState(PN.getIncomingValue(i));
1174 PhiState.mergeIn(IV);
1175 NumActiveIncoming++;
1176 if (PhiState.isOverdefined())
1177 break;
1180 // We allow up to 1 range extension per active incoming value and one
1181 // additional extension. Note that we manually adjust the number of range
1182 // extensions to match the number of active incoming values. This helps to
1183 // limit multiple extensions caused by the same incoming value, if other
1184 // incoming values are equal.
1185 mergeInValue(&PN, PhiState,
1186 ValueLatticeElement::MergeOptions().setMaxWidenSteps(
1187 NumActiveIncoming + 1));
1188 ValueLatticeElement &PhiStateRef = getValueState(&PN);
1189 PhiStateRef.setNumRangeExtensions(
1190 std::max(NumActiveIncoming, PhiStateRef.getNumRangeExtensions()));
1193 void SCCPInstVisitor::visitReturnInst(ReturnInst &I) {
1194 if (I.getNumOperands() == 0)
1195 return; // ret void
1197 Function *F = I.getParent()->getParent();
1198 Value *ResultOp = I.getOperand(0);
1200 // If we are tracking the return value of this function, merge it in.
1201 if (!TrackedRetVals.empty() && !ResultOp->getType()->isStructTy()) {
1202 auto TFRVI = TrackedRetVals.find(F);
1203 if (TFRVI != TrackedRetVals.end()) {
1204 mergeInValue(TFRVI->second, F, getValueState(ResultOp));
1205 return;
1209 // Handle functions that return multiple values.
1210 if (!TrackedMultipleRetVals.empty()) {
1211 if (auto *STy = dyn_cast<StructType>(ResultOp->getType()))
1212 if (MRVFunctionsTracked.count(F))
1213 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
1214 mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F,
1215 getStructValueState(ResultOp, i));
1219 void SCCPInstVisitor::visitTerminator(Instruction &TI) {
1220 SmallVector<bool, 16> SuccFeasible;
1221 getFeasibleSuccessors(TI, SuccFeasible);
1223 BasicBlock *BB = TI.getParent();
1225 // Mark all feasible successors executable.
1226 for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i)
1227 if (SuccFeasible[i])
1228 markEdgeExecutable(BB, TI.getSuccessor(i));
1231 void SCCPInstVisitor::visitCastInst(CastInst &I) {
1232 // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1233 // discover a concrete value later.
1234 if (ValueState[&I].isOverdefined())
1235 return;
1237 ValueLatticeElement OpSt = getValueState(I.getOperand(0));
1238 if (OpSt.isUnknownOrUndef())
1239 return;
1241 if (Constant *OpC = getConstant(OpSt, I.getOperand(0)->getType())) {
1242 // Fold the constant as we build.
1243 if (Constant *C =
1244 ConstantFoldCastOperand(I.getOpcode(), OpC, I.getType(), DL))
1245 return (void)markConstant(&I, C);
1248 if (I.getDestTy()->isIntegerTy() && I.getSrcTy()->isIntOrIntVectorTy()) {
1249 auto &LV = getValueState(&I);
1250 ConstantRange OpRange = getConstantRange(OpSt, I.getSrcTy());
1252 Type *DestTy = I.getDestTy();
1253 // Vectors where all elements have the same known constant range are treated
1254 // as a single constant range in the lattice. When bitcasting such vectors,
1255 // there is a mismatch between the width of the lattice value (single
1256 // constant range) and the original operands (vector). Go to overdefined in
1257 // that case.
1258 if (I.getOpcode() == Instruction::BitCast &&
1259 I.getOperand(0)->getType()->isVectorTy() &&
1260 OpRange.getBitWidth() < DL.getTypeSizeInBits(DestTy))
1261 return (void)markOverdefined(&I);
1263 ConstantRange Res =
1264 OpRange.castOp(I.getOpcode(), DL.getTypeSizeInBits(DestTy));
1265 mergeInValue(LV, &I, ValueLatticeElement::getRange(Res));
1266 } else
1267 markOverdefined(&I);
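// For illustration: given
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// extracting index 0 yields the range of %a + %b, while extracting index 1
// folds to false when %a's known range lies entirely inside the no-wrap
// region computed from %b's range.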
1270 void SCCPInstVisitor::handleExtractOfWithOverflow(ExtractValueInst &EVI,
1271 const WithOverflowInst *WO,
1272 unsigned Idx) {
1273 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
1274 ValueLatticeElement L = getValueState(LHS);
1275 ValueLatticeElement R = getValueState(RHS);
1276 addAdditionalUser(LHS, &EVI);
1277 addAdditionalUser(RHS, &EVI);
1278 if (L.isUnknownOrUndef() || R.isUnknownOrUndef())
1279 return; // Wait to resolve.
1281 Type *Ty = LHS->getType();
1282 ConstantRange LR = getConstantRange(L, Ty);
1283 ConstantRange RR = getConstantRange(R, Ty);
1284 if (Idx == 0) {
1285 ConstantRange Res = LR.binaryOp(WO->getBinaryOp(), RR);
1286 mergeInValue(&EVI, ValueLatticeElement::getRange(Res));
1287 } else {
1288 assert(Idx == 1 && "Index can only be 0 or 1");
1289 ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
1290 WO->getBinaryOp(), RR, WO->getNoWrapKind());
1291 if (NWRegion.contains(LR))
1292 return (void)markConstant(&EVI, ConstantInt::getFalse(EVI.getType()));
1293 markOverdefined(&EVI);
1297 void SCCPInstVisitor::visitExtractValueInst(ExtractValueInst &EVI) {
1298 // If this returns a struct, mark all elements overdefined; we don't track
1299 // structs in structs.
1300 if (EVI.getType()->isStructTy())
1301 return (void)markOverdefined(&EVI);
1303 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1304 // discover a concrete value later.
1305 if (ValueState[&EVI].isOverdefined())
1306 return (void)markOverdefined(&EVI);
1308 // If this is extracting from more than one level of struct, we don't know.
1309 if (EVI.getNumIndices() != 1)
1310 return (void)markOverdefined(&EVI);
1312 Value *AggVal = EVI.getAggregateOperand();
1313 if (AggVal->getType()->isStructTy()) {
1314 unsigned i = *EVI.idx_begin();
1315 if (auto *WO = dyn_cast<WithOverflowInst>(AggVal))
1316 return handleExtractOfWithOverflow(EVI, WO, i);
1317 ValueLatticeElement EltVal = getStructValueState(AggVal, i);
1318 mergeInValue(getValueState(&EVI), &EVI, EltVal);
1319 } else {
1320 // Otherwise, must be extracting from an array.
1321 return (void)markOverdefined(&EVI);
1325 void SCCPInstVisitor::visitInsertValueInst(InsertValueInst &IVI) {
1326 auto *STy = dyn_cast<StructType>(IVI.getType());
1327 if (!STy)
1328 return (void)markOverdefined(&IVI);
1330 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1331 // discover a concrete value later.
1332 if (SCCPSolver::isOverdefined(ValueState[&IVI]))
1333 return (void)markOverdefined(&IVI);
1335 // If this has more than one index, we can't handle it; drive the result to
1336 // overdefined.
1337 if (IVI.getNumIndices() != 1)
1338 return (void)markOverdefined(&IVI);
1340 Value *Aggr = IVI.getAggregateOperand();
1341 unsigned Idx = *IVI.idx_begin();
1343 // Compute the result based on what we're inserting.
1344 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1345 // This passes through all values that aren't the inserted element.
1346 if (i != Idx) {
1347 ValueLatticeElement EltVal = getStructValueState(Aggr, i);
1348 mergeInValue(getStructValueState(&IVI, i), &IVI, EltVal);
1349 continue;
1352 Value *Val = IVI.getInsertedValueOperand();
1353 if (Val->getType()->isStructTy())
1354 // We don't track structs in structs.
1355 markOverdefined(getStructValueState(&IVI, i), &IVI);
1356 else {
1357 ValueLatticeElement InVal = getValueState(Val);
1358 mergeInValue(getStructValueState(&IVI, i), &IVI, InVal);
1363 void SCCPInstVisitor::visitSelectInst(SelectInst &I) {
1364 // If this select returns a struct, just mark the result overdefined.
1365 // TODO: We could do a lot better than this if code actually uses this.
1366 if (I.getType()->isStructTy())
1367 return (void)markOverdefined(&I);
1369 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1370 // discover a concrete value later.
1371 if (ValueState[&I].isOverdefined())
1372 return (void)markOverdefined(&I);
1374 ValueLatticeElement CondValue = getValueState(I.getCondition());
1375 if (CondValue.isUnknownOrUndef())
1376 return;
1378 if (ConstantInt *CondCB =
1379 getConstantInt(CondValue, I.getCondition()->getType())) {
1380 Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
1381 mergeInValue(&I, getValueState(OpVal));
1382 return;
1385 // Otherwise, the condition is overdefined or a constant we can't evaluate.
1386 // See if we can produce something better than overdefined based on the T/F
1387 // value.
1388 ValueLatticeElement TVal = getValueState(I.getTrueValue());
1389 ValueLatticeElement FVal = getValueState(I.getFalseValue());
1391 bool Changed = ValueState[&I].mergeIn(TVal);
1392 Changed |= ValueState[&I].mergeIn(FVal);
1393 if (Changed)
1394 pushToWorkListMsg(ValueState[&I], &I);
1397 // Handle Unary Operators.
1398 void SCCPInstVisitor::visitUnaryOperator(Instruction &I) {
1399 ValueLatticeElement V0State = getValueState(I.getOperand(0));
1401 ValueLatticeElement &IV = ValueState[&I];
1402 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1403 // discover a concrete value later.
1404 if (SCCPSolver::isOverdefined(IV))
1405 return (void)markOverdefined(&I);
1407 // If something is unknown/undef, wait for it to resolve.
1408 if (V0State.isUnknownOrUndef())
1409 return;
1411 if (SCCPSolver::isConstant(V0State))
1412 if (Constant *C = ConstantFoldUnaryOpOperand(
1413 I.getOpcode(), getConstant(V0State, I.getType()), DL))
1414 return (void)markConstant(IV, &I, C);
1416 markOverdefined(&I);
1419 void SCCPInstVisitor::visitFreezeInst(FreezeInst &I) {
1420 // If this freeze returns a struct, just mark the result overdefined.
1421 // TODO: We could do a lot better than this.
1422 if (I.getType()->isStructTy())
1423 return (void)markOverdefined(&I);
1425 ValueLatticeElement V0State = getValueState(I.getOperand(0));
1426 ValueLatticeElement &IV = ValueState[&I];
1427 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1428 // discover a concrete value later.
1429 if (SCCPSolver::isOverdefined(IV))
1430 return (void)markOverdefined(&I);
1432 // If something is unknown/undef, wait for it to resolve.
1433 if (V0State.isUnknownOrUndef())
1434 return;
1436 if (SCCPSolver::isConstant(V0State) &&
1437 isGuaranteedNotToBeUndefOrPoison(getConstant(V0State, I.getType())))
1438 return (void)markConstant(IV, &I, getConstant(V0State, I.getType()));
1440 markOverdefined(&I);
1443 // Handle Binary Operators.
1444 void SCCPInstVisitor::visitBinaryOperator(Instruction &I) {
1445 ValueLatticeElement V1State = getValueState(I.getOperand(0));
1446 ValueLatticeElement V2State = getValueState(I.getOperand(1));
1448 ValueLatticeElement &IV = ValueState[&I];
1449 if (IV.isOverdefined())
1450 return;
1452 // If something is undef, wait for it to resolve.
1453 if (V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef())
1454 return;
1456 if (V1State.isOverdefined() && V2State.isOverdefined())
1457 return (void)markOverdefined(&I);
1459 // If either of the operands is a constant, try to fold it to a constant.
1460 // TODO: Use information from notconstant better.
1461 if ((V1State.isConstant() || V2State.isConstant())) {
1462 Value *V1 = SCCPSolver::isConstant(V1State)
1463 ? getConstant(V1State, I.getOperand(0)->getType())
1464 : I.getOperand(0);
1465 Value *V2 = SCCPSolver::isConstant(V2State)
1466 ? getConstant(V2State, I.getOperand(1)->getType())
1467 : I.getOperand(1);
1468 Value *R = simplifyBinOp(I.getOpcode(), V1, V2, SimplifyQuery(DL));
1469 auto *C = dyn_cast_or_null<Constant>(R);
1470 if (C) {
1471 // Conservatively assume that the result may be based on operands that may
1472 // be undef. Note that we use mergeInValue to combine the constant with
1473 // the existing lattice value for I, as different constants might be found
1474 // after one of the operands goes to overdefined, e.g. due to one operand
1475 // being a special floating-point value.
1476 ValueLatticeElement NewV;
1477 NewV.markConstant(C, /*MayIncludeUndef=*/true);
1478 return (void)mergeInValue(&I, NewV);
1482 // Only use ranges for binary operators on integers.
1483 if (!I.getType()->isIntegerTy())
1484 return markOverdefined(&I);
1486 // Try to simplify to a constant range.
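// For illustration: with operand ranges [0, 4) and [0, 4), an add yields the
// range [0, 7), which is merged into I's lattice value below.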
1487 ConstantRange A = getConstantRange(V1State, I.getType());
1488 ConstantRange B = getConstantRange(V2State, I.getType());
1489 ConstantRange R = A.binaryOp(cast<BinaryOperator>(&I)->getOpcode(), B);
1490 mergeInValue(&I, ValueLatticeElement::getRange(R));
1492 // TODO: Currently we do not exploit special values that produce something
1493 // better than overdefined with an overdefined operand for vector or floating
1494 // point types, e.g. `and <4 x i32> <overdefined>, zeroinitializer` is always zero.
1497 // Handle ICmpInst instruction.
1498 void SCCPInstVisitor::visitCmpInst(CmpInst &I) {
1499 // Do not cache this lookup, getValueState calls later in the function might
1500 // invalidate the reference.
1501 if (SCCPSolver::isOverdefined(ValueState[&I]))
1502 return (void)markOverdefined(&I);
1504 Value *Op1 = I.getOperand(0);
1505 Value *Op2 = I.getOperand(1);
1507 // Use the operands' lattice state, which includes constant range info if
1508 // available.
1509 auto V1State = getValueState(Op1);
1510 auto V2State = getValueState(Op2);
1512 Constant *C = V1State.getCompare(I.getPredicate(), I.getType(), V2State, DL);
1513 if (C) {
1514 ValueLatticeElement CV;
1515 CV.markConstant(C);
1516 mergeInValue(&I, CV);
1517 return;
1520 // If operands are still unknown, wait for them to resolve.
1521 if ((V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef()) &&
1522 !SCCPSolver::isConstant(ValueState[&I]))
1523 return;
1525 markOverdefined(&I);
1528 // Handle getelementptr instructions. If all operands are constants then we
1529 // can turn this into a getelementptr ConstantExpr.
1530 void SCCPInstVisitor::visitGetElementPtrInst(GetElementPtrInst &I) {
1531 if (SCCPSolver::isOverdefined(ValueState[&I]))
1532 return (void)markOverdefined(&I);
1534 SmallVector<Constant *, 8> Operands;
1535 Operands.reserve(I.getNumOperands());
1537 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
1538 ValueLatticeElement State = getValueState(I.getOperand(i));
1539 if (State.isUnknownOrUndef())
1540 return; // Operands are not resolved yet.
1542 if (SCCPSolver::isOverdefined(State))
1543 return (void)markOverdefined(&I);
1545 if (Constant *C = getConstant(State, I.getOperand(i)->getType())) {
1546 Operands.push_back(C);
1547 continue;
1550 return (void)markOverdefined(&I);
1553 if (Constant *C = ConstantFoldInstOperands(&I, Operands, DL))
1554 markConstant(&I, C);
1557 void SCCPInstVisitor::visitStoreInst(StoreInst &SI) {
1558 // If this store is of a struct, ignore it.
1559 if (SI.getOperand(0)->getType()->isStructTy())
1560 return;
1562 if (TrackedGlobals.empty() || !isa<GlobalVariable>(SI.getOperand(1)))
1563 return;
1565 GlobalVariable *GV = cast<GlobalVariable>(SI.getOperand(1));
1566 auto I = TrackedGlobals.find(GV);
1567 if (I == TrackedGlobals.end())
1568 return;
1570 // Get the value we are storing into the global, then merge it.
1571 mergeInValue(I->second, GV, getValueState(SI.getOperand(0)),
1572 ValueLatticeElement::MergeOptions().setCheckWiden(false));
1573 if (I->second.isOverdefined())
1574 TrackedGlobals.erase(I); // No need to keep tracking this!
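// When nothing better is known, a lattice value can still be seeded from
// metadata: for illustration, a load tagged !range !{i32 0, i32 10} starts as
// the range [0, 10), and a pointer load tagged !nonnull starts as "not null".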
1577 static ValueLatticeElement getValueFromMetadata(const Instruction *I) {
1578 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range))
1579 if (I->getType()->isIntegerTy())
1580 return ValueLatticeElement::getRange(
1581 getConstantRangeFromMetadata(*Ranges));
1582 if (I->hasMetadata(LLVMContext::MD_nonnull))
1583 return ValueLatticeElement::getNot(
1584 ConstantPointerNull::get(cast<PointerType>(I->getType())));
1585 return ValueLatticeElement::getOverdefined();
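// For illustration (hypothetical IR), the metadata fallback above turns
//
//   %v = load i32, ptr %p, !range !0
//   ...
//   !0 = !{i32 0, i32 10}
//
// into the lattice value "range [0, 10)", and a pointer-typed instruction
// carrying !nonnull into "not-constant null", instead of plain overdefined.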
1588 // Handle load instructions. If the operand is a constant pointer to a constant
1589 // global, we can replace the load with the loaded constant value!
1590 void SCCPInstVisitor::visitLoadInst(LoadInst &I) {
1591 // If this load is of a struct or the load is volatile, just mark the result
1592 // as overdefined.
1593 if (I.getType()->isStructTy() || I.isVolatile())
1594 return (void)markOverdefined(&I);
1596 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1597 // discover a concrete value later.
1598 if (ValueState[&I].isOverdefined())
1599 return (void)markOverdefined(&I);
1601 ValueLatticeElement PtrVal = getValueState(I.getOperand(0));
1602 if (PtrVal.isUnknownOrUndef())
1603 return; // The pointer is not resolved yet!
1605 ValueLatticeElement &IV = ValueState[&I];
1607 if (SCCPSolver::isConstant(PtrVal)) {
1608 Constant *Ptr = getConstant(PtrVal, I.getOperand(0)->getType());
1610 // A load of null is undefined unless the null pointer is defined for this address space.
1611 if (isa<ConstantPointerNull>(Ptr)) {
1612 if (NullPointerIsDefined(I.getFunction(), I.getPointerAddressSpace()))
1613 return (void)markOverdefined(IV, &I);
1614 else
1615 return;
1618 // Transform load (constant global) into the value loaded.
1619 if (auto *GV = dyn_cast<GlobalVariable>(Ptr)) {
1620 if (!TrackedGlobals.empty()) {
1621 // If we are tracking this global, merge in the known value for it.
1622 auto It = TrackedGlobals.find(GV);
1623 if (It != TrackedGlobals.end()) {
1624 mergeInValue(IV, &I, It->second, getMaxWidenStepsOpts());
1625 return;
1630 // Transform load from a constant into a constant if possible.
1631 if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, I.getType(), DL))
1632 return (void)markConstant(IV, &I, C);
1635 // Fall back to metadata.
1636 mergeInValue(&I, getValueFromMetadata(&I));
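// For illustration (hypothetical IR), the constant-pointer path above folds
//
//   @answer = internal constant i32 42
//   %v = load i32, ptr @answer
//
// to the constant i32 42; loads that cannot be folded this way fall back to
// the metadata-based lattice value computed just above.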
1639 void SCCPInstVisitor::visitCallBase(CallBase &CB) {
1640 handleCallResult(CB);
1641 handleCallArguments(CB);
1644 void SCCPInstVisitor::handleCallOverdefined(CallBase &CB) {
1645 Function *F = CB.getCalledFunction();
1647 // Void return: there is no call result to track, so just bail.
1648 if (CB.getType()->isVoidTy())
1649 return;
1651 // Always mark struct return as overdefined.
1652 if (CB.getType()->isStructTy())
1653 return (void)markOverdefined(&CB);
1655 // Otherwise, if we have a single return value case, and if the function is
1656 // a declaration, maybe we can constant fold it.
1657 if (F && F->isDeclaration() && canConstantFoldCallTo(&CB, F)) {
1658 SmallVector<Constant *, 8> Operands;
1659 for (const Use &A : CB.args()) {
1660 if (A.get()->getType()->isStructTy())
1661 return markOverdefined(&CB); // Can't handle struct args.
1662 if (A.get()->getType()->isMetadataTy())
1663 continue; // Carried in CB, not allowed in Operands.
1664 ValueLatticeElement State = getValueState(A);
1666 if (State.isUnknownOrUndef())
1667 return; // Operands are not resolved yet.
1668 if (SCCPSolver::isOverdefined(State))
1669 return (void)markOverdefined(&CB);
1670 assert(SCCPSolver::isConstant(State) && "Unknown state!");
1671 Operands.push_back(getConstant(State, A->getType()));
1674 if (SCCPSolver::isOverdefined(getValueState(&CB)))
1675 return (void)markOverdefined(&CB);
1677 // If we can constant fold this, mark the result of the call as a
1678 // constant.
1679 if (Constant *C = ConstantFoldCall(&CB, F, Operands, &GetTLI(*F)))
1680 return (void)markConstant(&CB, C);
1683 // Fall back to metadata.
1684 mergeInValue(&CB, getValueFromMetadata(&CB));
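// For illustration, a hypothetical fold on this path: for a declared libcall
//
//   declare double @fabs(double)
//   %r = call double @fabs(double -3.0)
//
// once the argument's lattice value is the constant -3.0, ConstantFoldCall
// can evaluate the call to the constant 3.0 (provided the target library
// info recognizes fabs), so the call result is marked constant.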
1687 void SCCPInstVisitor::handleCallArguments(CallBase &CB) {
1688 Function *F = CB.getCalledFunction();
1689 // If this is a local function that doesn't have its address taken, mark its
1690 // entry block executable and merge the actual arguments of the call into
1691 // the formal arguments of the function.
1692 if (TrackingIncomingArguments.count(F)) {
1693 markBlockExecutable(&F->front());
1695 // Propagate information from this call site into the callee.
1696 auto CAI = CB.arg_begin();
1697 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
1698 ++AI, ++CAI) {
1699 // If this argument is byval, and if the function is not readonly, there
1700 // will be an implicit copy formed of the input aggregate.
1701 if (AI->hasByValAttr() && !F->onlyReadsMemory()) {
1702 markOverdefined(&*AI);
1703 continue;
1706 if (auto *STy = dyn_cast<StructType>(AI->getType())) {
1707 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1708 ValueLatticeElement CallArg = getStructValueState(*CAI, i);
1709 mergeInValue(getStructValueState(&*AI, i), &*AI, CallArg,
1710 getMaxWidenStepsOpts());
1712 } else
1713 mergeInValue(&*AI, getValueState(*CAI), getMaxWidenStepsOpts());
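// For illustration (a hypothetical module): if @f is internal and every call
// site passes the constant 7 for an i32 parameter, the merges above leave
// that formal argument's lattice value as "constant 7", so the body of @f is
// analyzed as if the parameter were 7. A byval argument of a non-readonly
// callee is conservatively marked overdefined because the callee observes a
// copy of the caller's memory rather than the SSA value itself.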
1718 void SCCPInstVisitor::handleCallResult(CallBase &CB) {
1719 Function *F = CB.getCalledFunction();
1721 if (auto *II = dyn_cast<IntrinsicInst>(&CB)) {
1722 if (II->getIntrinsicID() == Intrinsic::ssa_copy) {
1723 if (ValueState[&CB].isOverdefined())
1724 return;
1726 Value *CopyOf = CB.getOperand(0);
1727 ValueLatticeElement CopyOfVal = getValueState(CopyOf);
1728 const auto *PI = getPredicateInfoFor(&CB);
1729 assert(PI && "Missing predicate info for ssa.copy");
1731 const std::optional<PredicateConstraint> &Constraint =
1732 PI->getConstraint();
1733 if (!Constraint) {
1734 mergeInValue(ValueState[&CB], &CB, CopyOfVal);
1735 return;
1738 CmpInst::Predicate Pred = Constraint->Predicate;
1739 Value *OtherOp = Constraint->OtherOp;
1741 // Wait until OtherOp is resolved.
1742 if (getValueState(OtherOp).isUnknown()) {
1743 addAdditionalUser(OtherOp, &CB);
1744 return;
1747 ValueLatticeElement CondVal = getValueState(OtherOp);
1748 ValueLatticeElement &IV = ValueState[&CB];
1749 if (CondVal.isConstantRange() || CopyOfVal.isConstantRange()) {
1750 auto ImposedCR =
1751 ConstantRange::getFull(DL.getTypeSizeInBits(CopyOf->getType()));
1753 // Get the range imposed by the condition.
1754 if (CondVal.isConstantRange())
1755 ImposedCR = ConstantRange::makeAllowedICmpRegion(
1756 Pred, CondVal.getConstantRange());
1758 // Combine range info for the original value with the new range from the
1759 // condition.
1760 auto CopyOfCR = getConstantRange(CopyOfVal, CopyOf->getType());
1761 auto NewCR = ImposedCR.intersectWith(CopyOfCR);
1762 // If the existing information is != x, do not use the information from
1763 // a chained predicate, as the != x information is more likely to be
1764 // helpful in practice.
1765 if (!CopyOfCR.contains(NewCR) && CopyOfCR.getSingleMissingElement())
1766 NewCR = CopyOfCR;
1768 // The new range is based on a branch condition. That guarantees that
1769 // neither of the compare operands can be undef in the branch targets,
1770 // unless we have conditions that are always true/false (e.g. icmp ule
1771 // i32 %a, i32_max). For the latter, an overdefined/empty range will be
1772 // inferred, but the branch will get folded accordingly anyway.
1773 addAdditionalUser(OtherOp, &CB);
1774 mergeInValue(
1775 IV, &CB,
1776 ValueLatticeElement::getRange(NewCR, /*MayIncludeUndef*/ false));
1777 return;
1778 } else if (Pred == CmpInst::ICMP_EQ &&
1779 (CondVal.isConstant() || CondVal.isNotConstant())) {
1780 // For non-integer values or integer constant expressions, only
1781 // propagate equal constants or not-constants.
1782 addAdditionalUser(OtherOp, &CB);
1783 mergeInValue(IV, &CB, CondVal);
1784 return;
1785 } else if (Pred == CmpInst::ICMP_NE && CondVal.isConstant()) {
1786 // Propagate inequalities.
1787 addAdditionalUser(OtherOp, &CB);
1788 mergeInValue(IV, &CB,
1789 ValueLatticeElement::getNot(CondVal.getConstant()));
1790 return;
1793 return (void)mergeInValue(IV, &CB, CopyOfVal);
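// For illustration, a hypothetical narrowing performed by the ssa.copy
// handling above:
//
//   %c = icmp ult i32 %x, 10
//   br i1 %c, label %then, label %else
// then:
//   %x.0 = call i32 @llvm.ssa.copy.i32(i32 %x)   ; inserted by PredicateInfo
//
// The constraint is "ult 10", makeAllowedICmpRegion(ult, [10, 11)) yields
// [0, 10), and that range is intersected with whatever is already known
// about %x before being merged into the lattice value of %x.0.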
1796 if (ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
1797 // Compute result range for intrinsics supported by ConstantRange.
1798 // Do this even if we don't know a range for all operands, as we may
1799 // still know something about the result range, e.g. of abs(x).
1800 SmallVector<ConstantRange, 2> OpRanges;
1801 for (Value *Op : II->args()) {
1802 const ValueLatticeElement &State = getValueState(Op);
1803 if (State.isUnknownOrUndef())
1804 return;
1805 OpRanges.push_back(getConstantRange(State, Op->getType()));
1808 ConstantRange Result =
1809 ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges);
1810 return (void)mergeInValue(II, ValueLatticeElement::getRange(Result));
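// For illustration (hypothetical values): with %x known to be in [-5, 3),
// "call i32 @llvm.abs.i32(i32 %x, i1 false)" gets the result range [0, 6)
// from ConstantRange::intrinsic, even though %x itself is not a constant.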
1814 // The common case is that we aren't tracking the callee, either because we
1815 // are not doing interprocedural analysis, or because the callee is indirect
1816 // or external. Handle these cases first.
1817 if (!F || F->isDeclaration())
1818 return handleCallOverdefined(CB);
1820 // If this is a single/zero retval case, see if we're tracking the function.
1821 if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
1822 if (!MRVFunctionsTracked.count(F))
1823 return handleCallOverdefined(CB); // Not tracking this callee.
1825 // If we are tracking this callee, propagate the result of the function
1826 // into this call site.
1827 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
1828 mergeInValue(getStructValueState(&CB, i), &CB,
1829 TrackedMultipleRetVals[std::make_pair(F, i)],
1830 getMaxWidenStepsOpts());
1831 } else {
1832 auto TFRVI = TrackedRetVals.find(F);
1833 if (TFRVI == TrackedRetVals.end())
1834 return handleCallOverdefined(CB); // Not tracking this callee.
1836 // If so, propagate the return value of the callee into this call result.
1837 mergeInValue(&CB, TFRVI->second, getMaxWidenStepsOpts());
1841 void SCCPInstVisitor::solve() {
1842 // Process the work lists until they are empty!
1843 while (!BBWorkList.empty() || !InstWorkList.empty() ||
1844 !OverdefinedInstWorkList.empty()) {
1845 // Process the overdefined instructions' work list first, which drives other
1846 // things to overdefined more quickly.
1847 while (!OverdefinedInstWorkList.empty()) {
1848 Value *I = OverdefinedInstWorkList.pop_back_val();
1849 Invalidated.erase(I);
1851 LLVM_DEBUG(dbgs() << "\nPopped off OI-WL: " << *I << '\n');
1853 // "I" got into the work list because it either made the transition from
1854 // bottom to constant, or to overdefined.
1856 // Anything on this worklist that is overdefined need not be visited
1857 // since all of its users will have already been marked as overdefined.
1858 // Update all of the users of this instruction's value.
1860 markUsersAsChanged(I);
1863 // Process the instruction work list.
1864 while (!InstWorkList.empty()) {
1865 Value *I = InstWorkList.pop_back_val();
1866 Invalidated.erase(I);
1868 LLVM_DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n');
1870 // "I" got into the work list because it made the transition from undef to
1871 // constant.
1873 // Anything on this worklist that is overdefined need not be visited
1874 // since all of its users will have already been marked as overdefined.
1875 // Update all of the users of this instruction's value.
1877 if (I->getType()->isStructTy() || !getValueState(I).isOverdefined())
1878 markUsersAsChanged(I);
1881 // Process the basic block work list.
1882 while (!BBWorkList.empty()) {
1883 BasicBlock *BB = BBWorkList.pop_back_val();
1885 LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n');
1887 // Notify all instructions in this basic block that they are newly
1888 // executable.
1889 visit(BB);
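// For illustration, a hypothetical run of the loop above: marking the entry
// block executable pushes it onto BBWorkList; visiting "%x = add i32 2, 3"
// marks %x constant 5 and pushes it onto InstWorkList; popping %x re-visits
// its users, possibly pushing more blocks or instructions; the loop
// terminates once all three worklists are empty, because each lattice value
// can only change a bounded number of times.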
1894 bool SCCPInstVisitor::resolvedUndef(Instruction &I) {
1895 // Look for instructions which produce undef values.
1896 if (I.getType()->isVoidTy())
1897 return false;
1899 if (auto *STy = dyn_cast<StructType>(I.getType())) {
1900 // Only a few kinds of instructions that can produce structs matter for undef.
1902 // Tracked calls must never be marked overdefined in resolvedUndefsIn.
1903 if (auto *CB = dyn_cast<CallBase>(&I))
1904 if (Function *F = CB->getCalledFunction())
1905 if (MRVFunctionsTracked.count(F))
1906 return false;
1908 // extractvalue and insertvalue don't need to be marked; they are
1909 // tracked as precisely as their operands.
1910 if (isa<ExtractValueInst>(I) || isa<InsertValueInst>(I))
1911 return false;
1912 // Send the results of everything else to overdefined. We could be
1913 // more precise than this but it isn't worth bothering.
1914 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1915 ValueLatticeElement &LV = getStructValueState(&I, i);
1916 if (LV.isUnknown()) {
1917 markOverdefined(LV, &I);
1918 return true;
1921 return false;
1924 ValueLatticeElement &LV = getValueState(&I);
1925 if (!LV.isUnknown())
1926 return false;
1928 // There are two reasons a call can have an undef result:
1929 // 1. It could be tracked.
1930 // 2. It could be constant-foldable.
1931 // Because of the way we solve return values, tracked calls must
1932 // never be marked overdefined in resolvedUndefsIn.
1933 if (auto *CB = dyn_cast<CallBase>(&I))
1934 if (Function *F = CB->getCalledFunction())
1935 if (TrackedRetVals.count(F))
1936 return false;
1938 if (isa<LoadInst>(I)) {
1939 // A load here means one of two things: a load of undef from a global,
1940 // or a load from an unknown pointer. Either way, having it return undef
1941 // is okay.
1942 return false;
1945 markOverdefined(&I);
1946 return true;
1949 /// While solving the dataflow for a function, we don't compute a result for
1950 /// operations with an undef operand, to allow undef to be lowered to a
1951 /// constant later. For example, constant folding of "zext i8 undef to i16"
1952 /// would result in "i16 0", and if undef is later lowered to "i8 1", then the
1953 /// zext result would become "i16 1" and would result in an overdefined
1954 /// lattice value once merged with the previous result. Not computing the
1955 /// result of the zext (treating undef the same as unknown) allows us to handle
1956 /// a later undef->constant lowering more optimally.
1958 /// However, if the operand remains undef when the solver returns, we do need
1959 /// to assign some result to the instruction (otherwise we would treat it as
1960 /// unreachable). For simplicity, we mark any instructions that are still
1961 /// unknown as overdefined.
1962 bool SCCPInstVisitor::resolvedUndefsIn(Function &F) {
1963 bool MadeChange = false;
1964 for (BasicBlock &BB : F) {
1965 if (!BBExecutable.count(&BB))
1966 continue;
1968 for (Instruction &I : BB)
1969 MadeChange |= resolvedUndef(I);
1972 LLVM_DEBUG(if (MadeChange) dbgs()
1973 << "\nResolved undefs in " << F.getName() << '\n');
1975 return MadeChange;
1978 //===----------------------------------------------------------------------===//
1980 // SCCPSolver implementations
1982 SCCPSolver::SCCPSolver(
1983 const DataLayout &DL,
1984 std::function<const TargetLibraryInfo &(Function &)> GetTLI,
1985 LLVMContext &Ctx)
1986 : Visitor(new SCCPInstVisitor(DL, std::move(GetTLI), Ctx)) {}
1988 SCCPSolver::~SCCPSolver() = default;
1990 void SCCPSolver::addPredicateInfo(Function &F, DominatorTree &DT,
1991 AssumptionCache &AC) {
1992 Visitor->addPredicateInfo(F, DT, AC);
1995 bool SCCPSolver::markBlockExecutable(BasicBlock *BB) {
1996 return Visitor->markBlockExecutable(BB);
1999 const PredicateBase *SCCPSolver::getPredicateInfoFor(Instruction *I) {
2000 return Visitor->getPredicateInfoFor(I);
2003 void SCCPSolver::trackValueOfGlobalVariable(GlobalVariable *GV) {
2004 Visitor->trackValueOfGlobalVariable(GV);
2007 void SCCPSolver::addTrackedFunction(Function *F) {
2008 Visitor->addTrackedFunction(F);
2011 void SCCPSolver::addToMustPreserveReturnsInFunctions(Function *F) {
2012 Visitor->addToMustPreserveReturnsInFunctions(F);
2015 bool SCCPSolver::mustPreserveReturn(Function *F) {
2016 return Visitor->mustPreserveReturn(F);
2019 void SCCPSolver::addArgumentTrackedFunction(Function *F) {
2020 Visitor->addArgumentTrackedFunction(F);
2023 bool SCCPSolver::isArgumentTrackedFunction(Function *F) {
2024 return Visitor->isArgumentTrackedFunction(F);
2027 void SCCPSolver::solve() { Visitor->solve(); }
2029 bool SCCPSolver::resolvedUndefsIn(Function &F) {
2030 return Visitor->resolvedUndefsIn(F);
2033 void SCCPSolver::solveWhileResolvedUndefsIn(Module &M) {
2034 Visitor->solveWhileResolvedUndefsIn(M);
2037 void
2038 SCCPSolver::solveWhileResolvedUndefsIn(SmallVectorImpl<Function *> &WorkList) {
2039 Visitor->solveWhileResolvedUndefsIn(WorkList);
2042 void SCCPSolver::solveWhileResolvedUndefs() {
2043 Visitor->solveWhileResolvedUndefs();
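// For illustration, a minimal sketch of how a client (for example the SCCP
// pass) typically drives these wrappers; details are elided and may differ:
//
//   SCCPSolver Solver(DL, GetTLI, F.getContext());
//   Solver.markBlockExecutable(&F.front());
//   for (Argument &AI : F.args())
//     Solver.markOverdefined(&AI);
//   bool ResolvedUndefs = true;
//   while (ResolvedUndefs) {
//     Solver.solve();
//     ResolvedUndefs = Solver.resolvedUndefsIn(F);
//   }
//   // Afterwards, query Solver.isBlockExecutable(&BB) and
//   // Solver.getLatticeValueFor(&I) to simplify the function.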
2046 bool SCCPSolver::isBlockExecutable(BasicBlock *BB) const {
2047 return Visitor->isBlockExecutable(BB);
2050 bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) const {
2051 return Visitor->isEdgeFeasible(From, To);
2054 std::vector<ValueLatticeElement>
2055 SCCPSolver::getStructLatticeValueFor(Value *V) const {
2056 return Visitor->getStructLatticeValueFor(V);
2059 void SCCPSolver::removeLatticeValueFor(Value *V) {
2060 return Visitor->removeLatticeValueFor(V);
2063 void SCCPSolver::resetLatticeValueFor(CallBase *Call) {
2064 Visitor->resetLatticeValueFor(Call);
2067 const ValueLatticeElement &SCCPSolver::getLatticeValueFor(Value *V) const {
2068 return Visitor->getLatticeValueFor(V);
2071 const MapVector<Function *, ValueLatticeElement> &
2072 SCCPSolver::getTrackedRetVals() {
2073 return Visitor->getTrackedRetVals();
2076 const DenseMap<GlobalVariable *, ValueLatticeElement> &
2077 SCCPSolver::getTrackedGlobals() {
2078 return Visitor->getTrackedGlobals();
2081 const SmallPtrSet<Function *, 16> SCCPSolver::getMRVFunctionsTracked() {
2082 return Visitor->getMRVFunctionsTracked();
2085 void SCCPSolver::markOverdefined(Value *V) { Visitor->markOverdefined(V); }
2087 bool SCCPSolver::isStructLatticeConstant(Function *F, StructType *STy) {
2088 return Visitor->isStructLatticeConstant(F, STy);
2091 Constant *SCCPSolver::getConstant(const ValueLatticeElement &LV,
2092 Type *Ty) const {
2093 return Visitor->getConstant(LV, Ty);
2096 Constant *SCCPSolver::getConstantOrNull(Value *V) const {
2097 return Visitor->getConstantOrNull(V);
2100 SmallPtrSetImpl<Function *> &SCCPSolver::getArgumentTrackedFunctions() {
2101 return Visitor->getArgumentTrackedFunctions();
2104 void SCCPSolver::setLatticeValueForSpecializationArguments(Function *F,
2105 const SmallVectorImpl<ArgInfo> &Args) {
2106 Visitor->setLatticeValueForSpecializationArguments(F, Args);
2109 void SCCPSolver::markFunctionUnreachable(Function *F) {
2110 Visitor->markFunctionUnreachable(F);
2113 void SCCPSolver::visit(Instruction *I) { Visitor->visit(I); }
2115 void SCCPSolver::visitCall(CallInst &I) { Visitor->visitCall(I); }