//===- CorrelatedValuePropagation.cpp - Propagate CFG-derived info --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Correlated Value Propagation pass.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/CorrelatedValuePropagation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "correlated-value-propagation"

STATISTIC(NumPhis, "Number of phis propagated");
STATISTIC(NumPhiCommon, "Number of phis deleted via common incoming value");
STATISTIC(NumSelects, "Number of selects propagated");
STATISTIC(NumMemAccess, "Number of memory access targets propagated");
STATISTIC(NumCmps, "Number of comparisons propagated");
STATISTIC(NumReturns, "Number of return values propagated");
STATISTIC(NumDeadCases, "Number of switch cases removed");
STATISTIC(NumSDivs, "Number of sdiv converted to udiv");
STATISTIC(NumUDivs, "Number of udivs whose width was decreased");
STATISTIC(NumAShrs, "Number of ashr converted to lshr");
STATISTIC(NumSRems, "Number of srem converted to urem");
STATISTIC(NumSExt, "Number of sext converted to zext");
STATISTIC(NumAnd, "Number of ands removed");
STATISTIC(NumNW, "Number of no-wrap deductions");
STATISTIC(NumNSW, "Number of no-signed-wrap deductions");
STATISTIC(NumNUW, "Number of no-unsigned-wrap deductions");
STATISTIC(NumAddNW, "Number of no-wrap deductions for add");
STATISTIC(NumAddNSW, "Number of no-signed-wrap deductions for add");
STATISTIC(NumAddNUW, "Number of no-unsigned-wrap deductions for add");
STATISTIC(NumSubNW, "Number of no-wrap deductions for sub");
STATISTIC(NumSubNSW, "Number of no-signed-wrap deductions for sub");
STATISTIC(NumSubNUW, "Number of no-unsigned-wrap deductions for sub");
STATISTIC(NumMulNW, "Number of no-wrap deductions for mul");
STATISTIC(NumMulNSW, "Number of no-signed-wrap deductions for mul");
STATISTIC(NumMulNUW, "Number of no-unsigned-wrap deductions for mul");
STATISTIC(NumShlNW, "Number of no-wrap deductions for shl");
STATISTIC(NumShlNSW, "Number of no-signed-wrap deductions for shl");
STATISTIC(NumShlNUW, "Number of no-unsigned-wrap deductions for shl");
STATISTIC(NumOverflows, "Number of overflow checks removed");
STATISTIC(NumSaturating,
          "Number of saturating arithmetics converted to normal arithmetics");

static cl::opt<bool> DontAddNoWrapFlags("cvp-dont-add-nowrap-flags",
                                        cl::init(false));

namespace {

  class CorrelatedValuePropagation : public FunctionPass {
  public:
    static char ID;

    CorrelatedValuePropagation(): FunctionPass(ID) {
     initializeCorrelatedValuePropagationPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<LazyValueInfoWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addPreserved<LazyValueInfoWrapperPass>();
    }
  };

} // end anonymous namespace

char CorrelatedValuePropagation::ID = 0;

INITIALIZE_PASS_BEGIN(CorrelatedValuePropagation, "correlated-propagation",
                      "Value Propagation", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
INITIALIZE_PASS_END(CorrelatedValuePropagation, "correlated-propagation",
                    "Value Propagation", false, false)

// Public interface to the Value Propagation pass
Pass *llvm::createCorrelatedValuePropagationPass() {
  return new CorrelatedValuePropagation();
}

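// Illustrative sketch (hypothetical IR, not taken from the regression tests):
// if LVI can prove the select condition is a known i1 constant at the select,
// the select collapses to one of its operands, e.g.
//   entry:
//     %c = icmp ult i32 %x, 10
//     br i1 %c, label %then, label %else
//   then:
//     %s = select i1 %c, i32 %a, i32 %b   ; %c known true here -> use %a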
static bool processSelect(SelectInst *S, LazyValueInfo *LVI) {
  if (S->getType()->isVectorTy()) return false;
  if (isa<Constant>(S->getOperand(0))) return false;

  Constant *C = LVI->getConstant(S->getCondition(), S->getParent(), S);
  if (!C) return false;

  ConstantInt *CI = dyn_cast<ConstantInt>(C);
  if (!CI) return false;

  Value *ReplaceWith = S->getTrueValue();
  Value *Other = S->getFalseValue();
  if (!CI->isOne()) std::swap(ReplaceWith, Other);
  if (ReplaceWith == S) ReplaceWith = UndefValue::get(S->getType());

  S->replaceAllUsesWith(ReplaceWith);
  S->eraseFromParent();

  ++NumSelects;

  return true;
}

/// Try to simplify a phi with constant incoming values that match the edge
/// values of a non-constant value on all other edges:
/// bb0:
///   %isnull = icmp eq i8* %x, null
///   br i1 %isnull, label %bb2, label %bb1
/// bb1:
///   br label %bb2
/// bb2:
///   %r = phi i8* [ %x, %bb1 ], [ null, %bb0 ]
/// -->
///   %r = %x
static bool simplifyCommonValuePhi(PHINode *P, LazyValueInfo *LVI,
                                   DominatorTree *DT) {
  // Collect incoming constants and initialize possible common value.
  SmallVector<std::pair<Constant *, unsigned>, 4> IncomingConstants;
  Value *CommonValue = nullptr;
  for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = P->getIncomingValue(i);
    if (auto *IncomingConstant = dyn_cast<Constant>(Incoming)) {
      IncomingConstants.push_back(std::make_pair(IncomingConstant, i));
    } else if (!CommonValue) {
      // The potential common value is initialized to the first non-constant.
      CommonValue = Incoming;
    } else if (Incoming != CommonValue) {
      // There can be only one non-constant common value.
      return false;
    }
  }

  if (!CommonValue || IncomingConstants.empty())
    return false;

  // The common value must be valid in all incoming blocks.
  BasicBlock *ToBB = P->getParent();
  if (auto *CommonInst = dyn_cast<Instruction>(CommonValue))
    if (!DT->dominates(CommonInst, ToBB))
      return false;

  // We have a phi with exactly 1 variable incoming value and 1 or more constant
  // incoming values. See if all constant incoming values can be mapped back to
  // the same incoming variable value.
  for (auto &IncomingConstant : IncomingConstants) {
    Constant *C = IncomingConstant.first;
    BasicBlock *IncomingBB = P->getIncomingBlock(IncomingConstant.second);
    if (C != LVI->getConstantOnEdge(CommonValue, IncomingBB, ToBB, P))
      return false;
  }

  // All constant incoming values map to the same variable along the incoming
  // edges of the phi. The phi is unnecessary.
  P->replaceAllUsesWith(CommonValue);
  P->eraseFromParent();
  ++NumPhiCommon;
  return true;
}

static bool processPHI(PHINode *P, LazyValueInfo *LVI, DominatorTree *DT,
                       const SimplifyQuery &SQ) {
  bool Changed = false;

  BasicBlock *BB = P->getParent();
  for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) {
    Value *Incoming = P->getIncomingValue(i);
    if (isa<Constant>(Incoming)) continue;

    Value *V = LVI->getConstantOnEdge(Incoming, P->getIncomingBlock(i), BB, P);

    // Look if the incoming value is a select with a scalar condition for which
    // LVI can tell us the value. In that case replace the incoming value with
    // the appropriate value of the select. This often allows us to remove the
    // select later.
    if (!V) {
      SelectInst *SI = dyn_cast<SelectInst>(Incoming);
      if (!SI) continue;

      Value *Condition = SI->getCondition();
      if (!Condition->getType()->isVectorTy()) {
        if (Constant *C = LVI->getConstantOnEdge(
                Condition, P->getIncomingBlock(i), BB, P)) {
          if (C->isOneValue()) {
            V = SI->getTrueValue();
          } else if (C->isZeroValue()) {
            V = SI->getFalseValue();
          }
          // Once LVI learns to handle vector types, we could also add support
          // for vector type constants that are not all zeroes or all ones.
        }
      }

      // Look if the select has a constant but LVI tells us that the incoming
      // value can never be that constant. In that case replace the incoming
      // value with the other value of the select. This often allows us to
      // remove the select later.
      if (!V) {
        Constant *C = dyn_cast<Constant>(SI->getFalseValue());
        if (!C) continue;

        if (LVI->getPredicateOnEdge(ICmpInst::ICMP_EQ, SI, C,
                                    P->getIncomingBlock(i), BB, P) !=
            LazyValueInfo::False)
          continue;
        V = SI->getTrueValue();
      }

      LLVM_DEBUG(dbgs() << "CVP: Threading PHI over " << *SI << '\n');
    }

    P->setIncomingValue(i, V);
    Changed = true;
  }

  if (Value *V = SimplifyInstruction(P, SQ)) {
    P->replaceAllUsesWith(V);
    P->eraseFromParent();
    Changed = true;
  }

  if (!Changed)
    Changed = simplifyCommonValuePhi(P, LVI, DT);

  if (Changed)
    ++NumPhis;

  return Changed;
}

static bool processMemAccess(Instruction *I, LazyValueInfo *LVI) {
  Value *Pointer = nullptr;
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    Pointer = L->getPointerOperand();
  else
    Pointer = cast<StoreInst>(I)->getPointerOperand();

  if (isa<Constant>(Pointer)) return false;

  Constant *C = LVI->getConstant(Pointer, I->getParent(), I);
  if (!C) return false;

  ++NumMemAccess;
  I->replaceUsesOfWith(Pointer, C);
  return true;
}

/// See if LazyValueInfo's ability to exploit edge conditions or range
/// information is sufficient to prove this comparison. Even for local
/// conditions, this can sometimes prove conditions instcombine can't by
/// exploiting range information.
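///
/// Illustrative sketch (hypothetical IR, %x defined outside the block):
///   entry:
///     %c = icmp ult i32 %x, 10
///     br i1 %c, label %small, label %big
///   small:
///     %c2 = icmp ult i32 %x, 100   ; known true from the edge condition
/// The %c2 compare folds to true even though it is not locally redundant.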
static bool processCmp(CmpInst *Cmp, LazyValueInfo *LVI) {
  Value *Op0 = Cmp->getOperand(0);
  auto *C = dyn_cast<Constant>(Cmp->getOperand(1));
  if (!C)
    return false;

  // As a policy choice, we choose not to waste compile time on anything where
  // the comparison is testing local values. While LVI can sometimes reason
  // about such cases, it's not its primary purpose. We do make sure to do
  // the block local query for uses from terminator instructions, but that's
  // handled in the code for each terminator.
  auto *I = dyn_cast<Instruction>(Op0);
  if (I && I->getParent() == Cmp->getParent())
    return false;

  LazyValueInfo::Tristate Result =
      LVI->getPredicateAt(Cmp->getPredicate(), Op0, C, Cmp);
  if (Result == LazyValueInfo::Unknown)
    return false;

  ++NumCmps;
  Constant *TorF = ConstantInt::get(Type::getInt1Ty(Cmp->getContext()), Result);
  Cmp->replaceAllUsesWith(TorF);
  Cmp->eraseFromParent();
  return true;
}

/// Simplify a switch instruction by removing cases which can never fire. If the
/// uselessness of a case could be determined locally then constant propagation
/// would already have figured it out. Instead, walk the predecessors and
/// statically evaluate cases based on information available on that edge. Cases
/// that cannot fire no matter what the incoming edge can safely be removed. If
/// a case fires on every incoming edge then the entire switch can be removed
/// and replaced with a branch to the case destination.
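///
/// Illustrative sketch (hypothetical IR): if every predecessor edge implies
/// %x u< 2, the case for 7 can never fire and is deleted:
///   switch i32 %x, label %def [ i32 0, label %a
///                               i32 1, label %b
///                               i32 7, label %c ]   ; case for 7 removed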
static bool processSwitch(SwitchInst *I, LazyValueInfo *LVI,
                          DominatorTree *DT) {
  DomTreeUpdater DTU(*DT, DomTreeUpdater::UpdateStrategy::Lazy);
  Value *Cond = I->getCondition();
  BasicBlock *BB = I->getParent();

  // If the condition was defined in the same block as the switch then
  // LazyValueInfo currently won't say anything useful about it, though in
  // theory it could.
  if (isa<Instruction>(Cond) && cast<Instruction>(Cond)->getParent() == BB)
    return false;

  // If the switch is unreachable then trying to improve it is a waste of time.
  pred_iterator PB = pred_begin(BB), PE = pred_end(BB);
  if (PB == PE) return false;

  // Analyse each switch case in turn.
  bool Changed = false;
  DenseMap<BasicBlock*, int> SuccessorsCount;
  for (auto *Succ : successors(BB))
    SuccessorsCount[Succ]++;

  { // Scope for SwitchInstProfUpdateWrapper. It must not live during
    // ConstantFoldTerminator() as the underlying SwitchInst can be changed.
    SwitchInstProfUpdateWrapper SI(*I);

    for (auto CI = SI->case_begin(), CE = SI->case_end(); CI != CE;) {
      ConstantInt *Case = CI->getCaseValue();

      // Check to see if the switch condition is equal to/not equal to the case
      // value on every incoming edge, equal/not equal being the same each time.
      LazyValueInfo::Tristate State = LazyValueInfo::Unknown;
      for (pred_iterator PI = PB; PI != PE; ++PI) {
        // Is the switch condition equal to the case value?
        LazyValueInfo::Tristate Value = LVI->getPredicateOnEdge(CmpInst::ICMP_EQ,
                                                                Cond, Case, *PI,
                                                                BB, SI);
        // Give up on this case if nothing is known.
        if (Value == LazyValueInfo::Unknown) {
          State = LazyValueInfo::Unknown;
          break;
        }

        // If this was the first edge to be visited, record that all other edges
        // need to give the same result.
        if (PI == PB) {
          State = Value;
          continue;
        }

        // If this case is known to fire for some edges and known not to fire for
        // others then there is nothing we can do - give up.
        if (Value != State) {
          State = LazyValueInfo::Unknown;
          break;
        }
      }

      if (State == LazyValueInfo::False) {
        // This case never fires - remove it.
        BasicBlock *Succ = CI->getCaseSuccessor();
        Succ->removePredecessor(BB);
        CI = SI.removeCase(CI);
        CE = SI->case_end();

        // The condition can be modified by removePredecessor's PHI simplification
        // logic.
        Cond = SI->getCondition();

        ++NumDeadCases;
        Changed = true;
        if (--SuccessorsCount[Succ] == 0)
          DTU.applyUpdatesPermissive({{DominatorTree::Delete, BB, Succ}});
        continue;
      }
      if (State == LazyValueInfo::True) {
        // This case always fires. Arrange for the switch to be turned into an
        // unconditional branch by replacing the switch condition with the case
        // value.
        SI->setCondition(Case);
        NumDeadCases += SI->getNumCases();
        Changed = true;
        break;
      }

      // Increment the case iterator since we didn't delete it.
      ++CI;
    }
  }

  if (Changed)
    // If the switch has been simplified to the point where it can be replaced
    // by a branch then do so now.
    ConstantFoldTerminator(BB, /*DeleteDeadConditions = */ false,
                           /*TLI = */ nullptr, &DTU);
  return Changed;
}

// See if we can prove that the given binary op intrinsic will not overflow.
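// For instance (illustrative numbers, not derived from upstream tests): if LVI
// knows both operands of an i8 uadd.with.overflow lie in [0, 100), their sum
// is at most 198, which still fits in 8 bits, so the overflow bit is provably
// false and the intrinsic can be rewritten by the caller.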
static bool willNotOverflow(BinaryOpIntrinsic *BO, LazyValueInfo *LVI) {
  ConstantRange LRange = LVI->getConstantRange(
      BO->getLHS(), BO->getParent(), BO);
  ConstantRange RRange = LVI->getConstantRange(
      BO->getRHS(), BO->getParent(), BO);
  ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
      BO->getBinaryOp(), RRange, BO->getNoWrapKind());
  return NWRegion.contains(LRange);
}

static void setDeducedOverflowingFlags(Value *V, Instruction::BinaryOps Opcode,
                                       bool NewNSW, bool NewNUW) {
  Statistic *OpcNW, *OpcNSW, *OpcNUW;
  switch (Opcode) {
  case Instruction::Add:
    OpcNW = &NumAddNW;
    OpcNSW = &NumAddNSW;
    OpcNUW = &NumAddNUW;
    break;
  case Instruction::Sub:
    OpcNW = &NumSubNW;
    OpcNSW = &NumSubNSW;
    OpcNUW = &NumSubNUW;
    break;
  case Instruction::Mul:
    OpcNW = &NumMulNW;
    OpcNSW = &NumMulNSW;
    OpcNUW = &NumMulNUW;
    break;
  case Instruction::Shl:
    OpcNW = &NumShlNW;
    OpcNSW = &NumShlNSW;
    OpcNUW = &NumShlNUW;
    break;
  default:
    llvm_unreachable("Will not be called with other binops");
  }

  auto *Inst = dyn_cast<Instruction>(V);
  if (NewNSW) {
    ++NumNW;
    ++*OpcNW;
    ++NumNSW;
    ++*OpcNSW;
    if (Inst)
      Inst->setHasNoSignedWrap();
  }
  if (NewNUW) {
    ++NumNW;
    ++*OpcNW;
    ++NumNUW;
    ++*OpcNUW;
    if (Inst)
      Inst->setHasNoUnsignedWrap();
  }
}

static bool processBinOp(BinaryOperator *BinOp, LazyValueInfo *LVI);

// Rewrite this with.overflow intrinsic as non-overflowing.
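// Illustrative sketch (hypothetical IR): when the operand ranges prove no
// overflow,
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// becomes a plain add plus a struct whose overflow bit is constant false:
//   %res1 = add nsw i32 %a, %b
//   %res2 = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 %res1, 0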
static void processOverflowIntrinsic(WithOverflowInst *WO, LazyValueInfo *LVI) {
  IRBuilder<> B(WO);
  Instruction::BinaryOps Opcode = WO->getBinaryOp();
  bool NSW = WO->isSigned();
  bool NUW = !WO->isSigned();

  Value *NewOp =
      B.CreateBinOp(Opcode, WO->getLHS(), WO->getRHS(), WO->getName());
  setDeducedOverflowingFlags(NewOp, Opcode, NSW, NUW);

  StructType *ST = cast<StructType>(WO->getType());
  Constant *Struct = ConstantStruct::get(ST,
      { UndefValue::get(ST->getElementType(0)),
        ConstantInt::getFalse(ST->getElementType(1)) });
  Value *NewI = B.CreateInsertValue(Struct, NewOp, 0);
  WO->replaceAllUsesWith(NewI);
  WO->eraseFromParent();
  ++NumOverflows;

  // See if we can infer the other no-wrap too.
  if (auto *BO = dyn_cast<BinaryOperator>(NewOp))
    processBinOp(BO, LVI);
}

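// Illustrative sketch (hypothetical IR): when LVI proves the saturation bound
// is never reached, a saturating intrinsic becomes ordinary arithmetic, e.g.
//   %r = call i8 @llvm.uadd.sat.i8(i8 %a, i8 %b)   ; %a, %b known < 100
// -->
//   %r = add nuw i8 %a, %b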
static void processSaturatingInst(SaturatingInst *SI, LazyValueInfo *LVI) {
  Instruction::BinaryOps Opcode = SI->getBinaryOp();
  bool NSW = SI->isSigned();
  bool NUW = !SI->isSigned();
  BinaryOperator *BinOp = BinaryOperator::Create(
      Opcode, SI->getLHS(), SI->getRHS(), SI->getName(), SI);
  BinOp->setDebugLoc(SI->getDebugLoc());
  setDeducedOverflowingFlags(BinOp, Opcode, NSW, NUW);

  SI->replaceAllUsesWith(BinOp);
  SI->eraseFromParent();
  ++NumSaturating;

  // See if we can infer the other no-wrap too.
  if (auto *BO = dyn_cast<BinaryOperator>(BinOp))
    processBinOp(BO, LVI);
}

/// Infer nonnull attributes for the arguments at the specified callsite.
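///
/// Illustrative sketch (hypothetical IR): after a null check, the argument is
/// known non-null on the taken path, so the call gains a nonnull attribute:
///   %ok = icmp ne i8* %p, null
///   br i1 %ok, label %notnull, label %skip
///   notnull:
///     call void @use(i8* nonnull %p)   ; attribute added by this pass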
static bool processCallSite(CallSite CS, LazyValueInfo *LVI) {
  SmallVector<unsigned, 4> ArgNos;
  unsigned ArgNo = 0;

  if (auto *WO = dyn_cast<WithOverflowInst>(CS.getInstruction())) {
    if (WO->getLHS()->getType()->isIntegerTy() && willNotOverflow(WO, LVI)) {
      processOverflowIntrinsic(WO, LVI);
      return true;
    }
  }

  if (auto *SI = dyn_cast<SaturatingInst>(CS.getInstruction())) {
    if (SI->getType()->isIntegerTy() && willNotOverflow(SI, LVI)) {
      processSaturatingInst(SI, LVI);
      return true;
    }
  }

  // Deopt bundle operands are intended to capture state with minimal
  // perturbance of the code otherwise. If we can find a constant value for
  // any such operand and remove a use of the original value, that's
  // desirable since it may allow further optimization of that value (e.g. via
  // single use rules in instcombine). Since deopt uses tend to,
  // idiomatically, appear along rare conditional paths, it's reasonably likely
  // we may have a conditional fact with which LVI can fold.
  if (auto DeoptBundle = CS.getOperandBundle(LLVMContext::OB_deopt)) {
    bool Progress = false;
    for (const Use &ConstU : DeoptBundle->Inputs) {
      Use &U = const_cast<Use&>(ConstU);
      Value *V = U.get();
      if (V->getType()->isVectorTy()) continue;
      if (isa<Constant>(V)) continue;

      Constant *C = LVI->getConstant(V, CS.getParent(), CS.getInstruction());
      if (!C) continue;
      U.set(C);
      Progress = true;
    }
    if (Progress)
      return true;
  }

  for (Value *V : CS.args()) {
    PointerType *Type = dyn_cast<PointerType>(V->getType());
    // Try to mark pointer typed parameters as non-null. We skip the
    // relatively expensive analysis for constants which are obviously either
    // null or non-null to start with.
    if (Type && !CS.paramHasAttr(ArgNo, Attribute::NonNull) &&
        !isa<Constant>(V) &&
        LVI->getPredicateAt(ICmpInst::ICMP_EQ, V,
                            ConstantPointerNull::get(Type),
                            CS.getInstruction()) == LazyValueInfo::False)
      ArgNos.push_back(ArgNo);
    ArgNo++;
  }

  assert(ArgNo == CS.arg_size() && "sanity check");

  if (ArgNos.empty())
    return false;

  AttributeList AS = CS.getAttributes();
  LLVMContext &Ctx = CS.getInstruction()->getContext();
  AS = AS.addParamAttribute(Ctx, ArgNos,
                            Attribute::get(Ctx, Attribute::NonNull));
  CS.setAttributes(AS);

  return true;
}

static bool hasPositiveOperands(BinaryOperator *SDI, LazyValueInfo *LVI) {
  Constant *Zero = ConstantInt::get(SDI->getType(), 0);
  for (Value *O : SDI->operands()) {
    auto Result = LVI->getPredicateAt(ICmpInst::ICMP_SGE, O, Zero, SDI);
    if (Result != LazyValueInfo::True)
      return false;
  }
  return true;
}

/// Try to shrink a udiv/urem's width down to the smallest power of two that's
/// sufficient to contain its operands.
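///
/// Illustrative sketch (hypothetical IR): with both operands known to fit in
/// 16 bits, a 64-bit udiv shrinks to i16 and the result is zero-extended back:
///   %q = udiv i64 %a, %b
/// -->
///   %a.t = trunc i64 %a to i16
///   %b.t = trunc i64 %b to i16
///   %q.t = udiv i16 %a.t, %b.t
///   %q   = zext i16 %q.t to i64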
static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
  assert(Instr->getOpcode() == Instruction::UDiv ||
         Instr->getOpcode() == Instruction::URem);
  if (Instr->getType()->isVectorTy())
    return false;

  // Find the smallest power of two bitwidth that's sufficient to hold Instr's
  // operands.
  auto OrigWidth = Instr->getType()->getIntegerBitWidth();
  ConstantRange OperandRange(OrigWidth, /*isFullSet=*/false);
  for (Value *Operand : Instr->operands()) {
    OperandRange = OperandRange.unionWith(
        LVI->getConstantRange(Operand, Instr->getParent()));
  }
  // Don't shrink below 8 bits wide.
  unsigned NewWidth = std::max<unsigned>(
      PowerOf2Ceil(OperandRange.getUnsignedMax().getActiveBits()), 8);
  // NewWidth might be greater than OrigWidth if OrigWidth is not a power of
  // two.
  if (NewWidth >= OrigWidth)
    return false;

  ++NumUDivs;
  IRBuilder<> B{Instr};
  auto *TruncTy = Type::getIntNTy(Instr->getContext(), NewWidth);
  auto *LHS = B.CreateTruncOrBitCast(Instr->getOperand(0), TruncTy,
                                     Instr->getName() + ".lhs.trunc");
  auto *RHS = B.CreateTruncOrBitCast(Instr->getOperand(1), TruncTy,
                                     Instr->getName() + ".rhs.trunc");
  auto *BO = B.CreateBinOp(Instr->getOpcode(), LHS, RHS, Instr->getName());
  auto *Zext = B.CreateZExt(BO, Instr->getType(), Instr->getName() + ".zext");
  if (auto *BinOp = dyn_cast<BinaryOperator>(BO))
    if (BinOp->getOpcode() == Instruction::UDiv)
      BinOp->setIsExact(Instr->isExact());

  Instr->replaceAllUsesWith(Zext);
  Instr->eraseFromParent();
  return true;
}

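// Illustrative sketch (hypothetical IR): once both operands are known
// non-negative, a signed remainder is equivalent to an unsigned one:
//   %r = srem i32 %a, %b    ; %a >= 0 and %b >= 0 proven by LVI
// -->
//   %r = urem i32 %a, %b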
static bool processSRem(BinaryOperator *SDI, LazyValueInfo *LVI) {
  if (SDI->getType()->isVectorTy() || !hasPositiveOperands(SDI, LVI))
    return false;

  ++NumSRems;
  auto *BO = BinaryOperator::CreateURem(SDI->getOperand(0), SDI->getOperand(1),
                                        SDI->getName(), SDI);
  BO->setDebugLoc(SDI->getDebugLoc());
  SDI->replaceAllUsesWith(BO);
  SDI->eraseFromParent();

  // Try to process our new urem.
  processUDivOrURem(BO, LVI);

  return true;
}

/// See if LazyValueInfo's ability to exploit edge conditions or range
/// information is sufficient to prove that both operands of this SDiv are
/// positive. If this is the case, replace the SDiv with a UDiv. Even for local
/// conditions, this can sometimes prove conditions instcombine can't by
/// exploiting range information.
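///
/// Illustrative sketch (hypothetical IR):
///   %d = sdiv i32 %a, %b    ; both operands proven >= 0
/// -->
///   %d = udiv i32 %a, %b    ; possibly narrowed further by processUDivOrURem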
static bool processSDiv(BinaryOperator *SDI, LazyValueInfo *LVI) {
  if (SDI->getType()->isVectorTy() || !hasPositiveOperands(SDI, LVI))
    return false;

  ++NumSDivs;
  auto *BO = BinaryOperator::CreateUDiv(SDI->getOperand(0), SDI->getOperand(1),
                                        SDI->getName(), SDI);
  BO->setDebugLoc(SDI->getDebugLoc());
  BO->setIsExact(SDI->isExact());
  SDI->replaceAllUsesWith(BO);
  SDI->eraseFromParent();

  // Try to simplify our new udiv.
  processUDivOrURem(BO, LVI);

  return true;
}

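// Illustrative sketch (hypothetical IR): when the shifted value is known
// non-negative, an arithmetic shift right is the same as a logical one:
//   %s = ashr i32 %a, 3     ; %a >= 0 proven by LVI
// -->
//   %s = lshr i32 %a, 3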
static bool processAShr(BinaryOperator *SDI, LazyValueInfo *LVI) {
  if (SDI->getType()->isVectorTy())
    return false;

  Constant *Zero = ConstantInt::get(SDI->getType(), 0);
  if (LVI->getPredicateAt(ICmpInst::ICMP_SGE, SDI->getOperand(0), Zero, SDI) !=
      LazyValueInfo::True)
    return false;

  ++NumAShrs;
  auto *BO = BinaryOperator::CreateLShr(SDI->getOperand(0), SDI->getOperand(1),
                                        SDI->getName(), SDI);
  BO->setDebugLoc(SDI->getDebugLoc());
  BO->setIsExact(SDI->isExact());
  SDI->replaceAllUsesWith(BO);
  SDI->eraseFromParent();

  return true;
}

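// Illustrative sketch (hypothetical IR): a sign extension of a value known to
// be non-negative never replicates a set sign bit, so it becomes a zext:
//   %w = sext i16 %v to i32    ; %v >= 0 proven by LVI
// -->
//   %w = zext i16 %v to i32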
static bool processSExt(SExtInst *SDI, LazyValueInfo *LVI) {
  if (SDI->getType()->isVectorTy())
    return false;

  Value *Base = SDI->getOperand(0);

  Constant *Zero = ConstantInt::get(Base->getType(), 0);
  if (LVI->getPredicateAt(ICmpInst::ICMP_SGE, Base, Zero, SDI) !=
      LazyValueInfo::True)
    return false;

  ++NumSExt;
  auto *ZExt =
      CastInst::CreateZExtOrBitCast(Base, SDI->getType(), SDI->getName(), SDI);
  ZExt->setDebugLoc(SDI->getDebugLoc());
  SDI->replaceAllUsesWith(ZExt);
  SDI->eraseFromParent();

  return true;
}

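// Illustrative sketch (hypothetical IR): if LVI bounds both operands so the
// result cannot wrap, the corresponding flags are added to the existing
// instruction, e.g.
//   %s = add i8 %a, %b      ; %a, %b both proven in [0, 50)
// -->
//   %s = add nuw nsw i8 %a, %b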
static bool processBinOp(BinaryOperator *BinOp, LazyValueInfo *LVI) {
  using OBO = OverflowingBinaryOperator;

  if (DontAddNoWrapFlags)
    return false;

  if (BinOp->getType()->isVectorTy())
    return false;

  bool NSW = BinOp->hasNoSignedWrap();
  bool NUW = BinOp->hasNoUnsignedWrap();
  if (NSW && NUW)
    return false;

  BasicBlock *BB = BinOp->getParent();

  Instruction::BinaryOps Opcode = BinOp->getOpcode();
  Value *LHS = BinOp->getOperand(0);
  Value *RHS = BinOp->getOperand(1);

  ConstantRange LRange = LVI->getConstantRange(LHS, BB, BinOp);
  ConstantRange RRange = LVI->getConstantRange(RHS, BB, BinOp);

  bool Changed = false;
  bool NewNUW = false, NewNSW = false;
  if (!NUW) {
    ConstantRange NUWRange = ConstantRange::makeGuaranteedNoWrapRegion(
        Opcode, RRange, OBO::NoUnsignedWrap);
    NewNUW = NUWRange.contains(LRange);
    Changed |= NewNUW;
  }
  if (!NSW) {
    ConstantRange NSWRange = ConstantRange::makeGuaranteedNoWrapRegion(
        Opcode, RRange, OBO::NoSignedWrap);
    NewNSW = NSWRange.contains(LRange);
    Changed |= NewNSW;
  }

  setDeducedOverflowingFlags(BinOp, Opcode, NewNSW, NewNUW);

  return Changed;
}

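// Illustrative sketch (hypothetical IR): masking with a mask that already
// covers every bit LVI says can be set is a no-op, so the 'and' is removed:
//   %m = and i32 %x, 255    ; %x proven to be in [0, 200)
// -->
//   uses of %m are replaced with %x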
static bool processAnd(BinaryOperator *BinOp, LazyValueInfo *LVI) {
  if (BinOp->getType()->isVectorTy())
    return false;

  // Pattern match (and lhs, C) where C includes a superset of bits which might
  // be set in lhs. This is a common truncation idiom created by instcombine.
  BasicBlock *BB = BinOp->getParent();
  Value *LHS = BinOp->getOperand(0);
  ConstantInt *RHS = dyn_cast<ConstantInt>(BinOp->getOperand(1));
  if (!RHS || !RHS->getValue().isMask())
    return false;

  ConstantRange LRange = LVI->getConstantRange(LHS, BB, BinOp);
  if (!LRange.getUnsignedMax().ule(RHS->getValue()))
    return false;

  BinOp->replaceAllUsesWith(LHS);
  BinOp->eraseFromParent();
  NumAnd++;
  return true;
}

static Constant *getConstantAt(Value *V, Instruction *At, LazyValueInfo *LVI) {
  if (Constant *C = LVI->getConstant(V, At->getParent(), At))
    return C;

  // TODO: The following really should be sunk inside LVI's core algorithm, or
  // at least the outer shims around such.
  auto *C = dyn_cast<CmpInst>(V);
  if (!C) return nullptr;

  Value *Op0 = C->getOperand(0);
  Constant *Op1 = dyn_cast<Constant>(C->getOperand(1));
  if (!Op1) return nullptr;

  LazyValueInfo::Tristate Result =
      LVI->getPredicateAt(C->getPredicate(), Op0, Op1, At);
  if (Result == LazyValueInfo::Unknown)
    return nullptr;

  return (Result == LazyValueInfo::True) ?
      ConstantInt::getTrue(C->getContext()) :
      ConstantInt::getFalse(C->getContext());
}

static bool runImpl(Function &F, LazyValueInfo *LVI, DominatorTree *DT,
                    const SimplifyQuery &SQ) {
  bool FnChanged = false;
  // Visiting in a pre-order depth-first traversal causes us to simplify early
  // blocks before querying later blocks (which require us to analyze early
  // blocks). Eagerly simplifying shallow blocks means there is strictly less
  // work to do for deep blocks. This also means we don't visit unreachable
  // blocks.
  for (BasicBlock *BB : depth_first(&F.getEntryBlock())) {
    bool BBChanged = false;
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      Instruction *II = &*BI++;
      switch (II->getOpcode()) {
      case Instruction::Select:
        BBChanged |= processSelect(cast<SelectInst>(II), LVI);
        break;
      case Instruction::PHI:
        BBChanged |= processPHI(cast<PHINode>(II), LVI, DT, SQ);
        break;
      case Instruction::ICmp:
      case Instruction::FCmp:
        BBChanged |= processCmp(cast<CmpInst>(II), LVI);
        break;
      case Instruction::Load:
      case Instruction::Store:
        BBChanged |= processMemAccess(II, LVI);
        break;
      case Instruction::Call:
      case Instruction::Invoke:
        BBChanged |= processCallSite(CallSite(II), LVI);
        break;
      case Instruction::SRem:
        BBChanged |= processSRem(cast<BinaryOperator>(II), LVI);
        break;
      case Instruction::SDiv:
        BBChanged |= processSDiv(cast<BinaryOperator>(II), LVI);
        break;
      case Instruction::UDiv:
      case Instruction::URem:
        BBChanged |= processUDivOrURem(cast<BinaryOperator>(II), LVI);
        break;
      case Instruction::AShr:
        BBChanged |= processAShr(cast<BinaryOperator>(II), LVI);
        break;
      case Instruction::SExt:
        BBChanged |= processSExt(cast<SExtInst>(II), LVI);
        break;
      case Instruction::Add:
      case Instruction::Sub:
      case Instruction::Mul:
      case Instruction::Shl:
        BBChanged |= processBinOp(cast<BinaryOperator>(II), LVI);
        break;
      case Instruction::And:
        BBChanged |= processAnd(cast<BinaryOperator>(II), LVI);
        break;
      }
    }

    Instruction *Term = BB->getTerminator();
    switch (Term->getOpcode()) {
    case Instruction::Switch:
      BBChanged |= processSwitch(cast<SwitchInst>(Term), LVI, DT);
      break;
    case Instruction::Ret: {
      auto *RI = cast<ReturnInst>(Term);
      // Try to determine the return value if we can. This is mainly here to
      // simplify the writing of unit tests, but also helps to enable IPO by
      // constant folding the return values of callees.
      auto *RetVal = RI->getReturnValue();
      if (!RetVal) break; // handle "ret void"
      if (isa<Constant>(RetVal)) break; // nothing to do
      if (auto *C = getConstantAt(RetVal, RI, LVI)) {
        ++NumReturns;
        RI->replaceUsesOfWith(RetVal, C);
        BBChanged = true;
      }
    }
    }

    FnChanged |= BBChanged;
  }

  return FnChanged;
}

bool CorrelatedValuePropagation::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  LazyValueInfo *LVI = &getAnalysis<LazyValueInfoWrapperPass>().getLVI();
  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  return runImpl(F, LVI, DT, getBestSimplifyQuery(*this, F));
}

PreservedAnalyses
CorrelatedValuePropagationPass::run(Function &F, FunctionAnalysisManager &AM) {
  LazyValueInfo *LVI = &AM.getResult<LazyValueAnalysis>(F);
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);

  bool Changed = runImpl(F, LVI, DT, getBestSimplifyQuery(AM, F));

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<GlobalsAA>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LazyValueAnalysis>();
  return PA;
}